commit 774f76f75b by Blake Blackshear, 2025-11-17 14:12:10 +00:00, committed via GitHub
No known key found for this signature in database; GPG Key ID: B5690EEEBB952194
1111 changed files with 80029 additions and 14643 deletions

View File

@@ -0,0 +1,6 @@
---
globs: ["**/*.ts", "**/*.tsx"]
alwaysApply: false
---
Never write strings in the frontend directly; always write to and reference the relevant translations file.

View File

@@ -23,7 +23,7 @@ jobs:
    name: AMD64 Build
    steps:
      - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          persist-credentials: false
      - name: Set up QEMU and Buildx
@@ -47,7 +47,7 @@ jobs:
    name: ARM Build
    steps:
      - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          persist-credentials: false
      - name: Set up QEMU and Buildx
@@ -77,42 +77,12 @@ jobs:
            rpi.tags=${{ steps.setup.outputs.image-name }}-rpi
            *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
            *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max
-  jetson_jp5_build:
-    if: false
-    runs-on: ubuntu-22.04
-    name: Jetson Jetpack 5
-    steps:
-      - name: Check out code
-        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - name: Set up QEMU and Buildx
-        id: setup
-        uses: ./.github/actions/setup
-        with:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      - name: Build and push TensorRT (Jetson, Jetpack 5)
-        env:
-          ARCH: arm64
-          BASE_IMAGE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
-          SLIM_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
-          TRT_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
-        uses: docker/bake-action@v6
-        with:
-          source: .
-          push: true
-          targets: tensorrt
-          files: docker/tensorrt/trt.hcl
-          set: |
-            tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt-jp5
-            *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5
-            *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5,mode=max
  jetson_jp6_build:
    runs-on: ubuntu-22.04-arm
    name: Jetson Jetpack 6
    steps:
      - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          persist-credentials: false
      - name: Set up QEMU and Buildx
@@ -143,7 +113,7 @@ jobs:
      - amd64_build
    steps:
      - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          persist-credentials: false
      - name: Set up QEMU and Buildx
@@ -185,7 +155,7 @@ jobs:
      - arm64_build
    steps:
      - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          persist-credentials: false
      - name: Set up QEMU and Buildx
@@ -203,6 +173,31 @@ jobs:
          set: |
            rk.tags=${{ steps.setup.outputs.image-name }}-rk
            *.cache-from=type=gha
+  synaptics_build:
+    runs-on: ubuntu-22.04-arm
+    name: Synaptics Build
+    needs:
+      - arm64_build
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v5
+        with:
+          persist-credentials: false
+      - name: Set up QEMU and Buildx
+        id: setup
+        uses: ./.github/actions/setup
+        with:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - name: Build and push Synaptics build
+        uses: docker/bake-action@v6
+        with:
+          source: .
+          push: true
+          targets: synaptics
+          files: docker/synaptics/synaptics.hcl
+          set: |
+            synaptics.tags=${{ steps.setup.outputs.image-name }}-synaptics
+            *.cache-from=type=gha
  # The majority of users running arm64 are rpi users, so the rpi
  # build should be the primary arm64 image
  assemble_default_build:
@@ -217,7 +212,7 @@ jobs:
        with:
          string: ${{ github.repository }}
      - name: Log in to the Container registry
-        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1
        with:
          registry: ghcr.io
          username: ${{ github.actor }}

View File

@@ -4,43 +4,19 @@ on:
  pull_request:
    paths-ignore:
      - "docs/**"
-      - ".github/**"
+      - ".github/*.yml"
+      - ".github/DISCUSSION_TEMPLATE/**"
+      - ".github/ISSUE_TEMPLATE/**"
env:
  DEFAULT_PYTHON: 3.11
jobs:
-  build_devcontainer:
-    runs-on: ubuntu-latest
-    name: Build Devcontainer
-    # The Dockerfile contains features that requires buildkit, and since the
-    # devcontainer cli uses docker-compose to build the image, the only way to
-    # ensure docker-compose uses buildkit is to explicitly enable it.
-    env:
-      DOCKER_BUILDKIT: "1"
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - uses: actions/setup-node@master
-        with:
-          node-version: 20.x
-      - name: Install devcontainer cli
-        run: npm install --global @devcontainers/cli
-      - name: Build devcontainer
-        run: devcontainer build --workspace-folder .
-      # It would be nice to also test the following commands, but for some
-      # reason they don't work even though in VS Code devcontainer works.
-      # - name: Start devcontainer
-      #   run: devcontainer up --workspace-folder .
-      # - name: Run devcontainer scripts
-      #   run: devcontainer run-user-commands --workspace-folder .
  web_lint:
    name: Web - Lint
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          persist-credentials: false
      - uses: actions/setup-node@master
@@ -56,7 +32,7 @@ jobs:
    name: Web - Test
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          persist-credentials: false
      - uses: actions/setup-node@master
@@ -76,7 +52,7 @@ jobs:
    name: Python Checks
    steps:
      - name: Check out the repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          persist-credentials: false
      - name: Set up Python ${{ env.DEFAULT_PYTHON }}
@@ -99,16 +75,21 @@ jobs:
    name: Python Tests
    steps:
      - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          persist-credentials: false
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v3
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-      - name: Build
-        run: make
-      - name: Run mypy
-        run: docker run --rm --entrypoint=python3 frigate:latest -u -m mypy --config-file frigate/mypy.ini frigate
-      - name: Run tests
-        run: docker run --rm --entrypoint=python3 frigate:latest -u -m unittest
+      - uses: actions/setup-node@master
+        with:
+          node-version: 20.x
+      - name: Install devcontainer cli
+        run: npm install --global @devcontainers/cli
+      - name: Build devcontainer
+        env:
+          DOCKER_BUILDKIT: "1"
+        run: devcontainer build --workspace-folder .
+      - name: Start devcontainer
+        run: devcontainer up --workspace-folder .
+      - name: Run mypy in devcontainer
+        run: devcontainer exec --workspace-folder . bash -lc "python3 -u -m mypy --config-file frigate/mypy.ini frigate"
+      - name: Run unit tests in devcontainer
+        run: devcontainer exec --workspace-folder . bash -lc "python3 -u -m unittest"

View File

@@ -10,7 +10,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          persist-credentials: false
      - id: lowercaseRepo
@@ -18,7 +18,7 @@ jobs:
        with:
          string: ${{ github.repository }}
      - name: Log in to the Container registry
-        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1
        with:
          registry: ghcr.io
          username: ${{ github.actor }}

.gitignore
View File

@@ -15,6 +15,7 @@ frigate/version.py
web/build
web/node_modules
web/coverage
+web/.env
core
!/web/**/*.ts
.idea/*

View File

@@ -1,7 +1,7 @@
default_target: local
COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1)
-VERSION = 0.16.2
+VERSION = 0.17.0
IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate
GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD)
BOARDS= #Initialized empty
@@ -14,12 +14,19 @@ push-boards: $(BOARDS:%=push-%)
version:
	echo 'VERSION = "$(VERSION)-$(COMMIT_HASH)"' > frigate/version.py
+	echo 'VITE_GIT_COMMIT_HASH=$(COMMIT_HASH)' > web/.env
local: version
	docker buildx build --target=frigate --file docker/main/Dockerfile . \
		--tag frigate:latest \
		--load
+debug: version
+	docker buildx build --target=frigate --file docker/main/Dockerfile . \
+		--build-arg DEBUG=true \
+		--tag frigate:latest \
+		--load
amd64:
	docker buildx build --target=frigate --file docker/main/Dockerfile . \
		--tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) \

View File

@@ -4,13 +4,13 @@ from statistics import mean
import numpy as np
-import frigate.util as util
from frigate.config import DetectorTypeEnum
from frigate.object_detection.base import (
    ObjectDetectProcess,
    RemoteObjectDetector,
    load_labels,
)
+from frigate.util.process import FrigateProcess
my_frame = np.expand_dims(np.full((300, 300, 3), 1, np.uint8), axis=0)
labels = load_labels("/labelmap.txt")
@@ -91,7 +91,7 @@ edgetpu_process_2 = ObjectDetectProcess(
)
for x in range(0, 10):
-    camera_process = util.Process(
+    camera_process = FrigateProcess(
        target=start, args=(x, 300, detection_queue, events[str(x)])
    )
    camera_process.daemon = True

View File

@@ -55,7 +55,7 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
FROM scratch AS go2rtc
ARG TARGETARCH
WORKDIR /rootfs/usr/local/go2rtc/bin
-ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.9/go2rtc_linux_${TARGETARCH}" go2rtc
+ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.10/go2rtc_linux_${TARGETARCH}" go2rtc
FROM wget AS tempio
ARG TARGETARCH
@@ -148,6 +148,7 @@ RUN --mount=type=bind,source=docker/main/install_s6_overlay.sh,target=/deps/install_s6_overlay.sh
FROM base AS wheels
ARG DEBIAN_FRONTEND
ARG TARGETARCH
+ARG DEBUG=false
# Use a separate container to build wheels to prevent build dependencies in final image
RUN apt-get -qq update \
@@ -177,6 +178,8 @@ RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
    && python3 get-pip.py "pip"
COPY docker/main/requirements.txt /requirements.txt
+COPY docker/main/requirements-dev.txt /requirements-dev.txt
RUN pip3 install -r /requirements.txt
# Build pysqlite3 from source
@@ -184,7 +187,10 @@ COPY docker/main/build_pysqlite3.sh /build_pysqlite3.sh
RUN /build_pysqlite3.sh
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
-RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt
+RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt && \
+    if [ "$DEBUG" = "true" ]; then \
+        pip3 wheel --wheel-dir=/wheels -r /requirements-dev.txt; \
+    fi
# Install HailoRT & Wheels
RUN --mount=type=bind,source=docker/main/install_hailort.sh,target=/deps/install_hailort.sh \
@@ -206,6 +212,7 @@ COPY docker/main/rootfs/ /
# Frigate deps (ffmpeg, python, nginx, go2rtc, s6-overlay, etc)
FROM slim-base AS deps
ARG TARGETARCH
+ARG BASE_IMAGE
ARG DEBIAN_FRONTEND
# http://stackoverflow.com/questions/48162574/ddg#49462622
@@ -224,9 +231,15 @@ ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1
# Set OpenCV ffmpeg loglevel to fatal: https://ffmpeg.org/doxygen/trunk/log_8h.html
ENV OPENCV_FFMPEG_LOGLEVEL=8
+# Set NumPy to ignore getlimits warning
+ENV PYTHONWARNINGS="ignore:::numpy.core.getlimits"
# Set HailoRT to disable logging
ENV HAILORT_LOGGER_PATH=NONE
+# TensorFlow error only
+ENV TF_CPP_MIN_LOG_LEVEL=3
ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
# Install dependencies
@@ -243,6 +256,10 @@ RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
    pip3 install -U /deps/wheels/*.whl
+# Install MemryX runtime (requires libgomp (OpenMP) in the final docker image)
+RUN --mount=type=bind,source=docker/main/install_memryx.sh,target=/deps/install_memryx.sh \
+    bash -c "bash /deps/install_memryx.sh"
COPY --from=deps-rootfs / /
RUN ldconfig

View File

@@ -5,6 +5,12 @@ set -euxo pipefail
SQLITE3_VERSION="3.46.1"
PYSQLITE3_VERSION="0.5.3"
+# Install libsqlite3-dev if not present (needed for some base images like NVIDIA TensorRT)
+if ! dpkg -l | grep -q libsqlite3-dev; then
+    echo "Installing libsqlite3-dev for compilation..."
+    apt-get update && apt-get install -y libsqlite3-dev && rm -rf /var/lib/apt/lists/*
+fi
# Fetch the pre-built sqlite amalgamation instead of building from source
if [[ ! -d "sqlite" ]]; then
    mkdir sqlite

View File

@@ -19,7 +19,9 @@ apt-get -qq install --no-install-recommends -y \
    nethogs \
    libgl1 \
    libglib2.0-0 \
-    libusb-1.0.0
+    libusb-1.0.0 \
+    python3-h2 \
+    libgomp1 # memryx detector
update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1
@@ -31,6 +33,18 @@ unset DEBIAN_FRONTEND
yes | dpkg -i /tmp/libedgetpu1-max.deb && export DEBIAN_FRONTEND=noninteractive
rm /tmp/libedgetpu1-max.deb
+# install mesa-teflon-delegate from bookworm-backports
+# Only available for arm64 at the moment
+if [[ "${TARGETARCH}" == "arm64" ]]; then
+    if [[ "${BASE_IMAGE}" == *"nvcr.io/nvidia/tensorrt"* ]]; then
+        echo "Info: Skipping apt-get commands because BASE_IMAGE includes 'nvcr.io/nvidia/tensorrt' for arm64."
+    else
+        echo "deb http://deb.debian.org/debian bookworm-backports main" | tee /etc/apt/sources.list.d/bookworm-backbacks.list
+        apt-get -qq update
+        apt-get -qq install --no-install-recommends --no-install-suggests -y mesa-teflon-delegate/bookworm-backports
+    fi
+fi
# ffmpeg -> amd64
if [[ "${TARGETARCH}" == "amd64" ]]; then
    mkdir -p /usr/lib/ffmpeg/5.0
@@ -78,11 +92,41 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
    echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list
    apt-get -qq update
    apt-get -qq install --no-install-recommends --no-install-suggests -y \
-        intel-opencl-icd=24.35.30872.31-996~22.04 intel-level-zero-gpu=1.3.29735.27-914~22.04 intel-media-va-driver-non-free=24.3.3-996~22.04 \
-        libmfx1=23.2.2-880~22.04 libmfxgen1=24.2.4-914~22.04 libvpl2=1:2.13.0.0-996~22.04
+        intel-media-va-driver-non-free libmfx1 libmfxgen1 libvpl2
+    apt-get -qq install -y ocl-icd-libopencl1
+    # install libtbb12 for NPU support
+    apt-get -qq install -y libtbb12
    rm -f /usr/share/keyrings/intel-graphics.gpg
    rm -f /etc/apt/sources.list.d/intel-gpu-jammy.list
+    # install legacy and standard intel icd and level-zero-gpu
+    # see https://github.com/intel/compute-runtime/blob/master/LEGACY_PLATFORMS.md for more info
+    # needed core package
+    wget https://github.com/intel/compute-runtime/releases/download/24.52.32224.5/libigdgmm12_22.5.5_amd64.deb
+    dpkg -i libigdgmm12_22.5.5_amd64.deb
+    rm libigdgmm12_22.5.5_amd64.deb
+    # legacy packages
+    wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.36/intel-opencl-icd-legacy1_24.35.30872.36_amd64.deb
+    wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.36/intel-level-zero-gpu-legacy1_1.5.30872.36_amd64.deb
+    wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.24/intel-igc-opencl_1.0.17537.24_amd64.deb
+    wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.24/intel-igc-core_1.0.17537.24_amd64.deb
+    # standard packages
+    wget https://github.com/intel/compute-runtime/releases/download/24.52.32224.5/intel-opencl-icd_24.52.32224.5_amd64.deb
+    wget https://github.com/intel/compute-runtime/releases/download/24.52.32224.5/intel-level-zero-gpu_1.6.32224.5_amd64.deb
+    wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.5.6/intel-igc-opencl-2_2.5.6+18417_amd64.deb
+    wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.5.6/intel-igc-core-2_2.5.6+18417_amd64.deb
+    # npu packages
+    wget https://github.com/oneapi-src/level-zero/releases/download/v1.21.9/level-zero_1.21.9+u22.04_amd64.deb
+    wget https://github.com/intel/linux-npu-driver/releases/download/v1.17.0/intel-driver-compiler-npu_1.17.0.20250508-14912879441_ubuntu22.04_amd64.deb
+    wget https://github.com/intel/linux-npu-driver/releases/download/v1.17.0/intel-fw-npu_1.17.0.20250508-14912879441_ubuntu22.04_amd64.deb
+    wget https://github.com/intel/linux-npu-driver/releases/download/v1.17.0/intel-level-zero-npu_1.17.0.20250508-14912879441_ubuntu22.04_amd64.deb
+    dpkg -i *.deb
+    rm *.deb
fi
if [[ "${TARGETARCH}" == "arm64" ]]; then

View File

@@ -0,0 +1,31 @@
#!/bin/bash
set -e
# Download the MxAccl for Frigate github release
wget https://github.com/memryx/mx_accl_frigate/archive/refs/tags/v2.1.0.zip -O /tmp/mxaccl.zip
unzip /tmp/mxaccl.zip -d /tmp
mv /tmp/mx_accl_frigate-2.1.0 /opt/mx_accl_frigate
rm /tmp/mxaccl.zip
# Install Python dependencies
pip3 install -r /opt/mx_accl_frigate/freeze
# Link the Python package dynamically
SITE_PACKAGES=$(python3 -c "import site; print(site.getsitepackages()[0])")
ln -s /opt/mx_accl_frigate/memryx "$SITE_PACKAGES/memryx"
# Copy architecture-specific shared libraries
ARCH=$(uname -m)
if [[ "$ARCH" == "x86_64" ]]; then
cp /opt/mx_accl_frigate/memryx/x86/libmemx.so* /usr/lib/x86_64-linux-gnu/
cp /opt/mx_accl_frigate/memryx/x86/libmx_accl.so* /usr/lib/x86_64-linux-gnu/
elif [[ "$ARCH" == "aarch64" ]]; then
cp /opt/mx_accl_frigate/memryx/arm/libmemx.so* /usr/lib/aarch64-linux-gnu/
cp /opt/mx_accl_frigate/memryx/arm/libmx_accl.so* /usr/lib/aarch64-linux-gnu/
else
echo "Unsupported architecture: $ARCH"
exit 1
fi
# Refresh linker cache
ldconfig

View File

@@ -1 +1,4 @@
ruff
+# types
+types-peewee == 3.17.*

View File

@@ -1,24 +1,28 @@
aiofiles == 24.1.*
click == 8.1.*
# FastAPI
-aiohttp == 3.11.3
-starlette == 0.41.2
-starlette-context == 0.3.6
-fastapi == 0.115.*
-uvicorn == 0.30.*
+aiohttp == 3.12.*
+starlette == 0.47.*
+starlette-context == 0.4.*
+fastapi[standard-no-fastapi-cloud-cli] == 0.116.*
+uvicorn == 0.35.*
slowapi == 0.1.*
-joserfc == 1.0.*
-pathvalidate == 3.2.*
+joserfc == 1.2.*
+cryptography == 44.0.*
+pathvalidate == 3.3.*
markupsafe == 3.0.*
-python-multipart == 0.0.12
+python-multipart == 0.0.20
+# Classification Model Training
+tensorflow == 2.19.* ; platform_machine == 'aarch64'
+tensorflow-cpu == 2.19.* ; platform_machine == 'x86_64'
# General
mypy == 1.6.1
-onvif-zeep-async == 3.1.*
+onvif-zeep-async == 4.0.*
paho-mqtt == 2.1.*
pandas == 2.2.*
peewee == 3.17.*
peewee_migrate == 1.13.*
-psutil == 6.1.*
+psutil == 7.1.*
pydantic == 2.10.*
git+https://github.com/fbcotter/py3nvml#egg=py3nvml
pytz == 2025.*
@@ -27,7 +31,7 @@ ruamel.yaml == 0.18.*
tzlocal == 5.2
requests == 2.32.*
types-requests == 2.32.*
-norfair == 2.2.*
+norfair == 2.3.*
setproctitle == 1.3.*
ws4py == 0.5.*
unidecode == 1.3.*
@@ -36,16 +40,15 @@ titlecase == 2.4.*
numpy == 1.26.*
opencv-python-headless == 4.11.0.*
opencv-contrib-python == 4.11.0.*
-scipy == 1.14.*
+scipy == 1.16.*
# OpenVino & ONNX
-openvino == 2024.4.*
-onnxruntime-openvino == 1.20.* ; platform_machine == 'x86_64'
-onnxruntime == 1.20.* ; platform_machine == 'aarch64'
+openvino == 2025.3.*
+onnxruntime == 1.22.*
# Embeddings
transformers == 4.45.*
# Generative AI
google-generativeai == 0.8.*
-ollama == 0.3.*
+ollama == 0.5.*
openai == 1.65.*
# push notifications
py-vapid == 1.9.*
@@ -53,7 +56,7 @@ pywebpush == 2.0.*
# alpr
pyclipper == 1.3.*
shapely == 2.0.*
-Levenshtein==0.26.*
+rapidfuzz==3.12.*
# HailoRT Wheels
appdirs==1.4.*
argcomplete==2.0.*
@@ -71,3 +74,10 @@ prometheus-client == 0.21.*
# TFLite
tflite_runtime @ https://github.com/frigate-nvr/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_x86_64.whl; platform_machine == 'x86_64'
tflite_runtime @ https://github.com/feranick/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_aarch64.whl; platform_machine == 'aarch64'
+# audio transcription
+sherpa-onnx==1.12.*
+faster-whisper==1.1.*
+librosa==0.11.*
+soundfile==0.13.*
+# DeGirum detector
+degirum == 0.16.*

View File

@@ -1,2 +1 @@
scikit-build == 0.18.*
-nvidia-pyindex

View File

@@ -10,7 +10,7 @@ echo "[INFO] Starting certsync..."
lefile="/etc/letsencrypt/live/frigate/fullchain.pem"
-tls_enabled=`python3 /usr/local/nginx/get_tls_settings.py | jq -r .enabled`
+tls_enabled=`python3 /usr/local/nginx/get_listen_settings.py | jq -r .tls.enabled`
while true
do

View File

@@ -50,6 +50,38 @@ function set_libva_version() {
    export LIBAVFORMAT_VERSION_MAJOR
}
+function setup_homekit_config() {
+    local config_path="$1"
+    if [[ ! -f "${config_path}" ]]; then
+        echo "[INFO] Creating empty HomeKit config file..."
+        echo '{}' > "${config_path}"
+    fi
+    # Convert YAML to JSON for jq processing
+    local temp_json="/tmp/cache/homekit_config.json"
+    yq eval -o=json "${config_path}" > "${temp_json}" 2>/dev/null || {
+        echo "[WARNING] Failed to convert HomeKit config to JSON, skipping cleanup"
+        return 0
+    }
+    # Use jq to filter and keep only the homekit section
+    local cleaned_json="/tmp/cache/homekit_cleaned.json"
+    jq '
+        # Keep only the homekit section if it exists, otherwise empty object
+        if has("homekit") then {homekit: .homekit} else {homekit: {}} end
+    ' "${temp_json}" > "${cleaned_json}" 2>/dev/null || echo '{"homekit": {}}' > "${cleaned_json}"
+    # Convert back to YAML and write to the config file
+    yq eval -P "${cleaned_json}" > "${config_path}" 2>/dev/null || {
+        echo "[WARNING] Failed to convert cleaned config to YAML, creating minimal config"
+        echo '{"homekit": {}}' > "${config_path}"
+    }
+    # Clean up temp files
+    rm -f "${temp_json}" "${cleaned_json}"
+}
set_libva_version
if [[ -f "/dev/shm/go2rtc.yaml" ]]; then
@@ -70,6 +102,10 @@ else
    echo "[WARNING] Unable to remove existing go2rtc config. Changes made to your frigate config file may not be recognized. Please remove the /dev/shm/go2rtc.yaml from your docker host manually."
fi
+# HomeKit configuration persistence setup
+readonly homekit_config_path="/config/go2rtc_homekit.yml"
+setup_homekit_config "${homekit_config_path}"
readonly config_path="/config"
if [[ -x "${config_path}/go2rtc" ]]; then
@@ -82,5 +118,7 @@ fi
echo "[INFO] Starting go2rtc..."
# Replace the bash process with the go2rtc process, redirecting stderr to stdout
+# Use HomeKit config as the primary config so writebacks go there
+# The main config from Frigate will be loaded as a secondary config
exec 2>&1
-exec "${binary_path}" -config=/dev/shm/go2rtc.yaml
+exec "${binary_path}" -config="${homekit_config_path}" -config=/dev/shm/go2rtc.yaml

View File

@@ -85,7 +85,7 @@ python3 /usr/local/nginx/get_base_path.py | \
    -out /usr/local/nginx/conf/base_path.conf
# build templates for optional TLS support
-python3 /usr/local/nginx/get_tls_settings.py | \
+python3 /usr/local/nginx/get_listen_settings.py | \
    tempio -template /usr/local/nginx/templates/listen.gotmpl \
    -out /usr/local/nginx/conf/listen.conf

View File

@@ -17,7 +17,9 @@ http {
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
        '$status $body_bytes_sent "$http_referer" '
-        '"$http_user_agent" "$http_x_forwarded_for"';
+        '"$http_user_agent" "$http_x_forwarded_for" '
+        'request_time="$request_time" upstream_response_time="$upstream_response_time"';
    access_log /dev/stdout main;
@@ -71,6 +73,8 @@ http {
    vod_manifest_segment_durations_mode accurate;
    vod_ignore_edit_list on;
    vod_segment_duration 10000;
+    # MPEG-TS settings (not used when fMP4 is enabled, kept for reference)
    vod_hls_mpegts_align_frames off;
    vod_hls_mpegts_interleave_frames on;
@@ -103,6 +107,10 @@ http {
    aio threads;
    vod hls;
+    # Use fMP4 (fragmented MP4) instead of MPEG-TS for better performance
+    # Smaller segments, faster generation, better browser compatibility
+    vod_hls_container_format fmp4;
    secure_token $args;
    secure_token_types application/vnd.apple.mpegurl;
@@ -272,6 +280,18 @@ http {
        include proxy.conf;
    }
+    # Allow unauthenticated access to the first_time_login endpoint
+    # so the login page can load help text before authentication.
+    location /api/auth/first_time_login {
+        auth_request off;
+        limit_except GET {
+            deny all;
+        }
+        rewrite ^/api(/.*)$ $1 break;
+        proxy_pass http://frigate_api;
+        include proxy.conf;
+    }
    location /api/stats {
        include auth_request.conf;
        access_log off;

View File

@@ -26,6 +26,10 @@ try:
except FileNotFoundError:
    config: dict[str, Any] = {}
-tls_config: dict[str, Any] = config.get("tls", {"enabled": True})
-print(json.dumps(tls_config))
+tls_config: dict[str, any] = config.get("tls", {"enabled": True})
+networking_config = config.get("networking", {})
+ipv6_config = networking_config.get("ipv6", {"enabled": False})
+output = {"tls": tls_config, "ipv6": ipv6_config}
+print(json.dumps(output))
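The combined output shape is easy to check in isolation; a standalone sketch, with a hard-coded dict standing in for the parsed Frigate config:

```python
# Standalone sketch of the combined listen-settings output contract;
# the real script reads the parsed Frigate config, a literal stands in here.
import json

config = {"tls": {"enabled": True}, "networking": {"ipv6": {"enabled": True}}}

tls_config = config.get("tls", {"enabled": True})
ipv6_config = config.get("networking", {}).get("ipv6", {"enabled": False})

print(json.dumps({"tls": tls_config, "ipv6": ipv6_config}))
# -> {"tls": {"enabled": true}, "ipv6": {"enabled": true}}
# certsync consumes the same output with: jq -r .tls.enabled
```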

View File

@@ -1,33 +1,45 @@
-# intended for internal traffic, not protected by auth
-listen 5000;
-{{ if not .enabled }}
-# intended for external traffic, protected by auth
-listen 8971;
-{{ else }}
-# intended for external traffic, protected by auth
-listen 8971 ssl;
-ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem;
-ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem;
-# generated 2024-06-01, Mozilla Guideline v5.7, nginx 1.25.3, OpenSSL 1.1.1w, modern configuration, no OCSP
-# https://ssl-config.mozilla.org/#server=nginx&version=1.25.3&config=modern&openssl=1.1.1w&ocsp=false&guideline=5.7
-ssl_session_timeout 1d;
-ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
-ssl_session_tickets off;
-# modern configuration
-ssl_protocols TLSv1.3;
-ssl_prefer_server_ciphers off;
-# HSTS (ngx_http_headers_module is required) (63072000 seconds)
-add_header Strict-Transport-Security "max-age=63072000" always;
-# ACME challenge location
-location /.well-known/acme-challenge/ {
-    default_type "text/plain";
-    root /etc/letsencrypt/www;
-}
-{{ end }}
+# Internal (IPv4 always; IPv6 optional)
+listen 5000;
+{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:5000;{{ end }}{{ end }}
+# intended for external traffic, protected by auth
+{{ if .tls }}
+{{ if .tls.enabled }}
+# external HTTPS (IPv4 always; IPv6 optional)
+listen 8971 ssl;
+{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971 ssl;{{ end }}{{ end }}
+ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem;
+ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem;
+# generated 2024-06-01, Mozilla Guideline v5.7, nginx 1.25.3, OpenSSL 1.1.1w, modern configuration, no OCSP
+# https://ssl-config.mozilla.org/#server=nginx&version=1.25.3&config=modern&openssl=1.1.1w&ocsp=false&guideline=5.7
+ssl_session_timeout 1d;
+ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
+ssl_session_tickets off;
+# modern configuration
+ssl_protocols TLSv1.3;
+ssl_prefer_server_ciphers off;
+# HSTS (ngx_http_headers_module is required) (63072000 seconds)
+add_header Strict-Transport-Security "max-age=63072000" always;
+# ACME challenge location
+location /.well-known/acme-challenge/ {
+    default_type "text/plain";
+    root /etc/letsencrypt/www;
+}
+{{ else }}
+# external HTTP (IPv4 always; IPv6 optional)
+listen 8971;
+{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }}
+{{ end }}
+{{ else }}
+# (No tls section) default to HTTP (IPv4 always; IPv6 optional)
+listen 8971;
+{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }}
+{{ end }}

View File

@@ -0,0 +1,44 @@
#!/bin/bash
set -e # Exit immediately if any command fails
set -o pipefail
echo "Starting MemryX driver and runtime installation..."
# Detect architecture
arch=$(uname -m)
# Purge existing packages and repo
echo "Removing old MemryX installations..."
# Remove any holds on MemryX packages (if they exist)
sudo apt-mark unhold memx-* mxa-manager || true
sudo apt purge -y memx-* mxa-manager || true
sudo rm -f /etc/apt/sources.list.d/memryx.list /etc/apt/trusted.gpg.d/memryx.asc
# Install kernel headers
echo "Installing kernel headers for: $(uname -r)"
sudo apt update
sudo apt install -y dkms linux-headers-$(uname -r)
# Add MemryX key and repo
echo "Adding MemryX GPG key and repository..."
wget -qO- https://developer.memryx.com/deb/memryx.asc | sudo tee /etc/apt/trusted.gpg.d/memryx.asc >/dev/null
echo 'deb https://developer.memryx.com/deb stable main' | sudo tee /etc/apt/sources.list.d/memryx.list >/dev/null
# Update and install specific SDK 2.1 packages
echo "Installing MemryX SDK 2.1 packages..."
sudo apt update
sudo apt install -y memx-drivers=2.1.* memx-accl=2.1.* mxa-manager=2.1.*
# Hold packages to prevent automatic upgrades
sudo apt-mark hold memx-drivers memx-accl mxa-manager
# ARM-specific board setup
if [[ "$arch" == "aarch64" || "$arch" == "arm64" ]]; then
echo "Running ARM board setup..."
sudo mx_arm_setup
fi
echo -e "\n\n\033[1;31mYOU MUST RESTART YOUR COMPUTER NOW\033[0m\n\n"
echo "MemryX SDK 2.1 installation complete!"

View File

@@ -11,7 +11,8 @@ COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt
RUN sed -i "/https:\/\//d" /requirements-wheels.txt
RUN sed -i "/onnxruntime/d" /requirements-wheels.txt
-RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt
+RUN sed -i '/\[.*\]/d' /requirements-wheels.txt \
+    && pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt
RUN rm -rf /rk-wheels/opencv_python-*
RUN rm -rf /rk-wheels/torch-*

View File

@@ -2,7 +2,7 @@
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
-ARG ROCM=6.3.3
+ARG ROCM=1
ARG AMDGPU=gfx900
ARG HSA_OVERRIDE_GFX_VERSION
ARG HSA_OVERRIDE
@@ -13,16 +13,16 @@ FROM wget AS rocm
ARG ROCM
ARG AMDGPU
-RUN apt update && \
+RUN apt update -qq && \
    apt install -y wget gpg && \
-    wget -O rocm.deb https://repo.radeon.com/amdgpu-install/$ROCM/ubuntu/jammy/amdgpu-install_6.3.60303-1_all.deb && \
+    wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.0.2/ubuntu/jammy/amdgpu-install_7.0.2.70002-1_all.deb && \
    apt install -y ./rocm.deb && \
    apt update && \
-    apt install -y rocm
+    apt install -qq -y rocm
RUN mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib
RUN cd /opt/rocm-$ROCM/lib && \
-    cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* libroctracer*.so* librocsolver*.so* librocfft*.so* librocprofiler*.so* libroctx*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/ && \
+    cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* libroctracer*.so* librocsolver*.so* librocfft*.so* librocprofiler*.so* libroctx*.so* librocroller.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/ && \
    mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib/migraphx/lib && \
    cp -dpr migraphx/lib/* /opt/rocm-dist/opt/rocm-$ROCM/lib/migraphx/lib
RUN cd /opt/rocm-dist/opt/ && ln -s rocm-$ROCM rocm
@@ -33,7 +33,10 @@ RUN echo /opt/rocm/lib|tee /opt/rocm-dist/etc/ld.so.conf.d/rocm.conf
#######################################################################
FROM deps AS deps-prelim
-RUN apt-get update && apt-get install -y libnuma1
+COPY docker/rocm/debian-backports.sources /etc/apt/sources.list.d/debian-backports.sources
+RUN apt-get update && \
+    apt-get install -y libnuma1 && \
+    apt-get install -qq -y -t bookworm-backports mesa-va-drivers mesa-vulkan-drivers
WORKDIR /opt/frigate
COPY --from=rootfs / /
@@ -44,7 +47,7 @@ RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
RUN python3 -m pip config set global.break-system-packages true
COPY docker/rocm/requirements-wheels-rocm.txt /requirements.txt
-RUN pip3 uninstall -y onnxruntime-openvino \
+RUN pip3 uninstall -y onnxruntime \
    && pip3 install -r /requirements.txt
#######################################################################
@@ -61,9 +64,10 @@ COPY --from=rocm /opt/rocm-dist/ /
#######################################################################
FROM deps-prelim AS rocm-prelim-hsa-override0
-ENV HSA_ENABLE_SDMA=0
-ENV MIGRAPHX_ENABLE_NHWC=1
-ENV TF_ROCM_USE_IMMEDIATE_MODE=1
+ENV MIGRAPHX_DISABLE_MIOPEN_FUSION=1
+ENV MIGRAPHX_DISABLE_SCHEDULE_PASS=1
+ENV MIGRAPHX_DISABLE_REDUCE_FUSION=1
+ENV MIGRAPHX_ENABLE_HIPRTC_WORKAROUNDS=1
COPY --from=rocm-dist / /

View File

@@ -0,0 +1,6 @@
Types: deb
URIs: http://deb.debian.org/debian
Suites: bookworm-backports
Components: main
Enabled: yes
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg

View File

@@ -1 +1 @@
-onnxruntime-rocm @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v6.3.3/onnxruntime_rocm-1.20.1-cp311-cp311-linux_x86_64.whl
+onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.0.2/onnxruntime_migraphx-1.23.1-cp311-cp311-linux_x86_64.whl

View File

@@ -2,7 +2,7 @@ variable "AMDGPU" {
  default = "gfx900"
}
variable "ROCM" {
-  default = "6.3.3"
+  default = "7.0.2"
}
variable "HSA_OVERRIDE_GFX_VERSION" {
  default = ""

View File

@@ -0,0 +1,28 @@
# syntax=docker/dockerfile:1.6
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
# Globally set pip break-system-packages option to avoid having to specify it every time
ARG PIP_BREAK_SYSTEM_PACKAGES=1
FROM wheels AS synap1680-wheels
ARG TARGETARCH
# Install dependencies
RUN wget -qO- "https://github.com/GaryHuang-ASUS/synaptics_astra_sdk/releases/download/v1.5.0/Synaptics-SL1680-v1.5.0-rt.tar" | tar -C / -xzf -
RUN wget -P /wheels/ "https://github.com/synaptics-synap/synap-python/releases/download/v0.0.4-preview/synap_python-0.0.4-cp311-cp311-manylinux_2_35_aarch64.whl"
FROM deps AS synap1680-deps
ARG TARGETARCH
ARG PIP_BREAK_SYSTEM_PACKAGES
RUN --mount=type=bind,from=synap1680-wheels,source=/wheels,target=/deps/synap-wheels \
pip3 install --no-deps -U /deps/synap-wheels/*.whl
WORKDIR /opt/frigate/
COPY --from=rootfs / /
COPY --from=synap1680-wheels /rootfs/usr/local/lib/*.so /usr/lib
ADD https://raw.githubusercontent.com/synaptics-astra/synap-release/v1.5.0/models/dolphin/object_detection/coco/model/mobilenet224_full80/model.synap /synaptics/mobilenet.synap

View File

@@ -0,0 +1,27 @@
target wheels {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/arm64"]
target = "wheels"
}
target deps {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/arm64"]
target = "deps"
}
target rootfs {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/arm64"]
target = "rootfs"
}
target synaptics {
dockerfile = "docker/synaptics/Dockerfile"
contexts = {
wheels = "target:wheels",
deps = "target:deps",
rootfs = "target:rootfs"
}
platforms = ["linux/arm64"]
}

View File

@@ -0,0 +1,15 @@
BOARDS += synaptics

local-synaptics: version
	docker buildx bake --file=docker/synaptics/synaptics.hcl synaptics \
		--set synaptics.tags=frigate:latest-synaptics \
		--load

build-synaptics: version
	docker buildx bake --file=docker/synaptics/synaptics.hcl synaptics \
		--set synaptics.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-synaptics

push-synaptics: build-synaptics
	docker buildx bake --file=docker/synaptics/synaptics.hcl synaptics \
		--set synaptics.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-synaptics \
		--push

View File

@@ -12,13 +12,16 @@ ARG PIP_BREAK_SYSTEM_PACKAGES
# Install TensorRT wheels
COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
-RUN pip3 wheel --wheel-dir=/trt-wheels -c /requirements-wheels.txt -r /requirements-tensorrt.txt
+# remove dependencies from the requirements that have type constraints
+RUN sed -i '/\[.*\]/d' /requirements-wheels.txt \
+    && pip3 wheel --wheel-dir=/trt-wheels -c /requirements-wheels.txt -r /requirements-tensorrt.txt
FROM deps AS frigate-tensorrt
ARG PIP_BREAK_SYSTEM_PACKAGES
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
-    pip3 uninstall -y onnxruntime-openvino tensorflow-cpu \
+    pip3 uninstall -y onnxruntime \
    && pip3 install -U /deps/trt-wheels/*.whl
COPY --from=rootfs / /

View File

@@ -112,7 +112,7 @@ RUN apt-get update \
    && apt-get install -y protobuf-compiler libprotobuf-dev \
    && rm -rf /var/lib/apt/lists/*
RUN --mount=type=bind,source=docker/tensorrt/requirements-models-arm64.txt,target=/requirements-tensorrt-models.txt \
-    pip3 wheel --wheel-dir=/trt-model-wheels -r /requirements-tensorrt-models.txt
+    pip3 wheel --wheel-dir=/trt-model-wheels --no-deps -r /requirements-tensorrt-models.txt
FROM wget AS jetson-ffmpeg
ARG DEBIAN_FRONTEND
@@ -145,7 +145,8 @@ COPY --from=trt-wheels /etc/TENSORRT_VER /etc/TENSORRT_VER
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
    --mount=type=bind,from=trt-model-wheels,source=/trt-model-wheels,target=/deps/trt-model-wheels \
    pip3 uninstall -y onnxruntime \
-    && pip3 install -U /deps/trt-wheels/*.whl /deps/trt-model-wheels/*.whl \
+    && pip3 install -U /deps/trt-wheels/*.whl \
+    && pip3 install -U /deps/trt-model-wheels/*.whl \
    && ldconfig
WORKDIR /opt/frigate/

View File

@@ -14,5 +14,5 @@ nvidia_cusparse_cu12==12.5.1.*; platform_machine == 'x86_64'
nvidia_nccl_cu12==2.23.4; platform_machine == 'x86_64'
nvidia_nvjitlink_cu12==12.5.82; platform_machine == 'x86_64'
onnx==1.16.*; platform_machine == 'x86_64'
-onnxruntime-gpu==1.20.*; platform_machine == 'x86_64'
+onnxruntime-gpu==1.22.*; platform_machine == 'x86_64'
protobuf==3.20.3; platform_machine == 'x86_64'

View File

@@ -1 +1,2 @@
cuda-python == 12.6.*; platform_machine == 'aarch64'
+numpy == 1.26.*; platform_machine == 'aarch64'

View File

@@ -1,3 +1,2 @@
onnx == 1.14.0; platform_machine == 'aarch64'
protobuf == 3.20.3; platform_machine == 'aarch64'
-numpy == 1.23.*; platform_machine == 'aarch64' # required by python-tensorrt 8.2.1 (Jetpack 4.6)

View File

@@ -177,9 +177,11 @@ listen [::]:5000 ipv6only=off;
By default, Frigate runs at the root path (`/`). However some setups require to run Frigate under a custom path prefix (e.g. `/frigate`), especially when Frigate is located behind a reverse proxy that requires path-based routing.

### Set Base Path via HTTP Header
+
The preferred way to configure the base path is through the `X-Ingress-Path` HTTP header, which needs to be set to the desired base path in an upstream reverse proxy.
+
For example, in Nginx:
```
location /frigate {
    proxy_set_header X-Ingress-Path /frigate;
@@ -188,9 +190,11 @@
```

### Set Base Path via Environment Variable
+
When it is not feasible to set the base path via a HTTP header, it can also be set via the `FRIGATE_BASE_PATH` environment variable in the Docker Compose file.
+
For example:
```
services:
  frigate:
@@ -200,6 +204,7 @@ services:
```

This can be used for example to access Frigate via a Tailscale agent (https), by simply forwarding all requests to the base path (http):
+
```
tailscale serve --https=443 --bg --set-path /frigate http://localhost:5000/frigate
```
@@ -218,7 +223,7 @@ To do this:

### Custom go2rtc version

-Frigate currently includes go2rtc v1.9.9, there may be certain cases where you want to run a different version of go2rtc.
+Frigate currently includes go2rtc v1.9.10, there may be certain cases where you want to run a different version of go2rtc.

To do this:

View File

@@ -50,7 +50,7 @@ cameras:

### Configuring Minimum Volume

-The audio detector uses volume levels in the same way that motion in a camera feed is used for object detection. This means that frigate will not run audio detection unless the audio volume is above the configured level in order to reduce resource usage. Audio levels can vary widely between camera models so it is important to run tests to see what volume levels are. MQTT explorer can be used on the audio topic to see what volume level is being detected.
+The audio detector uses volume levels in the same way that motion in a camera feed is used for object detection. This means that frigate will not run audio detection unless the audio volume is above the configured level in order to reduce resource usage. Audio levels can vary widely between camera models so it is important to run tests to see what volume levels are. The Debug view in the Frigate UI has an Audio tab for cameras that have the `audio` role assigned, where a graph and the current levels are displayed. The `min_volume` parameter should be set to the minimum `RMS` level required to run audio detection.
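To make the `RMS` number concrete, here is a small illustrative computation (not Frigate's code; it assumes 16-bit PCM samples of the kind the audio detector analyzes):

```python
# Illustrative RMS calculation over one chunk of 16-bit PCM audio samples;
# a value like this is what min_volume is compared against.
import numpy as np

samples = np.random.randint(-3000, 3000, size=16000).astype(np.float32)
rms = float(np.sqrt(np.mean(samples**2)))
print(int(rms))  # roughly 1730 here; set min_volume below typical speech levels
```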
:::tip
@@ -72,3 +72,76 @@ audio:
      - speech
      - yell
```

### Audio Transcription

Frigate supports fully local audio transcription using either `sherpa-onnx` or OpenAI's open-source Whisper models via `faster-whisper`. To enable transcription, enable it in your config. Note that audio detection must also be enabled as described above in order to use audio transcription features.

```yaml
audio_transcription:
  enabled: True
  device: ...
  model_size: ...
```

Disable audio transcription for select cameras at the camera level:

```yaml
cameras:
  back_yard:
    ...
    audio_transcription:
      enabled: False
```

:::note

Audio detection must be enabled and configured as described above in order to use audio transcription features.

:::

The optional config parameters that can be set at the global level include:

- **`enabled`**: Enable or disable the audio transcription feature.
  - Default: `False`
  - It is recommended to only configure the feature at the global level and enable it at the individual camera level.
- **`device`**: Device to use to run transcription and translation models.
  - Default: `CPU`
  - This can be `CPU` or `GPU`. The `sherpa-onnx` models are lightweight and run on the CPU only. The `whisper` models can run on GPU but are only supported on CUDA hardware.
- **`model_size`**: The size of the model used for live transcription.
  - Default: `small`
  - This can be `small` or `large`. The `small` setting uses `sherpa-onnx` models that are fast, lightweight, and always run on the CPU but are not as accurate as the `whisper` model.
  - This config option applies to **live transcription only**. Recorded `speech` events will always use a different `whisper` model (and can be accelerated for CUDA hardware if available with `device: GPU`).
- **`language`**: Defines the language used by `whisper` to translate `speech` audio events (and live audio only if using the `large` model).
  - Default: `en`
  - You must use a valid [language code](https://github.com/openai/whisper/blob/main/whisper/tokenizer.py#L10).
  - Transcriptions for `speech` events are translated.
  - Live audio is translated only if you are using the `large` model. The `small` `sherpa-onnx` model is English-only.

The only field that is valid at the camera level is `enabled`.

#### Live transcription

The single camera Live view in the Frigate UI supports live transcription of audio for streams defined with the `audio` role. Use the Enable/Disable Live Audio Transcription button/switch to toggle transcription processing. When speech is heard, the UI will display a black box over the top of the camera stream with text. The MQTT topic `frigate/<camera_name>/audio/transcription` will also be updated in real-time with transcribed text.
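As a usage illustration, a minimal consumer of that topic might look like this (hypothetical camera name and broker address; it uses the paho-mqtt 2.x client from Frigate's requirements):

```python
# Minimal subscriber for live transcription text; assumes an MQTT broker on
# localhost and a camera named back_yard.
import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    # Each message payload is the latest transcribed text for the camera
    print(f"{msg.topic}: {msg.payload.decode()}")

client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION2)
client.on_message = on_message
client.connect("localhost", 1883)
client.subscribe("frigate/back_yard/audio/transcription")
client.loop_forever()
```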
Results can be error-prone due to a number of factors, including:

- Poor quality camera microphone
- Distance of the audio source to the camera microphone
- Low audio bitrate setting in the camera
- Background noise
- Using the `small` model - it's fast, but not accurate for poor quality audio

For speech sources close to the camera with minimal background noise, use the `small` model.

If you have CUDA hardware, you can experiment with the `large` `whisper` model on GPU. Performance is not quite as fast as the `sherpa-onnx` `small` model, but live transcription is far more accurate. Using the `large` model with CPU will likely be too slow for real-time transcription.

#### Transcription and translation of `speech` audio events

Any `speech` events in Explore can be transcribed and/or translated through the Transcribe button in the Tracked Object Details pane.

In order to use transcription and translation for past events, you must enable audio detection and define `speech` as an audio type to listen for in your config. To have `speech` events translated into the language of your choice, set the `language` config parameter with the correct [language code](https://github.com/openai/whisper/blob/main/whisper/tokenizer.py#L10).

The transcribed/translated speech will appear in the description box in the Tracked Object Details pane. If Semantic Search is enabled, embeddings are generated for the transcription text and are fully searchable using the description search type.

Recorded `speech` events will always use a `whisper` model, regardless of the `model_size` config setting. Without a GPU, generating transcriptions for longer `speech` events may take a fair amount of time, so be patient.

View File

@@ -59,6 +59,7 @@ The default session length for user authentication in Frigate is 24 hours. This
While the default provides a balance of security and convenience, you can customize this duration to suit your specific security requirements and user experience preferences. The session length is configured in seconds.

The default value of `86400` will expire the authentication session after 24 hours. Some other examples:

- `0`: Setting the session length to 0 will require a user to log in every time they access the application or after a very short, immediate timeout.
- `604800`: Setting the session length to 604800 will require a user to log in if the token is not refreshed for 7 days.
@@ -80,7 +81,7 @@ python3 -c 'import secrets; print(secrets.token_hex(64))'
Frigate looks for a JWT token secret in the following order:

1. An environment variable named `FRIGATE_JWT_SECRET`
-2. A docker secret named `FRIGATE_JWT_SECRET` in `/run/secrets/`
+2. A file named `FRIGATE_JWT_SECRET` in the directory specified by the `CREDENTIALS_DIRECTORY` environment variable (defaults to the Docker Secrets directory: `/run/secrets/`)
3. A `jwt_secret` option from the Home Assistant Add-on options
4. A `.jwt_secret` file in the config directory
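A minimal sketch of that lookup order (illustrative only; it covers steps 1, 2, and 4, while the Home Assistant Add-on option in step 3 is resolved elsewhere in Frigate's startup code):

```python
# Sketch of the documented JWT secret lookup order, not Frigate's actual code.
import os

def find_jwt_secret(config_dir: str = "/config") -> str | None:
    # 1. Environment variable
    if secret := os.environ.get("FRIGATE_JWT_SECRET"):
        return secret
    # 2. Credentials directory (defaults to the Docker Secrets directory),
    # then 4. the fallback file in the config directory
    cred_dir = os.environ.get("CREDENTIALS_DIRECTORY", "/run/secrets")
    for path in (os.path.join(cred_dir, "FRIGATE_JWT_SECRET"),
                 os.path.join(config_dir, ".jwt_secret")):
        if os.path.isfile(path):
            with open(path) as f:
                return f.read().strip()
    return None
```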
@ -123,7 +124,7 @@ proxy:
    role: x-forwarded-groups
```
Frigate supports `admin`, `viewer`, and custom roles (see below). When using port `8971`, Frigate validates these headers and subsequent requests use the headers `remote-user` and `remote-role` for authorization.
A default role can be provided. Any value in the mapped `role` header will override the default.
@ -133,6 +134,34 @@ proxy:
  default_role: viewer
```
## Role Mapping
In some environments, upstream identity providers (OIDC, SAML, LDAP, etc.) do not pass a Frigate-compatible role directly, but instead pass one or more group claims. To handle this, Frigate supports a `role_map` that translates upstream group names into Frigate's internal roles (`admin`, `viewer`, or custom).
```yaml
proxy:
...
header_map:
user: x-forwarded-user
role: x-forwarded-groups
role_map:
admin:
- sysadmins
- access-level-security
viewer:
- camera-viewer
operator: # Custom role mapping
- operators
```
In this example:
- If the proxy passes a role header containing `sysadmins` or `access-level-security`, the user is assigned the `admin` role.
- If the proxy passes a role header containing `camera-viewer`, the user is assigned the `viewer` role.
- If the proxy passes a role header containing `operators`, the user is assigned the `operator` custom role.
- If no mapping matches, Frigate falls back to `default_role` if configured.
- If `role_map` is not defined, Frigate assumes the role header directly contains `admin`, `viewer`, or a custom role name.
#### Port Considerations
**Authenticated Port (8971)**
@ -141,6 +170,7 @@ proxy:
- The `remote-role` header determines the user's privileges:
- **admin** → Full access (user management, configuration changes).
- **viewer** → Read-only access.
- **Custom roles** → Read-only access limited to the cameras defined in `auth.roles[role]`.
- Ensure your **proxy sends both user and role headers** for proper role enforcement.
**Unauthenticated Port (5000)**
@ -186,6 +216,41 @@ Frigate supports user roles to control access to certain features in the UI and
- **admin**: Full access to all features, including user management and configuration.
- **viewer**: Read-only access to the UI and API, including viewing cameras, review items, and historical footage. Configuration editor and settings in the UI are inaccessible.
- **Custom Roles**: Arbitrary role names (alphanumeric, dots/underscores) with specific camera permissions. These extend the system for granular access (e.g., "operator" for select cameras).
### Custom Roles and Camera Access
The viewer role provides read-only access to all cameras in the UI and API. Custom roles allow admins to limit read-only access to specific cameras. Each role specifies an array of allowed camera names. If a user is assigned a custom role, their account behaves like the **viewer** role, but only for the designated cameras: they can view Live, Review/History, Explore, and Export for those cameras alone. Backend API endpoints enforce this server-side (e.g., returning 403 for unauthorized cameras), and the frontend UI filters content accordingly (e.g., camera dropdowns show only permitted options).
### Role Configuration Example
```yaml
cameras:
front_door:
# ... camera config
side_yard:
# ... camera config
garage:
# ... camera config
auth:
enabled: true
roles:
operator: # Custom role
- front_door
      - garage # Operator can access front_door and garage
neighbor:
- side_yard
```
To give a specific user access to all cameras, just use the **viewer** role.
### Managing User Roles
1. Log in as an **admin** user via port `8971` (preferred), or unauthenticated via port `5000`.
2. Navigate to **Settings**.
3. In the **Users** section, edit a users role by selecting from available roles (admin, viewer, or custom).
4. In the **Roles** section, add/edit/delete custom roles (select cameras via switches). Deleting a role auto-reassigns users to "viewer".
### Role Enforcement

View File

@ -21,7 +21,7 @@ Frigate autotracking functions with PTZ cameras capable of relative movement wit
Many cheaper or older PTZs may not support this standard. Frigate will report an error message in the log and disable autotracking if your PTZ is unsupported.
The FeatureList on the [ONVIF Conformant Products Database](https://www.onvif.org/conformant-products/) can provide a starting point to determine a camera's compatibility with Frigate's autotracking. Look to see if a camera lists `PTZRelative`, `PTZRelativePanTilt` and/or `PTZRelativeZoom`. These features are required for autotracking, but some cameras still fail to respond even if they claim support.
A growing list of cameras and brands that have been reported by users to work with Frigate's autotracking can be found [here](cameras.md).

View File

@ -147,7 +147,7 @@ WEB Digest Algorithm - MD5
Reolink has many different camera models with inconsistently supported features and behavior. The below table shows a summary of various features and recommendations.
| Camera Resolution | Camera Generation         | Recommended Stream Type           | Additional Notes                                                        |
| ----------------- | ------------------------- | --------------------------------- | ----------------------------------------------------------------------- |
| 5MP or lower      | All                       | http-flv                          | Stream is h264                                                          |
| 6MP or higher     | Latest (ex: Duo3, CX-8##) | http-flv with ffmpeg 8.0, or rtsp | This uses the new http-flv-enhanced over H265 which requires ffmpeg 8.0 |
| 6MP or higher     | Older (ex: RLC-8##)       | rtsp                              |                                                                         |
@ -238,7 +238,7 @@ go2rtc:
    - rtspx://192.168.1.1:7441/abcdefghijk
```
[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-rtsp)
In the Unifi 2.0 update, Unifi Protect cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with Unifi Protect.
@ -257,6 +257,7 @@ TP-Link VIGI cameras need some adjustments to the main stream settings on the ca
To use a USB camera (webcam) with Frigate, the recommendation is to use go2rtc's [FFmpeg Device](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#source-ffmpeg-device) support:
- Preparation outside of Frigate:
- Get USB camera path. Run `v4l2-ctl --list-devices` to get a listing of locally-connected cameras available. (You may need to install `v4l-utils` in a way appropriate for your Linux distribution). In the sample configuration below, we use `video=0` to correlate with a detected device path of `/dev/video0`
- Get USB camera formats & resolutions. Run `ffmpeg -f v4l2 -list_formats all -i /dev/video0` to get an idea of what formats and resolutions the USB Camera supports. In the sample configuration below, we use a width of 1024 and height of 576 in the stream and detection settings based on what was reported back.
- If using Frigate in a container (e.g. Docker on TrueNAS), ensure you have USB Passthrough support enabled, along with a specific Host Device (`/dev/video0`) + Container Device (`/dev/video0`) listed.
@ -284,5 +285,3 @@ cameras:
      width: 1024
      height: 576
```

View File

@ -89,31 +89,35 @@ An ONVIF-capable camera that supports relative movement within the field of view
## ONVIF PTZ camera recommendations
This list of working and non-working PTZ cameras is based on user feedback. If you'd like to report specific quirks or issues with a manufacturer or camera that would be helpful for other users, open a pull request to add to this list.
The FeatureList on the [ONVIF Conformant Products Database](https://www.onvif.org/conformant-products/) can provide a starting point to determine a camera's compatibility with Frigate's autotracking. Look to see if a camera lists `PTZRelative`, `PTZRelativePanTilt` and/or `PTZRelativeZoom`. These features are required for autotracking, but some cameras still fail to respond even if they claim support. If they are missing, autotracking will not work (though basic PTZ in the WebUI might). Avoid cameras with no database entry unless they are confirmed as working below.
| Brand or specific camera     | PTZ Controls | Autotracking | Notes                                                                                                                                                                                                                                                              |
| ---------------------------- | :----------: | :----------: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| Amcrest                      |      ✅      |      ✅      | ⛔️ Generally, Amcrest should work, but some older models (like the common IP2M-841) don't support autotracking                                                                                                                                                    |
| Amcrest ASH21                |      ✅      |      ❌      | ONVIF service port: 80                                                                                                                                                                                                                                             |
| Amcrest IP4M-S2112EW-AI      |      ✅      |      ❌      | FOV relative movement not supported.                                                                                                                                                                                                                               |
| Amcrest IP5M-1190EW          |      ✅      |      ❌      | ONVIF Port: 80. FOV relative movement not supported.                                                                                                                                                                                                               |
| Annke CZ504                  |      ✅      |      ✅      | Annke support provides specific firmware ([V5.7.1 build 250227](https://github.com/pierrepinon/annke_cz504/raw/refs/heads/main/digicap_V5-7-1_build_250227.dav)) to fix an issue with ONVIF "TranslationSpaceFov"                                                  |
| Ctronics PTZ                 |      ✅      |      ❌      |                                                                                                                                                                                                                                                                    |
| Dahua                        |      ✅      |      ✅      | Some low-end Dahuas (lite series, picoo series (commonly), among others) have been reported to not support autotracking. These models usually don't have a four digit model number with chassis prefix and options postfix (e.g. DH-P5AE-PV vs DH-SD49825GB-HNR). |
| Dahua DH-SD2A500HB           |      ✅      |      ❌      |                                                                                                                                                                                                                                                                    |
| Dahua DH-SD49825GB-HNR       |      ✅      |      ✅      |                                                                                                                                                                                                                                                                    |
| Dahua DH-P5AE-PV             |      ❌      |      ❌      |                                                                                                                                                                                                                                                                    |
| Foscam                       |      ✅      |      ❌      | Foscam cameras generally support PTZ, but not relative movement. There are no official ONVIF certifications and tests available on the ONVIF Conformant Products Database                                                                                          |
| Foscam R5                    |      ✅      |      ❌      |                                                                                                                                                                                                                                                                    |
| Foscam SD4                   |      ✅      |      ❌      |                                                                                                                                                                                                                                                                    |
| Hanwha XNP-6550RH            |      ✅      |      ❌      |                                                                                                                                                                                                                                                                    |
| Hikvision                    |      ✅      |      ❌      | Incomplete ONVIF support (MoveStatus won't update even on latest firmware) - reported with HWP-N4215IH-DE and DS-2DE3304W-DE, but likely others                                                                                                                   |
| Hikvision DS-2DE3A404IWG-E/W |      ✅      |      ✅      |                                                                                                                                                                                                                                                                    |
| Reolink                      |      ✅      |      ❌      |                                                                                                                                                                                                                                                                    |
| Speco O8P32X                 |      ✅      |      ❌      |                                                                                                                                                                                                                                                                    |
| Sunba 405-D20X               |      ✅      |      ❌      | Incomplete ONVIF support reported on original and 4k models. All models are suspected incompatible.                                                                                                                                                                |
| Tapo                         |      ✅      |      ❌      | Many models supported, ONVIF Service Port: 2020                                                                                                                                                                                                                    |
| Uniview IPC672LR-AX4DUPK     |      ✅      |      ❌      | Firmware says FOV relative movement is supported, but camera doesn't actually move when sending ONVIF commands                                                                                                                                                     |
| Uniview IPC6612SR-X33-VG     |      ✅      |      ✅      | Leave `calibrate_on_startup` as `False`. A user has reported that zooming with `absolute` is working.                                                                                                                                                              |
| Vikylin PTZ-2804X-I2         |      ❌      |      ❌      | Incomplete ONVIF support                                                                                                                                                                                                                                           |
## Setting up camera groups
@ -134,3 +138,7 @@ camera_groups:
    icon: LuCar
    order: 0
```
## Two-Way Audio
See the guide [here](/configuration/live/#two-way-talk).

View File

@ -0,0 +1,83 @@
---
id: object_classification
title: Object Classification
---
Object classification allows you to train a custom MobileNetV2 classification model to run on tracked objects (persons, cars, animals, etc.) to identify a finer category or attribute for that object.
## Minimum System Requirements
Object classification models are lightweight and run very fast on CPU. Inference should be usable on virtually any machine that can run Frigate.
Training the model does briefly use a high amount of system resources for about 1-3 minutes per training run. On lower-power devices, training may take longer.
## Classes
Classes are the categories your model will learn to distinguish between. Each class represents a distinct visual category that the model will predict.
For object classification:
- Define classes that represent different types or attributes of the detected object
- Examples: For `person` objects, classes might be `delivery_person`, `resident`, `stranger`
- Include a `none` class for objects that don't fit any specific category
- Keep classes visually distinct to improve accuracy
### Classification Type
- **Sub label**:
- Applied to the object's `sub_label` field.
- Ideal for a single, more specific identity or type.
- Example: `cat` → `Leo`, `Charlie`, `None`.
- **Attribute**:
- Added as metadata to the object (visible in /events): `<model_name>: <predicted_value>`.
- Ideal when multiple attributes can coexist independently.
- Example: Detecting if a `person` in a construction yard is wearing a helmet or not.
## Example use cases
### Sub label
- **Known pet vs unknown**: For `dog` objects, set the sub label to your pet's name (e.g., `buddy`) or `none` for others.
- **Mail truck vs normal car**: For `car`, classify as `mail_truck` vs `car` to filter important arrivals.
- **Delivery vs non-delivery person**: For `person`, classify `delivery` vs `visitor` based on uniform/props.
### Attributes
- **Backpack**: For `person`, add attribute `backpack: yes/no`.
- **Helmet**: For `person` (worksite), add `helmet: yes/no`.
- **Leash**: For `dog`, add `leash: yes/no` (useful for park or yard rules).
- **Ladder rack**: For `truck`, add `ladder_rack: yes/no` to flag service vehicles.
## Configuration
Object classification is configured as a custom classification model. Each model has its own name and settings. You must list which object labels should be classified.
```yaml
classification:
custom:
dog:
threshold: 0.8
object_config:
objects: [dog] # object labels to classify
classification_type: sub_label # or: attribute
```
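For an attribute-style model, such as the helmet example above, the same options apply; only the `classification_type` changes. The model and class names below are illustrative:
```yaml
classification:
  custom:
    helmet:
      threshold: 0.8
      object_config:
        objects: [person] # check each tracked person
        classification_type: attribute # attached as metadata instead of a sub label
```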
## Training the model
Creating and training the model is done within the Frigate UI using the `Classification` page.
### Getting Started
When choosing which objects to classify, start with a small number of visually distinct classes and ensure your training samples match camera viewpoints and distances typical for those objects.
// TODO add this section once UI is implemented. Explain process of selecting objects and curating training examples.
### Improving the Model
- **Problem framing**: Keep classes visually distinct and relevant to the chosen object types.
- **Data collection**: Use the model's Recent Classification tab to gather balanced examples across times of day, weather, and distances.
- **Preprocessing**: Ensure examples reflect object crops similar to Frigate's boxes; keep the subject centered.
- **Labels**: Keep label names short and consistent; include a `none` class if you plan to ignore uncertain predictions for sub labels.
- **Threshold**: Tune `threshold` per model to reduce false assignments. Start at `0.8` and adjust based on validation.

View File

@ -0,0 +1,62 @@
---
id: state_classification
title: State Classification
---
State classification allows you to train a custom MobileNetV2 classification model on a fixed region of your camera frame(s) to determine a current state. The model can be configured to run on a schedule and/or when motion is detected in that region.
## Minimum System Requirements
State classification models are lightweight and run very fast on CPU. Inference should be usable on virtually any machine that can run Frigate.
Training the model does briefly use a high amount of system resources for about 1-3 minutes per training run. On lower-power devices, training may take longer.
## Classes
Classes are the different states an area on your camera can be in. Each class represents a distinct visual state that the model will learn to recognize.
For state classification:
- Define classes that represent mutually exclusive states
- Examples: `open` and `closed` for a garage door, `on` and `off` for lights
- Use at least 2 classes (typically binary states work best)
- Keep class names clear and descriptive
## Example use cases
- **Door state**: Detect if a garage or front door is open vs closed.
- **Gate state**: Track if a driveway gate is open or closed.
- **Trash day**: Bins at curb vs no bins present.
- **Pool cover**: Cover on vs off.
## Configuration
State classification is configured as a custom classification model. Each model has its own name and settings. You must provide at least one camera crop under `state_config.cameras`.
```yaml
classification:
custom:
front_door:
threshold: 0.8
state_config:
motion: true # run when motion overlaps the crop
interval: 10 # also run every N seconds (optional)
cameras:
front:
crop: [0, 180, 220, 400]
```
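Since `motion` and `interval` can be combined or used alone, a purely scheduled check (no motion trigger) for the garage-door example above might look like this sketch, with a hypothetical camera name and crop:
```yaml
classification:
  custom:
    garage_door:
      threshold: 0.8
      state_config:
        interval: 30 # classify the crop every 30 seconds
        cameras:
          garage:
            crop: [100, 0, 400, 300]
```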
## Training the model
Creating and training the model is done within the Frigate UI using the `Classification` page.
### Getting Started
When choosing a portion of the camera frame for state classification, it is important to make the crop tight around the area of interest to avoid extra signals unrelated to what is being classified.
// TODO add this section once UI is implemented. Explain process of selecting a crop.
### Improving the Model
- **Problem framing**: Keep classes visually distinct and state-focused (e.g., `open`, `closed`, `unknown`). Avoid combining object identity with state in a single model unless necessary.
- **Data collection**: Use the model's Recent Classifications tab to gather balanced examples across times of day and weather.

View File

@ -24,7 +24,7 @@ Frigate needs to first detect a `person` before it can detect and recognize a fa
Frigate has support for two face recognition model types:
- **small**: Frigate will run a FaceNet embedding model to recognize faces, which runs locally on the CPU. This model is optimized for efficiency and is not as accurate.
- **large**: Frigate will run a large ArcFace embedding model that is optimized for accuracy. It is only recommended to be run when an integrated or dedicated GPU / NPU is available.
In both cases, a lightweight face landmark detection model is also used to align faces before running recognition.
@ -34,7 +34,7 @@ All of these features run locally on your system.
The `small` model is optimized for efficiency and runs on the CPU; most CPUs should run the model efficiently.
The `large` model is optimized for accuracy; an integrated or discrete GPU / NPU is required. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.
## Configuration
@ -70,9 +70,12 @@ Fine-tune face recognition with these optional parameters at the global level of
- `min_faces`: Min face recognitions for the sub label to be applied to the person object.
- Default: `1`
- `save_attempts`: Number of images of recognized faces to save for training.
- Default: `200`.
- `blur_confidence_filter`: Enables a filter that calculates how blurry the face is and adjusts the confidence based on this.
- Default: `True`.
- `device`: Target a specific device to run the face recognition model on (multi-GPU installation).
- Default: `None`.
- Note: This setting is only applicable when using the `large` model. See [onnxruntime's provider options](https://onnxruntime.ai/docs/execution-providers/)
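Taken together, a global configuration using these options might look like the following sketch (values are illustrative):
```yaml
face_recognition:
  enabled: True
  model_size: large # requires a GPU / NPU, see above
  min_faces: 2
  save_attempts: 200
  blur_confidence_filter: True
```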
## Usage
@ -111,9 +114,9 @@ When choosing images to include in the face training set it is recommended to al
:::
### Understanding the Recent Recognitions Tab
The Recent Recognitions tab in the face library displays recent face recognition attempts. Detected face images are grouped according to the person they were identified as potentially matching.
Each face image is labeled with a name (or `Unknown`) along with the confidence score of the recognition attempt. While each image can be used to train the system for a specific person, not all images are suitable for training.
@ -137,7 +140,7 @@ Once front-facing images are performing well, start choosing slightly off-angle
Start with the [Usage](#usage) section and re-read the [Model Requirements](#model-requirements) above.
1. Ensure `person` is being _detected_. A `person` will automatically be scanned by Frigate for a face. Any detected faces will appear in the Recent Recognitions tab in the Frigate UI's Face Library.
If you are using a Frigate+ or `face` detecting model:
@ -185,7 +188,7 @@ Avoid training on images that already score highly, as this can lead to over-fit
No, face recognition does not support negative training (i.e., explicitly telling it who someone is _not_). Instead, the best approach is to improve the training data by using a more diverse and representative set of images for each person.
For more guidance, refer to the section above on improving recognition accuracy.
### I see scores above the threshold in the Recent Recognitions tab, but a sub label wasn't assigned?
Frigate considers the recognition scores across all recognition attempts for each person object. The scores are continually weighted based on the area of the face, and a sub label will only be assigned to a person object if the person is confidently recognized consistently. This avoids cases where a single high-confidence recognition would throw off the results.

View File

@ -9,13 +9,12 @@ Requests for a description are sent off automatically to your AI provider at the
## Configuration
Generative AI can be enabled for all cameras or only for specific cameras. If GenAI is disabled for a camera, you can still manually generate descriptions for events using the HTTP API. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below.
To use Generative AI, you must define a single provider at the global level of your Frigate configuration. If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`.
```yaml
genai:
  provider: gemini
  api_key: "{FRIGATE_GEMINI_API_KEY}"
  model: gemini-2.0-flash
@ -30,14 +29,17 @@ cameras:
      required_zones:
        - steps
  indoor_camera:
    objects:
      genai:
        enabled: False # <- disable GenAI for your indoor camera
```
By default, descriptions will be generated for all tracked objects and all zones. But you can also optionally specify `objects` and `required_zones` to only generate descriptions for certain tracked objects or zones.
Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction.
Generative AI can also be toggled dynamically for a camera via MQTT with the topic `frigate/<camera_name>/object_descriptions/set`. See the [MQTT documentation](/integrations/mqtt/#frigatecamera_nameobjectdescriptionsset).
## Ollama
:::warning
@ -66,7 +68,6 @@ You should have at least 8 GB of RAM available (or VRAM if running on GPU) to ru
```yaml
genai:
  provider: ollama
  base_url: http://localhost:11434
  model: llava:7b
@ -93,7 +94,6 @@ To start using Gemini, you must first get an API key from [Google AI Studio](htt
```yaml
genai:
  provider: gemini
  api_key: "{FRIGATE_GEMINI_API_KEY}"
  model: gemini-2.0-flash
@ -121,7 +121,6 @@ To start using OpenAI, you must first [create an API key](https://platform.opena
```yaml
genai:
  provider: openai
  api_key: "{FRIGATE_OPENAI_API_KEY}"
  model: gpt-4o
@ -149,7 +148,6 @@ To start using Azure OpenAI, you must first [create a resource](https://learn.mi
```yaml
genai:
  provider: azure_openai
  base_url: https://instance.cognitiveservices.azure.com/openai/responses?api-version=2025-04-01-preview
  model: gpt-5-mini
@ -193,32 +191,35 @@ You are also able to define custom prompts in your configuration.
```yaml
genai:
  provider: ollama
  base_url: http://localhost:11434
  model: llava
objects:
  prompt: "Analyze the {label} in these images from the {camera} security camera. Focus on the actions, behavior, and potential intent of the {label}, rather than just describing its appearance."
  object_prompts:
    person: "Examine the main person in these images. What are they doing and what might their actions suggest about their intent (e.g., approaching a door, leaving an area, standing still)? Do not describe the surroundings or static details."
    car: "Observe the primary vehicle in these images. Focus on its movement, direction, or purpose (e.g., parking, approaching, circling). If it's a delivery vehicle, mention the company."
```
Prompts can also be overridden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire.
```yaml
cameras:
  front_door:
    objects:
      genai:
        enabled: True
        use_snapshot: True
        prompt: "Analyze the {label} in these images from the {camera} security camera at the front door. Focus on the actions and potential intent of the {label}."
        object_prompts:
          person: "Examine the person in these images. What are they doing, and how might their actions suggest their purpose (e.g., delivering something, approaching, leaving)? If they are carrying or interacting with a package, include details about its source or destination."
          cat: "Observe the cat in these images. Focus on its movement and intent (e.g., wandering, hunting, interacting with objects). If the cat is near the flower pots or engaging in any specific actions, mention it."
        objects:
          - person
          - cat
        required_zones:
          - steps
```
### Experiment with prompts

View File

@ -0,0 +1,143 @@
---
id: genai_config
title: Configuring Generative AI
---
## Configuration
A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below.
To use Generative AI, you must define a single provider at the global level of your Frigate configuration. If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`.
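For instance, with Docker Compose, a key could be injected through the container environment and referenced from the config as `{FRIGATE_GEMINI_API_KEY}`; the service layout and key value below are placeholders:
```yaml
services:
  frigate:
    image: ghcr.io/blakeblackshear/frigate:stable
    environment:
      - FRIGATE_GEMINI_API_KEY=your-api-key-here # any variable prefixed with FRIGATE_
```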
## Ollama
:::warning
Using Ollama on CPU is not recommended, high inference times make using Generative AI impractical.
:::
[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It provides a nice API over [llama.cpp](https://github.com/ggerganov/llama.cpp). It is highly recommended to host this server on a machine with an Nvidia graphics card, or on an Apple silicon Mac for best performance.
Most of the 7b parameter 4-bit vision models will fit inside 8GB of VRAM. There is also a [Docker container](https://hub.docker.com/r/ollama/ollama) available.
Parallel requests also come with some caveats. You will need to set `OLLAMA_NUM_PARALLEL=1` and choose `OLLAMA_MAX_QUEUE` and `OLLAMA_MAX_LOADED_MODELS` values that are appropriate for your hardware and preferences. See the [Ollama documentation](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-does-ollama-handle-concurrent-requests).
### Supported Models
You must use a vision capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/library). Note that Frigate will not automatically download the model you specify in your config; Ollama will try to download it, but this may take longer than the timeout, so it is recommended to pull the model beforehand by running `ollama pull your_model` on your Ollama server/Docker container. Note that the model specified in Frigate's config must match the downloaded model tag.
:::info
Each model is available in multiple parameter sizes (3b, 4b, 8b, etc.). Larger sizes are more capable of complex tasks and understanding of situations, but require more memory and computational resources. It is recommended to try multiple models and experiment to see which performs best.
:::
:::tip
If you are trying to use a single model for Frigate and Home Assistant, it will need to support vision and tool calling. https://github.com/skye-harris/ollama-modelfiles contains optimized model configs for this task.
:::
The following models are recommended:
| Model | Notes |
| ----------------- | ----------------------------------------------------------- |
| `qwen3-vl` | Strong visual and situational understanding |
| `Intern3.5VL` | Relatively fast with good vision comprehension |
| `gemma3` | Strong frame-to-frame understanding, slower inference times |
| `qwen2.5-vl` | Fast but capable model with good vision comprehension |
| `llava-phi3` | Lightweight and fast model with vision comprehension |
:::note
You should have at least 8 GB of RAM available (or VRAM if running on GPU) to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models.
:::
### Configuration
```yaml
genai:
provider: ollama
base_url: http://localhost:11434
model: minicpm-v:8b
provider_options: # other Ollama client options can be defined
keep_alive: -1
options:
num_ctx: 8192 # make sure the context matches other services that are using ollama
```
## Google Gemini
Google Gemini has a free tier allowing [15 queries per minute](https://ai.google.dev/pricing) to the API, which is more than sufficient for standard Frigate usage.
### Supported Models
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://ai.google.dev/gemini-api/docs/models/gemini). At the time of writing, this includes `gemini-1.5-pro` and `gemini-1.5-flash`.
### Get API Key
To start using Gemini, you must first get an API key from [Google AI Studio](https://aistudio.google.com).
1. Accept the Terms of Service
2. Click "Get API Key" from the right hand navigation
3. Click "Create API key in new project"
4. Copy the API key for use in your config
### Configuration
```yaml
genai:
provider: gemini
api_key: "{FRIGATE_GEMINI_API_KEY}"
model: gemini-1.5-flash
```
## OpenAI
OpenAI does not have a free tier for their API. With the release of gpt-4o, pricing has been reduced and each generation should cost fractions of a cent if you choose to go this route.
### Supported Models
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://platform.openai.com/docs/models). At the time of writing, this includes `gpt-4o` and `gpt-4-turbo`.
### Get API Key
To start using OpenAI, you must first [create an API key](https://platform.openai.com/api-keys) and [configure billing](https://platform.openai.com/settings/organization/billing/overview).
### Configuration
```yaml
genai:
provider: openai
api_key: "{FRIGATE_OPENAI_API_KEY}"
model: gpt-4o
```
:::note
To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` environment variable to your provider's API URL.
:::
## Azure OpenAI
Microsoft offers several vision models through Azure OpenAI. A subscription is required.
### Supported Models
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models). At the time of writing, this includes `gpt-4o` and `gpt-4-turbo`.
### Create Resource and Get API Key
To start using Azure OpenAI, you must first [create a resource](https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource). You'll need your API key and resource URL, which must include the `api-version` parameter (see the example below). The model field is not required in your configuration as the model is part of the deployment name you chose when deploying the resource.
### Configuration
```yaml
genai:
provider: azure_openai
base_url: https://example-endpoint.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview
api_key: "{FRIGATE_OPENAI_API_KEY}"
```

View File

@ -0,0 +1,77 @@
---
id: genai_objects
title: Object Descriptions
---
Generative AI can be used to automatically generate descriptive text based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate to provide more context about your tracked objects. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail.
Requests for a description are sent off automatically to your AI provider at the end of the tracked object's lifecycle, or can optionally be sent earlier after a number of significantly changed frames, for example for use in more real-time notifications. Descriptions can also be regenerated manually via the Frigate UI. Note that if you manually enter a description for a tracked object before the end of its lifecycle, it will be overwritten by the generated response.
By default, descriptions will be generated for all tracked objects and all zones. But you can also optionally specify `objects` and `required_zones` to only generate descriptions for certain tracked objects or zones.
Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction.
Generative AI object descriptions can also be toggled dynamically for a camera via MQTT with the topic `frigate/<camera_name>/object_descriptions/set`. See the [MQTT documentation](/integrations/mqtt/#frigatecamera_nameobjectdescriptionsset).
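For example, using the `mosquitto_pub` client (assuming standard `ON`/`OFF` payloads; the broker hostname and camera name here are hypothetical):
```
mosquitto_pub -h mqtt.local -t frigate/front_door/object_descriptions/set -m OFF
```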
## Usage and Best Practices
Frigate's thumbnail search excels at identifying specific details about tracked objects: for example, using an "image caption" approach to find a "person wearing a yellow vest," "a white dog running across the lawn," or "a red car on a residential street." To enhance this further, Frigate's default prompts are designed to ask your AI provider about the intent behind the object's actions, rather than just describing its appearance.
While generating simple descriptions of detected objects is useful, understanding intent provides a deeper layer of insight. Instead of just recognizing "what" is in a scene, Frigate's default prompts aim to infer "why" it might be there or "what" it could do next. Descriptions tell you what's happening, but intent gives context. For instance, a person walking toward a door might seem like a visitor, but if they're moving quickly after hours, you can infer a potential break-in attempt. Detecting a person loitering near a door at night can trigger an alert sooner than simply noting "a person standing by the door," helping you respond based on the situation's context.
## Custom Prompts
Frigate sends multiple frames from the tracked object along with a prompt to your Generative AI provider asking it to generate a description. The default prompt is as follows:
```
Analyze the sequence of images containing the {label}. Focus on the likely intent or behavior of the {label} based on its actions and movement, rather than describing its appearance or the surroundings. Consider what the {label} is doing, why, and what it might do next.
```
:::tip
Prompts can use variable replacements `{label}`, `{sub_label}`, and `{camera}` to substitute information from the tracked object as part of the prompt.
:::
You are also able to define custom prompts in your configuration.
```yaml
genai:
provider: ollama
base_url: http://localhost:11434
model: llava
objects:
prompt: "Analyze the {label} in these images from the {camera} security camera. Focus on the actions, behavior, and potential intent of the {label}, rather than just describing its appearance."
object_prompts:
person: "Examine the main person in these images. What are they doing and what might their actions suggest about their intent (e.g., approaching a door, leaving an area, standing still)? Do not describe the surroundings or static details."
car: "Observe the primary vehicle in these images. Focus on its movement, direction, or purpose (e.g., parking, approaching, circling). If it's a delivery vehicle, mention the company."
```
Prompts can also be overridden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire.
```yaml
cameras:
front_door:
objects:
genai:
enabled: True
use_snapshot: True
prompt: "Analyze the {label} in these images from the {camera} security camera at the front door. Focus on the actions and potential intent of the {label}."
object_prompts:
person: "Examine the person in these images. What are they doing, and how might their actions suggest their purpose (e.g., delivering something, approaching, leaving)? If they are carrying or interacting with a package, include details about its source or destination."
cat: "Observe the cat in these images. Focus on its movement and intent (e.g., wandering, hunting, interacting with objects). If the cat is near the flower pots or engaging in any specific actions, mention it."
objects:
- person
- cat
required_zones:
- steps
```
### Experiment with prompts
Many providers also have a public facing chat interface for their models. Download a couple of different thumbnails or snapshots from Frigate and try new things in the playground to get descriptions to your liking before updating the prompt in Frigate.
- OpenAI - [ChatGPT](https://chatgpt.com)
- Gemini - [Google AI Studio](https://aistudio.google.com)
- Ollama - [Open WebUI](https://docs.openwebui.com/)

View File

@ -0,0 +1,113 @@
---
id: genai_review
title: Review Summaries
---
Generative AI can be used to automatically generate structured summaries of review items. These summaries will show up in Frigate's native notifications as well as in the UI. Generative AI can also be used to take a collection of summaries over a period of time and provide a report, which can be useful for getting a quick overview of everything that happened while you were away.
Summaries are requested automatically from your AI provider for alert review items when the activity has ended; they can optionally be enabled for detections as well.
Generative AI review summaries can also be toggled dynamically for a [camera via MQTT](/integrations/mqtt/#frigatecamera_namereviewdescriptionsset).
## Review Summary Usage and Best Practices
Review summaries provide structured JSON responses that are saved for each review item:
```
- `title` (string): A concise, direct title that describes the purpose or overall action (e.g., "Person taking out trash", "Joe walking dog").
- `scene` (string): A narrative description of what happens across the sequence from start to finish, including setting, detected objects, and their observable actions.
- `confidence` (float): 0-1 confidence in the analysis. Higher confidence when objects/actions are clearly visible and context is unambiguous.
- `other_concerns` (list): List of user-defined concerns that may need additional investigation.
- `potential_threat_level` (integer): 0, 1, or 2 as defined below.
```
This will show in multiple places in the UI to give additional context about each activity, and allow viewing more details when extra attention is required. Frigate's built-in notifications will also automatically show the title and description when the data is available.
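For illustration, a summary for routine activity might be shaped like the following (all values hypothetical):
```
{
  "title": "Person taking out trash",
  "scene": "A person exits the side door carrying a trash bag, walks down the driveway, places the bag in the bin, and returns inside.",
  "confidence": 0.9,
  "other_concerns": [],
  "potential_threat_level": 0
}
```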
### Defining Typical Activity
Each installation and even each camera can have different parameters for what is considered suspicious activity. Frigate allows the `activity_context_prompt` to be defined globally and at the camera level, letting you specify more precisely what should be considered normal activity. It is important that this is not overly specific, as it can sway the output of the response.
<details>
<summary>Default Activity Context Prompt</summary>
```
### Normal Activity Indicators (Level 0)
- Known/verified people in any zone at any time
- People with pets in residential areas
- Deliveries or services during daytime/evening (6 AM - 10 PM): carrying packages to doors/porches, placing items, leaving
- Services/maintenance workers with visible tools, uniforms, or service vehicles during daytime
- Activity confined to public areas only (sidewalks, streets) without entering property at any time
### Suspicious Activity Indicators (Level 1)
- **Testing or attempting to open doors/windows/handles on vehicles or buildings** — ALWAYS Level 1 regardless of time or duration
- **Unidentified person in private areas (driveways, near vehicles/buildings) during late night/early morning (11 PM - 5 AM)** — ALWAYS Level 1 regardless of activity or duration
- Taking items that don't belong to them (packages, objects from porches/driveways)
- Climbing or jumping fences/barriers to access property
- Attempting to conceal actions or items from view
- Prolonged loitering: remaining in same area without visible purpose throughout most of the sequence
### Critical Threat Indicators (Level 2)
- Holding break-in tools (crowbars, pry bars, bolt cutters)
- Weapons visible (guns, knives, bats used aggressively)
- Forced entry in progress
- Physical aggression or violence
- Active property damage or theft in progress
### Assessment Guidance
Evaluate in this order:
1. **If person is verified/known** → Level 0 regardless of time or activity
2. **If person is unidentified:**
- Check time: If late night/early morning (11 PM - 5 AM) AND in private areas (driveways, near vehicles/buildings) → Level 1
- Check actions: If testing doors/handles, taking items, climbing → Level 1
- Otherwise, if daytime/evening (6 AM - 10 PM) with clear legitimate purpose (delivery, service worker) → Level 0
3. **Escalate to Level 2 if:** Weapons, break-in tools, forced entry in progress, violence, or active property damage visible (escalates from Level 0 or 1)
The mere presence of an unidentified person in private areas during late night hours is inherently suspicious and warrants human review, regardless of what activity they appear to be doing or how brief the sequence is.
```
</details>
### Image Source
By default, review summaries use preview images (cached preview frames) which have a lower resolution but use fewer tokens per image. For better image quality and more detailed analysis, you can configure Frigate to extract frames directly from recordings at a higher resolution:
```yaml
review:
genai:
enabled: true
image_source: recordings # Options: "preview" (default) or "recordings"
```
When using `recordings`, frames are extracted at 480px height while maintaining the camera's original aspect ratio, providing better detail for the LLM while being mindful of context window size. This is particularly useful for scenarios where fine details matter, such as identifying license plates, reading text, or analyzing distant objects.
The number of frames sent to the LLM is dynamically calculated based on:
- Your LLM provider's context window size
- The camera's resolution and aspect ratio (ultrawide cameras like 32:9 use more tokens per image)
- The image source (recordings use more tokens than preview images)
Frame counts are automatically optimized to use ~98% of the available context window while capping at 20 frames maximum to ensure reasonable inference times. Note that using recordings will:
- Provide higher quality images to the LLM (480p vs 180p preview images)
- Use more tokens per image due to higher resolution
- Result in fewer frames being sent for ultrawide cameras due to larger image size
- Require that recordings are enabled for the camera
If recordings are not available for a given time period, the system will automatically fall back to using preview frames.
### Additional Concerns
Along with the concern of suspicious activity or immediate threat, you may have concerns such as animals in your garden or a gate being left open. These concerns can be configured so that the review summaries will make note of them if the activity requires additional review. For example:
```yaml
review:
genai:
enabled: true
additional_concerns:
- animals in the garden
```
## Review Reports
Along with individual review item summaries, Generative AI provides the ability to request a report for a given time period. For example, while on vacation you can get a daily report of any suspicious activity or other concerns that may require review.

View File

@ -5,11 +5,11 @@ title: Enrichments
# Enrichments
Some of Frigate's enrichments can use a discrete GPU or integrated GPU for accelerated processing.
## Requirements
Object detection and enrichments (like Semantic Search, Face Recognition, and License Plate Recognition) are independent features. To use a GPU / NPU for object detection, see the [Object Detectors](/configuration/object_detectors.md) documentation. If you want to use your GPU for any supported enrichments, you must choose the appropriate Frigate Docker image for your GPU / NPU and configure the enrichment according to its specific documentation.
- **AMD** - **AMD**
@ -18,11 +18,16 @@ Object detection and enrichments (like Semantic Search, Face Recognition, and Li
- **Intel** - **Intel**
- OpenVINO will automatically be detected and used for enrichments in the default Frigate image. - OpenVINO will automatically be detected and used for enrichments in the default Frigate image.
- **Note:** Intel NPUs have limited model support for enrichments. GPU is recommended for enrichments when available.
- **Nvidia** - **Nvidia**
- Nvidia GPUs will automatically be detected and used for enrichments in the `-tensorrt` Frigate image. - Nvidia GPUs will automatically be detected and used for enrichments in the `-tensorrt` Frigate image.
- Jetson devices will automatically be detected and used for enrichments in the `-tensorrt-jp6` Frigate image. - Jetson devices will automatically be detected and used for enrichments in the `-tensorrt-jp6` Frigate image.
- **RockChip**
- RockChip NPU will automatically be detected and used for semantic search v1 and face recognition in the `-rk` Frigate image.
Utilizing a GPU for enrichments does not require you to use the same GPU for object detection. For example, you can run the `tensorrt` Docker image for enrichments and still use other dedicated hardware like a Coral or Hailo for object detection. However, one combination that is not supported is TensorRT for object detection and OpenVINO for enrichments. Utilizing a GPU for enrichments does not require you to use the same GPU for object detection. For example, you can run the `tensorrt` Docker image for enrichments and still use other dedicated hardware like a Coral or Hailo for object detection. However, one combination that is not supported is TensorRT for object detection and OpenVINO for enrichments.
:::note :::note

View File

@ -427,3 +427,29 @@ cameras:
```
:::
## Synaptics
Hardware accelerated video de-/encoding is supported on the Synaptics SL-series SoC.
### Prerequisites
Make sure to follow the [Synaptics specific installation instructions](/frigate/installation#synaptics).
### Configuration
Add the following FFmpeg preset configuration to your `config.yml` to enable hardware video processing:
```yaml
ffmpeg:
  hwaccel_args: -c:v h264_v4l2m2m
  input_args: preset-rtsp-restream
  output_args:
    record: preset-record-generic-audio-aac
```
:::warning
Make sure that your SoC supports hardware acceleration for your input stream and that your input stream is h264 encoded. For example, if your camera streams with h264 encoding, your SoC must be able to decode and encode it. If you are unsure whether your SoC meets the requirements, take a look at the datasheet.
:::
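To verify what codec your camera is actually sending, you can probe the stream with `ffprobe` (substitute your camera's RTSP URL):
```bash
# Print the video codec of the first video stream; expect "h264" for this preset.
ffprobe -v error -select_streams v:0 -show_entries stream=codec_name \
  -of default=noprint_wrappers=1:nokey=1 rtsp://user:pass@camera-ip:554/stream
```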

View File

@ -3,18 +3,18 @@ id: license_plate_recognition
title: License Plate Recognition (LPR)
---
Frigate can recognize license plates on vehicles and automatically add the detected characters to the `recognized_license_plate` field or a [known](#matching) name as a `sub_label` to tracked objects of type `car` or `motorcycle`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street.
LPR works best when the license plate is clearly visible to the camera. For moving vehicles, Frigate continuously refines the recognition process, keeping the most confident result. When a vehicle becomes stationary, LPR continues to run for a short time afterward to attempt recognition.
When a plate is recognized, the details are:
- Added as a `sub_label` (if [known](#matching)) or the `recognized_license_plate` field (if unknown) to a tracked object.
- Viewable in the Details pane in Review/History.
- Viewable in the Tracked Object Details pane in Explore (sub labels and recognized license plates).
- Filterable through the More Filters menu in Explore.
- Published via the `frigate/events` MQTT topic as a `sub_label` ([known](#matching)) or `recognized_license_plate` (unknown) for the `car` or `motorcycle` tracked object.
- Published via the `frigate/tracked_object_update` MQTT topic with `name` (if [known](#matching)) and `plate`.
## Model Requirements
@ -31,6 +31,7 @@ In the default mode, Frigate's LPR needs to first detect a `car` or `motorcycle`
## Minimum System Requirements
License plate recognition works by running AI models locally on your system. The YOLOv9 plate detector model and the OCR models ([PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR)) are relatively lightweight and can run on your CPU or GPU, depending on your configuration. At least 4GB of RAM is required.
## Configuration
License plate recognition is disabled by default. Enable it in your config file:
@ -66,12 +67,15 @@ Fine-tune the LPR feature using these optional parameters at the global level of
- **`min_area`**: Defines the minimum area (in pixels) a license plate must be before recognition runs.
  - Default: `1000` pixels. Note: this is intentionally set very low as it is an _area_ measurement (length x width). For reference, 1000 pixels represents a ~32x32 pixel square in your camera image.
  - Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant plates.
- **`device`**: Device to use to run license plate detection _and_ recognition models.
  - Default: `CPU`
  - This can be `CPU`, `GPU`, or the GPU's device number. For users without a model that detects license plates natively, using a GPU may increase performance of the YOLOv9 license plate detector model. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation. However, for users who run a model that detects `license_plate` natively, there is little to no reported performance gain from running LPR on GPU compared to the CPU.
- **`model_size`**: The size of the model used to identify regions of text on plates.
  - Default: `small`
  - This can be `small` or `large`.
    - The `small` model is fast and identifies groups of Latin and Chinese characters.
    - The `large` model identifies Latin characters only, and uses an enhanced text detector to find characters on multi-line plates. It is significantly slower than the `small` model.
    - If your country or region does not use multi-line plates, you should use the `small` model, as performance is much better for single-line plates.
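For example, a global configuration combining these options might look like the following sketch (the values are illustrative, not recommendations):
```yaml
lpr:
  enabled: true
  min_area: 2000 # ignore plates smaller than ~45x45 pixels
  device: GPU
  model_size: small
```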
### Recognition ### Recognition
@ -101,6 +105,32 @@ Fine-tune the LPR feature using these optional parameters at the global level of
- This setting is best adjusted at the camera level if running LPR on multiple cameras.
- If Frigate is already recognizing plates correctly, leave this setting at the default of `0`. However, if you're experiencing frequent character issues or incomplete plates and you can already easily read the plates yourself, try increasing the value gradually, starting at 5 and adjusting as needed. You should see how different enhancement levels affect your plates. Use the `debug_save_plates` configuration option (see below).
### Normalization Rules
- **`replace_rules`**: List of regex replacement rules to normalize detected plates. These rules are applied sequentially. Each rule must have a `pattern` (a plain string or a regex prefixed with `r`) and a `replacement` (a string, which also supports [backrefs](https://docs.python.org/3/library/re.html#re.sub) like `\1`). These rules are useful for dealing with common OCR issues like noise characters, separators, or confusions (e.g., 'O'→'0').
These rules must be defined at the global level of your `lpr` config.
```yaml
lpr:
  replace_rules:
    - pattern: r'[%#*?]' # Remove noise symbols
      replacement: ""
    - pattern: r'[= ]' # Normalize = or space to dash
      replacement: "-"
    - pattern: "O" # Swap 'O' to '0' (common OCR error)
      replacement: "0"
    - pattern: r'I' # Swap 'I' to '1'
      replacement: "1"
    - pattern: r'(\w{3})(\w{3})' # Split 6 chars into groups (e.g., ABC123 → ABC-123)
      replacement: r'\1-\2'
```
- Rules fire in order. In the example above: clean noise first, then separators, then swaps, then splits.
- Backrefs (`\1`, `\2`) allow dynamic replacements (e.g., capture groups).
- Any changes made by the rules are printed to the LPR debug log.
- Tip: You can test patterns with tools like regex101.com.
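Conceptually, the rules behave like a chain of Python `re.sub` calls (the backref semantics linked above). A minimal sketch mirroring the example rules:
```python
import re

# Rules fire sequentially, mirroring the example replace_rules above.
rules = [
    (r"[%#*?]", ""),                # remove noise symbols
    (r"[= ]", "-"),                 # normalize = or space to dash
    (r"O", "0"),                    # swap 'O' to '0'
    (r"I", "1"),                    # swap 'I' to '1'
    (r"(\w{3})(\w{3})", r"\1-\2"),  # split 6 chars into groups
]

def normalize(plate: str) -> str:
    for pattern, replacement in rules:
        plate = re.sub(pattern, replacement, plate)
    return plate

print(normalize("ABC#123"))  # -> "ABC-123"
```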
### Debugging
- **`debug_save_plates`**: Set to `True` to save captured text on plates for debugging. These images are stored in `/media/frigate/clips/lpr`, organized into subdirectories by `<camera>/<event_id>`, and named based on the capture timestamp.
@ -135,6 +165,9 @@ lpr:
  recognition_threshold: 0.85
  format: "^[A-Z]{2} [A-Z][0-9]{4}$" # Only recognize plates that are two letters, followed by a space, followed by a single letter and 4 numbers
  match_distance: 1 # Allow one character variation in plate matching
  replace_rules:
    - pattern: "O"
      replacement: "0" # Replace the letter O with the number 0 in every plate
  known_plates:
    Delivery Van:
      - "RJ K5678"
@ -145,7 +178,7 @@ lpr:
:::note
If a camera is configured to detect `car` or `motorcycle` but you don't want Frigate to run LPR for that camera, disable LPR at the camera level:
```yaml
cameras:
@ -273,7 +306,7 @@ With this setup:
- Review items will always be classified as a `detection`.
- Snapshots will always be saved.
- Zones and object masks are **not** used.
- The `frigate/events` MQTT topic will **not** publish tracked object updates with the license plate bounding box and score, though `frigate/reviews` will publish if recordings are enabled. If a plate is recognized as a [known](#matching) plate, publishing will occur with an updated `sub_label` field. If characters are recognized, publishing will occur with an updated `recognized_license_plate` field.
- License plate snapshots are saved at the highest-scoring moment and appear in Explore.
- Debug view will not show `license_plate` bounding boxes.

View File

@ -177,6 +177,8 @@ For devices that support two way talk, Frigate can be configured to use the feat
To use the Reolink Doorbell with two way talk, you should use the [recommended Reolink configuration](/configuration/camera_specific#reolink-cameras)
As a starting point to check compatibility for your camera, view the list of cameras supported for two-way talk on the [go2rtc repository](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#two-way-audio). For cameras in the category `ONVIF Profile T`, you can use the [ONVIF Conformant Products Database](https://www.onvif.org/conformant-products/)'s FeatureList to check for the presence of `AudioOutput`. A camera that supports `ONVIF Profile T` _usually_ supports this, but due to inconsistent support, a camera that explicitly lists this feature may still not work. If no entry for your camera exists in the database, it is recommended that you either not buy it or consult the manufacturer's support about feature availability.
### Streaming options on camera group dashboards
Frigate provides a dialog in the Camera Group Edit pane with several options for streaming on a camera group's dashboard. These settings are _per device_ and are saved in your device's local storage.
@ -229,7 +231,27 @@ Note that disabling a camera through the config file (`enabled: False`) removes
If you are using continuous streaming or you are loading more than a few high resolution streams at once on the dashboard, your browser may struggle to begin playback of your streams before the timeout. Frigate always prioritizes showing a live stream as quickly as possible, even if it is a lower quality jsmpeg stream. You can use the "Reset" link/button to try loading your high resolution stream again.
Errors in stream playback (e.g., connection failures, codec issues, or buffering timeouts) that cause the fallback to low bandwidth mode (jsmpeg) are logged to the browser console for easier debugging. These errors may include:
- Network issues (e.g., MSE or WebRTC network connection problems).
- Unsupported codecs or stream formats (e.g., H.265 in WebRTC, which is not supported in some browsers).
- Buffering timeouts or low bandwidth conditions causing fallback to jsmpeg.
- Browser compatibility problems (e.g., iOS Safari limitations with MSE).
To view browser console logs:
1. Open the Frigate Live View in your browser.
2. Open the browser's Developer Tools (F12 or right-click > Inspect > Console tab).
3. Reproduce the error (e.g., load a problematic stream or simulate network issues).
4. Look for messages prefixed with the camera name.
These logs help identify if the issue is player-specific (MSE vs. WebRTC) or related to camera configuration (e.g., go2rtc streams, codecs). If you see frequent errors:
- Verify your camera's H.264/AAC settings (see [Frigate's camera settings recommendations](#camera_settings_recommendations)).
- Check go2rtc configuration for transcoding (e.g., audio to AAC/OPUS).
- Test with a different stream via the UI dropdown (if `live -> streams` is configured).
- For WebRTC-specific issues, ensure port 8555 is forwarded and candidates are set (see [WebRTC Extra Configuration](#webrtc-extra-configuration)).
- If your cameras are streaming at a high resolution, your browser may be struggling to load all of the streams before the buffering timeout occurs. Frigate prioritizes showing a true live view as quickly as possible. If the fallback occurs often, change your live view settings to use a lower bandwidth substream.
3. **It doesn't seem like my cameras are streaming on the Live dashboard. Why?**

View File

@ -13,12 +13,18 @@ Frigate supports multiple different detectors that work on different types of ha
- [Coral EdgeTPU](#edge-tpu-detector): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices.
- [Hailo](#hailo-8): The Hailo8 and Hailo8L AI Acceleration module is available in m.2 format with a HAT for RPi devices, offering a wide range of compatibility with devices.
- [MemryX](#memryx-mx3): The MX3 Acceleration module is available in m.2 format, offering broad compatibility across various platforms.
- [DeGirum](#degirum): Service for using hardware devices in the cloud or locally. Hardware and models are provided in the cloud on [their website](https://hub.degirum.com).
**AMD**
- [ROCm](#amdrocm-gpu-detector): ROCm can run on AMD Discrete GPUs to provide efficient object detection.
- [ONNX](#onnx): ROCm will automatically be detected and used as a detector in the `-rocm` Frigate image when a supported ONNX model is configured.
**Apple Silicon**
- [Apple Silicon](#apple-silicon-detector): The Apple Silicon detector runs on M1 and newer Apple Silicon devices.
**Intel**
- [OpenVino](#openvino-detector): OpenVino can run on Intel Arc GPUs, Intel integrated GPUs, and Intel CPUs to provide efficient object detection.
@ -37,6 +43,10 @@ Frigate supports multiple different detectors that work on different types of ha
- [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs.
**Synaptics**
- [Synaptics](#synaptics): Synap models can run on Synaptics devices (e.g. Astra Machina) with included NPUs.
**For Testing**
- [CPU Detector (not recommended for actual use)](#cpu-detector-not-recommended): Use a CPU to run a tflite model; this is not recommended, and in most cases OpenVINO can be used in CPU mode with better results.
@ -53,7 +63,7 @@ This does not affect using hardware for accelerating other tasks such as [semant
# Officially Supported Detectors
Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `hailo8l`, `memryx`, `onnx`, `openvino`, `rknn`, and `tensorrt`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras.
## Edge TPU Detector
@ -243,41 +253,55 @@ Hailo8 supports all models in the Hailo Model Zoo that include HailoRT post-proc
## OpenVINO Detector
The OpenVINO detector type runs an OpenVINO IR model on AMD and Intel CPUs, Intel GPUs and Intel NPUs. To configure an OpenVINO detector, set the `"type"` attribute to `"openvino"`.
The OpenVINO device to be used is specified using the `"device"` attribute according to the naming conventions in the [Device Documentation](https://docs.openvino.ai/2025/openvino-workflow/running-inference/inference-devices-and-modes.html). The most common devices are `CPU`, `GPU`, or `NPU`.
OpenVINO is supported on 6th Gen Intel platforms (Skylake) and newer. It will also run on AMD CPUs despite having no official support for it. A supported Intel platform is required to use the `GPU` or `NPU` device with OpenVINO. For detailed system requirements, see [OpenVINO System Requirements](https://docs.openvino.ai/2025/about-openvino/release-notes-openvino/system-requirements.html)
:::tip
**NPU + GPU Systems:** If you have both NPU and GPU available (Intel Core Ultra processors), use NPU for object detection and GPU for enrichments (semantic search, face recognition, etc.) for best performance and compatibility.
When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming GPU resources are available. An example configuration would be:
```yaml
detectors:
  ov_0:
    type: openvino
    device: GPU # or NPU
  ov_1:
    type: openvino
    device: GPU # or NPU
```
:::
### OpenVINO Supported Models
| Model | GPU | NPU | Notes |
| ------------------------------------- | --- | --- | ------------------------------------------------------------ |
| [YOLOv9](#yolo-v3-v4-v7-v9) | ✅ | ✅ | Recommended for GPU & NPU |
| [RF-DETR](#rf-detr) | ✅ | ✅ | Requires XE iGPU or Arc |
| [YOLO-NAS](#yolo-nas) | ✅ | ✅ | |
| [MobileNet v2](#ssdlite-mobilenet-v2) | ✅ | ✅ | Fast and lightweight model, less accurate than larger models |
| [YOLOX](#yolox) | ✅ | ? | |
| [D-FINE](#d-fine) | ❌ | ❌ | |
#### SSDLite MobileNet v2
An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model.
<details>
<summary>MobileNet v2 Config</summary>
Use the model configuration shown below when using the OpenVINO detector with the default OpenVINO model:
```yaml
detectors:
  ov:
    type: openvino
    device: GPU # Or NPU
model:
  width: 300
@ -288,6 +312,8 @@ model:
  labelmap_path: /openvino-model/coco_91cl_bkgr.txt
```
</details>
#### YOLOX
This detector also supports YOLOX. Frigate does not come with any YOLOX models preloaded, so you will need to supply your own models.
@ -296,6 +322,9 @@ This detector also supports YOLOX. Frigate does not come with any YOLOX models p
[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. See [the models section](#downloading-yolo-nas-model) for more information on downloading the YOLO-NAS model for use in Frigate.
<details>
<summary>YOLO-NAS Setup & Config</summary>
After placing the downloaded onnx model in your config folder, you can use the following configuration:
```yaml
@ -316,6 +345,8 @@ model:
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
</details>
#### YOLO (v3, v4, v7, v9)
YOLOv3, YOLOv4, YOLOv7, and [YOLOv9](https://github.com/WongKinYiu/yolov9) models are supported, but not included by default.
@ -326,6 +357,9 @@ The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv
:::
<details>
<summary>YOLOv Setup & Config</summary>
:::warning
If you are using a Frigate+ YOLOv9 model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.
@ -338,7 +372,7 @@ After placing the downloaded onnx model in your config folder, you can use the f
detectors:
  ov:
    type: openvino
    device: GPU # or NPU
model:
  model_type: yolo-generic
@ -352,6 +386,8 @@ model:
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
</details>
#### RF-DETR
[RF-DETR](https://github.com/roboflow/rf-detr) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-rf-detr-model) for more information on downloading the RF-DETR model for use in Frigate.
@ -362,6 +398,9 @@ Due to the size and complexity of the RF-DETR model, it is only recommended to b
:::
<details>
<summary>RF-DETR Setup & Config</summary>
After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration:
```yaml
@ -379,6 +418,8 @@ model:
  path: /config/model_cache/rfdetr.onnx
```
</details>
#### D-FINE
[D-FINE](https://github.com/Peterande/D-FINE) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate.
@ -389,6 +430,9 @@ Currently D-FINE models only run on OpenVINO in CPU mode, GPUs currently fail to
:::
<details>
<summary>D-FINE Setup & Config</summary>
After placing the downloaded onnx model in your config/model_cache folder, you can use the following configuration:
```yaml
@ -403,7 +447,63 @@ model:
  height: 640
  input_tensor: nchw
  input_dtype: float
  path: /config/model_cache/dfine-s.onnx
  labelmap_path: /labelmap/coco-80.txt
```
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
</details>
## Apple Silicon Detector
The NPU in Apple Silicon can't be accessed from within a container, so the [Apple Silicon detector client](https://github.com/frigate-nvr/apple-silicon-detector) must first be set up. It is recommended to use the Frigate docker image with the `-standard-arm64` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-standard-arm64`.
### Setup
1. Set up the [Apple Silicon detector client](https://github.com/frigate-nvr/apple-silicon-detector) and run the client
2. Configure the detector in Frigate and start Frigate
### Configuration
Using the detector config below will connect to the client:
```yaml
detectors:
  apple-silicon:
    type: zmq
    endpoint: tcp://host.docker.internal:5555
```
### Apple Silicon Supported Models
There is no default model provided; the following formats are supported:
#### YOLO (v3, v4, v7, v9)
YOLOv3, YOLOv4, YOLOv7, and [YOLOv9](https://github.com/WongKinYiu/yolov9) models are supported, but not included by default.
:::tip
The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv9 models, but may support other YOLO model architectures as well. See [the models section](#downloading-yolo-models) for more information on downloading YOLO models for use in Frigate.
:::
When Frigate is started with the following config it will connect to the detector client and transfer the model automatically:
```yaml
detectors:
  apple-silicon:
    type: zmq
    endpoint: tcp://host.docker.internal:5555
model:
  model_type: yolo-generic
  width: 320 # <--- should match the imgsize set during model export
  height: 320 # <--- should match the imgsize set during model export
  input_tensor: nchw
  input_dtype: float
  path: /config/model_cache/yolo.onnx
  labelmap_path: /labelmap/coco-80.txt
```
@ -489,7 +589,18 @@ We unset the `HSA_OVERRIDE_GFX_VERSION` to prevent an existing override from mes
$ docker exec -it frigate /bin/bash -c '(unset HSA_OVERRIDE_GFX_VERSION && /opt/rocm/bin/rocminfo |grep gfx)'
```
### ROCm Supported Models
:::tip
The AMD GPU kernel is known to be problematic, especially when converting models to mxr format. The recommended approach is:
1. Disable object detection in the config (see the sketch after this tip).
2. Start up Frigate with the onnx detector configured; the main object detection model will be converted to mxr format and cached in the config directory.
3. Once this is finished as indicated by the logs, enable object detection in the UI and confirm that it is working correctly.
4. Re-enable object detection in the config.
:::
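A rough config sketch of steps 1 and 2, assuming the standard `detect` toggle is what is used to disable object detection:
```yaml
# Step 1: keep object detection disabled while the model converts
detect:
  enabled: False

# Step 2: keep the onnx detector configured so the model is converted
# to mxr format and cached in the config directory on startup
detectors:
  onnx:
    type: onnx
```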
See [ONNX supported models](#supported-models) for supported models; there are some caveats:
@ -532,7 +643,15 @@ detectors:
:::
### ONNX Supported Models
| Model | Nvidia GPU | AMD GPU | Notes |
| ----------------------------- | ---------- | ------- | --------------------------------------------------- |
| [YOLOv9](#yolo-v3-v4-v7-v9-2) | ✅ | ✅ | Supports CUDA Graphs for optimal Nvidia performance |
| [RF-DETR](#rf-detr) | ✅ | ❌ | Supports CUDA Graphs for optimal Nvidia performance |
| [YOLO-NAS](#yolo-nas-1) | ⚠️ | ⚠️ | Not supported by CUDA Graphs |
| [YOLOX](#yolox-1) | ✅ | ✅ | Supports CUDA Graphs for optimal Nvidia performance |
| [D-FINE](#d-fine) | ⚠️ | ❌ | Not supported by CUDA Graphs |
There is no default model provided; the following formats are supported:
@ -540,6 +659,9 @@ There is no default model provided, the following formats are supported:
[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. See [the models section](#downloading-yolo-nas-model) for more information on downloading the YOLO-NAS model for use in Frigate.
<details>
<summary>YOLO-NAS Setup & Config</summary>
:::warning
If you are using a Frigate+ YOLO-NAS model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.
@ -563,6 +685,8 @@ model:
  labelmap_path: /labelmap/coco-80.txt
```
</details>
#### YOLO (v3, v4, v7, v9)
YOLOv3, YOLOv4, YOLOv7, and [YOLOv9](https://github.com/WongKinYiu/yolov9) models are supported, but not included by default.
@ -573,6 +697,9 @@ The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv
:::
<details>
<summary>YOLOv Setup & Config</summary>
:::warning
If you are using a Frigate+ YOLOv9 model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.
@ -596,12 +723,17 @@ model:
  labelmap_path: /labelmap/coco-80.txt
```
</details>
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
#### YOLOx
[YOLOx](https://github.com/Megvii-BaseDetection/YOLOX) models are supported, but not included by default. See [the models section](#downloading-yolo-models) for more information on downloading the YOLOx model for use in Frigate.
<details>
<summary>YOLOx Setup & Config</summary>
After placing the downloaded onnx model in your config folder, you can use the following configuration:
```yaml
@ -621,10 +753,15 @@ model:
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
</details>
#### RF-DETR
[RF-DETR](https://github.com/roboflow/rf-detr) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-rf-detr-model) for more information on downloading the RF-DETR model for use in Frigate.
<details>
<summary>RF-DETR Setup & Config</summary>
After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration:
```yaml
@ -641,10 +778,15 @@ model:
  path: /config/model_cache/rfdetr.onnx
```
</details>
#### D-FINE
[D-FINE](https://github.com/Peterande/D-FINE) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate.
<details>
<summary>D-FINE Setup & Config</summary>
After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration:
```yaml
@ -662,6 +804,8 @@ model:
  labelmap_path: /labelmap/coco-80.txt
```
</details>
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
## CPU Detector (not recommended)
@ -717,6 +861,197 @@ To verify that the integration is working correctly, start Frigate and observe t
# Community Supported Detectors
## MemryX MX3
This detector is available for use with the MemryX MX3 accelerator M.2 module. Frigate supports the MX3 on compatible hardware platforms, providing efficient and high-performance object detection.
See the [installation docs](../frigate/installation.md#memryx-mx3) for information on configuring the MemryX hardware.
To configure a MemryX detector, simply set the `type` attribute to `memryx` and follow the configuration guide below.
### Configuration
To configure the MemryX detector, use the following example configuration:
#### Single PCIe MemryX MX3
```yaml
detectors:
  memx0:
    type: memryx
    device: PCIe:0
```
#### Multiple PCIe MemryX MX3 Modules
```yaml
detectors:
  memx0:
    type: memryx
    device: PCIe:0
  memx1:
    type: memryx
    device: PCIe:1
  memx2:
    type: memryx
    device: PCIe:2
```
### Supported Models
MemryX `.dfp` models are automatically downloaded at runtime, if enabled, to the container at `/memryx_models/model_folder/`.
#### YOLO-NAS
The [YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) model included in this detector is downloaded from the [Models Section](#downloading-yolo-nas-model) and compiled to DFP with [mx_nc](https://developer.memryx.com/tools/neural_compiler.html#usage).
**Note:** The default model for the MemryX detector is YOLO-NAS 320x320.
The input size for **YOLO-NAS** can be set to either **320x320** (default) or **640x640**.
- The default size of **320x320** is optimized for lower CPU usage and faster inference times.
##### Configuration
Below is the recommended configuration for using the **YOLO-NAS** (small) model with the MemryX detector:
```yaml
detectors:
  memx0:
    type: memryx
    device: PCIe:0
model:
  model_type: yolonas
  width: 320 # (Can be set to 640 for higher resolution)
  height: 320 # (Can be set to 640 for higher resolution)
  input_tensor: nchw
  input_dtype: float
  labelmap_path: /labelmap/coco-80.txt
  # Optional: The model is normally fetched through the runtime, so 'path' can be omitted unless you want to use a custom or local model.
  # path: /config/yolonas.zip
  # The .zip file must contain:
  # ├── yolonas.dfp (a file ending with .dfp)
  # └── yolonas_post.onnx (optional; only if the model includes a cropped post-processing network)
```
#### YOLOv9
The YOLOv9s model included in this detector is downloaded from [the original GitHub](https://github.com/WongKinYiu/yolov9) like in the [Models Section](#yolov9-1) and compiled to DFP with [mx_nc](https://developer.memryx.com/tools/neural_compiler.html#usage).
##### Configuration
Below is the recommended configuration for using the **YOLOv9** (small) model with the MemryX detector:
```yaml
detectors:
  memx0:
    type: memryx
    device: PCIe:0
model:
  model_type: yolo-generic
  width: 320 # (Can be set to 640 for higher resolution)
  height: 320 # (Can be set to 640 for higher resolution)
  input_tensor: nchw
  input_dtype: float
  labelmap_path: /labelmap/coco-80.txt
  # Optional: The model is normally fetched through the runtime, so 'path' can be omitted unless you want to use a custom or local model.
  # path: /config/yolov9.zip
  # The .zip file must contain:
  # ├── yolov9.dfp (a file ending with .dfp)
  # └── yolov9_post.onnx (optional; only if the model includes a cropped post-processing network)
```
#### YOLOX
The model is sourced from the [OpenCV Model Zoo](https://github.com/opencv/opencv_zoo) and precompiled to DFP.
##### Configuration
Below is the recommended configuration for using the **YOLOX** (small) model with the MemryX detector:
```yaml
detectors:
  memx0:
    type: memryx
    device: PCIe:0
model:
  model_type: yolox
  width: 640
  height: 640
  input_tensor: nchw
  input_dtype: float_denorm
  labelmap_path: /labelmap/coco-80.txt
  # Optional: The model is normally fetched through the runtime, so 'path' can be omitted unless you want to use a custom or local model.
  # path: /config/yolox.zip
  # The .zip file must contain:
  # ├── yolox.dfp (a file ending with .dfp)
```
#### SSDLite MobileNet v2
The model is sourced from the [OpenMMLab Model Zoo](https://mmdeploy-oss.openmmlab.com/model/mmdet-det/ssdlite-e8679f.onnx) and has been converted to DFP.
##### Configuration
Below is the recommended configuration for using the **SSDLite MobileNet v2** model with the MemryX detector:
```yaml
detectors:
  memx0:
    type: memryx
    device: PCIe:0
model:
  model_type: ssd
  width: 320
  height: 320
  input_tensor: nchw
  input_dtype: float
  labelmap_path: /labelmap/coco-80.txt
  # Optional: The model is normally fetched through the runtime, so 'path' can be omitted unless you want to use a custom or local model.
  # path: /config/ssdlite_mobilenet.zip
  # The .zip file must contain:
  # ├── ssdlite_mobilenet.dfp (a file ending with .dfp)
  # └── ssdlite_mobilenet_post.onnx (optional; only if the model includes a cropped post-processing network)
```
#### Using a Custom Model
To use your own model:
1. Package your compiled model into a `.zip` file.
2. The `.zip` must contain the compiled `.dfp` file.
3. Depending on the model, the compiler may also generate a cropped post-processing network. If present, it will be named with the suffix `_post.onnx`.
4. Bind-mount the `.zip` file into the container and specify its path using `model.path` in your config.
5. Update the `labelmap_path` to match your custom model's labels.
For detailed instructions on compiling models, refer to the [MemryX Compiler](https://developer.memryx.com/tools/neural_compiler.html#usage) docs and [Tutorials](https://developer.memryx.com/tutorials/tutorials.html).
```yaml
# The detector automatically selects the default model if nothing is provided in the config.
#
# Optionally, you can specify a local model path as a .zip file to override the default.
# If a local path is provided and the file exists, it will be used instead of downloading.
#
# Example:
# path: /config/yolonas.zip
#
# The .zip file must contain:
# ├── yolonas.dfp (a file ending with .dfp)
# └── yolonas_post.onnx (optional; only if the model includes a cropped post-processing network)
```
---
## NVidia TensorRT Detector
Nvidia Jetson devices may be used for object detection using the TensorRT libraries. Due to the size of the additional libraries, this detector is only provided in images with the `-tensorrt-jp6` tag suffix, e.g. `ghcr.io/blakeblackshear/frigate:stable-tensorrt-jp6`. This detector is designed to work with Yolo models for object detection.
@ -799,6 +1134,41 @@ model:
  height: 320 # MUST match the chosen model i.e yolov7-320 -> 320 yolov4-416 -> 416
```
## Synaptics
Hardware accelerated object detection is supported on the following SoCs:
- SL1680
This implementation uses the [Synaptics model conversion](https://synaptics-synap.github.io/doc/v/latest/docs/manual/introduction.html#offline-model-conversion), version v3.1.0, and is based on SDK `v1.5.0`.
See the [installation docs](../frigate/installation.md#synaptics) for information on configuring the SL-series NPU hardware.
### Configuration
When configuring the Synap detector, you must specify a local model **path**.
#### SSD Mobilenet
A synap model is provided in the container at `/synaptics/mobilenet.synap` and is used by this detector type by default. The model comes from [Synap-release Github](https://github.com/synaptics-astra/synap-release/tree/v1.5.0/models/dolphin/object_detection/coco/model/mobilenet224_full80).
Use the model configuration shown below when using the synaptics detector with the default synap model:
```yaml
detectors: # required
  synap_npu: # required
    type: synaptics # required
model: # required
  path: /synaptics/mobilenet.synap # required
  width: 224 # required
  height: 224 # required
  tensor_format: nhwc # default value (optional. If you change the model, it is required)
  labelmap_path: /labelmap/coco-80.txt # required
```
## Rockchip platform
Hardware accelerated object detection is supported on the following SoCs:
@ -842,7 +1212,7 @@ $ cat /sys/kernel/debug/rknpu/load
:::
### RockChip Supported Models
This `config.yml` shows all relevant options to configure the detector and explains them. All values shown are the default values (except for two). Lines that are required at least to use the detector are labeled as required, all other lines are optional.
@ -968,6 +1338,105 @@ Explanation of the parameters:
- **example**: Specifying `output_name = "frigate-{quant}-{input_basename}-{soc}-v{tk_version}"` could result in a model called `frigate-i8-my_model-rk3588-v2.3.0.rknn`.
- `config`: Configuration passed to `rknn-toolkit2` for model conversion. For an explanation of all available parameters have a look at section "2.2. Model configuration" of [this manual](https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.2/03_Rockchip_RKNPU_API_Reference_RKNN_Toolkit2_V2.3.2_EN.pdf).
## DeGirum
DeGirum is a detector that can use any type of hardware listed on [their website](https://hub.degirum.com). DeGirum can be used with local hardware through a DeGirum AI Server, or through the use of `@local`. You can also connect directly to DeGirum's AI Hub to run inferences. **Please Note:** This detector _cannot_ be used for commercial purposes.
### Configuration
#### AI Server Inference
Before starting with the config file for this section, you must first launch an AI server. DeGirum has an AI server ready to use as a docker container. Add this to your `docker-compose.yml` to get started:
```yaml
degirum_detector:
  container_name: degirum
  image: degirum/aiserver:latest
  privileged: true
  ports:
    - "8778:8778"
```
All supported hardware will automatically be found on your AI server host as long as relevant runtimes and drivers are properly installed on your machine. Refer to [DeGirum's docs site](https://docs.degirum.com/pysdk/runtimes-and-drivers) if you have any trouble.
Once completed, changing the `config.yml` file is simple.
```yaml
degirum_detector:
  type: degirum
  location: degirum # Set to service name (degirum_detector), container_name (degirum), or a host:port (192.168.29.4:8778)
  zoo: degirum/public # DeGirum's public model zoo. Zoo name should be in format "workspace/zoo_name". degirum/public is available to everyone, so feel free to use it if you don't know where to start. If you aren't pulling a model from the AI Hub, leave this and 'token' blank.
  token: dg_example_token # For authentication with the AI Hub. Get this token through the "tokens" section on the main page of the [AI Hub](https://hub.degirum.com). This can be left blank if you're pulling a model from the public zoo and running inferences on your local hardware using @local or a local DeGirum AI Server
```
Setting up a model in the `config.yml` is similar to setting up an AI server.
You can set it to:
- A model listed on the [AI Hub](https://hub.degirum.com), given that the correct zoo name is listed in your detector
- If this is what you choose to do, the correct model will be downloaded onto your machine before running.
- A local directory acting as a zoo. See DeGirum's docs site [for more information](https://docs.degirum.com/pysdk/user-guide-pysdk/organizing-models#model-zoo-directory-structure).
- A path to some model.json.
```yaml
model:
  path: ./mobilenet_v2_ssd_coco--300x300_quant_n2x_orca1_1 # directory to model .json and file
  width: 300 # width is in the model name as the first number in the "int"x"int" section
  height: 300 # height is in the model name as the second number in the "int"x"int" section
  input_pixel_format: rgb/bgr # look at the model.json to figure out which to put here
```
#### Local Inference
It is also possible to eliminate the need for an AI server and run the hardware directly. The benefit of this approach is that you eliminate any bottlenecks that occur when transferring prediction results from the AI server docker container to the frigate one. However, the method of implementing local inference is different for every device and hardware combination, so it's usually more trouble than it's worth. A general guideline to achieve this would be:
1. Ensuring that the frigate docker container has the runtime you want to use. So for instance, running `@local` for Hailo means making sure the container you're using has the Hailo runtime installed.
2. To double check the runtime is detected by the DeGirum detector, make sure the `degirum sys-info` command properly shows whatever runtimes you mean to install (see the example after this list).
3. Create a DeGirum detector in your `config.yml` file.
```yaml
degirum_detector:
  type: degirum
  location: "@local" # Run inferences directly on local hardware
  zoo: degirum/public # DeGirum's public model zoo. Zoo name should be in format "workspace/zoo_name". degirum/public is available to everyone, so feel free to use it if you don't know where to start.
  token: dg_example_token # For authentication with the AI Hub. Get this token through the "tokens" section on the main page of the [AI Hub](https://hub.degirum.com). This can be left blank if you're pulling a model from the public zoo and running inferences on your local hardware using @local or a local DeGirum AI Server
```
Once `degirum_detector` is set up, you can choose a model through the `model` section in the `config.yml` file.
```yaml
model:
path: mobilenet_v2_ssd_coco--300x300_quant_n2x_orca1_1
width: 300 # width is the first number in the WxH portion of the model name
height: 300 # height is the second number in the WxH portion of the model name
input_pixel_format: rgb/bgr # check the model.json to determine which to use
```
#### AI Hub Cloud Inference
If you do not possess the hardware you want to run, there is also the option to run cloud inference. Do note that your detection fps might need to be lowered, as network latency significantly slows down this method of detection. For use with Frigate, we highly recommend using a local AI server as described above. To set up cloud inference:
1. Sign up at [DeGirum's AI Hub](https://hub.degirum.com).
2. Get an access token.
3. Create a DeGirum detector in your `config.yml` file.
```yaml
degirum_detector:
type: degirum
location: "@cloud" # For accessing AI Hub devices and models
zoo: degirum/public # DeGirum's public model zoo. Zoo name should be in format "workspace/zoo_name". degirum/public is available to everyone, so feel free to use it if you don't know where to start.
token: dg_example_token # For authentication with the AI Hub. Get this token through the "tokens" section on the main page of the [AI Hub](https://hub.degirum.com).
```
Once `degirum_detector` is set up, you can choose a model through the `model` section in the `config.yml` file.
```yaml
model:
path: mobilenet_v2_ssd_coco--300x300_quant_n2x_orca1_1
width: 300 # width is the first number in the WxH portion of the model name
height: 300 # height is the second number in the WxH portion of the model name
input_pixel_format: rgb/bgr # check the model.json to determine which to use
```
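Because cloud inference adds network latency, lowering the camera detect rate can help keep detections stable. A minimal sketch (the camera name `front_door` is a placeholder; `detect.fps` is Frigate's standard per-camera setting):
```yaml
cameras:
  front_door: # placeholder camera name
    detect:
      fps: 5 # lower this value if network latency causes detection to fall behind
```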
# Models
Some model types are not included in Frigate by default.
@ -13,34 +13,34 @@ H265 recordings can be viewed in Chrome 108+, Edge and Safari only. All other br
### Most conservative: Ensure all video is saved
For users deploying Frigate in environments where it is important to have contiguous video stored even if there was no detectable motion, the following config will store all video for 3 days. After 3 days, only video containing motion will be saved for 7 days. After 7 days, only video containing motion and overlapping with alerts or detections will be retained until 30 days have passed.
```yaml
record:
  enabled: True
  continuous:
    days: 3
  motion:
    days: 7
  alerts:
    retain:
      days: 30
      mode: all
  detections:
    retain:
      days: 30
      mode: all
```
### Reduced storage: Only saving video when motion is detected
In order to reduce storage requirements, you can adjust your config to only retain video where motion / activity was detected.
```yaml
record:
  enabled: True
  motion:
    days: 3
  alerts:
    retain:
      days: 30
@ -53,12 +53,12 @@ record:
### Minimum: Alerts only
If you only want to retain video that occurs during activity caused by tracked object(s), this config will discard video unless an alert is ongoing.
```yaml
record:
  enabled: True
  continuous:
    days: 0
  alerts:
    retain:
@ -80,15 +80,17 @@ Retention configs support decimals meaning they can be configured to retain `0.5
:::
### Continuous and Motion Recording
The number of days to retain continuous and motion recordings can be set via the following config where X is a number; by default, continuous recording is disabled.
```yaml
record:
  enabled: True
  continuous:
    days: 1 # <- number of days to keep continuous recordings
  motion:
    days: 2 # <- number of days to keep motion recordings
```
Continuous recording supports different retention modes [which are described below](#what-do-the-different-retain-modes-mean)
@ -112,38 +114,6 @@ This configuration will retain recording segments that overlap with alerts and d
**WARNING**: Recordings still must be enabled in the config. If a camera has recordings disabled in the config, enabling via the methods listed above will have no effect.
## What do the different retain modes mean?
Frigate saves from the stream with the `record` role in 10-second segments. These options determine which recording segments are kept for continuous recording (but can also affect tracked objects).
Let's say you have Frigate configured so that your doorbell camera would retain the last **2** days of continuous recording.
- With the `all` option, all 48 hours of those two days would be kept and viewable.
- With the `motion` option, the only parts of those 48 hours that would be kept are segments in which Frigate detected motion. This is the middle-ground option that won't keep all 48 hours, but will likely keep all segments of interest along with the potential for some extra segments.
- With the `active_objects` option, the only segments that would be kept are those where there was a true-positive object that was not considered stationary.
The same options are available for alerts and detections, except recordings are only saved when they overlap with a review item of that type.
A configuration example of the above retain modes where all `motion` segments are stored for 7 days and `active objects` are stored for 14 days would be as follows:
```yaml
record:
  enabled: True
  motion:
    days: 7
  alerts:
    retain:
      days: 14
      mode: active_objects
  detections:
    retain:
      days: 14
      mode: active_objects
```
The above configuration example can be added globally or on a per camera basis.
## Can I have "continuous" recordings, but only at certain times?
Using Frigate UI, Home Assistant, or MQTT, cameras can be automated to only record in certain situations or at certain times.
@ -73,6 +73,12 @@ tls:
# Optional: Enable TLS for port 8971 (default: shown below)
enabled: True
# Optional: IPv6 configuration
networking:
# Optional: Enable IPv6 on port 5000, and port 8971 if tls is configured (default: shown below)
ipv6:
enabled: False
# Optional: Proxy configuration
proxy:
# Optional: Mapping for headers from upstream proxies. Only used if Frigate's auth
@ -82,7 +88,13 @@ proxy:
# See the docs for more info.
header_map:
user: x-forwarded-user
role: x-forwarded-groups
role_map:
admin:
- sysadmins
- access-level-security
viewer:
- camera-viewer
# Optional: Url for logging out a user. This sets the location of the logout url in
# the UI.
logout_url: /api/logout
@ -228,6 +240,8 @@ birdseye:
scaling_factor: 2.0
# Optional: Maximum number of cameras to show at one time, showing the most recent (default: show all cameras)
max_cameras: 1
# Optional: Frames-per-second to re-send the last composed Birdseye frame when idle (no motion or active updates). (default: shown below)
idle_heartbeat_fps: 0.0
# Optional: ffmpeg configuration
# More information about presets at https://docs.frigate.video/configuration/ffmpeg_presets
@ -256,6 +270,8 @@ ffmpeg:
retry_interval: 10
# Optional: Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players. (default: shown below)
apple_compatibility: false
# Optional: Set the index of the GPU to use for hardware acceleration. (default: shown below)
gpu: 0
# Optional: Detect configuration
# NOTE: Can be overridden at the camera level
@ -275,6 +291,9 @@ detect:
max_disappeared: 25
# Optional: Configuration for stationary object tracking
stationary:
# Optional: Stationary classifier that uses visual characteristics to determine if an object
# is stationary even if the box changes enough to be considered motion (default: shown below).
classifier: True
# Optional: Frequency for confirming stationary objects (default: same as threshold)
# When set to 1, object detection will run to confirm the object still exists on every frame.
# If set to 10, object detection will run to confirm the object still exists on every 10th frame.
@ -339,6 +358,33 @@ objects:
# Optional: mask to prevent this object type from being detected in certain areas (default: no mask)
# Checks based on the bottom center of the bounding box of the object
mask: 0.000,0.000,0.781,0.000,0.781,0.278,0.000,0.278
# Optional: Configuration for AI generated tracked object descriptions
genai:
# Optional: Enable AI object description generation (default: shown below)
enabled: False
# Optional: Use the object snapshot instead of thumbnails for description generation (default: shown below)
use_snapshot: False
# Optional: The default prompt for generating descriptions. Can use replacement
# variables like "label", "sub_label", "camera" to make more dynamic. (default: shown below)
prompt: "Describe the {label} in the sequence of images with as much detail as possible. Do not describe the background."
# Optional: Object specific prompts to customize description results
# Format: {label}: {prompt}
object_prompts:
person: "My special person prompt."
# Optional: objects to generate descriptions for (default: all objects that are tracked)
objects:
- person
- cat
# Optional: Restrict generation to objects that entered any of the listed zones (default: none, all zones qualify)
required_zones: []
# Optional: What triggers to use to send frames for a tracked object to generative AI (default: shown below)
send_triggers:
# Once the object is no longer tracked
tracked_object_end: True
# Optional: After X many significant updates are received (default: shown below)
after_significant_updates: None
# Optional: Save thumbnails sent to generative AI for review/debugging purposes (default: shown below)
debug_save_thumbnails: False
# Optional: Review configuration
# NOTE: Can be overridden at the camera level
@ -351,6 +397,8 @@ review:
labels:
- car
- person
# Time to cut off alerts after no alert-causing activity has occurred (default: shown below)
cutoff_time: 40
# Optional: required zones for an object to be marked as an alert (default: none)
# NOTE: when setting required zones globally, this zone must exist on all cameras
# or the config will be considered invalid. In that case the required_zones
@ -365,12 +413,36 @@ review:
labels:
- car
- person
# Time to cut off detections after no detection-causing activity has occurred (default: shown below)
cutoff_time: 30
# Optional: required zones for an object to be marked as a detection (default: none)
# NOTE: when setting required zones globally, this zone must exist on all cameras
# or the config will be considered invalid. In that case the required_zones
# should be configured at the camera level.
required_zones:
- driveway
# Optional: GenAI Review Summary Configuration
genai:
# Optional: Enable the GenAI review summary feature (default: shown below)
enabled: False
# Optional: Enable GenAI review summaries for alerts (default: shown below)
alerts: True
# Optional: Enable GenAI review summaries for detections (default: shown below)
detections: False
# Optional: Activity Context Prompt to give the GenAI context on what activity is and is not suspicious.
# It is important to be direct and detailed. See documentation for the default prompt structure.
activity_context_prompt: |
  Define what is and is not suspicious
# Optional: Image source for GenAI (default: preview)
# Options: "preview" (uses cached preview frames at ~180p) or "recordings" (extracts frames from recordings at 480p)
# Using "recordings" provides better image quality but uses more tokens per image.
# Frame count is automatically calculated based on context window size, aspect ratio, and image source (capped at 20 frames).
image_source: preview
# Optional: Additional concerns that the GenAI should make note of (default: None)
additional_concerns:
- Animals in the garden
# Optional: Preferred response language (default: English)
preferred_language: English
# Optional: Motion configuration
# NOTE: Can be overridden at the camera level
@ -440,18 +512,18 @@ record:
expire_interval: 60
# Optional: Two-way sync recordings database with disk on startup and once a day (default: shown below).
sync_recordings: False
# Optional: Continuous retention settings
continuous:
# Optional: Number of days to retain recordings regardless of tracked objects or motion (default: shown below)
# NOTE: This should be set to 0 and retention should be defined in alerts and detections section below
# if you only want to retain recordings of alerts and detections.
days: 0
# Optional: Motion retention settings
motion:
# Optional: Number of days to retain recordings regardless of tracked objects (default: shown below)
# NOTE: This should be set to 0 and retention should be defined in alerts and detections section below
# if you only want to retain recordings of alerts and detections.
days: 0
# Optional: Recording Export Settings
export:
# Optional: Timelapse Output Args (default: shown below).
@ -476,7 +548,7 @@ record:
# Optional: Retention settings for recordings of alerts
retain:
# Required: Retention days (default: shown below)
days: 10
# Optional: Mode for retention. (default: shown below)
# all - save all recording segments for alerts regardless of activity
# motion - save all recording segments for alerts with any detected motion
@ -496,7 +568,7 @@ record:
# Optional: Retention settings for recordings of detections
retain:
# Required: Retention days (default: shown below)
days: 10
# Optional: Mode for retention. (default: shown below)
# all - save all recording segments for detections regardless of activity
# motion - save all recording segments for detections with any detected motion
@ -513,7 +585,7 @@ record:
snapshots:
# Optional: Enable writing jpg snapshot to /media/frigate/clips (default: shown below)
enabled: False
# Optional: save a clean copy of the snapshot image (default: shown below)
clean_copy: True
# Optional: print a timestamp on the snapshots (default: shown below)
timestamp: False
@ -546,6 +618,9 @@ semantic_search:
# Optional: Set the model size used for embeddings. (default: shown below)
# NOTE: small model runs on CPU and large model runs on GPU
model_size: "small"
# Optional: Target a specific device to run the model (default: shown below)
# NOTE: See https://onnxruntime.ai/docs/execution-providers/ for more information
device: None
# Optional: Configuration for face recognition capability
# NOTE: enabled, min_area can be overridden at the camera level
@ -564,11 +639,14 @@ face_recognition:
# Optional: Min face recognitions for the sub label to be applied to the person object (default: shown below)
min_faces: 1
# Optional: Number of images of recognized faces to save for training (default: shown below)
save_attempts: 200
# Optional: Apply a blur quality filter to adjust confidence based on the blur level of the image (default: shown below)
blur_confidence_filter: True
# Optional: Set the model size used for face recognition. (default: shown below)
model_size: small
# Optional: Target a specific device to run the model (default: shown below)
# NOTE: See https://onnxruntime.ai/docs/execution-providers/ for more information
device: None
# Optional: Configuration for license plate recognition capability
# NOTE: enabled, min_area, and enhancement can be overridden at the camera level
@ -576,6 +654,7 @@ lpr:
# Optional: Enable license plate recognition (default: shown below)
enabled: False
# Optional: The device to run the models on (default: shown below)
# NOTE: See https://onnxruntime.ai/docs/execution-providers/ for more information
device: CPU
# Optional: Set the model size used for text detection. (default: shown below)
model_size: small
@ -598,30 +677,41 @@ lpr:
enhancement: 0
# Optional: Save plate images to /media/frigate/clips/lpr for debugging purposes (default: shown below)
debug_save_plates: False
# Optional: List of regex replacement rules to normalize detected plates (default: shown below)
replace_rules: {}
# Optional: Configuration for AI / LLM provider
# WARNING: Depending on the provider, this will send thumbnails over the internet
# to Google or OpenAI's LLMs to generate descriptions. GenAI features can be configured at
# the camera level to enhance privacy for indoor cameras.
genai:
# Required: Provider must be one of ollama, gemini, or openai
provider: ollama
# Required if provider is ollama. May also be used for an OpenAI API compatible backend with the openai provider.
base_url: http://localhost:11434
# Required if gemini or openai
api_key: "{FRIGATE_GENAI_API_KEY}"
# Required: The model to use with the provider.
model: gemini-1.5-flash
# Optional additional args to pass to the GenAI Provider (default: None)
provider_options:
keep_alive: -1

# Optional: Configuration for audio transcription
# NOTE: only the enabled option can be overridden at the camera level
audio_transcription:
# Optional: Enable audio transcription (default: shown below)
enabled: False
# Optional: The device to run the models on (default: shown below)
device: CPU
# Optional: Set the model size used for transcription. (default: shown below)
model_size: small
# Optional: Set the language used for transcription translation. (default: shown below)
# List of language codes: https://github.com/openai/whisper/blob/main/whisper/tokenizer.py#L10
language: en
# Optional: Restream configuration
# Uses https://github.com/AlexxIT/go2rtc (v1.9.10)
# NOTE: The default go2rtc API port (1984) must be used,
# changing this port for the integrated go2rtc instance is not supported.
go2rtc:
@ -720,6 +810,8 @@ cameras:
# NOTE: This must be different than any camera names, but can match with another zone on another
# camera.
front_steps:
# Optional: A friendly name or descriptive text for the zone
friendly_name: ""
# Required: List of x,y coordinates to define the polygon of the zone.
# NOTE: Presence in a zone is evaluated only based on the bottom center of the objects bounding box.
coordinates: 0.033,0.306,0.324,0.138,0.439,0.185,0.042,0.428
@ -827,33 +919,27 @@ cameras:
# By default the cameras are sorted alphabetically.
order: 0
# Optional: Configuration for triggers to automate actions based on semantic search results.
triggers:
# Required: Unique identifier for the trigger (generated automatically from friendly_name if not specified).
trigger_name:
# Required: Enable or disable the trigger. (default: shown below)
enabled: true
# Optional: A friendly name or descriptive text for the trigger
friendly_name: Unique name or descriptive text
# Type of trigger, either `thumbnail` for image-based matching or `description` for text-based matching. (default: none)
type: thumbnail
# Reference data for matching, either an event ID for `thumbnail` or a text string for `description`. (default: none)
data: 1751565549.853251-b69j73
# Similarity threshold for triggering. (default: shown below)
threshold: 0.8
# List of actions to perform when the trigger fires. (default: none)
# Available options:
# - `notification` (send a webpush notification)
# - `sub_label` (add trigger friendly name as a sub label to the triggering tracked object)
# - `attribute` (add trigger's name and similarity score as a data attribute to the triggering tracked object)
actions:
- notification
# Optional
ui:
@ -7,7 +7,7 @@ title: Restream
Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate.
Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.10) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` section of the config; see the [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration) for more advanced configurations and features.
:::note
@ -24,6 +24,11 @@ birdseye:
restream: True
```
:::tip
To improve connection speed when using Birdseye via restream you can enable a small idle heartbeat by setting `birdseye.idle_heartbeat_fps` to a low value (e.g. `1-2`). This makes Frigate periodically push the last frame even when no motion is detected, reducing initial connection latency.
:::
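A minimal sketch combining the options above (the heartbeat value here is illustrative; any low value works):
```yaml
birdseye:
  restream: True
  idle_heartbeat_fps: 1 # periodically re-send the last frame while idle
```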
### Securing Restream With Authentication
The go2rtc restream can be secured with RTSP based username / password authentication. Ex:
@ -156,7 +161,7 @@ See [this comment](https://github.com/AlexxIT/go2rtc/issues/1217#issuecomment-22
## Advanced Restream Configurations
The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
NOTE: The output will need to be passed with two curly braces `{{output}}`
@ -39,7 +39,7 @@ If you are enabling Semantic Search for the first time, be advised that Frigate
The [V1 model from Jina](https://huggingface.co/jinaai/jina-clip-v1) has a vision model which is able to embed both images and text into the same vector space, which allows `image -> image` and `text -> image` similarity searches. Frigate uses this model on tracked objects to encode the thumbnail image and store it in the database. When searching for tracked objects via text in the search box, Frigate will perform a `text -> image` similarity search against this embedding. When clicking "Find Similar" in the tracked object detail pane, Frigate will perform an `image -> image` similarity search to retrieve the closest matching thumbnails.
The V1 text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Explore page when clicking on the thumbnail of a tracked object. See [the object description docs](/configuration/genai/objects.md) for more information on how to automatically generate tracked object descriptions.
Differently weighted versions of the Jina models are available and can be selected by setting the `model_size` config option as `small` or `large`:
@ -78,17 +78,21 @@ Switching between V1 and V2 requires reindexing your embeddings. The embeddings
### GPU Acceleration
The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU hardware, when available. This depends on the Docker build that is used. You can also target a specific device in a multi-GPU installation.
```yaml
semantic_search:
  enabled: True
  model_size: large
  # Optional, if using the 'large' model in a multi-GPU installation
  device: 0
```
:::info
If the correct build is used for your GPU / NPU and the `large` model is configured, then the GPU will be detected and used automatically.
Specify the `device` option to target a specific GPU in a multi-GPU system (see [onnxruntime's provider options](https://onnxruntime.ai/docs/execution-providers/)).
If you do not specify a device, the first available GPU will be used.
See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.
@ -102,3 +106,61 @@ See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_
4. Make your search language and tone closely match exactly what you're looking for. If you are using thumbnail search, **phrase your query as an image caption**. Searching for "red car" may not work as well as "red sedan driving down a residential street on a sunny day".
5. Semantic search on thumbnails tends to return better results when matching large subjects that take up most of the frame. Small things like "cat" tend to not work well.
6. Experiment! Find a tracked object you want to test and start typing keywords and phrases to see what works for you.
## Triggers
Triggers use Semantic Search to automate actions: when a tracked object's image or description matches a predefined image or text, based on a similarity threshold, Frigate executes the configured actions. Triggers are managed per camera and can be configured via the Frigate UI in the Settings page under the Triggers tab.
:::note
Semantic Search must be enabled to use Triggers.
:::
### Configuration
Triggers are defined within the `semantic_search` configuration for each camera in your Frigate configuration file or through the UI. Each trigger consists of a `friendly_name`, a `type` (either `thumbnail` or `description`), a `data` field (the reference image event ID or text), a `threshold` for similarity matching, and a list of `actions` to perform when the trigger fires - `notification`, `sub_label`, and `attribute`.
Triggers are best configured through the Frigate UI.
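For reference, a config-defined trigger might look like the sketch below (the camera name and event ID are placeholders, and the per-camera `semantic_search` nesting follows the description above; the fields mirror the reference config):
```yaml
cameras:
  driveway: # placeholder camera name
    semantic_search:
      triggers:
        red_car_alert:
          enabled: true
          friendly_name: Red car on the driveway camera
          type: thumbnail
          data: 1751565549.853251-b69j73 # event ID of the reference thumbnail
          threshold: 0.8
          actions:
            - notification
```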
#### Managing Triggers in the UI
1. Navigate to the **Settings** page and select the **Triggers** tab.
2. Choose a camera from the dropdown menu to view or manage its triggers.
3. Click **Add Trigger** to create a new trigger or use the pencil icon to edit an existing one.
4. In the **Create Trigger** wizard:
- Enter a **Name** for the trigger (e.g., "Red Car Alert").
- Enter a descriptive **Friendly Name** for the trigger (e.g., "Red car on the driveway camera").
- Select the **Type** (`Thumbnail` or `Description`).
- For `Thumbnail`, select an image to trigger this action when a similar thumbnail image is detected, based on the threshold.
- For `Description`, enter text to trigger this action when a similar tracked object description is detected.
- Set the **Threshold** for similarity matching.
- Select **Actions** to perform when the trigger fires.
If native webpush notifications are enabled, check the `Send Notification` box to send a notification.
Check the `Add Sub Label` box to add the trigger's friendly name as a sub label to any triggering tracked objects.
Check the `Add Attribute` box to add the trigger's internal ID (e.g., "red_car_alert") to a data attribute on the tracked object that can be processed via the API or MQTT.
5. Save the trigger to update the configuration and store the embedding in the database.
When a trigger fires, the UI highlights the trigger with a blue dot for 3 seconds for easy identification. Additionally, the UI will show the last date/time and tracked object ID that activated your trigger. The last triggered timestamp is not saved to the database or persisted through restarts of Frigate.
### Usage and Best Practices
1. **Thumbnail Triggers**: Select a representative image (event ID) from the Explore page that closely matches the object you want to detect. For best results, choose images where the object is prominent and fills most of the frame.
2. **Description Triggers**: Write concise, specific text descriptions (e.g., "Person in a red jacket") that align with the tracked object's description. Avoid vague terms to improve matching accuracy.
3. **Threshold Tuning**: Adjust the threshold to balance sensitivity and specificity. A higher threshold (e.g., 0.8) requires closer matches, reducing false positives but potentially missing similar objects. A lower threshold (e.g., 0.6) is more inclusive but may trigger more often.
4. **Using Explore**: Use the context menu or right-click / long-press on a tracked object in the Grid View in Explore to quickly add a trigger based on the tracked object's thumbnail.
5. **Editing triggers**: For the best experience, triggers should be edited via the UI. However, Frigate will ensure triggers edited in the config will be synced with triggers created and edited in the UI.
### Notes
- Triggers rely on the same Jina AI CLIP models (V1 or V2) used for semantic search. Ensure `semantic_search` is enabled and properly configured.
- Reindexing embeddings (via the UI or `reindex: True`) does not affect trigger configurations but may update the embeddings used for matching.
- For optimal performance, use a system with sufficient RAM (8GB minimum, 16GB recommended) and a GPU for `large` model configurations, as described in the Semantic Search requirements.
### FAQ
#### Why can't I create a trigger on thumbnails for some text, like "person with a blue shirt" and have it trigger when a person with a blue shirt is detected?
TL;DR: Text-to-image triggers aren't supported because CLIP can confuse similar images and give inconsistent scores, making automation unreliable. The same word-image pair can give different scores and the score ranges can be too close together to set a clear cutoff.
Text-to-image triggers are not supported due to fundamental limitations of CLIP-based similarity search. While CLIP works well for exploratory, manual queries, it is unreliable for automated triggers based on a threshold. Issues include embedding drift (the same text-image pair can yield different cosine distances over time), lack of true semantic grounding (visually similar but incorrect matches), and unstable thresholding (distance distributions are dataset-dependent and often too tightly clustered to separate relevant from irrelevant results). Instead, it is recommended to set up a workflow with thumbnail triggers: first use text search to manually select 3-5 representative reference tracked objects, then configure thumbnail triggers based on that visual similarity. This provides robust automation without the semantic ambiguity of text-to-image matching.
@ -27,6 +27,7 @@ cameras:
- entire_yard
zones:
entire_yard:
friendly_name: Entire yard # Characters from any language can be used
coordinates: ...
```
@ -44,8 +45,10 @@ cameras:
- edge_yard
zones:
edge_yard:
friendly_name: Edge yard # Characters from any language can be used
coordinates: ...
inner_yard:
friendly_name: Inner yard # Characters from any language can be used
coordinates: ...
```
@ -59,6 +62,7 @@ cameras:
- entire_yard
zones:
entire_yard:
friendly_name: Entire yard
coordinates: ...
```
@ -82,13 +86,16 @@ cameras:
Only car objects can trigger the `front_yard_street` zone and only person objects can trigger the `entire_yard` zone. Objects will be tracked for any `person` that enters anywhere in the yard, and for cars only if they enter the street.
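A sketch of that per-zone object filtering (the camera name and coordinates are placeholders; zones accept an `objects` list):
```yaml
cameras:
  front_yard_cam: # placeholder camera name
    zones:
      front_yard_street:
        coordinates: ...
        objects:
          - car
      entire_yard:
        coordinates: ...
        objects:
          - person
```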
### Zone Loitering
Sometimes objects are expected to be passing through a zone, but an object loitering in an area is unexpected. Zones can be configured to have a minimum loitering time after which the object will be considered in the zone.
:::note
When using loitering zones, a review item will behave in the following way:
- When a person is in a loitering zone, the review item will remain active until the person leaves the loitering zone, regardless of if they are stationary.
- When any other object is in a loitering zone, the review item will remain active until the loitering time is met. Then if the object is stationary the review item will end.
:::
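A configuration sketch (names and values are illustrative; `loitering_time` sets the minimum time in seconds before the object is considered in the zone):
```yaml
cameras:
  front_porch: # placeholder camera name
    zones:
      porch_loitering:
        coordinates: ...
        loitering_time: 10 # seconds an object must remain before it is considered in the zone
        objects:
          - person
```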
@ -58,24 +58,36 @@ Frigate supports multiple different detectors that work on different types of ha
- Runs best with tiny or small size models
- [Google Coral EdgeTPU](#google-coral-tpu): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices.
- [Supports primarily ssdlite and mobilenet model architectures](../../configuration/object_detectors#edge-tpu-detector)
- [MemryX](#memryx-mx3): The MX3 M.2 accelerator module is available in m.2 format allowing for a wide range of compatibility with devices.
- [Supports many model architectures](../../configuration/object_detectors#memryx-mx3)
- Runs best with tiny, small, or medium-size models
**AMD**
- [ROCm](#rocm---amd-gpu): ROCm can run on AMD Discrete GPUs to provide efficient object detection
- [Supports limited model architectures](../../configuration/object_detectors#rocm-supported-models)
- Runs best on discrete AMD GPUs
**Apple Silicon**
- [Apple Silicon](#apple-silicon): Apple Silicon is usable on all M1 and newer Apple Silicon devices to provide efficient and fast object detection
- [Supports primarily ssdlite and mobilenet model architectures](../../configuration/object_detectors#apple-silicon-supported-models)
- Runs well with any size models including large
- Runs via ZMQ proxy which adds some latency, only recommended for local connection
**Intel**
- [OpenVino](#openvino---intel): OpenVino can run on Intel Arc GPUs, Intel integrated GPUs, and Intel NPUs to provide efficient object detection.
- [Supports majority of model architectures](../../configuration/object_detectors#openvino-supported-models)
- Runs best with tiny, small, or medium models
**Nvidia**
- [TensorRT](#tensorrt---nvidia-gpu): TensorRT can run on Nvidia GPUs and Jetson devices.
- [Supports majority of model architectures via ONNX](../../configuration/object_detectors#onnx-supported-models)
- Runs well with any size models including large
**Rockchip**
@ -85,8 +97,21 @@ Frigate supports multiple different detectors that work on different types of ha
- Runs best with tiny or small size models
- Runs efficiently on low power hardware
**Synaptics**
- [Synaptics](#synaptics): synap models can run on Synaptics devices (e.g. Astra Machina) with included NPUs to provide efficient object detection.
:::
### Synaptics
- **Synaptics**: Default model is **mobilenet**
| Name | Synaptics SL1680 Inference Time |
| ---------------- | ------------------------------- |
| ssd mobilenet | ~ 25 ms |
| yolov5m | ~ 118 ms |
### Hailo-8
Frigate supports both the Hailo-8 and Hailo-8L AI Acceleration Modules on compatible hardware platforms—including the Raspberry Pi 5 with the PCIe hat from the AI kit. The Hailo detector integration in Frigate automatically identifies your hardware type and selects the appropriate default model when a custom model isn't provided.
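Because the integration selects a default model for the detected module, a minimal config can omit the model entirely. A sketch, assuming the `hailo8l` detector type from the object detector docs:
```yaml
detectors:
  hailo:
    type: hailo8l
    # no model specified: the default for the detected hardware is used
```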
@ -125,6 +150,7 @@ The OpenVINO detector type is able to run on:
- 6th Gen Intel Platforms and newer that have an iGPU
- x86 hosts with an Intel Arc GPU
- Intel NPUs
- Most modern AMD CPUs (though this is officially not supported by Intel)
- x86 & Arm64 hosts via CPU (generally not recommended)
@ -149,8 +175,9 @@ Inference speeds vary greatly depending on the CPU or GPU used, some known examp
| Intel UHD 770 | ~ 15 ms | t-320: ~ 16 ms s-320: ~ 20 ms s-640: ~ 40 ms | 320: ~ 20 ms 640: ~ 46 ms | | |
| Intel N100 | ~ 15 ms | s-320: 30 ms | 320: ~ 25 ms | | Can only run one detector instance |
| Intel N150 | ~ 15 ms | t-320: 16 ms s-320: 24 ms | | | |
| Intel Iris XE | ~ 10 ms | t-320: 6 ms t-640: 14 ms s-320: 8 ms s-640: 16 ms | 320: ~ 10 ms 640: ~ 20 ms | 320-n: 33 ms | |
| Intel NPU | ~ 6 ms | s-320: 11 ms | 320: ~ 14 ms 640: ~ 34 ms | 320-n: 40 ms | |
| Intel Arc A310 | ~ 5 ms | t-320: 7 ms t-640: 11 ms s-320: 8 ms s-640: 15 ms | 320: ~ 8 ms 640: ~ 14 ms | | |
| Intel Arc A380 | ~ 6 ms | | 320: ~ 10 ms 640: ~ 22 ms | 336: 20 ms 448: 27 ms | |
| Intel Arc A750 | ~ 4 ms | | 320: ~ 8 ms | | |
@ -160,7 +187,7 @@ Frigate is able to utilize an Nvidia GPU which supports the 12.x series of CUDA
#### Minimum Hardware Support
12.x series of CUDA libraries are used which have minor version compatibility. The minimum driver version on the host system must be `>=545`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer; check the NVIDIA GPU Compute Capability table linked below.
Make sure your host system has the [nvidia-container-runtime](https://docs.docker.com/config/containers/resource_constraints/#access-an-nvidia-gpu) installed to pass through the GPU to the container and the host system has a compatible driver installed for your GPU.
@ -175,27 +202,71 @@ There are improved capabilities in newer GPU architectures that TensorRT can ben
[NVIDIA GPU Compute Capability](https://developer.nvidia.com/cuda-gpus)
Inference speeds will vary greatly depending on the GPU and the model used.
`tiny (t)` variants are faster than the equivalent non-tiny model; some known examples are below:
✅ - Accelerated with CUDA Graphs
❌ - Not accelerated with CUDA Graphs

| Name | ✅ YOLOv9 Inference Time | ✅ RF-DETR Inference Time | ❌ YOLO-NAS Inference Time |
| --------- | ------------------------------------- | ------------------------- | -------------------------- |
| GTX 1070 | s-320: 16 ms | | 320: 14 ms |
| RTX 3050 | t-320: 8 ms s-320: 10 ms s-640: 28 ms | Nano-320: ~ 12 ms | 320: ~ 10 ms 640: ~ 16 ms |
| RTX 3070 | t-320: 6 ms s-320: 8 ms s-640: 25 ms | Nano-320: ~ 9 ms | 320: ~ 8 ms 640: ~ 14 ms |
| RTX A4000 | | | 320: ~ 15 ms |
| Tesla P40 | | | 320: ~ 105 ms |
### Apple Silicon
With the [Apple Silicon](../configuration/object_detectors.md#apple-silicon-detector) detector Frigate can take advantage of the NPU in M1 and newer Apple Silicon.
:::warning
Apple Silicon cannot run within a container, so a ZMQ proxy is utilized to communicate with [the Apple Silicon Frigate detector](https://github.com/frigate-nvr/apple-silicon-detector), which runs on the host. This should add minimal latency when run on the same device.
:::
| Name | YOLOv9 Inference Time |
| ------ | ------------------------------------ |
| M4 | s-320: 10 ms |
| M3 Pro | t-320: 6 ms s-320: 8 ms s-640: 20 ms |
| M1 | s-320: 9 ms |
### ROCm - AMD GPU
With the [ROCm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many discrete AMD GPUs.
| Name | YOLOv9 Inference Time | YOLO-NAS Inference Time |
| --------- | --------------------------- | ------------------------- |
| AMD 780M | t-320: ~ 14 ms s-320: 20 ms | 320: ~ 25 ms 640: ~ 50 ms |
| AMD 8700G | | 320: ~ 20 ms 640: ~ 40 ms |
## Community Supported Detectors
### MemryX MX3
Frigate supports the MemryX MX3 M.2 AI Acceleration Module on compatible hardware platforms, including both x86 (Intel/AMD) and ARM-based SBCs such as Raspberry Pi 5.
A single MemryX MX3 module is capable of handling multiple camera streams using the default models, making it sufficient for most users. For larger deployments with more cameras or bigger models, multiple MX3 modules can be used. Frigate supports multi-detector configurations, allowing you to connect multiple MX3 modules to scale inference capacity.
Detailed information is available [in the detector docs](/configuration/object_detectors#memryx-mx3).
**Default Model Configuration:**
- Default model is **YOLO-NAS-Small**.
The MX3 is a pipelined architecture, so the maximum frames per second supported (and thus the supported number of cameras) cannot be calculated as `1/latency` (1/"Inference Time") and is measured separately. When estimating how many camera streams your configuration may support, use the **MX3 Total FPS** column to approximate the detector's limit, not the Inference Time; a worked example follows the table below.
| Model | Input Size | MX3 Inference Time | MX3 Total FPS |
| -------------------- | ---------- | ------------------ | ------------- |
| YOLO-NAS-Small | 320 | ~ 9 ms | ~ 378 |
| YOLO-NAS-Small | 640 | ~ 21 ms | ~ 138 |
| YOLOv9s | 320 | ~ 16 ms | ~ 382 |
| YOLOv9s | 640 | ~ 41 ms | ~ 110 |
| YOLOX-Small | 640 | ~ 16 ms | ~ 263 |
| SSDlite MobileNet v2 | 320 | ~ 5 ms | ~ 1056 |
Inference speeds may vary depending on the host platform. The above data was measured on an **Intel 13700 CPU**. Platforms like Raspberry Pi, Orange Pi, and other ARM-based SBCs have different levels of processing capability, which may limit total FPS.
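As a rough illustration of sizing against the **MX3 Total FPS** column, a minimal sketch; the per-camera detection rate is an assumed average, since real load depends on scene activity and your configured detect fps:

```python
# Estimate how many cameras a single MX3 could serve for a given model.
mx3_total_fps = 382  # YOLOv9s @ 320, from the table above
avg_detections_per_camera = 10  # assumption: average detections/second per camera

max_cameras = mx3_total_fps // avg_detections_per_camera
print(max_cameras)  # -> 38
```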
### Nvidia Jetson ### Nvidia Jetson
Frigate supports all Jetson boards, from the inexpensive Jetson Nano to the powerful Jetson Orin AGX. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration_video#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector). Frigate supports all Jetson boards, from the inexpensive Jetson Nano to the powerful Jetson Orin AGX. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration_video#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector).
@ -229,6 +229,77 @@ If you are using `docker run`, add this option to your command `--device /dev/ha
Finally, configure [hardware object detection](/configuration/object_detectors#hailo-8l) to complete the setup. Finally, configure [hardware object detection](/configuration/object_detectors#hailo-8l) to complete the setup.
### MemryX MX3
The MemryX MX3 Accelerator is available in the M.2 2280 form factor (like an NVMe SSD), and supports a variety of configurations:
- x86 (Intel/AMD) PCs
- Raspberry Pi 5
- Orange Pi 5 Plus/Max
- Multi-M.2 PCIe carrier cards
#### Installation
To get started with MX3 hardware setup for your system, refer to the [Hardware Setup Guide](https://developer.memryx.com/get_started/hardware_setup.html).
Then follow these steps for installing the correct driver/runtime configuration:
1. Copy or download [this script](https://github.com/blakeblackshear/frigate/blob/dev/docker/memryx/user_installation.sh).
2. Ensure it has execution permissions with `sudo chmod +x user_installation.sh`
3. Run the script with `./user_installation.sh`
4. **Restart your computer** to complete driver installation.
#### Setup
To set up Frigate, follow the default installation instructions using the standard image, for example `ghcr.io/blakeblackshear/frigate:stable`.
Next, grant Docker permissions to access your hardware by adding the following lines to your `docker-compose.yml` file:
```yaml
devices:
- /dev/memx0
```
During configuration, you must run Docker in privileged mode and ensure the container can access the MemryX `mxa_manager` service.
In your `docker-compose.yml`, also add:
```yaml
privileged: true
volumes:
  - /run/mxa_manager:/run/mxa_manager
```
If you can't use Docker Compose, you can run the container with something similar to this:
```bash
docker run -d \
--name frigate-memx \
--restart=unless-stopped \
--mount type=tmpfs,target=/tmp/cache,tmpfs-size=1000000000 \
--shm-size=256m \
-v /path/to/your/storage:/media/frigate \
-v /path/to/your/config:/config \
-v /etc/localtime:/etc/localtime:ro \
-v /run/mxa_manager:/run/mxa_manager \
-e FRIGATE_RTSP_PASSWORD='password' \
--privileged=true \
-p 8971:8971 \
-p 8554:8554 \
-p 5000:5000 \
-p 8555:8555/tcp \
-p 8555:8555/udp \
--device /dev/memx0 \
ghcr.io/blakeblackshear/frigate:stable
```
#### Configuration
Finally, configure [hardware object detection](/configuration/object_detectors#memryx-mx3) to complete the setup.
### Rockchip platform ### Rockchip platform
Make sure that you use a linux distribution that comes with the rockchip BSP kernel 5.10 or 6.1 and necessary drivers (especially rkvdec2 and rknpu). To check, enter the following commands: Make sure that you use a linux distribution that comes with the rockchip BSP kernel 5.10 or 6.1 and necessary drivers (especially rkvdec2 and rknpu). To check, enter the following commands:
@ -282,6 +353,37 @@ or add these options to your `docker run` command:
Next, you should configure [hardware object detection](/configuration/object_detectors#rockchip-platform) and [hardware video processing](/configuration/hardware_acceleration_video#rockchip-platform). Next, you should configure [hardware object detection](/configuration/object_detectors#rockchip-platform) and [hardware video processing](/configuration/hardware_acceleration_video#rockchip-platform).
### Synaptics
- SL1680
#### Setup
Follow Frigate's default installation instructions, but use a Docker image with the `-synaptics` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-synaptics`.
Next, you need to grant docker permissions to access your hardware:
- During the configuration process, you should run docker in privileged mode to avoid any errors due to insufficient permissions. To do so, add `privileged: true` to your `docker-compose.yml` file or the `--privileged` flag to your docker run command.
```yaml
devices:
- /dev/synap
- /dev/video0
- /dev/video1
```
or add these options to your `docker run` command:
```
--device /dev/synap \
--device /dev/video0 \
--device /dev/video1
```
#### Configuration
Next, you should configure [hardware object detection](/configuration/object_detectors#synaptics) and [hardware video processing](/configuration/hardware_acceleration_video#synaptics).
## Docker ## Docker
Running through Docker with Docker Compose is the recommended install method. Running through Docker with Docker Compose is the recommended install method.
@ -299,7 +401,8 @@ services:
- /dev/bus/usb:/dev/bus/usb # Passes the USB Coral, needs to be modified for other versions - /dev/bus/usb:/dev/bus/usb # Passes the USB Coral, needs to be modified for other versions
- /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://github.com/jnicolson/gasket-builder - /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://github.com/jnicolson/gasket-builder
- /dev/video11:/dev/video11 # For Raspberry Pi 4B - /dev/video11:/dev/video11 # For Raspberry Pi 4B
- /dev/dri/renderD128:/dev/dri/renderD128 # For intel hwaccel, needs to be updated for your hardware - /dev/dri/renderD128:/dev/dri/renderD128 # AMD / Intel GPU, needs to be updated for your hardware
- /dev/accel:/dev/accel # Intel NPU
volumes: volumes:
- /etc/localtime:/etc/localtime:ro - /etc/localtime:/etc/localtime:ro
- /path/to/your/config:/config - /path/to/your/config:/config
@ -3,17 +3,15 @@ id: configuring_go2rtc
title: Configuring go2rtc title: Configuring go2rtc
--- ---
# Configuring go2rtc
Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect directly to your cameras. However, adding go2rtc to your configuration is required for the following features: Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect directly to your cameras. However, adding go2rtc to your configuration is required for the following features:
- WebRTC or MSE for live viewing with audio and higher resolutions/frame rates than the jsmpeg stream, which is limited to the detect stream and does not support audio - WebRTC or MSE for live viewing with audio and higher resolutions/frame rates than the jsmpeg stream, which is limited to the detect stream and does not support audio
- Live stream support for cameras in Home Assistant Integration - Live stream support for cameras in Home Assistant Integration
- RTSP relay for use with other consumers to reduce the number of connections to your camera streams - RTSP relay for use with other consumers to reduce the number of connections to your camera streams
# Setup a go2rtc stream ## Setup a go2rtc stream
First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#module-streams), not just rtsp. First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#module-streams), not just rtsp.
:::tip :::tip
@ -49,8 +47,8 @@ After adding this to the config, restart Frigate and try to watch the live strea
- Check Video Codec: - Check Video Codec:
- If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported. - If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported.
- If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#codecs-madness) in go2rtc documentation. - If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#codecs-madness) in go2rtc documentation.
- If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. - If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.
```yaml ```yaml
go2rtc: go2rtc:
streams: streams:
@ -111,11 +109,11 @@ section.
::: :::
## Next steps ### Next steps
1. If the stream you added to go2rtc is also used by Frigate for the `record` or `detect` role, you can migrate your config to pull from the RTSP restream to reduce the number of connections to your camera as shown [here](/configuration/restream#reduce-connections-to-camera). 1. If the stream you added to go2rtc is also used by Frigate for the `record` or `detect` role, you can migrate your config to pull from the RTSP restream to reduce the number of connections to your camera as shown [here](/configuration/restream#reduce-connections-to-camera).
2. You can [set up WebRTC](/configuration/live#webrtc-extra-configuration) if your camera supports two-way talk. Note that WebRTC only supports specific audio formats and may require opening ports on your router. 2. You can [set up WebRTC](/configuration/live#webrtc-extra-configuration) if your camera supports two-way talk. Note that WebRTC only supports specific audio formats and may require opening ports on your router.
## Important considerations ## HomeKit Configuration
If you are configuring go2rtc to publish HomeKit camera streams, on pairing the configuration is written to the `/dev/shm/go2rtc.yaml` file inside the container. These changes must be manually copied across to the `go2rtc` section of your Frigate configuration in order to persist through restarts. To add camera streams to HomeKit, Frigate must be configured in Docker to use `host` networking mode. Once that is done, you can use the go2rtc WebUI (accessed via port 1984, which is disabled by default) to export a camera to HomeKit. Any changes made will automatically be saved to `/config/go2rtc_homekit.yml`.
@ -0,0 +1,37 @@
---
id: homekit
title: HomeKit
---
Frigate cameras can be integrated with Apple HomeKit through go2rtc. This allows you to view your camera streams directly in the Apple Home app on your iOS, iPadOS, macOS, and tvOS devices.
## Overview
HomeKit integration is handled entirely through go2rtc, which is embedded in Frigate. go2rtc provides the necessary HomeKit Accessory Protocol (HAP) server to expose your cameras to HomeKit.
## Setup
All HomeKit configuration and pairing should be done through the **go2rtc WebUI**.
### Accessing the go2rtc WebUI
The go2rtc WebUI is available at:
```
http://<frigate_host>:1984
```
Replace `<frigate_host>` with the IP address or hostname of your Frigate server.
### Pairing Cameras
1. Navigate to the go2rtc WebUI at `http://<frigate_host>:1984`
2. Use the `add` section to add a new camera to HomeKit
3. Follow the on-screen instructions to generate pairing codes for your cameras
## Requirements
- Frigate must run with host networking (`network_mode: host`) and be accessible on your local network
- Your iOS device must be on the same network as Frigate
- Port 1984 must be accessible for the go2rtc WebUI
- For detailed go2rtc configuration options, refer to the [go2rtc documentation](https://github.com/AlexxIT/go2rtc)
@ -215,6 +215,20 @@ When the review activity has ended a final `end` message is published.
} }
``` ```
### `frigate/triggers`
Message published when a trigger defined in a camera's `semantic_search` configuration fires.
```json
{
"name": "car_trigger",
"camera": "driveway",
"event_id": "1751565549.853251-b69j73",
"type": "thumbnail",
"score": 0.85
}
```
### `frigate/stats` ### `frigate/stats`
Same data available at `/api/stats` published at a configurable interval. Same data available at `/api/stats` published at a configurable interval.
@ -233,6 +247,14 @@ Topic with current state of notifications. Published values are `ON` and `OFF`.
## Frigate Camera Topics ## Frigate Camera Topics
### `frigate/<camera_name>/<role>/status`
Publishes the current health status of each role that is enabled (`audio`, `detect`, `record`). Possible values are:
- `online`: Stream is running and being processed
- `offline`: Stream is offline and is being restarted
- `disabled`: Camera is currently disabled
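For monitoring these health topics, a minimal subscriber sketch using paho-mqtt; the broker hostname and the wildcard subscription are assumptions, and the 1.x-style constructor is used for brevity:

```python
import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    # e.g. topic "frigate/front_door/detect/status", payload "online"
    print(msg.topic, msg.payload.decode())

client = mqtt.Client()  # paho-mqtt 1.x style constructor
client.on_message = on_message
client.connect("mqtt.local", 1883)  # hypothetical broker address
client.subscribe("frigate/+/detect/status")  # all cameras, detect role
client.loop_forever()
```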
### `frigate/<camera_name>/<object_name>` ### `frigate/<camera_name>/<object_name>`
Publishes the count of objects for the camera for use as a sensor in Home Assistant. Publishes the count of objects for the camera for use as a sensor in Home Assistant.
@ -266,6 +288,8 @@ The height and crop of snapshots can be configured in the config.
Publishes "ON" when a type of audio is detected and "OFF" when it is not for the camera for use as a sensor in Home Assistant. Publishes "ON" when a type of audio is detected and "OFF" when it is not for the camera for use as a sensor in Home Assistant.
`all` can be used as the audio_type for the status of all audio types.
### `frigate/<camera_name>/audio/dBFS` ### `frigate/<camera_name>/audio/dBFS`
Publishes the dBFS value for audio detected on this camera. Publishes the dBFS value for audio detected on this camera.
@ -278,6 +302,12 @@ Publishes the rms value for audio detected on this camera.
**NOTE:** Requires audio detection to be enabled **NOTE:** Requires audio detection to be enabled
### `frigate/<camera_name>/audio/transcription`
Publishes transcribed text for audio detected on this camera.
**NOTE:** Requires audio detection and transcription to be enabled
### `frigate/<camera_name>/enabled/set` ### `frigate/<camera_name>/enabled/set`
Topic to turn Frigate's processing of a camera on and off. Expected values are `ON` and `OFF`. Topic to turn Frigate's processing of a camera on and off. Expected values are `ON` and `OFF`.
@ -400,6 +430,22 @@ Topic to turn review detections for a camera on or off. Expected values are `ON`
Topic with current state of review detections for a camera. Published values are `ON` and `OFF`. Topic with current state of review detections for a camera. Published values are `ON` and `OFF`.
### `frigate/<camera_name>/object_descriptions/set`
Topic to turn generative AI object descriptions for a camera on or off. Expected values are `ON` and `OFF`.
### `frigate/<camera_name>/object_descriptions/state`
Topic with current state of generative AI object descriptions for a camera. Published values are `ON` and `OFF`.
### `frigate/<camera_name>/review_descriptions/set`
Topic to turn generative AI review descriptions for a camera on or off. Expected values are `ON` and `OFF`.
### `frigate/<camera_name>/review_descriptions/state`
Topic with current state of generative AI review descriptions for a camera. Published values are `ON` and `OFF`.
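As a sketch of driving the `set` topics above programmatically (camera name and broker address are assumptions):

```python
import paho.mqtt.publish as publish

# Turn generative AI review descriptions off for a single camera.
publish.single(
    "frigate/front_door/review_descriptions/set",
    payload="OFF",
    hostname="mqtt.local",  # hypothetical broker address
)
```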
### `frigate/<camera_name>/birdseye/set` ### `frigate/<camera_name>/birdseye/set`
Topic to turn Birdseye for a camera on and off. Expected values are `ON` and `OFF`. Birdseye mode Topic to turn Birdseye for a camera on and off. Expected values are `ON` and `OFF`. Birdseye mode
docs/package-lock.json generated: file diff suppressed because it is too large
@ -5,14 +5,14 @@ import frigateHttpApiSidebar from "./docs/integrations/api/sidebar";
const sidebars: SidebarsConfig = { const sidebars: SidebarsConfig = {
docs: { docs: {
Frigate: [ Frigate: [
'frigate/index', "frigate/index",
'frigate/hardware', "frigate/hardware",
'frigate/planning_setup', "frigate/planning_setup",
'frigate/installation', "frigate/installation",
'frigate/updating', "frigate/updating",
'frigate/camera_setup', "frigate/camera_setup",
'frigate/video_pipeline', "frigate/video_pipeline",
'frigate/glossary', "frigate/glossary",
], ],
Guides: [ Guides: [
"guides/getting_started", "guides/getting_started",
@ -28,7 +28,7 @@ const sidebars: SidebarsConfig = {
{ {
type: "link", type: "link",
label: "Go2RTC Configuration Reference", label: "Go2RTC Configuration Reference",
href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.9#configuration", href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration",
} as PropSidebarItemLink, } as PropSidebarItemLink,
], ],
Detectors: [ Detectors: [
@ -37,10 +37,36 @@ const sidebars: SidebarsConfig = {
], ],
Enrichments: [ Enrichments: [
"configuration/semantic_search", "configuration/semantic_search",
"configuration/genai",
"configuration/face_recognition", "configuration/face_recognition",
"configuration/license_plate_recognition", "configuration/license_plate_recognition",
"configuration/bird_classification", "configuration/bird_classification",
{
type: "category",
label: "Custom Classification",
link: {
type: "generated-index",
title: "Custom Classification",
description: "Configuration for custom classification models",
},
items: [
"configuration/custom_classification/state_classification",
"configuration/custom_classification/object_classification",
],
},
{
type: "category",
label: "Generative AI",
link: {
type: "generated-index",
title: "Generative AI",
description: "Generative AI Features",
},
items: [
"configuration/genai/genai_config",
"configuration/genai/genai_review",
"configuration/genai/genai_objects",
],
},
], ],
Cameras: [ Cameras: [
"configuration/cameras", "configuration/cameras",
@ -90,14 +116,15 @@ const sidebars: SidebarsConfig = {
items: frigateHttpApiSidebar, items: frigateHttpApiSidebar,
}, },
"integrations/mqtt", "integrations/mqtt",
"integrations/homekit",
"configuration/metrics", "configuration/metrics",
"integrations/third_party_extensions", "integrations/third_party_extensions",
], ],
'Frigate+': [ "Frigate+": [
'plus/index', "plus/index",
'plus/annotating', "plus/annotating",
'plus/first_model', "plus/first_model",
'plus/faq', "plus/faq",
], ],
Troubleshooting: [ Troubleshooting: [
"troubleshooting/faqs", "troubleshooting/faqs",
File diff suppressed because it is too large
@ -1,5 +1,6 @@
import argparse import argparse
import faulthandler import faulthandler
import multiprocessing as mp
import signal import signal
import sys import sys
import threading import threading
@ -15,12 +16,17 @@ from frigate.util.config import find_config_file
def main() -> None: def main() -> None:
manager = mp.Manager()
faulthandler.enable() faulthandler.enable()
# Setup the logging thread # Setup the logging thread
setup_logging() setup_logging(manager)
threading.current_thread().name = "frigate" threading.current_thread().name = "frigate"
stop_event = mp.Event()
# send stop event on SIGINT
signal.signal(signal.SIGINT, lambda sig, frame: stop_event.set())
# Make sure we exit cleanly on SIGTERM. # Make sure we exit cleanly on SIGTERM.
signal.signal(signal.SIGTERM, lambda sig, frame: sys.exit()) signal.signal(signal.SIGTERM, lambda sig, frame: sys.exit())
@ -93,7 +99,14 @@ def main() -> None:
print("*************************************************************") print("*************************************************************")
print("*** End Config Validation Errors ***") print("*** End Config Validation Errors ***")
print("*************************************************************") print("*************************************************************")
sys.exit(1)
# attempt to start Frigate in recovery mode
try:
config = FrigateConfig.load(install=True, safe_load=True)
print("Starting Frigate in safe mode.")
except ValidationError:
print("Unable to start Frigate in safe mode.")
sys.exit(1)
if args.validate_config: if args.validate_config:
print("*************************************************************") print("*************************************************************")
print("*** Your config file is valid. ***") print("*** Your config file is valid. ***")
@ -101,8 +114,23 @@ def main() -> None:
sys.exit(0) sys.exit(0)
# Run the main application. # Run the main application.
FrigateApp(config).start() FrigateApp(config, manager, stop_event).start()
if __name__ == "__main__": if __name__ == "__main__":
mp.set_forkserver_preload(
[
# Standard library and core dependencies
"sqlite3",
# Third-party libraries commonly used in Frigate
"numpy",
"cv2",
"peewee",
"zmq",
"ruamel.yaml",
# Frigate core modules
"frigate.camera.maintainer",
]
)
mp.set_start_method("forkserver", force=True)
main() main()
@ -6,21 +6,21 @@ import json
import logging import logging
import os import os
import traceback import traceback
import urllib
from datetime import datetime, timedelta from datetime import datetime, timedelta
from functools import reduce from functools import reduce
from io import StringIO from io import StringIO
from pathlib import Path as FilePath from pathlib import Path as FilePath
from typing import Any, Optional from typing import Any, Dict, List, Optional
import aiofiles import aiofiles
import requests
import ruamel.yaml import ruamel.yaml
from fastapi import APIRouter, Body, Path, Request, Response from fastapi import APIRouter, Body, Path, Request, Response
from fastapi.encoders import jsonable_encoder from fastapi.encoders import jsonable_encoder
from fastapi.params import Depends from fastapi.params import Depends
from fastapi.responses import JSONResponse, PlainTextResponse, StreamingResponse from fastapi.responses import JSONResponse, PlainTextResponse, StreamingResponse
from markupsafe import escape from markupsafe import escape
from peewee import SQL, operator from peewee import SQL, fn, operator
from pydantic import ValidationError from pydantic import ValidationError
from frigate.api.auth import require_role from frigate.api.auth import require_role
@ -28,21 +28,26 @@ from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryPa
from frigate.api.defs.request.app_body import AppConfigSetBody from frigate.api.defs.request.app_body import AppConfigSetBody
from frigate.api.defs.tags import Tags from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
from frigate.config.camera.updater import (
CameraConfigUpdateEnum,
CameraConfigUpdateTopic,
)
from frigate.models import Event, Timeline from frigate.models import Event, Timeline
from frigate.stats.prometheus import get_metrics, update_metrics from frigate.stats.prometheus import get_metrics, update_metrics
from frigate.util.builtin import ( from frigate.util.builtin import (
clean_camera_user_pass, clean_camera_user_pass,
get_tz_modifiers, flatten_config_data,
update_yaml_from_url, process_config_query_string,
update_yaml_file_bulk,
) )
from frigate.util.config import find_config_file from frigate.util.config import find_config_file
from frigate.util.services import ( from frigate.util.services import (
ffprobe_stream,
get_nvidia_driver_info, get_nvidia_driver_info,
process_logs, process_logs,
restart_frigate, restart_frigate,
vainfo_hwaccel, vainfo_hwaccel,
) )
from frigate.util.time import get_tz_modifiers
from frigate.version import VERSION from frigate.version import VERSION
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -63,43 +68,6 @@ def config_schema(request: Request):
) )
@router.get("/go2rtc/streams")
def go2rtc_streams():
r = requests.get("http://127.0.0.1:1984/api/streams")
if not r.ok:
logger.error("Failed to fetch streams from go2rtc")
return JSONResponse(
content=({"success": False, "message": "Error fetching stream data"}),
status_code=500,
)
stream_data = r.json()
for data in stream_data.values():
for producer in data.get("producers") or []:
producer["url"] = clean_camera_user_pass(producer.get("url", ""))
return JSONResponse(content=stream_data)
@router.get("/go2rtc/streams/{camera_name}")
def go2rtc_camera_stream(request: Request, camera_name: str):
r = requests.get(
f"http://127.0.0.1:1984/api/streams?src={camera_name}&video=all&audio=all&microphone"
)
if not r.ok:
camera_config = request.app.frigate_config.cameras.get(camera_name)
if camera_config and camera_config.enabled:
logger.error("Failed to fetch streams from go2rtc")
return JSONResponse(
content=({"success": False, "message": "Error fetching stream data"}),
status_code=500,
)
stream_data = r.json()
for producer in stream_data.get("producers", []):
producer["url"] = clean_camera_user_pass(producer.get("url", ""))
return JSONResponse(content=stream_data)
@router.get("/version", response_class=PlainTextResponse) @router.get("/version", response_class=PlainTextResponse)
def version(): def version():
return VERSION return VERSION
@ -123,7 +91,14 @@ def metrics(request: Request):
"""Expose Prometheus metrics endpoint and update metrics with latest stats""" """Expose Prometheus metrics endpoint and update metrics with latest stats"""
# Retrieve the latest statistics and update the Prometheus metrics # Retrieve the latest statistics and update the Prometheus metrics
stats = request.app.stats_emitter.get_latest_stats() stats = request.app.stats_emitter.get_latest_stats()
update_metrics(stats) # query DB for count of events by camera, label
event_counts: List[Dict[str, Any]] = (
Event.select(Event.camera, Event.label, fn.Count())
.group_by(Event.camera, Event.label)
.dicts()
)
update_metrics(stats=stats, event_counts=event_counts)
content, content_type = get_metrics() content, content_type = get_metrics()
return Response(content=content, media_type=content_type) return Response(content=content, media_type=content_type)
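A quick way to eyeball the new per-camera event counters is to scrape the endpoint directly; a sketch assuming the unauthenticated internal port and the `/api/metrics` path:

```python
import requests

# Print the first lines of the Prometheus exposition output.
text = requests.get("http://frigate.local:5000/api/metrics").text
print("\n".join(text.splitlines()[:20]))
```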
@ -354,14 +329,37 @@ def config_set(request: Request, body: AppConfigSetBody):
with open(config_file, "r") as f: with open(config_file, "r") as f:
old_raw_config = f.read() old_raw_config = f.read()
f.close()
try: try:
update_yaml_from_url(config_file, str(request.url)) updates = {}
# process query string parameters (takes precedence over body.config_data)
parsed_url = urllib.parse.urlparse(str(request.url))
query_string = urllib.parse.parse_qs(parsed_url.query, keep_blank_values=True)
# Filter out empty keys but keep blank values for non-empty keys
query_string = {k: v for k, v in query_string.items() if k}
if query_string:
updates = process_config_query_string(query_string)
elif body.config_data:
updates = flatten_config_data(body.config_data)
if not updates:
return JSONResponse(
content=(
{"success": False, "message": "No configuration data provided"}
),
status_code=400,
)
# apply all updates in a single operation
update_yaml_file_bulk(config_file, updates)
# validate the updated config
with open(config_file, "r") as f: with open(config_file, "r") as f:
new_raw_config = f.read() new_raw_config = f.read()
f.close()
# Validate the config schema
try: try:
config = FrigateConfig.parse(new_raw_config) config = FrigateConfig.parse(new_raw_config)
except Exception: except Exception:
@ -385,8 +383,34 @@ def config_set(request: Request, body: AppConfigSetBody):
status_code=500, status_code=500,
) )
if body.requires_restart == 0: if body.requires_restart == 0 or body.update_topic:
old_config: FrigateConfig = request.app.frigate_config
request.app.frigate_config = config request.app.frigate_config = config
if body.update_topic:
if body.update_topic.startswith("config/cameras/"):
_, _, camera, field = body.update_topic.split("/")
if field == "add":
settings = config.cameras[camera]
elif field == "remove":
settings = old_config.cameras[camera]
else:
settings = config.get_nested_object(body.update_topic)
request.app.config_publisher.publish_update(
CameraConfigUpdateTopic(CameraConfigUpdateEnum[field], camera),
settings,
)
else:
# Generic handling for global config updates
settings = config.get_nested_object(body.update_topic)
# Publish None for removal, actual config for add/update
request.app.config_publisher.publisher.publish(
body.update_topic, settings
)
return JSONResponse( return JSONResponse(
content=( content=(
{ {
@ -398,66 +422,6 @@ def config_set(request: Request, body: AppConfigSetBody):
) )
@router.get("/ffprobe")
def ffprobe(request: Request, paths: str = ""):
path_param = paths
if not path_param:
return JSONResponse(
content=({"success": False, "message": "Path needs to be provided."}),
status_code=404,
)
if path_param.startswith("camera"):
camera = path_param[7:]
if camera not in request.app.frigate_config.cameras.keys():
return JSONResponse(
content=(
{"success": False, "message": f"{camera} is not a valid camera."}
),
status_code=404,
)
if not request.app.frigate_config.cameras[camera].enabled:
return JSONResponse(
content=({"success": False, "message": f"{camera} is not enabled."}),
status_code=404,
)
paths = map(
lambda input: input.path,
request.app.frigate_config.cameras[camera].ffmpeg.inputs,
)
elif "," in clean_camera_user_pass(path_param):
paths = path_param.split(",")
else:
paths = [path_param]
# user has multiple streams
output = []
for path in paths:
ffprobe = ffprobe_stream(request.app.frigate_config.ffmpeg, path.strip())
output.append(
{
"return_code": ffprobe.returncode,
"stderr": (
ffprobe.stderr.decode("unicode_escape").strip()
if ffprobe.returncode != 0
else ""
),
"stdout": (
json.loads(ffprobe.stdout.decode("unicode_escape").strip())
if ffprobe.returncode == 0
else ""
),
}
)
return JSONResponse(content=output)
@router.get("/vainfo") @router.get("/vainfo")
def vainfo(): def vainfo():
vainfo = vainfo_hwaccel() vainfo = vainfo_hwaccel()
@ -733,7 +697,11 @@ def timeline(camera: str = "all", limit: int = 100, source_id: Optional[str] = N
clauses.append((Timeline.camera == camera)) clauses.append((Timeline.camera == camera))
if source_id: if source_id:
clauses.append((Timeline.source_id == source_id)) source_ids = [sid.strip() for sid in source_id.split(",")]
if len(source_ids) == 1:
clauses.append((Timeline.source_id == source_ids[0]))
else:
clauses.append((Timeline.source_id.in_(source_ids)))
if len(clauses) == 0: if len(clauses) == 0:
clauses.append((True)) clauses.append((True))
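With this change, `source_id` accepts a comma-separated list of IDs; a hypothetical client call (host, port, and the placeholder IDs are assumptions):

```python
import requests

r = requests.get(
    "http://frigate.local:5000/api/timeline",
    params={"camera": "front_door", "source_id": "id-one,id-two"},  # placeholder IDs
)
print(r.json())
```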
@ -11,7 +11,7 @@ import secrets
import time import time
from datetime import datetime from datetime import datetime
from pathlib import Path from pathlib import Path
from typing import List from typing import List, Optional
from fastapi import APIRouter, Depends, HTTPException, Request, Response from fastapi import APIRouter, Depends, HTTPException, Request, Response
from fastapi.responses import JSONResponse, RedirectResponse from fastapi.responses import JSONResponse, RedirectResponse
@ -33,7 +33,23 @@ from frigate.models import User
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.auth]) router = APIRouter(tags=[Tags.auth])
VALID_ROLES = ["admin", "viewer"]
@router.get("/auth/first_time_login")
def first_time_login(request: Request):
"""Return whether the admin first-time login help flag is set in config.
This endpoint is intentionally unauthenticated so the login page can
query it before a user is authenticated.
"""
auth_config = request.app.frigate_config.auth
return JSONResponse(
content={
"admin_first_time_login": auth_config.admin_first_time_login
or auth_config.reset_admin_password
}
)
class RateLimiter: class RateLimiter:
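Since the endpoint is intentionally unauthenticated, it can be probed without credentials; a sketch assuming the router is mounted under `/api` on the internal port:

```python
import requests

# Expected shape: {"admin_first_time_login": true} or false
print(requests.get("http://frigate.local:5000/api/auth/first_time_login").json())
```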
@ -204,6 +220,7 @@ async def get_current_user(request: Request):
def require_role(required_roles: List[str]): def require_role(required_roles: List[str]):
async def role_checker(request: Request): async def role_checker(request: Request):
proxy_config: ProxyConfig = request.app.frigate_config.proxy proxy_config: ProxyConfig = request.app.frigate_config.proxy
config_roles = list(request.app.frigate_config.auth.roles.keys())
# Get role from header (could be comma-separated) # Get role from header (could be comma-separated)
role_header = request.headers.get("remote-role") role_header = request.headers.get("remote-role")
@ -217,19 +234,123 @@ def require_role(required_roles: List[str]):
if not roles: if not roles:
raise HTTPException(status_code=403, detail="Role not provided") raise HTTPException(status_code=403, detail="Role not provided")
# Check if any role matches required_roles # enforce config roles
if not any(role in required_roles for role in roles): valid_roles = [r for r in roles if r in config_roles]
if not valid_roles:
raise HTTPException( raise HTTPException(
status_code=403, status_code=403,
detail=f"Role {', '.join(roles)} not authorized. Required: {', '.join(required_roles)}", detail=f"No valid roles found in {roles}. Required: {', '.join(required_roles)}. Available: {', '.join(config_roles)}",
) )
# Return the first matching role if not any(role in required_roles for role in valid_roles):
return next((role for role in roles if role in required_roles), roles[0]) raise HTTPException(
status_code=403,
detail=f"Role {', '.join(valid_roles)} not authorized. Required: {', '.join(required_roles)}",
)
return next(
(role for role in valid_roles if role in required_roles), valid_roles[0]
)
return role_checker return role_checker
def resolve_role(
headers: dict, proxy_config: ProxyConfig, config_roles: set[str]
) -> str:
"""
Determine the effective role for a request based on proxy headers and configuration.
Order of resolution:
1. If a role header is defined in proxy_config.header_map.role:
- If a role_map is configured, treat the header as group claims
(split by proxy_config.separator) and map to roles.
- If no role_map is configured, treat the header as role names directly.
2. If no valid role is found, return proxy_config.default_role if it's valid in config_roles, else 'viewer'.
Args:
headers (dict): Incoming request headers (case-insensitive).
proxy_config (ProxyConfig): Proxy configuration.
config_roles (set[str]): Set of valid roles from config.
Returns:
str: Resolved role (one of config_roles or validated default).
"""
default_role = proxy_config.default_role
role_header = proxy_config.header_map.role
# Validate default_role against config; fallback to 'viewer' if invalid
validated_default = default_role if default_role in config_roles else "viewer"
if not config_roles:
validated_default = "viewer" # Edge case: no roles defined
if not role_header:
logger.debug(
"No role header configured in proxy_config.header_map. Returning validated default role '%s'.",
validated_default,
)
return validated_default
raw_value = headers.get(role_header, "")
logger.debug("Raw role header value from '%s': %r", role_header, raw_value)
if not raw_value:
logger.debug(
"Role header missing or empty. Returning validated default role '%s'.",
validated_default,
)
return validated_default
# role_map configured, treat header as group claims
if proxy_config.header_map.role_map:
groups = [
g.strip() for g in raw_value.split(proxy_config.separator) if g.strip()
]
logger.debug("Parsed groups from role header: %s", groups)
matched_roles = {
role_name
for role_name, required_groups in proxy_config.header_map.role_map.items()
if any(group in groups for group in required_groups)
}
logger.debug("Matched roles from role_map: %s", matched_roles)
if matched_roles:
resolved = next(
(r for r in config_roles if r in matched_roles), validated_default
)
logger.debug("Resolved role (with role_map) to '%s'.", resolved)
return resolved
logger.debug(
"No role_map match for groups '%s'. Using validated default role '%s'.",
raw_value,
validated_default,
)
return validated_default
# no role_map, treat as role names directly
roles_from_header = [
r.strip().lower() for r in raw_value.split(proxy_config.separator) if r.strip()
]
logger.debug("Parsed roles directly from header: %s", roles_from_header)
resolved = next(
(r for r in config_roles if r in roles_from_header),
validated_default,
)
if resolved == validated_default and roles_from_header:
logger.debug(
"Provided proxy role header values '%s' did not contain a valid role. Using validated default role '%s'.",
raw_value,
validated_default,
)
else:
logger.debug("Resolved role (direct header) to '%s'.", resolved)
return resolved
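For illustration, a minimal driver for `resolve_role`, with `SimpleNamespace` standing in for the real `ProxyConfig` model (all field values here are assumptions):

```python
from types import SimpleNamespace

proxy_config = SimpleNamespace(
    default_role="viewer",
    separator="|",
    header_map=SimpleNamespace(
        role="x-forwarded-groups",  # hypothetical header name
        role_map={"admin": ["sysadmins"], "viewer": ["staff"]},
    ),
)

headers = {"x-forwarded-groups": "sysadmins"}
# "sysadmins" maps to the admin role via role_map
print(resolve_role(headers, proxy_config, {"admin", "viewer"}))  # -> "admin"
```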
# Endpoints # Endpoints
@router.get("/auth") @router.get("/auth")
def auth(request: Request): def auth(request: Request):
@ -266,22 +387,11 @@ def auth(request: Request):
else "anonymous" else "anonymous"
) )
role_header = proxy_config.header_map.role # parse header and resolve a valid role
role = ( config_roles_set = set(auth_config.roles.keys())
request.headers.get(role_header, default=proxy_config.default_role) role = resolve_role(request.headers, proxy_config, config_roles_set)
if role_header
else proxy_config.default_role
)
# if comma-separated with "admin", use "admin",
# if comma-separated with "viewer", use "viewer",
# else use default role
roles = [r.strip() for r in role.split(proxy_config.separator)] if role else []
success_response.headers["remote-role"] = next(
(r for r in VALID_ROLES if r in roles), proxy_config.default_role
)
success_response.headers["remote-role"] = role
return success_response return success_response
# now apply authentication # now apply authentication
@ -373,7 +483,13 @@ def profile(request: Request):
username = request.headers.get("remote-user", "anonymous") username = request.headers.get("remote-user", "anonymous")
role = request.headers.get("remote-role", "viewer") role = request.headers.get("remote-role", "viewer")
return JSONResponse(content={"username": username, "role": role}) all_camera_names = set(request.app.frigate_config.cameras.keys())
roles_dict = request.app.frigate_config.auth.roles
allowed_cameras = User.get_allowed_cameras(role, roles_dict, all_camera_names)
return JSONResponse(
content={"username": username, "role": role, "allowed_cameras": allowed_cameras}
)
@router.get("/logout") @router.get("/logout")
@ -404,14 +520,23 @@ def login(request: Request, body: AppPostLoginBody):
password_hash = db_user.password_hash password_hash = db_user.password_hash
if verify_password(password, password_hash): if verify_password(password, password_hash):
role = getattr(db_user, "role", "viewer") role = getattr(db_user, "role", "viewer")
if role not in VALID_ROLES: config_roles_set = set(request.app.frigate_config.auth.roles.keys())
role = "viewer" # Enforce valid roles if role not in config_roles_set:
logger.warning(
f"User {db_user.username} has an invalid role {role}, falling back to 'viewer'."
)
role = "viewer"
expiration = int(time.time()) + JWT_SESSION_LENGTH expiration = int(time.time()) + JWT_SESSION_LENGTH
encoded_jwt = create_encoded_jwt(user, role, expiration, request.app.jwt_token) encoded_jwt = create_encoded_jwt(user, role, expiration, request.app.jwt_token)
response = Response("", 200) response = Response("", 200)
set_jwt_cookie( set_jwt_cookie(
response, JWT_COOKIE_NAME, encoded_jwt, expiration, JWT_COOKIE_SECURE response, JWT_COOKIE_NAME, encoded_jwt, expiration, JWT_COOKIE_SECURE
) )
# Clear admin_first_time_login flag after successful admin login so the
# UI stops showing the first-time login documentation link.
if role == "admin":
request.app.frigate_config.auth.admin_first_time_login = False
return response return response
return JSONResponse(content={"message": "Login failed"}, status_code=401) return JSONResponse(content={"message": "Login failed"}, status_code=401)
@ -430,11 +555,17 @@ def create_user(
body: AppPostUsersBody, body: AppPostUsersBody,
): ):
HASH_ITERATIONS = request.app.frigate_config.auth.hash_iterations HASH_ITERATIONS = request.app.frigate_config.auth.hash_iterations
config_roles = list(request.app.frigate_config.auth.roles.keys())
if not re.match("^[A-Za-z0-9._]+$", body.username): if not re.match("^[A-Za-z0-9._]+$", body.username):
return JSONResponse(content={"message": "Invalid username"}, status_code=400) return JSONResponse(content={"message": "Invalid username"}, status_code=400)
role = body.role if body.role in VALID_ROLES else "viewer" if body.role not in config_roles:
return JSONResponse(
content={"message": f"Role must be one of: {', '.join(config_roles)}"},
status_code=400,
)
role = body.role or "viewer"
password_hash = hash_password(body.password, iterations=HASH_ITERATIONS) password_hash = hash_password(body.password, iterations=HASH_ITERATIONS)
User.insert( User.insert(
{ {
@ -505,10 +636,52 @@ async def update_role(
return JSONResponse( return JSONResponse(
content={"message": "Cannot modify admin user's role"}, status_code=403 content={"message": "Cannot modify admin user's role"}, status_code=403
) )
if body.role not in VALID_ROLES: config_roles = list(request.app.frigate_config.auth.roles.keys())
if body.role not in config_roles:
return JSONResponse( return JSONResponse(
content={"message": "Role must be 'admin' or 'viewer'"}, status_code=400 content={"message": f"Role must be one of: {', '.join(config_roles)}"},
status_code=400,
) )
User.set_by_id(username, {User.role: body.role}) User.set_by_id(username, {User.role: body.role})
return JSONResponse(content={"success": True}) return JSONResponse(content={"success": True})
async def require_camera_access(
camera_name: Optional[str] = None,
request: Request = None,
):
"""Dependency to enforce camera access based on user role."""
if camera_name is None:
return # For lists, filter later
current_user = await get_current_user(request)
if isinstance(current_user, JSONResponse):
return current_user
role = current_user["role"]
all_camera_names = set(request.app.frigate_config.cameras.keys())
roles_dict = request.app.frigate_config.auth.roles
allowed_cameras = User.get_allowed_cameras(role, roles_dict, all_camera_names)
# Admin or full access bypasses
if role == "admin" or not roles_dict.get(role):
return
if camera_name not in allowed_cameras:
raise HTTPException(
status_code=403,
detail=f"Access denied to camera '{camera_name}'. Allowed: {allowed_cameras}",
)
async def get_allowed_cameras_for_filter(request: Request):
"""Dependency to get allowed_cameras for filtering lists."""
current_user = await get_current_user(request)
if isinstance(current_user, JSONResponse):
return [] # Unauthorized: no cameras
role = current_user["role"]
all_camera_names = set(request.app.frigate_config.cameras.keys())
roles_dict = request.app.frigate_config.auth.roles
return User.get_allowed_cameras(role, roles_dict, all_camera_names)
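A hypothetical route showing how `require_camera_access` might be wired up as a FastAPI dependency (the path and handler are illustrative, not part of this commit):

```python
from fastapi import APIRouter, Depends

router = APIRouter()

@router.get(
    "/cameras/{camera_name}/status",
    dependencies=[Depends(require_camera_access)],
)
async def camera_status(camera_name: str):
    # Only reached when require_camera_access permits this camera for the caller.
    return {"camera": camera_name, "ok": True}
```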
frigate/api/camera.py Normal file
@ -0,0 +1,994 @@
"""Camera apis."""
import json
import logging
import re
from importlib.util import find_spec
from pathlib import Path
from urllib.parse import quote_plus
import httpx
import requests
from fastapi import APIRouter, Depends, Query, Request, Response
from fastapi.responses import JSONResponse
from onvif import ONVIFCamera, ONVIFError
from zeep.exceptions import Fault, TransportError
from zeep.transports import AsyncTransport
from frigate.api.auth import require_role
from frigate.api.defs.tags import Tags
from frigate.config.config import FrigateConfig
from frigate.util.builtin import clean_camera_user_pass
from frigate.util.image import run_ffmpeg_snapshot
from frigate.util.services import ffprobe_stream
logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.camera])
def _is_valid_host(host: str) -> bool:
"""
Validate that the host is in a valid format.
Allows private IPs since cameras are typically on local networks.
Only blocks obviously malicious input to prevent injection attacks.
"""
try:
# Remove port if present
host_without_port = host.split(":")[0] if ":" in host else host
# Block whitespace, newlines, and control characters
if not host_without_port or re.search(r"[\s\x00-\x1f]", host_without_port):
return False
# Allow standard hostname/IP characters: alphanumeric, dots, hyphens
if not re.match(r"^[a-zA-Z0-9.-]+$", host_without_port):
return False
return True
except Exception:
return False
@router.get("/go2rtc/streams")
def go2rtc_streams():
r = requests.get("http://127.0.0.1:1984/api/streams")
if not r.ok:
logger.error("Failed to fetch streams from go2rtc")
return JSONResponse(
content=({"success": False, "message": "Error fetching stream data"}),
status_code=500,
)
stream_data = r.json()
for data in stream_data.values():
for producer in data.get("producers") or []:
producer["url"] = clean_camera_user_pass(producer.get("url", ""))
return JSONResponse(content=stream_data)
@router.get("/go2rtc/streams/{camera_name}")
def go2rtc_camera_stream(request: Request, camera_name: str):
r = requests.get(
f"http://127.0.0.1:1984/api/streams?src={camera_name}&video=all&audio=all&microphone"
)
if not r.ok:
camera_config = request.app.frigate_config.cameras.get(camera_name)
if camera_config and camera_config.enabled:
logger.error("Failed to fetch streams from go2rtc")
return JSONResponse(
content=({"success": False, "message": "Error fetching stream data"}),
status_code=500,
)
stream_data = r.json()
for producer in stream_data.get("producers", []):
producer["url"] = clean_camera_user_pass(producer.get("url", ""))
return JSONResponse(content=stream_data)
@router.put(
"/go2rtc/streams/{stream_name}", dependencies=[Depends(require_role(["admin"]))]
)
def go2rtc_add_stream(request: Request, stream_name: str, src: str = ""):
"""Add or update a go2rtc stream configuration."""
try:
params = {"name": stream_name}
if src:
params["src"] = src
r = requests.put(
"http://127.0.0.1:1984/api/streams",
params=params,
timeout=10,
)
if not r.ok:
logger.error(f"Failed to add go2rtc stream {stream_name}: {r.text}")
return JSONResponse(
content=(
{"success": False, "message": f"Failed to add stream: {r.text}"}
),
status_code=r.status_code,
)
return JSONResponse(
content={"success": True, "message": "Stream added successfully"}
)
except requests.RequestException as e:
logger.error(f"Error communicating with go2rtc: {e}")
return JSONResponse(
content=(
{
"success": False,
"message": "Error communicating with go2rtc",
}
),
status_code=500,
)
@router.delete(
"/go2rtc/streams/{stream_name}", dependencies=[Depends(require_role(["admin"]))]
)
def go2rtc_delete_stream(stream_name: str):
"""Delete a go2rtc stream."""
try:
r = requests.delete(
"http://127.0.0.1:1984/api/streams",
params={"src": stream_name},
timeout=10,
)
if not r.ok:
logger.error(f"Failed to delete go2rtc stream {stream_name}: {r.text}")
return JSONResponse(
content=(
{"success": False, "message": f"Failed to delete stream: {r.text}"}
),
status_code=r.status_code,
)
return JSONResponse(
content={"success": True, "message": "Stream deleted successfully"}
)
except requests.RequestException as e:
logger.error(f"Error communicating with go2rtc: {e}")
return JSONResponse(
content=(
{
"success": False,
"message": "Error communicating with go2rtc",
}
),
status_code=500,
)
@router.get("/ffprobe")
def ffprobe(request: Request, paths: str = "", detailed: bool = False):
path_param = paths
if not path_param:
return JSONResponse(
content=({"success": False, "message": "Path needs to be provided."}),
status_code=404,
)
if path_param.startswith("camera"):
camera = path_param[7:]
if camera not in request.app.frigate_config.cameras.keys():
return JSONResponse(
content=(
{"success": False, "message": f"{camera} is not a valid camera."}
),
status_code=404,
)
if not request.app.frigate_config.cameras[camera].enabled:
return JSONResponse(
content=({"success": False, "message": f"{camera} is not enabled."}),
status_code=404,
)
paths = map(
lambda input: input.path,
request.app.frigate_config.cameras[camera].ffmpeg.inputs,
)
elif "," in clean_camera_user_pass(path_param):
paths = path_param.split(",")
else:
paths = [path_param]
# user has multiple streams
output = []
for path in paths:
ffprobe = ffprobe_stream(
request.app.frigate_config.ffmpeg, path.strip(), detailed=detailed
)
if ffprobe.returncode != 0:
try:
stderr_decoded = ffprobe.stderr.decode("utf-8")
except UnicodeDecodeError:
try:
stderr_decoded = ffprobe.stderr.decode("unicode_escape")
except Exception:
stderr_decoded = str(ffprobe.stderr)
stderr_lines = [
line.strip() for line in stderr_decoded.split("\n") if line.strip()
]
result = {
"return_code": ffprobe.returncode,
"stderr": stderr_lines,
"stdout": "",
}
else:
result = {
"return_code": ffprobe.returncode,
"stderr": [],
"stdout": json.loads(ffprobe.stdout.decode("unicode_escape").strip()),
}
# Add detailed metadata if requested and probe was successful
if detailed and ffprobe.returncode == 0 and result["stdout"]:
try:
probe_data = result["stdout"]
metadata = {}
# Extract video stream information
video_stream = None
audio_stream = None
for stream in probe_data.get("streams", []):
if stream.get("codec_type") == "video":
video_stream = stream
elif stream.get("codec_type") == "audio":
audio_stream = stream
# Video metadata
if video_stream:
metadata["video"] = {
"codec": video_stream.get("codec_name"),
"width": video_stream.get("width"),
"height": video_stream.get("height"),
"fps": _extract_fps(video_stream.get("avg_frame_rate")),
"pixel_format": video_stream.get("pix_fmt"),
"profile": video_stream.get("profile"),
"level": video_stream.get("level"),
}
# Calculate resolution string
if video_stream.get("width") and video_stream.get("height"):
metadata["video"]["resolution"] = (
f"{video_stream['width']}x{video_stream['height']}"
)
# Audio metadata
if audio_stream:
metadata["audio"] = {
"codec": audio_stream.get("codec_name"),
"channels": audio_stream.get("channels"),
"sample_rate": audio_stream.get("sample_rate"),
"channel_layout": audio_stream.get("channel_layout"),
}
# Container/format metadata
if probe_data.get("format"):
format_info = probe_data["format"]
metadata["container"] = {
"format": format_info.get("format_name"),
"duration": format_info.get("duration"),
"size": format_info.get("size"),
}
result["metadata"] = metadata
except Exception as e:
logger.warning(f"Failed to extract detailed metadata: {e}")
# Continue without metadata if parsing fails
output.append(result)
return JSONResponse(content=output)
@router.get("/ffprobe/snapshot", dependencies=[Depends(require_role(["admin"]))])
def ffprobe_snapshot(request: Request, url: str = "", timeout: int = 10):
"""Get a snapshot from a stream URL using ffmpeg."""
if not url:
return JSONResponse(
content={"success": False, "message": "URL parameter is required"},
status_code=400,
)
config: FrigateConfig = request.app.frigate_config
image_data, error = run_ffmpeg_snapshot(
config.ffmpeg, url, "mjpeg", timeout=timeout
)
if image_data:
return Response(
image_data,
media_type="image/jpeg",
headers={"Cache-Control": "no-store"},
)
elif error == "timeout":
return JSONResponse(
content={"success": False, "message": "Timeout capturing snapshot"},
status_code=408,
)
else:
logger.error(f"ffmpeg failed: {error}")
return JSONResponse(
content={"success": False, "message": "Failed to capture snapshot"},
status_code=500,
)
@router.get("/reolink/detect", dependencies=[Depends(require_role(["admin"]))])
def reolink_detect(host: str = "", username: str = "", password: str = ""):
"""
Detect Reolink camera capabilities and recommend optimal protocol.
Queries the Reolink camera API to determine the camera's resolution
and recommends either http-flv (for 5MP and below) or rtsp (for higher resolutions).
"""
if not host:
return JSONResponse(
content={"success": False, "message": "Host parameter is required"},
status_code=400,
)
if not username:
return JSONResponse(
content={"success": False, "message": "Username parameter is required"},
status_code=400,
)
if not password:
return JSONResponse(
content={"success": False, "message": "Password parameter is required"},
status_code=400,
)
# Validate host format to prevent injection attacks
if not _is_valid_host(host):
return JSONResponse(
content={"success": False, "message": "Invalid host format"},
status_code=400,
)
try:
# URL-encode credentials to prevent injection
encoded_user = quote_plus(username)
encoded_password = quote_plus(password)
api_url = f"http://{host}/api.cgi?cmd=GetEnc&user={encoded_user}&password={encoded_password}"
response = requests.get(api_url, timeout=5)
if not response.ok:
return JSONResponse(
content={
"success": False,
"protocol": None,
"message": f"Failed to connect to camera API: HTTP {response.status_code}",
},
status_code=200,
)
data = response.json()
enc_data = data[0] if isinstance(data, list) and len(data) > 0 else data
stream_info = None
if isinstance(enc_data, dict):
if enc_data.get("value", {}).get("Enc"):
stream_info = enc_data["value"]["Enc"]
elif enc_data.get("Enc"):
stream_info = enc_data["Enc"]
if not stream_info or not stream_info.get("mainStream"):
return JSONResponse(
content={
"success": False,
"protocol": None,
"message": "Could not find stream information in API response",
}
)
main_stream = stream_info["mainStream"]
width = main_stream.get("width", 0)
height = main_stream.get("height", 0)
if not width or not height:
return JSONResponse(
content={
"success": False,
"protocol": None,
"message": "Could not determine camera resolution",
}
)
megapixels = (width * height) / 1_000_000
protocol = "http-flv" if megapixels <= 5.0 else "rtsp"
return JSONResponse(
content={
"success": True,
"protocol": protocol,
"resolution": f"{width}x{height}",
"megapixels": round(megapixels, 2),
}
)
except requests.exceptions.Timeout:
return JSONResponse(
content={
"success": False,
"protocol": None,
"message": "Connection timeout - camera did not respond",
}
)
except requests.exceptions.RequestException:
return JSONResponse(
content={
"success": False,
"protocol": None,
"message": "Failed to connect to camera",
}
)
except Exception:
logger.exception(f"Error detecting Reolink camera at {host}")
return JSONResponse(
content={
"success": False,
"protocol": None,
"message": "Unable to detect camera capabilities",
}
)
def _extract_fps(r_frame_rate: str) -> float | None:
"""Extract FPS from ffprobe avg_frame_rate / r_frame_rate string (e.g., '30/1' -> 30.0)"""
if not r_frame_rate:
return None
try:
num, den = r_frame_rate.split("/")
return round(float(num) / float(den), 2)
except (ValueError, ZeroDivisionError):
return None
@router.get(
"/onvif/probe",
dependencies=[Depends(require_role(["admin"]))],
summary="Probe ONVIF device",
description=(
"Probe an ONVIF device to determine capabilities and optionally test available stream URIs. "
"Query params: host (required), port (default 80), username, password, test (boolean), "
"auth_type (basic or digest, default basic)."
),
)
async def onvif_probe(
request: Request,
host: str = Query(None),
port: int = Query(80),
username: str = Query(""),
password: str = Query(""),
test: bool = Query(False),
auth_type: str = Query("basic"), # Add auth_type parameter
):
"""
Probe a single ONVIF device to determine capabilities.
Connects to an ONVIF device and queries for:
- Device information (manufacturer, model)
- Media profiles count
- PTZ support
- Available presets
- Autotracking support
Query Parameters:
host: Device host/IP address (required)
port: Device port (default 80)
username: ONVIF username (optional)
password: ONVIF password (optional)
test: run ffprobe on the stream (optional)
auth_type: Authentication type - "basic" or "digest" (default "basic")
Returns:
JSON with device capabilities information
"""
if not host:
return JSONResponse(
content={"success": False, "message": "host parameter is required"},
status_code=400,
)
# Validate host format
if not _is_valid_host(host):
return JSONResponse(
content={"success": False, "message": "Invalid host format"},
status_code=400,
)
# Validate auth_type
if auth_type not in ["basic", "digest"]:
return JSONResponse(
content={
"success": False,
"message": "auth_type must be 'basic' or 'digest'",
},
status_code=400,
)
onvif_camera = None
try:
logger.debug(f"Probing ONVIF device at {host}:{port} with {auth_type} auth")
try:
wsdl_base = None
spec = find_spec("onvif")
if spec and getattr(spec, "origin", None):
wsdl_base = str(Path(spec.origin).parent / "wsdl")
except Exception:
wsdl_base = None
onvif_camera = ONVIFCamera(
host, port, username or "", password or "", wsdl_dir=wsdl_base
)
# Configure digest authentication if requested
if auth_type == "digest" and username and password:
# Create httpx client with digest auth
auth = httpx.DigestAuth(username, password)
client = httpx.AsyncClient(auth=auth, timeout=10.0)
# Replace the transport in the zeep client
transport = AsyncTransport(client=client)
# Update the xaddr before setting transport
await onvif_camera.update_xaddrs()
# Replace transport in all services
if hasattr(onvif_camera, "devicemgmt"):
onvif_camera.devicemgmt.zeep_client.transport = transport
if hasattr(onvif_camera, "media"):
onvif_camera.media.zeep_client.transport = transport
if hasattr(onvif_camera, "ptz"):
onvif_camera.ptz.zeep_client.transport = transport
logger.debug("Configured digest authentication")
else:
await onvif_camera.update_xaddrs()
# Get device information
device_info = {
"manufacturer": "Unknown",
"model": "Unknown",
"firmware_version": "Unknown",
}
try:
device_service = await onvif_camera.create_devicemgmt_service()
# Update transport for device service if digest auth
if auth_type == "digest" and username and password:
auth = httpx.DigestAuth(username, password)
client = httpx.AsyncClient(auth=auth, timeout=10.0)
transport = AsyncTransport(client=client)
device_service.zeep_client.transport = transport
device_info_resp = await device_service.GetDeviceInformation()
manufacturer = getattr(device_info_resp, "Manufacturer", None) or (
device_info_resp.get("Manufacturer")
if isinstance(device_info_resp, dict)
else None
)
model = getattr(device_info_resp, "Model", None) or (
device_info_resp.get("Model")
if isinstance(device_info_resp, dict)
else None
)
firmware = getattr(device_info_resp, "FirmwareVersion", None) or (
device_info_resp.get("FirmwareVersion")
if isinstance(device_info_resp, dict)
else None
)
device_info.update(
{
"manufacturer": manufacturer or "Unknown",
"model": model or "Unknown",
"firmware_version": firmware or "Unknown",
}
)
except Exception as e:
logger.debug(f"Failed to get device info: {e}")
# Get media profiles
profiles = []
profiles_count = 0
first_profile_token = None
ptz_config_token = None
try:
media_service = await onvif_camera.create_media_service()
# Update transport for media service if digest auth
if auth_type == "digest" and username and password:
auth = httpx.DigestAuth(username, password)
client = httpx.AsyncClient(auth=auth, timeout=10.0)
transport = AsyncTransport(client=client)
media_service.zeep_client.transport = transport
profiles = await media_service.GetProfiles()
profiles_count = len(profiles) if profiles else 0
if profiles and len(profiles) > 0:
p = profiles[0]
first_profile_token = getattr(p, "token", None) or (
p.get("token") if isinstance(p, dict) else None
)
# Get PTZ configuration token from the profile
ptz_configuration = getattr(p, "PTZConfiguration", None) or (
p.get("PTZConfiguration") if isinstance(p, dict) else None
)
if ptz_configuration:
ptz_config_token = getattr(ptz_configuration, "token", None) or (
ptz_configuration.get("token")
if isinstance(ptz_configuration, dict)
else None
)
except Exception as e:
logger.debug(f"Failed to get media profiles: {e}")
# Check PTZ support and capabilities
ptz_supported = False
presets_count = 0
autotrack_supported = False
try:
ptz_service = await onvif_camera.create_ptz_service()
# Update transport for PTZ service if digest auth
if auth_type == "digest" and username and password:
auth = httpx.DigestAuth(username, password)
client = httpx.AsyncClient(auth=auth, timeout=10.0)
transport = AsyncTransport(client=client)
ptz_service.zeep_client.transport = transport
# Check if PTZ service is available
try:
await ptz_service.GetServiceCapabilities()
ptz_supported = True
logger.debug("PTZ service is available")
except Exception as e:
logger.debug(f"PTZ service not available: {e}")
ptz_supported = False
# Try to get presets if PTZ is supported and we have a profile
if ptz_supported and first_profile_token:
try:
presets_resp = await ptz_service.GetPresets(
{"ProfileToken": first_profile_token}
)
presets_count = len(presets_resp) if presets_resp else 0
logger.debug(f"Found {presets_count} presets")
except Exception as e:
logger.debug(f"Failed to get presets: {e}")
presets_count = 0
# Check for autotracking support - requires both FOV relative movement and MoveStatus
if ptz_supported and first_profile_token and ptz_config_token:
# First check for FOV relative movement support
pt_r_fov_supported = False
try:
config_request = ptz_service.create_type("GetConfigurationOptions")
config_request.ConfigurationToken = ptz_config_token
ptz_config = await ptz_service.GetConfigurationOptions(
config_request
)
if ptz_config:
# Check for pt-r-fov support
spaces = getattr(ptz_config, "Spaces", None) or (
ptz_config.get("Spaces")
if isinstance(ptz_config, dict)
else None
)
if spaces:
rel_pan_tilt_space = getattr(
spaces, "RelativePanTiltTranslationSpace", None
) or (
spaces.get("RelativePanTiltTranslationSpace")
if isinstance(spaces, dict)
else None
)
if rel_pan_tilt_space:
# Look for FOV space
                        for space in rel_pan_tilt_space:
uri = None
if isinstance(space, dict):
uri = space.get("URI")
else:
uri = getattr(space, "URI", None)
if uri and "TranslationSpaceFov" in uri:
pt_r_fov_supported = True
logger.debug(
"FOV relative movement (pt-r-fov) supported"
)
break
logger.debug(f"PTZ config spaces: {ptz_config}")
except Exception as e:
logger.debug(f"Failed to check FOV relative movement: {e}")
pt_r_fov_supported = False
# Now check for MoveStatus support via GetServiceCapabilities
if pt_r_fov_supported:
try:
service_capabilities_request = ptz_service.create_type(
"GetServiceCapabilities"
)
service_capabilities = await ptz_service.GetServiceCapabilities(
service_capabilities_request
)
# Look for MoveStatus in the capabilities
move_status_capable = False
if service_capabilities:
# Try to find MoveStatus key recursively
def find_move_status(obj, key="MoveStatus"):
if isinstance(obj, dict):
if key in obj:
return obj[key]
for v in obj.values():
result = find_move_status(v, key)
if result is not None:
return result
elif hasattr(obj, key):
return getattr(obj, key)
elif hasattr(obj, "__dict__"):
for v in vars(obj).values():
result = find_move_status(v, key)
if result is not None:
return result
return None
move_status_value = find_move_status(service_capabilities)
# MoveStatus should return "true" if supported
if isinstance(move_status_value, bool):
move_status_capable = move_status_value
elif isinstance(move_status_value, str):
move_status_capable = (
move_status_value.lower() == "true"
)
logger.debug(f"MoveStatus capability: {move_status_value}")
# Autotracking is supported if both conditions are met
autotrack_supported = pt_r_fov_supported and move_status_capable
if autotrack_supported:
logger.debug(
"Autotracking fully supported (pt-r-fov + MoveStatus)"
)
else:
logger.debug(
f"Autotracking not fully supported - pt-r-fov: {pt_r_fov_supported}, MoveStatus: {move_status_capable}"
)
except Exception as e:
logger.debug(f"Failed to check MoveStatus support: {e}")
autotrack_supported = False
except Exception as e:
logger.debug(f"Failed to probe PTZ service: {e}")
result = {
"success": True,
"host": host,
"port": port,
"manufacturer": device_info["manufacturer"],
"model": device_info["model"],
"firmware_version": device_info["firmware_version"],
"profiles_count": profiles_count,
"ptz_supported": ptz_supported,
"presets_count": presets_count,
"autotrack_supported": autotrack_supported,
}
# Gather RTSP candidates
rtsp_candidates: list[dict] = []
try:
media_service = await onvif_camera.create_media_service()
# Update transport for media service if digest auth
if auth_type == "digest" and username and password:
auth = httpx.DigestAuth(username, password)
client = httpx.AsyncClient(auth=auth, timeout=10.0)
transport = AsyncTransport(client=client)
media_service.zeep_client.transport = transport
if profiles_count and media_service:
for p in profiles or []:
token = getattr(p, "token", None) or (
p.get("token") if isinstance(p, dict) else None
)
if not token:
continue
try:
stream_setup = {
"Stream": "RTP-Unicast",
"Transport": {"Protocol": "RTSP"},
}
stream_req = {
"ProfileToken": token,
"StreamSetup": stream_setup,
}
stream_uri_resp = await media_service.GetStreamUri(stream_req)
uri = (
stream_uri_resp.get("Uri")
if isinstance(stream_uri_resp, dict)
else getattr(stream_uri_resp, "Uri", None)
)
if uri:
logger.debug(
f"GetStreamUri returned for token {token}: {uri}"
)
# If credentials were provided, do NOT add the unauthenticated URI.
try:
if isinstance(uri, str) and uri.startswith("rtsp://"):
if username and password and "@" not in uri:
# Inject URL-encoded credentials and add only the
# authenticated version.
cred = f"{quote_plus(username)}:{quote_plus(password)}@"
injected = uri.replace(
"rtsp://", f"rtsp://{cred}", 1
)
rtsp_candidates.append(
{
"source": "GetStreamUri",
"profile_token": token,
"uri": injected,
}
)
else:
# No credentials provided or URI already contains
# credentials — add the URI as returned.
rtsp_candidates.append(
{
"source": "GetStreamUri",
"profile_token": token,
"uri": uri,
}
)
else:
# Non-RTSP URIs (e.g., http-flv) — add as returned.
rtsp_candidates.append(
{
"source": "GetStreamUri",
"profile_token": token,
"uri": uri,
}
)
except Exception as e:
logger.debug(
f"Skipping stream URI for token {token} due to processing error: {e}"
)
continue
except Exception:
logger.debug(
f"GetStreamUri failed for token {token}", exc_info=True
)
continue
# Add common RTSP patterns as fallback
if not rtsp_candidates:
common_paths = [
"/h264",
"/live.sdp",
"/media.amp",
"/Streaming/Channels/101",
"/Streaming/Channels/1",
"/stream1",
"/cam/realmonitor?channel=1&subtype=0",
"/11",
]
# Use URL-encoded credentials for pattern fallback URIs when provided
auth_str = (
f"{quote_plus(username)}:{quote_plus(password)}@"
if username and password
else ""
)
rtsp_port = 554
for path in common_paths:
uri = f"rtsp://{auth_str}{host}:{rtsp_port}{path}"
rtsp_candidates.append({"source": "pattern", "uri": uri})
except Exception:
logger.debug("Failed to collect RTSP candidates")
# Optionally test RTSP candidates using ffprobe_stream
tested_candidates = []
if test and rtsp_candidates:
for c in rtsp_candidates:
uri = c["uri"]
to_test = [uri]
try:
if (
username
and password
and isinstance(uri, str)
and uri.startswith("rtsp://")
and "@" not in uri
):
cred = f"{quote_plus(username)}:{quote_plus(password)}@"
cred_uri = uri.replace("rtsp://", f"rtsp://{cred}", 1)
if cred_uri not in to_test:
to_test.append(cred_uri)
except Exception:
pass
for test_uri in to_test:
try:
probe = ffprobe_stream(
request.app.frigate_config.ffmpeg, test_uri, detailed=False
)
                    logger.debug(f"ffprobe result for {test_uri}: {probe}")
ok = probe is not None and getattr(probe, "returncode", 1) == 0
tested_candidates.append(
{
"uri": test_uri,
"source": c.get("source"),
"ok": ok,
"profile_token": c.get("profile_token"),
}
)
except Exception as e:
logger.debug(f"Unable to probe stream: {e}")
tested_candidates.append(
{
"uri": test_uri,
"source": c.get("source"),
"ok": False,
"profile_token": c.get("profile_token"),
}
)
result["rtsp_candidates"] = rtsp_candidates
if test:
result["rtsp_tested"] = tested_candidates
logger.debug(f"ONVIF probe successful: {result}")
return JSONResponse(content=result)
except ONVIFError as e:
logger.warning(f"ONVIF error probing {host}:{port}: {e}")
return JSONResponse(
content={"success": False, "message": "ONVIF error"},
status_code=400,
)
except (Fault, TransportError) as e:
logger.warning(f"Connection error probing {host}:{port}: {e}")
return JSONResponse(
content={"success": False, "message": "Connection error"},
status_code=503,
)
except Exception as e:
logger.warning(f"Error probing ONVIF device at {host}:{port}, {e}")
return JSONResponse(
content={"success": False, "message": "Probe failed"},
status_code=500,
)
finally:
# Best-effort cleanup of ONVIF camera client session
if onvif_camera is not None:
try:
# Check if the camera has a close method and call it
if hasattr(onvif_camera, "close"):
await onvif_camera.close()
except Exception as e:
logger.debug(f"Error closing ONVIF camera session: {e}")

View File

@@ -3,7 +3,9 @@
 import datetime
 import logging
 import os
+import random
 import shutil
+import string
 from typing import Any

 import cv2
@@ -14,20 +16,46 @@ from peewee import DoesNotExist
 from playhouse.shortcuts import model_to_dict

 from frigate.api.auth import require_role
-from frigate.api.defs.request.classification_body import RenameFaceBody
+from frigate.api.defs.request.classification_body import (
+    AudioTranscriptionBody,
+    DeleteFaceImagesBody,
+    GenerateObjectExamplesBody,
+    GenerateStateExamplesBody,
+    RenameFaceBody,
+)
+from frigate.api.defs.response.classification_response import (
+    FaceRecognitionResponse,
+    FacesResponse,
+)
+from frigate.api.defs.response.generic_response import GenericResponse
 from frigate.api.defs.tags import Tags
+from frigate.config import FrigateConfig
 from frigate.config.camera import DetectConfig
-from frigate.const import FACE_DIR
+from frigate.const import CLIPS_DIR, FACE_DIR, MODEL_CACHE_DIR
 from frigate.embeddings import EmbeddingsContext
 from frigate.models import Event
-from frigate.util.path import get_event_snapshot
+from frigate.util.classification import (
+    collect_object_classification_examples,
+    collect_state_classification_examples,
+    get_dataset_image_count,
+    read_training_metadata,
+)
+from frigate.util.file import get_event_snapshot

 logger = logging.getLogger(__name__)


-router = APIRouter(tags=[Tags.events])
+router = APIRouter(tags=[Tags.classification])


-@router.get("/faces")
+@router.get(
+    "/faces",
+    response_model=FacesResponse,
+    summary="Get all registered faces",
+    description="""Returns a dictionary mapping face names to lists of image filenames.
+    Each key represents a registered face name, and the value is a list of image
+    files associated with that face. Supported image formats include .webp, .png,
+    .jpg, and .jpeg.""",
+)
 def get_faces():
     face_dict: dict[str, list[str]] = {}
@@ -51,7 +79,15 @@ def get_faces():
     return JSONResponse(status_code=200, content=face_dict)


-@router.post("/faces/reprocess", dependencies=[Depends(require_role(["admin"]))])
+@router.post(
+    "/faces/reprocess",
+    dependencies=[Depends(require_role(["admin"]))],
+    summary="Reprocess a face training image",
+    description="""Reprocesses a face training image to update the prediction.
+    Requires face recognition to be enabled in the configuration. The training file
+    must exist in the faces/train directory. Returns a success response or an error
+    message if face recognition is not enabled or the training file is invalid.""",
+)
 def reclassify_face(request: Request, body: dict = None):
     if not request.app.frigate_config.face_recognition.enabled:
         return JSONResponse(
@@ -78,13 +114,32 @@ def reclassify_face(request: Request, body: dict = None):
     context: EmbeddingsContext = request.app.embeddings
     response = context.reprocess_face(training_file)

+    if not isinstance(response, dict):
+        return JSONResponse(
+            status_code=500,
+            content={
+                "success": False,
+                "message": "Could not process request.",
+            },
+        )
+
     return JSONResponse(
+        status_code=200 if response.get("success", True) else 400,
         content=response,
-        status_code=200,
     )


-@router.post("/faces/train/{name}/classify")
+@router.post(
+    "/faces/train/{name}/classify",
+    response_model=GenericResponse,
+    summary="Classify and save a face training image",
+    description="""Adds a training image to a specific face name for face recognition.
+    Accepts either a training file from the train directory or an event_id to extract
+    the face from. The image is saved to the face's directory and the face classifier
+    is cleared to incorporate the new training data. Returns a success message with
+    the new filename or an error if face recognition is not enabled, the file/event
+    is invalid, or the face cannot be extracted.""",
+)
 def train_face(request: Request, name: str, body: dict = None):
     if not request.app.frigate_config.face_recognition.enabled:
         return JSONResponse(
@@ -123,8 +178,7 @@ def train_face(request: Request, name: str, body: dict = None):
     new_name = f"{sanitized_name}-{datetime.datetime.now().timestamp()}.webp"
     new_file_folder = os.path.join(FACE_DIR, f"{sanitized_name}")

-    if not os.path.exists(new_file_folder):
-        os.mkdir(new_file_folder)
+    os.makedirs(new_file_folder, exist_ok=True)

     if training_file_name:
         shutil.move(training_file, os.path.join(new_file_folder, new_name))
@@ -188,7 +242,16 @@ def train_face(request: Request, name: str, body: dict = None):
     )


-@router.post("/faces/{name}/create", dependencies=[Depends(require_role(["admin"]))])
+@router.post(
+    "/faces/{name}/create",
+    response_model=GenericResponse,
+    dependencies=[Depends(require_role(["admin"]))],
+    summary="Create a new face name",
+    description="""Creates a new folder for a face name in the faces directory.
+    This is used to organize face training images. The face name is sanitized and
+    spaces are replaced with underscores. Returns a success message or an error if
+    face recognition is not enabled.""",
+)
 async def create_face(request: Request, name: str):
     if not request.app.frigate_config.face_recognition.enabled:
         return JSONResponse(
@@ -205,7 +268,16 @@ async def create_face(request: Request, name: str):
     )


-@router.post("/faces/{name}/register", dependencies=[Depends(require_role(["admin"]))])
+@router.post(
+    "/faces/{name}/register",
+    response_model=GenericResponse,
+    dependencies=[Depends(require_role(["admin"]))],
+    summary="Register a face image",
+    description="""Registers a face image for a specific face name by uploading an image file.
+    The uploaded image is processed and added to the face recognition system. Returns a
+    success response with details about the registration, or an error if face recognition
+    is not enabled or the image cannot be processed.""",
+)
 async def register_face(request: Request, name: str, file: UploadFile):
     if not request.app.frigate_config.face_recognition.enabled:
         return JSONResponse(
@@ -231,7 +303,14 @@ async def register_face(request: Request, name: str, file: UploadFile):
     )


-@router.post("/faces/recognize")
+@router.post(
+    "/faces/recognize",
+    response_model=FaceRecognitionResponse,
+    summary="Recognize a face from an uploaded image",
+    description="""Recognizes a face from an uploaded image file by comparing it against
+    registered faces in the system. Returns the recognized face name and confidence score,
+    or an error if face recognition is not enabled or the image cannot be processed.""",
+)
 async def recognize_face(request: Request, file: UploadFile):
     if not request.app.frigate_config.face_recognition.enabled:
         return JSONResponse(
@@ -257,28 +336,38 @@ async def recognize_face(request: Request, file: UploadFile):
     )


-@router.post("/faces/{name}/delete", dependencies=[Depends(require_role(["admin"]))])
-def deregister_faces(request: Request, name: str, body: dict = None):
+@router.post(
+    "/faces/{name}/delete",
+    response_model=GenericResponse,
+    dependencies=[Depends(require_role(["admin"]))],
+    summary="Delete face images",
+    description="""Deletes specific face images for a given face name. The image IDs must belong
+    to the specified face folder. To delete an entire face folder, all image IDs in that
+    folder must be sent. Returns a success message or an error if face recognition is not enabled.""",
+)
+def deregister_faces(request: Request, name: str, body: DeleteFaceImagesBody):
     if not request.app.frigate_config.face_recognition.enabled:
         return JSONResponse(
             status_code=400,
             content={"message": "Face recognition is not enabled.", "success": False},
         )

-    json: dict[str, Any] = body or {}
-    list_of_ids = json.get("ids", "")
     context: EmbeddingsContext = request.app.embeddings
-    context.delete_face_ids(
-        name, map(lambda file: sanitize_filename(file), list_of_ids)
-    )
+    context.delete_face_ids(name, map(lambda file: sanitize_filename(file), body.ids))

     return JSONResponse(
         content=({"success": True, "message": "Successfully deleted faces."}),
         status_code=200,
     )


-@router.put("/faces/{old_name}/rename", dependencies=[Depends(require_role(["admin"]))])
+@router.put(
+    "/faces/{old_name}/rename",
+    response_model=GenericResponse,
+    dependencies=[Depends(require_role(["admin"]))],
+    summary="Rename a face name",
+    description="""Renames a face name in the system. The old name must exist and the new
+    name must be valid. Returns a success message or an error if face recognition is not enabled.""",
+)
 def rename_face(request: Request, old_name: str, body: RenameFaceBody):
     if not request.app.frigate_config.face_recognition.enabled:
         return JSONResponse(
@@ -307,7 +396,14 @@ def rename_face(request: Request, old_name: str, body: RenameFaceBody):
     )


-@router.put("/lpr/reprocess")
+@router.put(
+    "/lpr/reprocess",
+    summary="Reprocess a license plate",
+    description="""Reprocesses a license plate image to update the plate.
+    Requires license plate recognition to be enabled in the configuration. The event_id
+    must exist in the database. Returns a success message or an error if license plate
+    recognition is not enabled or the event_id is invalid.""",
+)
 def reprocess_license_plate(request: Request, event_id: str):
     if not request.app.frigate_config.lpr.enabled:
         message = "License plate recognition is not enabled."
@@ -340,7 +436,14 @@ def reprocess_license_plate(request: Request, event_id: str):
     )


-@router.put("/reindex", dependencies=[Depends(require_role(["admin"]))])
+@router.put(
+    "/reindex",
+    response_model=GenericResponse,
+    dependencies=[Depends(require_role(["admin"]))],
+    summary="Reindex embeddings",
+    description="""Reindexes the embeddings for all tracked objects.
+    Requires semantic search to be enabled in the configuration. Returns a success message or an error if semantic search is not enabled.""",
+)
 def reindex_embeddings(request: Request):
     if not request.app.frigate_config.semantic_search.enabled:
         message = (
@@ -384,3 +487,502 @@ def reindex_embeddings(request: Request):
         },
         status_code=500,
     )
@router.put(
"/audio/transcribe",
response_model=GenericResponse,
summary="Transcribe audio",
description="""Transcribes audio from a specific event.
Requires audio transcription to be enabled in the configuration. The event_id
must exist in the database. Returns a success message or an error if audio transcription is not enabled or the event_id is invalid.""",
)
def transcribe_audio(request: Request, body: AudioTranscriptionBody):
event_id = body.event_id
try:
event = Event.get(Event.id == event_id)
except DoesNotExist:
message = f"Event {event_id} not found"
logger.error(message)
return JSONResponse(
content=({"success": False, "message": message}), status_code=404
)
if not request.app.frigate_config.cameras[event.camera].audio_transcription.enabled:
message = f"Audio transcription is not enabled for {event.camera}."
logger.error(message)
return JSONResponse(
content=(
{
"success": False,
"message": message,
}
),
status_code=400,
)
context: EmbeddingsContext = request.app.embeddings
response = context.transcribe_audio(model_to_dict(event))
if response == "started":
return JSONResponse(
content={
"success": True,
"message": "Audio transcription has started.",
},
status_code=202, # 202 Accepted
)
elif response == "in_progress":
return JSONResponse(
content={
"success": False,
"message": "Audio transcription for a speech event is currently in progress. Try again later.",
},
status_code=409, # 409 Conflict
)
else:
return JSONResponse(
content={
"success": False,
"message": "Failed to transcribe audio.",
},
status_code=500,
)
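
# Hedged polling sketch for the transcription endpoint above (base URL and
# event id are assumptions): 202 means transcription started, 409 means one
# is already running for a speech event, 500 means it failed outright.
#
# import requests
#
# r = requests.put(
#     "http://frigate.local:5000/api/audio/transcribe",
#     json={"event_id": "1718000000.123456-abcd12"},
# )
# started = r.status_code == 202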
# custom classification training
@router.get(
"/classification/{name}/dataset",
summary="Get classification dataset",
description="""Gets the dataset for a specific classification model.
The name must exist in the classification models. Returns a success message or an error if the name is invalid.""",
)
def get_classification_dataset(name: str):
dataset_dict: dict[str, list[str]] = {}
dataset_dir = os.path.join(CLIPS_DIR, sanitize_filename(name), "dataset")
if not os.path.exists(dataset_dir):
return JSONResponse(
status_code=200, content={"categories": {}, "training_metadata": None}
)
for category_name in os.listdir(dataset_dir):
category_dir = os.path.join(dataset_dir, category_name)
if not os.path.isdir(category_dir):
continue
dataset_dict[category_name] = []
for file in filter(
lambda f: (f.lower().endswith((".webp", ".png", ".jpg", ".jpeg"))),
os.listdir(category_dir),
):
dataset_dict[category_name].append(file)
# Get training metadata
metadata = read_training_metadata(sanitize_filename(name))
current_image_count = get_dataset_image_count(sanitize_filename(name))
if metadata is None:
training_metadata = {
"has_trained": False,
"last_training_date": None,
"last_training_image_count": 0,
"current_image_count": current_image_count,
"new_images_count": current_image_count,
"dataset_changed": current_image_count > 0,
}
else:
last_training_count = metadata.get("last_training_image_count", 0)
# Dataset has changed if count is different (either added or deleted images)
dataset_changed = current_image_count != last_training_count
# Only show positive count for new images (ignore deletions in the count display)
new_images_count = max(0, current_image_count - last_training_count)
training_metadata = {
"has_trained": True,
"last_training_date": metadata.get("last_training_date"),
"last_training_image_count": last_training_count,
"current_image_count": current_image_count,
"new_images_count": new_images_count,
"dataset_changed": dataset_changed,
}
return JSONResponse(
status_code=200,
content={
"categories": dataset_dict,
"training_metadata": training_metadata,
},
)
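
# Worked example of the training metadata math above (counts are assumed):
# with last_training_image_count=40 and current_image_count=55,
# dataset_changed is True and new_images_count is 15; with
# current_image_count=30 (images deleted since training), dataset_changed is
# still True but new_images_count clamps to max(0, 30 - 40) == 0.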
@router.get(
"/classification/{name}/train",
summary="Get classification train images",
description="""Gets the train images for a specific classification model.
The name must exist in the classification models. Returns a success message or an error if the name is invalid.""",
)
def get_classification_images(name: str):
train_dir = os.path.join(CLIPS_DIR, sanitize_filename(name), "train")
if not os.path.exists(train_dir):
return JSONResponse(status_code=200, content=[])
return JSONResponse(
status_code=200,
content=list(
filter(
lambda f: (f.lower().endswith((".webp", ".png", ".jpg", ".jpeg"))),
os.listdir(train_dir),
)
),
)
@router.post(
"/classification/{name}/train",
response_model=GenericResponse,
summary="Train a classification model",
description="""Trains a specific classification model.
The name must exist in the classification models. Returns a success message or an error if the name is invalid.""",
)
async def train_configured_model(request: Request, name: str):
config: FrigateConfig = request.app.frigate_config
if name not in config.classification.custom:
return JSONResponse(
content=(
{
"success": False,
"message": f"{name} is not a known classification model.",
}
),
status_code=404,
)
context: EmbeddingsContext = request.app.embeddings
context.start_classification_training(name)
return JSONResponse(
content={"success": True, "message": "Started classification model training."},
status_code=200,
)
@router.post(
"/classification/{name}/dataset/{category}/delete",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Delete classification dataset images",
description="""Deletes specific dataset images for a given classification model and category.
The image IDs must belong to the specified category. Returns a success message or an error if the name or category is invalid.""",
)
def delete_classification_dataset_images(
request: Request, name: str, category: str, body: dict = None
):
config: FrigateConfig = request.app.frigate_config
if name not in config.classification.custom:
return JSONResponse(
content=(
{
"success": False,
"message": f"{name} is not a known classification model.",
}
),
status_code=404,
)
json: dict[str, Any] = body or {}
    list_of_ids = json.get("ids", [])
folder = os.path.join(
CLIPS_DIR, sanitize_filename(name), "dataset", sanitize_filename(category)
)
for id in list_of_ids:
file_path = os.path.join(folder, sanitize_filename(id))
if os.path.isfile(file_path):
os.unlink(file_path)
if os.path.exists(folder) and not os.listdir(folder):
os.rmdir(folder)
return JSONResponse(
content=({"success": True, "message": "Successfully deleted images."}),
status_code=200,
)
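
# Hedged request sketch for the endpoint above (model name, category, and
# filenames are assumptions): deleting two dataset images from the "daytime"
# category of a model named "door_state".
#
# POST /api/classification/door_state/dataset/daytime/delete
# {"ids": ["img-1.png", "img-2.png"]}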
@router.put(
"/classification/{name}/dataset/{old_category}/rename",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Rename a classification category",
description="""Renames a classification category for a given classification model.
The old category must exist and the new name must be valid. Returns a success message or an error if the name is invalid.""",
)
def rename_classification_category(
request: Request, name: str, old_category: str, body: dict = None
):
config: FrigateConfig = request.app.frigate_config
if name not in config.classification.custom:
return JSONResponse(
content=(
{
"success": False,
"message": f"{name} is not a known classification model.",
}
),
status_code=404,
)
json: dict[str, Any] = body or {}
new_category = sanitize_filename(json.get("new_category", ""))
if not new_category:
return JSONResponse(
content=(
{
"success": False,
"message": "New category name is required.",
}
),
status_code=400,
)
old_folder = os.path.join(
CLIPS_DIR, sanitize_filename(name), "dataset", sanitize_filename(old_category)
)
new_folder = os.path.join(
CLIPS_DIR, sanitize_filename(name), "dataset", new_category
)
if not os.path.exists(old_folder):
return JSONResponse(
content=(
{
"success": False,
"message": f"Category {old_category} does not exist.",
}
),
status_code=404,
)
if os.path.exists(new_folder):
return JSONResponse(
content=(
{
"success": False,
"message": f"Category {new_category} already exists.",
}
),
status_code=400,
)
try:
os.rename(old_folder, new_folder)
return JSONResponse(
content=(
{
"success": True,
"message": f"Successfully renamed category to {new_category}.",
}
),
status_code=200,
)
except Exception as e:
logger.error(f"Error renaming category: {e}")
return JSONResponse(
content=(
{
"success": False,
"message": "Failed to rename category",
}
),
status_code=500,
)
@router.post(
"/classification/{name}/dataset/categorize",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Categorize a classification image",
description="""Categorizes a specific classification image for a given classification model and category.
The image must exist in the specified category. Returns a success message or an error if the name or category is invalid.""",
)
def categorize_classification_image(request: Request, name: str, body: dict = None):
config: FrigateConfig = request.app.frigate_config
if name not in config.classification.custom:
return JSONResponse(
content=(
{
"success": False,
"message": f"{name} is not a known classification model.",
}
),
status_code=404,
)
json: dict[str, Any] = body or {}
category = sanitize_filename(json.get("category", ""))
training_file_name = sanitize_filename(json.get("training_file", ""))
training_file = os.path.join(
CLIPS_DIR, sanitize_filename(name), "train", training_file_name
)
if training_file_name and not os.path.isfile(training_file):
return JSONResponse(
content=(
{
"success": False,
"message": f"Invalid filename or no file exists: {training_file_name}",
}
),
status_code=404,
)
random_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
timestamp = datetime.datetime.now().timestamp()
new_name = f"{category}-{timestamp}-{random_id}.png"
new_file_folder = os.path.join(
CLIPS_DIR, sanitize_filename(name), "dataset", category
)
os.makedirs(new_file_folder, exist_ok=True)
    # use opencv because webp images cannot be used to train
img = cv2.imread(training_file)
cv2.imwrite(os.path.join(new_file_folder, new_name), img)
os.unlink(training_file)
return JSONResponse(
content=({"success": True, "message": "Successfully categorized image."}),
status_code=200,
)
@router.post(
"/classification/{name}/train/delete",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Delete classification train images",
description="""Deletes specific train images for a given classification model.
The image IDs must belong to the specified train folder. Returns a success message or an error if the name is invalid.""",
)
def delete_classification_train_images(request: Request, name: str, body: dict = None):
config: FrigateConfig = request.app.frigate_config
if name not in config.classification.custom:
return JSONResponse(
content=(
{
"success": False,
"message": f"{name} is not a known classification model.",
}
),
status_code=404,
)
json: dict[str, Any] = body or {}
    list_of_ids = json.get("ids", [])
folder = os.path.join(CLIPS_DIR, sanitize_filename(name), "train")
for id in list_of_ids:
file_path = os.path.join(folder, sanitize_filename(id))
if os.path.isfile(file_path):
os.unlink(file_path)
return JSONResponse(
content=({"success": True, "message": "Successfully deleted images."}),
status_code=200,
)
@router.post(
"/classification/generate_examples/state",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Generate state classification examples",
)
async def generate_state_examples(request: Request, body: GenerateStateExamplesBody):
"""Generate examples for state classification."""
model_name = sanitize_filename(body.model_name)
cameras_normalized = {
camera_name: tuple(crop)
for camera_name, crop in body.cameras.items()
if camera_name in request.app.frigate_config.cameras
}
collect_state_classification_examples(model_name, cameras_normalized)
return JSONResponse(
content={"success": True, "message": "Example generation completed"},
status_code=200,
)
@router.post(
"/classification/generate_examples/object",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Generate object classification examples",
)
async def generate_object_examples(request: Request, body: GenerateObjectExamplesBody):
"""Generate examples for object classification."""
model_name = sanitize_filename(body.model_name)
collect_object_classification_examples(model_name, body.label)
return JSONResponse(
content={"success": True, "message": "Example generation completed"},
status_code=200,
)
@router.delete(
"/classification/{name}",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Delete a classification model",
description="""Deletes a specific classification model and all its associated data.
Works even if the model is not in the config (e.g., partially created during wizard).
Returns a success message.""",
)
def delete_classification_model(request: Request, name: str):
sanitized_name = sanitize_filename(name)
# Delete the classification model's data directory in clips
data_dir = os.path.join(CLIPS_DIR, sanitized_name)
if os.path.exists(data_dir):
try:
shutil.rmtree(data_dir)
logger.info(f"Deleted classification data directory for {name}")
except Exception as e:
logger.debug(f"Failed to delete data directory for {name}: {e}")
# Delete the classification model's files in model_cache
model_dir = os.path.join(MODEL_CACHE_DIR, sanitized_name)
if os.path.exists(model_dir):
try:
shutil.rmtree(model_dir)
logger.info(f"Deleted classification model directory for {name}")
except Exception as e:
logger.debug(f"Failed to delete model directory for {name}: {e}")
return JSONResponse(
content=(
{
"success": True,
"message": f"Successfully deleted classification model {name}.",
}
),
status_code=200,
)

View File

@@ -1,7 +1,8 @@
 from enum import Enum
-from typing import Optional
+from typing import Optional, Union

 from pydantic import BaseModel
+from pydantic.json_schema import SkipJsonSchema


 class Extension(str, Enum):
@@ -22,6 +23,7 @@ class MediaLatestFrameQueryParams(BaseModel):
     zones: Optional[int] = None
     mask: Optional[int] = None
     motion: Optional[int] = None
+    paths: Optional[int] = None
     regions: Optional[int] = None
     quality: Optional[int] = 70
     height: Optional[int] = None
@@ -51,3 +53,10 @@ class MediaRecordingsSummaryQueryParams(BaseModel):
 class MediaRecordingsSummaryQueryParams(BaseModel):
     timezone: str = "utc"
     cameras: Optional[str] = "all"
+
+
+class MediaRecordingsAvailabilityQueryParams(BaseModel):
+    cameras: str = "all"
+    before: Union[float, SkipJsonSchema[None]] = None
+    after: Union[float, SkipJsonSchema[None]] = None
+    scale: int = 30
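
# Hedged usage sketch (endpoint path and values are assumptions): these
# params arrive as a query string such as
#   ?cameras=front_door,back_yard&after=1718000000&before=1718003600&scale=30
# where scale is the bucket size, in seconds, used when reporting recording
# availability.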

View File

@@ -1,9 +1,13 @@
 from typing import Optional

-from pydantic import BaseModel
+from pydantic import BaseModel, Field

 from frigate.events.types import RegenerateDescriptionEnum


 class RegenerateQueryParameters(BaseModel):
     source: Optional[RegenerateDescriptionEnum] = RegenerateDescriptionEnum.thumbnails
+    force: Optional[bool] = Field(
+        default=False,
+        description="Force (re)generating the description even if GenAI is disabled for this camera.",
+    )

View File

@@ -1,10 +1,12 @@
-from typing import Optional
+from typing import Any, Dict, Optional

 from pydantic import BaseModel


 class AppConfigSetBody(BaseModel):
     requires_restart: int = 1
+    update_topic: str | None = None
+    config_data: Optional[Dict[str, Any]] = None


 class AppPutPasswordBody(BaseModel):

View File

@@ -1,5 +1,31 @@
-from pydantic import BaseModel
+from typing import Dict, List, Tuple
+
+from pydantic import BaseModel, Field


 class RenameFaceBody(BaseModel):
-    new_name: str
+    new_name: str = Field(description="New name for the face")
+
+
+class AudioTranscriptionBody(BaseModel):
+    event_id: str = Field(description="ID of the event to transcribe audio for")
+
+
+class DeleteFaceImagesBody(BaseModel):
+    ids: List[str] = Field(
+        description="List of image filenames to delete from the face folder"
+    )
+
+
+class GenerateStateExamplesBody(BaseModel):
+    model_name: str = Field(description="Name of the classification model")
+    cameras: Dict[str, Tuple[float, float, float, float]] = Field(
+        description="Dictionary mapping camera names to normalized crop coordinates in [x1, y1, x2, y2] format (values 0-1)"
+    )
+
+
+class GenerateObjectExamplesBody(BaseModel):
+    model_name: str = Field(description="Name of the classification model")
+    label: str = Field(
+        description="Object label to collect examples for (e.g., 'person', 'car')"
+    )

View File

@@ -2,6 +2,8 @@ from typing import List, Optional, Union

 from pydantic import BaseModel, Field

+from frigate.config.classification import TriggerType
+

 class EventsSubLabelBody(BaseModel):
     subLabel: str = Field(title="Sub label", max_length=100)
@@ -45,3 +47,9 @@ class EventsDeleteBody(BaseModel):

 class SubmitPlusBody(BaseModel):
     include_annotation: int = Field(default=1)
+
+
+class TriggerEmbeddingBody(BaseModel):
+    type: TriggerType
+    data: str
+    threshold: float = Field(default=0.5, ge=0.0, le=1.0)

View File

@@ -4,3 +4,5 @@ from pydantic import BaseModel, conlist, constr
 class ReviewModifyMultipleBody(BaseModel):
     # List of string with at least one element and each element with at least one char
     ids: conlist(constr(min_length=1), min_length=1)
+    # Whether to mark items as reviewed (True) or unreviewed (False)
+    reviewed: bool = True

View File

@@ -0,0 +1,38 @@
from typing import Dict, List, Optional
from pydantic import BaseModel, Field, RootModel
class FacesResponse(RootModel[Dict[str, List[str]]]):
"""Response model for the get_faces endpoint.
Returns a mapping of face names to lists of image filenames.
Each face name corresponds to a directory in the faces folder,
and the list contains the names of image files for that face.
Example:
{
"john_doe": ["face1.webp", "face2.jpg"],
"jane_smith": ["face3.png"]
}
"""
root: Dict[str, List[str]] = Field(
default_factory=dict,
description="Dictionary mapping face names to lists of image filenames",
)
class FaceRecognitionResponse(BaseModel):
"""Response model for face recognition endpoint.
Returns the result of attempting to recognize a face from an uploaded image.
"""
success: bool = Field(description="Whether the face recognition was successful")
score: Optional[float] = Field(
default=None, description="Confidence score of the recognition (0-1)"
)
face_name: Optional[str] = Field(
default=None, description="The recognized face name if successful"
)

View File

@@ -0,0 +1,30 @@
from typing import List, Optional
from pydantic import BaseModel, Field
class ExportModel(BaseModel):
"""Model representing a single export."""
id: str = Field(description="Unique identifier for the export")
camera: str = Field(description="Camera name associated with this export")
name: str = Field(description="Friendly name of the export")
date: float = Field(description="Unix timestamp when the export was created")
video_path: str = Field(description="File path to the exported video")
thumb_path: str = Field(description="File path to the export thumbnail")
in_progress: bool = Field(
description="Whether the export is currently being processed"
)
class StartExportResponse(BaseModel):
"""Response model for starting an export."""
success: bool = Field(description="Whether the export was started successfully")
message: str = Field(description="Status or error message")
export_id: Optional[str] = Field(
default=None, description="The export ID if successfully started"
)
ExportsResponse = List[ExportModel]
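
# Hedged example payload matching ExportModel (all field values assumed):
#
# {
#     "id": "abc123",
#     "camera": "front_door",
#     "name": "Morning clip",
#     "date": 1718000000.0,
#     "video_path": "/media/frigate/exports/abc123.mp4",
#     "thumb_path": "/media/frigate/exports/abc123.webp",
#     "in_progress": false
# }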

View File

@@ -0,0 +1,17 @@
from typing import List
from pydantic import BaseModel, Field
class PreviewModel(BaseModel):
"""Model representing a single preview clip."""
camera: str = Field(description="Camera name for this preview")
src: str = Field(description="Path to the preview video file")
type: str = Field(description="MIME type of the preview video (video/mp4)")
start: float = Field(description="Unix timestamp when the preview starts")
end: float = Field(description="Unix timestamp when the preview ends")
PreviewsResponse = List[PreviewModel]
PreviewFramesResponse = List[str]

View File

@@ -3,6 +3,7 @@ from enum import Enum

 class Tags(Enum):
     app = "App"
+    camera = "Camera"
     preview = "Preview"
     logs = "Logs"
     media = "Media"
@@ -10,5 +11,5 @@ class Tags(Enum):
     review = "Review"
     export = "Export"
     events = "Events"
-    classification = "classification"
+    classification = "Classification"
     auth = "Auth"

File diff suppressed because it is too large

View File

@@ -4,6 +4,7 @@ import logging
 import random
 import string
 from pathlib import Path
+from typing import List

 import psutil
 from fastapi import APIRouter, Depends, Request
@@ -12,9 +13,19 @@ from pathvalidate import sanitize_filepath
 from peewee import DoesNotExist
 from playhouse.shortcuts import model_to_dict

-from frigate.api.auth import require_role
+from frigate.api.auth import (
+    get_allowed_cameras_for_filter,
+    require_camera_access,
+    require_role,
+)
 from frigate.api.defs.request.export_recordings_body import ExportRecordingsBody
 from frigate.api.defs.request.export_rename_body import ExportRenameBody
+from frigate.api.defs.response.export_response import (
+    ExportModel,
+    ExportsResponse,
+    StartExportResponse,
+)
+from frigate.api.defs.response.generic_response import GenericResponse
 from frigate.api.defs.tags import Tags
 from frigate.const import CLIPS_DIR, EXPORT_DIR
 from frigate.models import Export, Previews, Recordings
@@ -23,20 +34,43 @@ from frigate.record.export import (
     PlaybackSourceEnum,
     RecordingExporter,
 )
-from frigate.util.builtin import is_current_hour
+from frigate.util.time import is_current_hour

 logger = logging.getLogger(__name__)

 router = APIRouter(tags=[Tags.export])


-@router.get("/exports")
-def get_exports():
-    exports = Export.select().order_by(Export.date.desc()).dicts().iterator()
+@router.get(
+    "/exports",
+    response_model=ExportsResponse,
+    summary="Get exports",
+    description="""Gets all exports from the database for cameras the user has access to.
+    Returns a list of exports ordered by date (most recent first).""",
+)
+def get_exports(
+    allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
+):
+    exports = (
+        Export.select()
+        .where(Export.camera << allowed_cameras)
+        .order_by(Export.date.desc())
+        .dicts()
+        .iterator()
+    )
     return JSONResponse(content=[e for e in exports])


-@router.post("/export/{camera_name}/start/{start_time}/end/{end_time}")
+@router.post(
+    "/export/{camera_name}/start/{start_time}/end/{end_time}",
+    response_model=StartExportResponse,
+    dependencies=[Depends(require_camera_access)],
+    summary="Start recording export",
+    description="""Starts an export of a recording for the specified time range.
+    The export can be from recordings or preview footage. Returns the export ID if
+    successful, or an error message if the camera is invalid or no recordings/previews
+    are found for the time range.""",
+)
 def export_recording(
     request: Request,
     camera_name: str,
@@ -140,11 +174,18 @@ def export_recording(


 @router.patch(
-    "/export/{event_id}/rename", dependencies=[Depends(require_role(["admin"]))]
+    "/export/{event_id}/rename",
+    response_model=GenericResponse,
+    dependencies=[Depends(require_role(["admin"]))],
+    summary="Rename export",
+    description="""Renames an export.
+    NOTE: This changes the friendly name of the export, not the filename.
+    """,
 )
-def export_rename(event_id: str, body: ExportRenameBody):
+async def export_rename(event_id: str, body: ExportRenameBody, request: Request):
     try:
         export: Export = Export.get(Export.id == event_id)
+        await require_camera_access(export.camera, request=request)
     except DoesNotExist:
         return JSONResponse(
             content=(
@@ -169,10 +210,16 @@ def export_rename(event_id: str, body: ExportRenameBody):
     )


-@router.delete("/export/{event_id}", dependencies=[Depends(require_role(["admin"]))])
-def export_delete(event_id: str):
+@router.delete(
+    "/export/{event_id}",
+    response_model=GenericResponse,
+    dependencies=[Depends(require_role(["admin"]))],
+    summary="Delete export",
+)
+async def export_delete(event_id: str, request: Request):
     try:
         export: Export = Export.get(Export.id == event_id)
+        await require_camera_access(export.camera, request=request)
     except DoesNotExist:
         return JSONResponse(
             content=(
@@ -222,10 +269,18 @@ def export_delete(event_id: str):
     )


-@router.get("/exports/{export_id}")
-def get_export(export_id: str):
+@router.get(
+    "/exports/{export_id}",
+    response_model=ExportModel,
+    summary="Get a single export",
+    description="""Gets a specific export by ID. The user must have access to the camera
+    associated with the export.""",
+)
+async def get_export(export_id: str, request: Request):
     try:
-        return JSONResponse(content=model_to_dict(Export.get(Export.id == export_id)))
+        export = Export.get(Export.id == export_id)
+        await require_camera_access(export.camera, request=request)
+        return JSONResponse(content=model_to_dict(export))
     except DoesNotExist:
         return JSONResponse(
             content={"success": False, "message": "Export not found"},

View File

@@ -1,8 +1,10 @@
 import logging
+import re
 from typing import Optional

 from fastapi import FastAPI, Request
 from fastapi.responses import JSONResponse
+from joserfc.jwk import OctKey
 from playhouse.sqliteq import SqliteQueueDatabase
 from slowapi import _rate_limit_exceeded_handler
 from slowapi.errors import RateLimitExceeded
@@ -13,6 +15,7 @@ from starlette_context.plugins import Plugin
 from frigate.api import app as main_app
 from frigate.api import (
     auth,
+    camera,
     classification,
     event,
     export,
@@ -26,6 +29,7 @@ from frigate.comms.event_metadata_updater import (
     EventMetadataPublisher,
 )
 from frigate.config import FrigateConfig
+from frigate.config.camera.updater import CameraConfigUpdatePublisher
 from frigate.embeddings import EmbeddingsContext
 from frigate.ptz.onvif import OnvifController
 from frigate.stats.emitter import StatsEmitter
@@ -57,6 +61,7 @@ def create_fastapi_app(
     onvif: OnvifController,
     stats_emitter: StatsEmitter,
     event_metadata_updater: EventMetadataPublisher,
+    config_publisher: CameraConfigUpdatePublisher,
 ):
     logger.info("Starting FastAPI app")
     app = FastAPI(
@@ -110,6 +115,7 @@ def create_fastapi_app(
     # Routes
     # Order of include_router matters: https://fastapi.tiangolo.com/tutorial/path-params/#order-matters
     app.include_router(auth.router)
+    app.include_router(camera.router)
     app.include_router(classification.router)
     app.include_router(review.router)
     app.include_router(main_app.router)
@@ -127,6 +133,27 @@ def create_fastapi_app(
     app.onvif = onvif
     app.stats_emitter = stats_emitter
     app.event_metadata_updater = event_metadata_updater
-    app.jwt_token = get_jwt_secret() if frigate_config.auth.enabled else None
+    app.config_publisher = config_publisher
+
+    if frigate_config.auth.enabled:
+        secret = get_jwt_secret()
+        key_bytes = None
+        if isinstance(secret, str):
+            # If the secret looks like hex (e.g., generated by secrets.token_hex), use raw bytes
+            if len(secret) % 2 == 0 and re.fullmatch(r"[0-9a-fA-F]+", secret or ""):
+                try:
+                    key_bytes = bytes.fromhex(secret)
+                except ValueError:
+                    key_bytes = secret.encode("utf-8")
+            else:
+                key_bytes = secret.encode("utf-8")
+        elif isinstance(secret, (bytes, bytearray)):
+            key_bytes = bytes(secret)
+        else:
+            key_bytes = str(secret).encode("utf-8")
+        app.jwt_token = OctKey.import_key(key_bytes)
+    else:
+        app.jwt_token = None

     return app
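
# Hedged standalone sketch of the secret normalization above (inputs are
# assumed): an even-length hex string is decoded to raw bytes, anything else
# is UTF-8 encoded before being imported as an OctKey.
#
# import re
#
# def _key_bytes(secret: str) -> bytes:
#     if len(secret) % 2 == 0 and re.fullmatch(r"[0-9a-fA-F]+", secret):
#         try:
#             return bytes.fromhex(secret)
#         except ValueError:
#             pass
#     return secret.encode("utf-8")
#
# assert _key_bytes("deadbeef") == b"\xde\xad\xbe\xef"
# assert _key_bytes("passw0rd!") == b"passw0rd!"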

View File

@@ -8,25 +8,27 @@ import os
 import subprocess as sp
 import time
 from datetime import datetime, timedelta, timezone
+from functools import reduce
 from pathlib import Path as FilePath
-from typing import Any
+from typing import Any, List
 from urllib.parse import unquote

 import cv2
 import numpy as np
 import pytz
-from fastapi import APIRouter, Path, Query, Request, Response
-from fastapi.params import Depends
+from fastapi import APIRouter, Depends, Path, Query, Request, Response
 from fastapi.responses import FileResponse, JSONResponse, StreamingResponse
 from pathvalidate import sanitize_filename
-from peewee import DoesNotExist, fn
+from peewee import DoesNotExist, fn, operator
 from tzlocal import get_localzone_name

+from frigate.api.auth import get_allowed_cameras_for_filter, require_camera_access
 from frigate.api.defs.query.media_query_parameters import (
     Extension,
     MediaEventsSnapshotQueryParams,
     MediaLatestFrameQueryParams,
     MediaMjpegFeedQueryParams,
+    MediaRecordingsAvailabilityQueryParams,
     MediaRecordingsSummaryQueryParams,
 )
 from frigate.api.defs.tags import Tags
@@ -42,18 +44,17 @@ from frigate.const import (
 )
 from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
 from frigate.track.object_processing import TrackedObjectProcessor
-from frigate.util.builtin import get_tz_modifiers
+from frigate.util.file import get_event_thumbnail_bytes
 from frigate.util.image import get_image_from_recording
-from frigate.util.path import get_event_thumbnail_bytes
+from frigate.util.time import get_dst_transitions

 logger = logging.getLogger(__name__)

 router = APIRouter(tags=[Tags.media])


-@router.get("/{camera_name}")
-def mjpeg_feed(
+@router.get("/{camera_name}", dependencies=[Depends(require_camera_access)])
+async def mjpeg_feed(
     request: Request,
     camera_name: str,
     params: MediaMjpegFeedQueryParams = Depends(),
@@ -109,7 +110,7 @@ def imagestream(
     )


-@router.get("/{camera_name}/ptz/info")
+@router.get("/{camera_name}/ptz/info", dependencies=[Depends(require_camera_access)])
 async def camera_ptz_info(request: Request, camera_name: str):
     if camera_name in request.app.frigate_config.cameras:
         # Schedule get_camera_info in the OnvifController's event loop
@@ -125,8 +126,10 @@ async def camera_ptz_info(request: Request, camera_name: str):
     )


-@router.get("/{camera_name}/latest.{extension}")
-def latest_frame(
+@router.get(
+    "/{camera_name}/latest.{extension}", dependencies=[Depends(require_camera_access)]
+)
+async def latest_frame(
     request: Request,
     camera_name: str,
     extension: Extension,
@@ -139,6 +142,7 @@ def latest_frame(
         "zones": params.zones,
         "mask": params.mask,
         "motion_boxes": params.motion,
+        "paths": params.paths,
         "regions": params.regions,
     }
     quality = params.quality
@@ -233,8 +237,11 @@ def latest_frame(
     )


-@router.get("/{camera_name}/recordings/{frame_time}/snapshot.{format}")
-def get_snapshot_from_recording(
+@router.get(
+    "/{camera_name}/recordings/{frame_time}/snapshot.{format}",
+    dependencies=[Depends(require_camera_access)],
+)
+async def get_snapshot_from_recording(
     request: Request,
     camera_name: str,
     frame_time: float,
@@ -320,8 +327,10 @@ def get_snapshot_from_recording(
     )


-@router.post("/{camera_name}/plus/{frame_time}")
-def submit_recording_snapshot_to_plus(
+@router.post(
+    "/{camera_name}/plus/{frame_time}", dependencies=[Depends(require_camera_access)]
+)
+async def submit_recording_snapshot_to_plus(
     request: Request, camera_name: str, frame_time: str
 ):
     if camera_name not in request.app.frigate_config.cameras:
@ -409,111 +418,195 @@ def get_recordings_storage_usage(request: Request):
@router.get("/recordings/summary") @router.get("/recordings/summary")
def all_recordings_summary(params: MediaRecordingsSummaryQueryParams = Depends()): def all_recordings_summary(
request: Request,
params: MediaRecordingsSummaryQueryParams = Depends(),
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
"""Returns true/false by day indicating if recordings exist""" """Returns true/false by day indicating if recordings exist"""
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)
cameras = params.cameras cameras = params.cameras
if cameras != "all":
requested = set(unquote(cameras).split(","))
filtered = requested.intersection(allowed_cameras)
if not filtered:
return JSONResponse(content={})
camera_list = list(filtered)
else:
camera_list = allowed_cameras
query = ( time_range_query = (
Recordings.select( Recordings.select(
fn.strftime( fn.MIN(Recordings.start_time).alias("min_time"),
"%Y-%m-%d", fn.MAX(Recordings.start_time).alias("max_time"),
fn.datetime(
Recordings.start_time + seconds_offset,
"unixepoch",
hour_modifier,
minute_modifier,
),
).alias("day")
) )
.group_by( .where(Recordings.camera << camera_list)
fn.strftime( .dicts()
"%Y-%m-%d", .get()
fn.datetime(
Recordings.start_time + seconds_offset,
"unixepoch",
hour_modifier,
minute_modifier,
),
)
)
.order_by(Recordings.start_time.desc())
) )
if cameras != "all": min_time = time_range_query.get("min_time")
query = query.where(Recordings.camera << cameras.split(",")) max_time = time_range_query.get("max_time")
recording_days = query.namedtuples() if min_time is None or max_time is None:
days = {day.day: True for day in recording_days} return JSONResponse(content={})
return JSONResponse(content=days) dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
days: dict[str, bool] = {}
for period_start, period_end, period_offset in dst_periods:
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
period_query = (
Recordings.select(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("day")
)
.where(
(Recordings.camera << camera_list)
& (Recordings.end_time >= period_start)
& (Recordings.start_time <= period_end)
)
.group_by(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
)
)
.order_by(Recordings.start_time.desc())
.namedtuples()
)
for g in period_query:
days[g.day] = True
return JSONResponse(content=dict(sorted(days.items())))
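The summary now queries each DST period separately instead of applying one timezone offset to all rows; each period's UTC offset in seconds is turned into SQLite datetime modifiers. A worked sketch of that conversion, assuming get_dst_transitions yields (start_ts, end_ts, offset_seconds) tuples as the loop above implies:

def offset_to_modifiers(offset_seconds: int) -> tuple[str, str]:
    # int() truncates toward zero, so negative offsets keep their sign
    # in both parts: UTC-5:30 (-19800s) becomes ("-5 hour", "-30 minute").
    hours = int(offset_seconds / 60 / 60)
    minutes = int(offset_seconds / 60 - hours * 60)
    return f"{hours} hour", f"{minutes} minute"


assert offset_to_modifiers(-19800) == ("-5 hour", "-30 minute")
assert offset_to_modifiers(3600) == ("1 hour", "0 minute")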
@router.get("/{camera_name}/recordings/summary") @router.get(
def recordings_summary(camera_name: str, timezone: str = "utc"): "/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)]
)
async def recordings_summary(camera_name: str, timezone: str = "utc"):
"""Returns hourly summary for recordings of given camera""" """Returns hourly summary for recordings of given camera"""
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(timezone)
recording_groups = ( time_range_query = (
Recordings.select( Recordings.select(
fn.strftime( fn.MIN(Recordings.start_time).alias("min_time"),
"%Y-%m-%d %H", fn.MAX(Recordings.start_time).alias("max_time"),
fn.datetime(
Recordings.start_time, "unixepoch", hour_modifier, minute_modifier
),
).alias("hour"),
fn.SUM(Recordings.duration).alias("duration"),
fn.SUM(Recordings.motion).alias("motion"),
fn.SUM(Recordings.objects).alias("objects"),
) )
.where(Recordings.camera == camera_name) .where(Recordings.camera == camera_name)
.group_by((Recordings.start_time + seconds_offset).cast("int") / 3600) .dicts()
.order_by(Recordings.start_time.desc()) .get()
.namedtuples()
) )
event_groups = ( min_time = time_range_query.get("min_time")
Event.select( max_time = time_range_query.get("max_time")
fn.strftime(
"%Y-%m-%d %H", days: dict[str, dict] = {}
fn.datetime(
Event.start_time, "unixepoch", hour_modifier, minute_modifier if min_time is None or max_time is None:
), return JSONResponse(content=list(days.values()))
).alias("hour"),
fn.COUNT(Event.id).alias("count"), dst_periods = get_dst_transitions(timezone, min_time, max_time)
for period_start, period_end, period_offset in dst_periods:
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
recording_groups = (
Recordings.select(
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("hour"),
fn.SUM(Recordings.duration).alias("duration"),
fn.SUM(Recordings.motion).alias("motion"),
fn.SUM(Recordings.objects).alias("objects"),
)
.where(
(Recordings.camera == camera_name)
& (Recordings.end_time >= period_start)
& (Recordings.start_time <= period_end)
)
.group_by((Recordings.start_time + period_offset).cast("int") / 3600)
.order_by(Recordings.start_time.desc())
.namedtuples()
) )
.where(Event.camera == camera_name, Event.has_clip)
.group_by((Event.start_time + seconds_offset).cast("int") / 3600)
.namedtuples()
)
event_map = {g.hour: g.count for g in event_groups} event_groups = (
Event.select(
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Event.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("hour"),
fn.COUNT(Event.id).alias("count"),
)
.where(Event.camera == camera_name, Event.has_clip)
.where(
(Event.start_time >= period_start) & (Event.start_time <= period_end)
)
.group_by((Event.start_time + period_offset).cast("int") / 3600)
.namedtuples()
)
days = {} event_map = {g.hour: g.count for g in event_groups}
for recording_group in recording_groups: for recording_group in recording_groups:
parts = recording_group.hour.split() parts = recording_group.hour.split()
hour = parts[1] hour = parts[1]
day = parts[0] day = parts[0]
events_count = event_map.get(recording_group.hour, 0) events_count = event_map.get(recording_group.hour, 0)
hour_data = { hour_data = {
"hour": hour, "hour": hour,
"events": events_count, "events": events_count,
"motion": recording_group.motion, "motion": recording_group.motion,
"objects": recording_group.objects, "objects": recording_group.objects,
"duration": round(recording_group.duration), "duration": round(recording_group.duration),
} }
if day not in days: if day in days:
days[day] = {"events": events_count, "hours": [hour_data], "day": day} # merge counts if already present (edge-case at DST boundary)
else: days[day]["events"] += events_count or 0
days[day]["events"] += events_count days[day]["hours"].append(hour_data)
days[day]["hours"].append(hour_data) else:
days[day] = {
"events": events_count or 0,
"hours": [hour_data],
"day": day,
}
return JSONResponse(content=list(days.values())) return JSONResponse(content=list(days.values()))
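The day-merge branch exists because a fall-back DST transition gives two periods different offsets, and both can bucket rows into the same local day; overwriting would drop one period's counts. A toy illustration with made-up numbers:

days: dict[str, dict] = {}
# Two DST periods both contribute to local day 2025-11-02 (clocks fall back).
for day, events in [("2025-11-02", 3), ("2025-11-02", 2)]:
    if day in days:
        days[day]["events"] += events  # merge, don't overwrite
    else:
        days[day] = {"day": day, "events": events}
assert days["2025-11-02"]["events"] == 5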
@router.get("/{camera_name}/recordings") @router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)])
def recordings( async def recordings(
camera_name: str, camera_name: str,
after: float = (datetime.now() - timedelta(hours=1)).timestamp(), after: float = (datetime.now() - timedelta(hours=1)).timestamp(),
before: float = datetime.now().timestamp(), before: float = datetime.now().timestamp(),
@ -542,11 +635,93 @@ def recordings(
return JSONResponse(content=list(recordings)) return JSONResponse(content=list(recordings))
@router.get("/recordings/unavailable", response_model=list[dict])
async def no_recordings(
request: Request,
params: MediaRecordingsAvailabilityQueryParams = Depends(),
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
"""Get time ranges with no recordings."""
    cameras = params.cameras
    if cameras != "all":
        requested = set(unquote(cameras).split(","))
        filtered = requested.intersection(allowed_cameras)
        if not filtered:
            return JSONResponse(content=[])
        camera_list = list(filtered)
    else:
        camera_list = allowed_cameras
    before = params.before or datetime.now().timestamp()
    after = params.after or (datetime.now() - timedelta(hours=1)).timestamp()
    scale = params.scale
    clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)]
    clauses.append((Recordings.camera << camera_list))
# Get recording start times
data: list[Recordings] = (
Recordings.select(Recordings.start_time, Recordings.end_time)
.where(reduce(operator.and_, clauses))
.order_by(Recordings.start_time.asc())
.dicts()
.iterator()
)
# Convert recordings to list of (start, end) tuples
recordings = [(r["start_time"], r["end_time"]) for r in data]
# Iterate through time segments and check if each has any recording
no_recording_segments = []
current = after
current_gap_start = None
while current < before:
segment_end = min(current + scale, before)
# Check if this segment overlaps with any recording
has_recording = any(
rec_start < segment_end and rec_end > current
for rec_start, rec_end in recordings
)
if not has_recording:
# This segment has no recordings
if current_gap_start is None:
current_gap_start = current # Start a new gap
else:
# This segment has recordings
if current_gap_start is not None:
# End the current gap and append it
no_recording_segments.append(
{"start_time": int(current_gap_start), "end_time": int(current)}
)
current_gap_start = None
current = segment_end
# Append the last gap if it exists
if current_gap_start is not None:
no_recording_segments.append(
{"start_time": int(current_gap_start), "end_time": int(before)}
)
return JSONResponse(content=no_recording_segments)
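The gap scan above walks the requested window in scale-sized steps and coalesces consecutive uncovered steps into one range. A self-contained re-statement of the loop with toy timestamps (the function name is illustrative):

def find_gaps(recordings, after, before, scale):
    # recordings: list of (start_time, end_time) tuples, sorted by start
    gaps, current, gap_start = [], after, None
    while current < before:
        segment_end = min(current + scale, before)
        covered = any(s < segment_end and e > current for s, e in recordings)
        if not covered and gap_start is None:
            gap_start = current  # start a new gap
        elif covered and gap_start is not None:
            gaps.append({"start_time": int(gap_start), "end_time": int(current)})
            gap_start = None
        current = segment_end
    if gap_start is not None:
        gaps.append({"start_time": int(gap_start), "end_time": int(before)})
    return gaps


# One recording from t=30..60 inside a 0..120 window, scanned in 30s steps:
assert find_gaps([(30, 60)], 0, 120, 30) == [
    {"start_time": 0, "end_time": 30},
    {"start_time": 60, "end_time": 120},
]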
@router.get( @router.get(
"/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4", "/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4",
dependencies=[Depends(require_camera_access)],
description="For iOS devices, use the master.m3u8 HLS link instead of clip.mp4. Safari does not reliably process progressive mp4 files.", description="For iOS devices, use the master.m3u8 HLS link instead of clip.mp4. Safari does not reliably process progressive mp4 files.",
) )
def recording_clip( async def recording_clip(
request: Request, request: Request,
camera_name: str, camera_name: str,
start_ts: float, start_ts: float,
@ -642,9 +817,10 @@ def recording_clip(
@router.get( @router.get(
"/vod/{camera_name}/start/{start_ts}/end/{end_ts}", "/vod/{camera_name}/start/{start_ts}/end/{end_ts}",
dependencies=[Depends(require_camera_access)],
description="Returns an HLS playlist for the specified timestamp-range on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.", description="Returns an HLS playlist for the specified timestamp-range on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
) )
def vod_ts(camera_name: str, start_ts: float, end_ts: float): async def vod_ts(camera_name: str, start_ts: float, end_ts: float):
recordings = ( recordings = (
Recordings.select( Recordings.select(
Recordings.path, Recordings.path,
@ -719,20 +895,24 @@ def vod_ts(camera_name: str, start_ts: float, end_ts: float):
@router.get( @router.get(
"/vod/{year_month}/{day}/{hour}/{camera_name}", "/vod/{year_month}/{day}/{hour}/{camera_name}",
dependencies=[Depends(require_camera_access)],
description="Returns an HLS playlist for the specified date-time on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.", description="Returns an HLS playlist for the specified date-time on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
) )
def vod_hour_no_timezone(year_month: str, day: int, hour: int, camera_name: str): async def vod_hour_no_timezone(year_month: str, day: int, hour: int, camera_name: str):
"""VOD for specific hour. Uses the default timezone (UTC).""" """VOD for specific hour. Uses the default timezone (UTC)."""
return vod_hour( return await vod_hour(
year_month, day, hour, camera_name, get_localzone_name().replace("/", ",") year_month, day, hour, camera_name, get_localzone_name().replace("/", ",")
) )
@router.get( @router.get(
"/vod/{year_month}/{day}/{hour}/{camera_name}/{tz_name}", "/vod/{year_month}/{day}/{hour}/{camera_name}/{tz_name}",
dependencies=[Depends(require_camera_access)],
description="Returns an HLS playlist for the specified date-time (with timezone) on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.", description="Returns an HLS playlist for the specified date-time (with timezone) on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
) )
def vod_hour(year_month: str, day: int, hour: int, camera_name: str, tz_name: str): async def vod_hour(
year_month: str, day: int, hour: int, camera_name: str, tz_name: str
):
parts = year_month.split("-") parts = year_month.split("-")
start_date = ( start_date = (
datetime(int(parts[0]), int(parts[1]), day, hour, tzinfo=timezone.utc) datetime(int(parts[0]), int(parts[1]), day, hour, tzinfo=timezone.utc)
@ -742,14 +922,15 @@ def vod_hour(year_month: str, day: int, hour: int, camera_name: str, tz_name: st
start_ts = start_date.timestamp() start_ts = start_date.timestamp()
end_ts = end_date.timestamp() end_ts = end_date.timestamp()
return vod_ts(camera_name, start_ts, end_ts) return await vod_ts(camera_name, start_ts, end_ts)
@router.get( @router.get(
"/vod/event/{event_id}", "/vod/event/{event_id}",
description="Returns an HLS playlist for the specified object. Append /master.m3u8 or /index.m3u8 for HLS playback.", description="Returns an HLS playlist for the specified object. Append /master.m3u8 or /index.m3u8 for HLS playback.",
) )
def vod_event( async def vod_event(
request: Request,
event_id: str, event_id: str,
padding: int = Query(0, description="Padding to apply to the vod."), padding: int = Query(0, description="Padding to apply to the vod."),
): ):
@ -765,22 +946,14 @@ def vod_event(
status_code=404, status_code=404,
) )
if not event.has_clip: await require_camera_access(event.camera, request=request)
logger.error(f"Event does not have recordings: {event_id}")
return JSONResponse(
content={
"success": False,
"message": "Recordings not available.",
},
status_code=404,
)
end_ts = ( end_ts = (
datetime.now().timestamp() datetime.now().timestamp()
if event.end_time is None if event.end_time is None
else (event.end_time + padding) else (event.end_time + padding)
) )
vod_response = vod_ts(event.camera, event.start_time - padding, end_ts) vod_response = await vod_ts(event.camera, event.start_time - padding, end_ts)
# If the recordings are not found and the event started more than 5 minutes ago, set has_clip to false # If the recordings are not found and the event started more than 5 minutes ago, set has_clip to false
if ( if (
@ -798,7 +971,7 @@ def vod_event(
"/events/{event_id}/snapshot.jpg", "/events/{event_id}/snapshot.jpg",
description="Returns a snapshot image for the specified object id. NOTE: The query params only take affect while the event is in-progress. Once the event has ended the snapshot configuration is used.", description="Returns a snapshot image for the specified object id. NOTE: The query params only take affect while the event is in-progress. Once the event has ended the snapshot configuration is used.",
) )
def event_snapshot( async def event_snapshot(
request: Request, request: Request,
event_id: str, event_id: str,
params: MediaEventsSnapshotQueryParams = Depends(), params: MediaEventsSnapshotQueryParams = Depends(),
@ -808,6 +981,7 @@ def event_snapshot(
try: try:
event = Event.get(Event.id == event_id, Event.end_time != None) event = Event.get(Event.id == event_id, Event.end_time != None)
event_complete = True event_complete = True
await require_camera_access(event.camera, request=request)
if not event.has_snapshot: if not event.has_snapshot:
return JSONResponse( return JSONResponse(
content={"success": False, "message": "Snapshot not available"}, content={"success": False, "message": "Snapshot not available"},
@ -836,6 +1010,7 @@ def event_snapshot(
height=params.height, height=params.height,
quality=params.quality, quality=params.quality,
) )
await require_camera_access(camera_state.name, request=request)
except Exception: except Exception:
return JSONResponse( return JSONResponse(
content={"success": False, "message": "Ongoing event not found"}, content={"success": False, "message": "Ongoing event not found"},
@ -869,7 +1044,7 @@ def event_snapshot(
@router.get("/events/{event_id}/thumbnail.{extension}") @router.get("/events/{event_id}/thumbnail.{extension}")
def event_thumbnail( async def event_thumbnail(
request: Request, request: Request,
event_id: str, event_id: str,
extension: Extension, extension: Extension,
@ -882,6 +1057,7 @@ def event_thumbnail(
event_complete = False event_complete = False
try: try:
event: Event = Event.get(Event.id == event_id) event: Event = Event.get(Event.id == event_id)
await require_camera_access(event.camera, request=request)
if event.end_time is not None: if event.end_time is not None:
event_complete = True event_complete = True
@ -944,7 +1120,7 @@ def event_thumbnail(
) )
@router.get("/{camera_name}/grid.jpg") @router.get("/{camera_name}/grid.jpg", dependencies=[Depends(require_camera_access)])
def grid_snapshot( def grid_snapshot(
request: Request, camera_name: str, color: str = "green", font_scale: float = 0.5 request: Request, camera_name: str, color: str = "green", font_scale: float = 0.5
): ):
@ -1065,9 +1241,9 @@ def grid_snapshot(
) )
@router.get("/events/{event_id}/snapshot-clean.png") @router.get("/events/{event_id}/snapshot-clean.webp")
def event_snapshot_clean(request: Request, event_id: str, download: bool = False): def event_snapshot_clean(request: Request, event_id: str, download: bool = False):
png_bytes = None webp_bytes = None
try: try:
event = Event.get(Event.id == event_id) event = Event.get(Event.id == event_id)
snapshot_config = request.app.frigate_config.cameras[event.camera].snapshots snapshot_config = request.app.frigate_config.cameras[event.camera].snapshots
@ -1089,7 +1265,7 @@ def event_snapshot_clean(request: Request, event_id: str, download: bool = False
if event_id in camera_state.tracked_objects: if event_id in camera_state.tracked_objects:
tracked_obj = camera_state.tracked_objects.get(event_id) tracked_obj = camera_state.tracked_objects.get(event_id)
if tracked_obj is not None: if tracked_obj is not None:
png_bytes = tracked_obj.get_clean_png() webp_bytes = tracked_obj.get_clean_webp()
break break
except Exception: except Exception:
return JSONResponse( return JSONResponse(
@ -1105,12 +1281,56 @@ def event_snapshot_clean(request: Request, event_id: str, download: bool = False
return JSONResponse( return JSONResponse(
content={"success": False, "message": "Event not found"}, status_code=404 content={"success": False, "message": "Event not found"}, status_code=404
) )
if png_bytes is None: if webp_bytes is None:
try: try:
clean_snapshot_path = os.path.join( # webp
clean_snapshot_path_webp = os.path.join(
CLIPS_DIR, f"{event.camera}-{event.id}-clean.webp"
)
# png (legacy)
clean_snapshot_path_png = os.path.join(
CLIPS_DIR, f"{event.camera}-{event.id}-clean.png" CLIPS_DIR, f"{event.camera}-{event.id}-clean.png"
) )
if not os.path.exists(clean_snapshot_path):
if os.path.exists(clean_snapshot_path_webp):
with open(clean_snapshot_path_webp, "rb") as image_file:
webp_bytes = image_file.read()
elif os.path.exists(clean_snapshot_path_png):
# convert png to webp and save for future use
png_image = cv2.imread(clean_snapshot_path_png, cv2.IMREAD_UNCHANGED)
if png_image is None:
return JSONResponse(
content={
"success": False,
"message": "Invalid png snapshot",
},
status_code=400,
)
ret, webp_data = cv2.imencode(
".webp", png_image, [int(cv2.IMWRITE_WEBP_QUALITY), 60]
)
if not ret:
return JSONResponse(
content={
"success": False,
"message": "Unable to convert png to webp",
},
status_code=400,
)
webp_bytes = webp_data.tobytes()
# save the converted webp for future requests
try:
with open(clean_snapshot_path_webp, "wb") as f:
f.write(webp_bytes)
except Exception as e:
logger.warning(
f"Failed to save converted webp for event {event.id}: {e}"
)
# continue since we now have the data to return
else:
return JSONResponse( return JSONResponse(
content={ content={
"success": False, "success": False,
@ -1118,39 +1338,35 @@ def event_snapshot_clean(request: Request, event_id: str, download: bool = False
}, },
status_code=404, status_code=404,
) )
with open(
os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}-clean.png"), "rb"
) as image_file:
png_bytes = image_file.read()
except Exception: except Exception:
logger.error(f"Unable to load clean png for event: {event.id}") logger.error(f"Unable to load clean snapshot for event: {event.id}")
return JSONResponse( return JSONResponse(
content={ content={
"success": False, "success": False,
"message": "Unable to load clean png for event", "message": "Unable to load clean snapshot for event",
}, },
status_code=400, status_code=400,
) )
headers = { headers = {
"Content-Type": "image/png", "Content-Type": "image/webp",
"Cache-Control": "private, max-age=31536000", "Cache-Control": "private, max-age=31536000",
} }
if download: if download:
headers["Content-Disposition"] = ( headers["Content-Disposition"] = (
f"attachment; filename=snapshot-{event_id}-clean.png" f"attachment; filename=snapshot-{event_id}-clean.webp"
) )
return Response( return Response(
png_bytes, webp_bytes,
media_type="image/png", media_type="image/webp",
headers=headers, headers=headers,
) )
@router.get("/events/{event_id}/clip.mp4") @router.get("/events/{event_id}/clip.mp4")
def event_clip( async def event_clip(
request: Request, request: Request,
event_id: str, event_id: str,
padding: int = Query(0, description="Padding to apply to clip."), padding: int = Query(0, description="Padding to apply to clip."),
@ -1172,7 +1388,9 @@ def event_clip(
if event.end_time is None if event.end_time is None
else event.end_time + padding else event.end_time + padding
) )
return recording_clip(request, event.camera, event.start_time - padding, end_ts) return await recording_clip(
request, event.camera, event.start_time - padding, end_ts
)
@router.get("/events/{event_id}/preview.gif") @router.get("/events/{event_id}/preview.gif")
@ -1191,7 +1409,10 @@ def event_preview(request: Request, event_id: str):
return preview_gif(request, event.camera, start_ts, end_ts) return preview_gif(request, event.camera, start_ts, end_ts)
@router.get("/{camera_name}/start/{start_ts}/end/{end_ts}/preview.gif") @router.get(
"/{camera_name}/start/{start_ts}/end/{end_ts}/preview.gif",
dependencies=[Depends(require_camera_access)],
)
def preview_gif( def preview_gif(
request: Request, request: Request,
camera_name: str, camera_name: str,
@ -1347,7 +1568,10 @@ def preview_gif(
) )
@router.get("/{camera_name}/start/{start_ts}/end/{end_ts}/preview.mp4") @router.get(
"/{camera_name}/start/{start_ts}/end/{end_ts}/preview.mp4",
dependencies=[Depends(require_camera_access)],
)
def preview_mp4( def preview_mp4(
request: Request, request: Request,
camera_name: str, camera_name: str,
@ -1587,9 +1811,14 @@ def preview_thumbnail(file_name: str):
####################### dynamic routes ########################### ####################### dynamic routes ###########################
@router.get("/{camera_name}/{label}/best.jpg") @router.get(
@router.get("/{camera_name}/{label}/thumbnail.jpg") "/{camera_name}/{label}/best.jpg", dependencies=[Depends(require_camera_access)]
def label_thumbnail(request: Request, camera_name: str, label: str): )
@router.get(
"/{camera_name}/{label}/thumbnail.jpg",
dependencies=[Depends(require_camera_access)],
)
async def label_thumbnail(request: Request, camera_name: str, label: str):
label = unquote(label) label = unquote(label)
event_query = Event.select(fn.MAX(Event.id)).where(Event.camera == camera_name) event_query = Event.select(fn.MAX(Event.id)).where(Event.camera == camera_name)
if label != "any": if label != "any":
@ -1598,7 +1827,7 @@ def label_thumbnail(request: Request, camera_name: str, label: str):
try: try:
event_id = event_query.scalar() event_id = event_query.scalar()
return event_thumbnail(request, event_id, Extension.jpg, 60) return await event_thumbnail(request, event_id, Extension.jpg, 60)
except DoesNotExist: except DoesNotExist:
frame = np.zeros((175, 175, 3), np.uint8) frame = np.zeros((175, 175, 3), np.uint8)
ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70]) ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
@ -1610,8 +1839,10 @@ def label_thumbnail(request: Request, camera_name: str, label: str):
) )
@router.get("/{camera_name}/{label}/clip.mp4") @router.get(
def label_clip(request: Request, camera_name: str, label: str): "/{camera_name}/{label}/clip.mp4", dependencies=[Depends(require_camera_access)]
)
async def label_clip(request: Request, camera_name: str, label: str):
label = unquote(label) label = unquote(label)
event_query = Event.select(fn.MAX(Event.id)).where( event_query = Event.select(fn.MAX(Event.id)).where(
Event.camera == camera_name, Event.has_clip == True Event.camera == camera_name, Event.has_clip == True
@ -1622,15 +1853,17 @@ def label_clip(request: Request, camera_name: str, label: str):
try: try:
event = event_query.get() event = event_query.get()
return event_clip(request, event.id) return await event_clip(request, event.id)
except DoesNotExist: except DoesNotExist:
return JSONResponse( return JSONResponse(
content={"success": False, "message": "Event not found"}, status_code=404 content={"success": False, "message": "Event not found"}, status_code=404
) )
@router.get("/{camera_name}/{label}/snapshot.jpg") @router.get(
def label_snapshot(request: Request, camera_name: str, label: str): "/{camera_name}/{label}/snapshot.jpg", dependencies=[Depends(require_camera_access)]
)
async def label_snapshot(request: Request, camera_name: str, label: str):
"""Returns the snapshot image from the latest event for the given camera and label combo""" """Returns the snapshot image from the latest event for the given camera and label combo"""
label = unquote(label) label = unquote(label)
if label == "any": if label == "any":
@ -1651,7 +1884,7 @@ def label_snapshot(request: Request, camera_name: str, label: str):
try: try:
event: Event = event_query.get() event: Event = event_query.get()
return event_snapshot(request, event.id, MediaEventsSnapshotQueryParams()) return await event_snapshot(request, event.id, MediaEventsSnapshotQueryParams())
except DoesNotExist: except DoesNotExist:
frame = np.zeros((720, 1280, 3), np.uint8) frame = np.zeros((720, 1280, 3), np.uint8)
_, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70]) _, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
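Throughout this file the camera-access guard is attached either as a route dependency (when the camera is in the path) or awaited explicitly once the camera is known from a database row. A minimal sketch of what such a FastAPI dependency can look like; this is an illustration under assumed names, not Frigate's actual implementation:

from fastapi import HTTPException, Request


async def require_camera_access(camera_name: str, request: Request) -> None:
    # Hypothetical lookup: the set of cameras the current user may view,
    # stashed on the request by auth middleware.
    allowed: set[str] | None = getattr(request.state, "allowed_cameras", None)
    if allowed is not None and camera_name not in allowed:
        raise HTTPException(status_code=403, detail="Access to camera denied")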

View File

@ -19,7 +19,13 @@ logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.notifications]) router = APIRouter(tags=[Tags.notifications])
@router.get("/notifications/pubkey") @router.get(
"/notifications/pubkey",
summary="Get VAPID public key",
description="""Gets the VAPID public key for the notifications.
Returns the public key or an error if notifications are not enabled.
""",
)
def get_vapid_pub_key(request: Request): def get_vapid_pub_key(request: Request):
config = request.app.frigate_config config = request.app.frigate_config
notifications_enabled = config.notifications.enabled notifications_enabled = config.notifications.enabled
@ -39,7 +45,13 @@ def get_vapid_pub_key(request: Request):
return JSONResponse(content=utils.b64urlencode(raw_pub), status_code=200) return JSONResponse(content=utils.b64urlencode(raw_pub), status_code=200)
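The key is returned base64url-encoded; py_vapid's helper strips trailing padding (an assumption about utils.b64urlencode, consistent with its usage here), so a client has to restore it before decoding. A small sketch:

import base64


def decode_vapid_pubkey(encoded: str) -> bytes:
    raw = base64.urlsafe_b64decode(encoded + "=" * (-len(encoded) % 4))
    # An uncompressed P-256 point is 65 bytes and starts with 0x04.
    assert len(raw) == 65 and raw[0] == 0x04
    return raw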
@router.post("/notifications/register") @router.post(
"/notifications/register",
summary="Register notifications",
description="""Registers a notifications subscription.
Returns a success message or an error if the subscription is not provided.
""",
)
def register_notifications(request: Request, body: dict = None): def register_notifications(request: Request, body: dict = None):
if request.app.frigate_config.auth.enabled: if request.app.frigate_config.auth.enabled:
# FIXME: For FastAPI the remote-user is not being populated # FIXME: For FastAPI the remote-user is not being populated

View File

@ -5,9 +5,14 @@ import os
from datetime import datetime, timedelta, timezone from datetime import datetime, timedelta, timezone
import pytz import pytz
from fastapi import APIRouter from fastapi import APIRouter, Depends
from fastapi.responses import JSONResponse from fastapi.responses import JSONResponse
from frigate.api.auth import require_camera_access
from frigate.api.defs.response.preview_response import (
PreviewFramesResponse,
PreviewsResponse,
)
from frigate.api.defs.tags import Tags from frigate.api.defs.tags import Tags
from frigate.const import BASE_DIR, CACHE_DIR, PREVIEW_FRAME_TYPE from frigate.const import BASE_DIR, CACHE_DIR, PREVIEW_FRAME_TYPE
from frigate.models import Previews from frigate.models import Previews
@ -18,7 +23,16 @@ logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.preview]) router = APIRouter(tags=[Tags.preview])
@router.get("/preview/{camera_name}/start/{start_ts}/end/{end_ts}") @router.get(
"/preview/{camera_name}/start/{start_ts}/end/{end_ts}",
response_model=PreviewsResponse,
dependencies=[Depends(require_camera_access)],
summary="Get preview clips for time range",
description="""Gets all preview clips for a specified camera and time range.
Returns a list of preview video clips that overlap with the requested time period,
ordered by start time. Use camera_name='all' to get previews from all cameras.
Returns an error if no previews are found.""",
)
def preview_ts(camera_name: str, start_ts: float, end_ts: float): def preview_ts(camera_name: str, start_ts: float, end_ts: float):
"""Get all mp4 previews relevant for time period.""" """Get all mp4 previews relevant for time period."""
if camera_name != "all": if camera_name != "all":
@ -71,7 +85,16 @@ def preview_ts(camera_name: str, start_ts: float, end_ts: float):
return JSONResponse(content=clips, status_code=200) return JSONResponse(content=clips, status_code=200)
@router.get("/preview/{year_month}/{day}/{hour}/{camera_name}/{tz_name}") @router.get(
"/preview/{year_month}/{day}/{hour}/{camera_name}/{tz_name}",
response_model=PreviewsResponse,
dependencies=[Depends(require_camera_access)],
summary="Get preview clips for specific hour",
description="""Gets all preview clips for a specific hour in a given timezone.
Converts the provided date/time from the specified timezone to UTC and retrieves
all preview clips for that hour. Use camera_name='all' to get previews from all cameras.
The tz_name should be a timezone like 'America/New_York' (use commas instead of slashes).""",
)
def preview_hour(year_month: str, day: int, hour: int, camera_name: str, tz_name: str): def preview_hour(year_month: str, day: int, hour: int, camera_name: str, tz_name: str):
"""Get all mp4 previews relevant for time period given the timezone""" """Get all mp4 previews relevant for time period given the timezone"""
parts = year_month.split("-") parts = year_month.split("-")
@ -86,7 +109,15 @@ def preview_hour(year_month: str, day: int, hour: int, camera_name: str, tz_name
return preview_ts(camera_name, start_ts, end_ts) return preview_ts(camera_name, start_ts, end_ts)
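Slashes in IANA timezone names would collide with URL path segments, so these routes expect commas in their place, as the descriptions above note, and the handler swaps them back. A quick sketch of the round trip using helpers this codebase already imports:

import pytz
from tzlocal import get_localzone_name

tz_for_path = get_localzone_name().replace("/", ",")  # e.g. "America,New_York"
tz = pytz.timezone(tz_for_path.replace(",", "/"))  # back to "America/New_York"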
@router.get("/preview/{camera_name}/start/{start_ts}/end/{end_ts}/frames") @router.get(
"/preview/{camera_name}/start/{start_ts}/end/{end_ts}/frames",
response_model=PreviewFramesResponse,
dependencies=[Depends(require_camera_access)],
summary="Get cached preview frame filenames",
description="""Gets a list of cached preview frame filenames for a specific camera and time range.
Returns an array of filenames for preview frames that fall within the specified time period,
sorted in chronological order. These are individual frame images cached for quick preview display.""",
)
def get_preview_frames_from_cache(camera_name: str, start_ts: float, end_ts: float): def get_preview_frames_from_cache(camera_name: str, start_ts: float, end_ts: float):
"""Get list of cached preview frames""" """Get list of cached preview frames"""
preview_dir = os.path.join(CACHE_DIR, "preview_frames") preview_dir = os.path.join(CACHE_DIR, "preview_frames")

View File

@ -4,15 +4,21 @@ import datetime
import logging import logging
from functools import reduce from functools import reduce
from pathlib import Path from pathlib import Path
from typing import List
import pandas as pd import pandas as pd
from fastapi import APIRouter from fastapi import APIRouter, Request
from fastapi.params import Depends from fastapi.params import Depends
from fastapi.responses import JSONResponse from fastapi.responses import JSONResponse
from peewee import Case, DoesNotExist, IntegrityError, fn, operator from peewee import Case, DoesNotExist, IntegrityError, fn, operator
from playhouse.shortcuts import model_to_dict from playhouse.shortcuts import model_to_dict
from frigate.api.auth import get_current_user, require_role from frigate.api.auth import (
get_allowed_cameras_for_filter,
get_current_user,
require_camera_access,
require_role,
)
from frigate.api.defs.query.review_query_parameters import ( from frigate.api.defs.query.review_query_parameters import (
ReviewActivityMotionQueryParams, ReviewActivityMotionQueryParams,
ReviewQueryParams, ReviewQueryParams,
@ -26,9 +32,11 @@ from frigate.api.defs.response.review_response import (
ReviewSummaryResponse, ReviewSummaryResponse,
) )
from frigate.api.defs.tags import Tags from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig
from frigate.embeddings import EmbeddingsContext
from frigate.models import Recordings, ReviewSegment, UserReviewStatus from frigate.models import Recordings, ReviewSegment, UserReviewStatus
from frigate.review.types import SeverityEnum from frigate.review.types import SeverityEnum
from frigate.util.builtin import get_tz_modifiers from frigate.util.time import get_dst_transitions
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -39,6 +47,7 @@ router = APIRouter(tags=[Tags.review])
async def review( async def review(
params: ReviewQueryParams = Depends(), params: ReviewQueryParams = Depends(),
current_user: dict = Depends(get_current_user), current_user: dict = Depends(get_current_user),
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
): ):
if isinstance(current_user, JSONResponse): if isinstance(current_user, JSONResponse):
return current_user return current_user
@ -63,8 +72,14 @@ async def review(
] ]
if cameras != "all": if cameras != "all":
camera_list = cameras.split(",") requested = set(cameras.split(","))
clauses.append((ReviewSegment.camera << camera_list)) filtered = requested.intersection(allowed_cameras)
if not filtered:
return JSONResponse(content=[])
camera_list = list(filtered)
else:
camera_list = allowed_cameras
clauses.append((ReviewSegment.camera << camera_list))
if labels != "all": if labels != "all":
# use matching so segments with multiple labels # use matching so segments with multiple labels
@ -138,7 +153,7 @@ async def review(
@router.get("/review_ids", response_model=list[ReviewSegmentResponse]) @router.get("/review_ids", response_model=list[ReviewSegmentResponse])
def review_ids(ids: str): async def review_ids(request: Request, ids: str):
ids = ids.split(",") ids = ids.split(",")
if not ids: if not ids:
@ -147,6 +162,18 @@ def review_ids(ids: str):
status_code=400, status_code=400,
) )
for review_id in ids:
try:
review = ReviewSegment.get(ReviewSegment.id == review_id)
await require_camera_access(review.camera, request=request)
except DoesNotExist:
return JSONResponse(
content=(
{"success": False, "message": f"Review {review_id} not found"}
),
status_code=404,
)
try: try:
reviews = ( reviews = (
ReviewSegment.select().where(ReviewSegment.id << ids).dicts().iterator() ReviewSegment.select().where(ReviewSegment.id << ids).dicts().iterator()
@ -163,13 +190,13 @@ def review_ids(ids: str):
async def review_summary( async def review_summary(
params: ReviewSummaryQueryParams = Depends(), params: ReviewSummaryQueryParams = Depends(),
current_user: dict = Depends(get_current_user), current_user: dict = Depends(get_current_user),
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
): ):
if isinstance(current_user, JSONResponse): if isinstance(current_user, JSONResponse):
return current_user return current_user
user_id = current_user["username"] user_id = current_user["username"]
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)
day_ago = (datetime.datetime.now() - datetime.timedelta(hours=24)).timestamp() day_ago = (datetime.datetime.now() - datetime.timedelta(hours=24)).timestamp()
cameras = params.cameras cameras = params.cameras
@ -179,8 +206,14 @@ async def review_summary(
clauses = [(ReviewSegment.start_time > day_ago)] clauses = [(ReviewSegment.start_time > day_ago)]
if cameras != "all": if cameras != "all":
camera_list = cameras.split(",") requested = set(cameras.split(","))
clauses.append((ReviewSegment.camera << camera_list)) filtered = requested.intersection(allowed_cameras)
if not filtered:
return JSONResponse(content={})
camera_list = list(filtered)
else:
camera_list = allowed_cameras
clauses.append((ReviewSegment.camera << camera_list))
if labels != "all": if labels != "all":
# use matching so segments with multiple labels # use matching so segments with multiple labels
@ -274,8 +307,14 @@ async def review_summary(
clauses = [] clauses = []
if cameras != "all": if cameras != "all":
camera_list = cameras.split(",") requested = set(cameras.split(","))
clauses.append((ReviewSegment.camera << camera_list)) filtered = requested.intersection(allowed_cameras)
if not filtered:
return JSONResponse(content={})
camera_list = list(filtered)
else:
camera_list = allowed_cameras
clauses.append((ReviewSegment.camera << camera_list))
if labels != "all": if labels != "all":
# use matching so segments with multiple labels # use matching so segments with multiple labels
@ -289,95 +328,142 @@ async def review_summary(
) )
clauses.append(reduce(operator.or_, label_clauses)) clauses.append(reduce(operator.or_, label_clauses))
day_in_seconds = 60 * 60 * 24 # Find the time range of available data
last_month_query = ( time_range_query = (
ReviewSegment.select( ReviewSegment.select(
fn.strftime( fn.MIN(ReviewSegment.start_time).alias("min_time"),
"%Y-%m-%d", fn.MAX(ReviewSegment.start_time).alias("max_time"),
fn.datetime(
ReviewSegment.start_time,
"unixepoch",
hour_modifier,
minute_modifier,
),
).alias("day"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.alert)
& (UserReviewStatus.has_been_reviewed == True),
1,
)
],
0,
)
).alias("reviewed_alert"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.detection)
& (UserReviewStatus.has_been_reviewed == True),
1,
)
],
0,
)
).alias("reviewed_detection"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.alert),
1,
)
],
0,
)
).alias("total_alert"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.detection),
1,
)
],
0,
)
).alias("total_detection"),
)
.left_outer_join(
UserReviewStatus,
on=(
(ReviewSegment.id == UserReviewStatus.review_segment)
& (UserReviewStatus.user_id == user_id)
),
) )
.where(reduce(operator.and_, clauses) if clauses else True) .where(reduce(operator.and_, clauses) if clauses else True)
.group_by( .dicts()
(ReviewSegment.start_time + seconds_offset).cast("int") / day_in_seconds .get()
)
.order_by(ReviewSegment.start_time.desc())
) )
min_time = time_range_query.get("min_time")
max_time = time_range_query.get("max_time")
data = { data = {
"last24Hours": last_24_query, "last24Hours": last_24_query,
} }
for e in last_month_query.dicts().iterator(): # If no data, return early
data[e["day"]] = e if min_time is None or max_time is None:
return JSONResponse(content=data)
# Get DST transition periods
dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
day_in_seconds = 60 * 60 * 24
# Query each DST period separately with the correct offset
for period_start, period_end, period_offset in dst_periods:
# Calculate hour/minute modifiers for this period
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
# Build clauses including time range for this period
period_clauses = clauses.copy()
period_clauses.append(
(ReviewSegment.start_time >= period_start)
& (ReviewSegment.start_time <= period_end)
)
period_query = (
ReviewSegment.select(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
ReviewSegment.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("day"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.alert)
& (UserReviewStatus.has_been_reviewed == True),
1,
)
],
0,
)
).alias("reviewed_alert"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.detection)
& (UserReviewStatus.has_been_reviewed == True),
1,
)
],
0,
)
).alias("reviewed_detection"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.alert),
1,
)
],
0,
)
).alias("total_alert"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.detection),
1,
)
],
0,
)
).alias("total_detection"),
)
.left_outer_join(
UserReviewStatus,
on=(
(ReviewSegment.id == UserReviewStatus.review_segment)
& (UserReviewStatus.user_id == user_id)
),
)
.where(reduce(operator.and_, period_clauses))
.group_by(
(ReviewSegment.start_time + period_offset).cast("int") / day_in_seconds
)
.order_by(ReviewSegment.start_time.desc())
)
# Merge results from this period
for e in period_query.dicts().iterator():
day_key = e["day"]
if day_key in data:
# Merge counts if day already exists (edge case at DST boundary)
data[day_key]["reviewed_alert"] += e["reviewed_alert"] or 0
data[day_key]["reviewed_detection"] += e["reviewed_detection"] or 0
data[day_key]["total_alert"] += e["total_alert"] or 0
data[day_key]["total_detection"] += e["total_detection"] or 0
else:
data[day_key] = e
return JSONResponse(content=data) return JSONResponse(content=data)
@router.post("/reviews/viewed", response_model=GenericResponse) @router.post("/reviews/viewed", response_model=GenericResponse)
async def set_multiple_reviewed( async def set_multiple_reviewed(
request: Request,
body: ReviewModifyMultipleBody, body: ReviewModifyMultipleBody,
current_user: dict = Depends(get_current_user), current_user: dict = Depends(get_current_user),
): ):
@ -388,26 +474,33 @@ async def set_multiple_reviewed(
for review_id in body.ids: for review_id in body.ids:
try: try:
review = ReviewSegment.get(ReviewSegment.id == review_id)
await require_camera_access(review.camera, request=request)
review_status = UserReviewStatus.get( review_status = UserReviewStatus.get(
UserReviewStatus.user_id == user_id, UserReviewStatus.user_id == user_id,
UserReviewStatus.review_segment == review_id, UserReviewStatus.review_segment == review_id,
) )
# If it exists and isn't reviewed, update it # Update based on the reviewed parameter
if not review_status.has_been_reviewed: if review_status.has_been_reviewed != body.reviewed:
review_status.has_been_reviewed = True review_status.has_been_reviewed = body.reviewed
review_status.save() review_status.save()
except DoesNotExist: except DoesNotExist:
try: try:
UserReviewStatus.create( UserReviewStatus.create(
user_id=user_id, user_id=user_id,
review_segment=ReviewSegment.get(id=review_id), review_segment=ReviewSegment.get(id=review_id),
has_been_reviewed=True, has_been_reviewed=body.reviewed,
) )
except (DoesNotExist, IntegrityError): except (DoesNotExist, IntegrityError):
pass pass
return JSONResponse( return JSONResponse(
content=({"success": True, "message": "Reviewed multiple items"}), content=(
{
"success": True,
"message": f"Marked multiple items as {'reviewed' if body.reviewed else 'unreviewed'}",
}
),
status_code=200, status_code=200,
) )
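Since the handler now honors body.reviewed in both directions, one endpoint serves both mark-as-reviewed and mark-as-unreviewed. A hypothetical call (host, port, and the /api prefix are assumptions; the id is made up):

import requests

requests.post(
    "http://frigate.local:5000/api/reviews/viewed",
    json={"ids": ["1731855130.12345-abcdef"], "reviewed": False},
)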
@ -469,7 +562,10 @@ def delete_reviews(body: ReviewModifyMultipleBody):
@router.get( @router.get(
"/review/activity/motion", response_model=list[ReviewActivityMotionResponse] "/review/activity/motion", response_model=list[ReviewActivityMotionResponse]
) )
def motion_activity(params: ReviewActivityMotionQueryParams = Depends()): def motion_activity(
params: ReviewActivityMotionQueryParams = Depends(),
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
"""Get motion and audio activity.""" """Get motion and audio activity."""
cameras = params.cameras cameras = params.cameras
before = params.before or datetime.datetime.now().timestamp() before = params.before or datetime.datetime.now().timestamp()
@ -484,8 +580,14 @@ def motion_activity(params: ReviewActivityMotionQueryParams = Depends()):
clauses.append((Recordings.motion > 0)) clauses.append((Recordings.motion > 0))
if cameras != "all": if cameras != "all":
camera_list = cameras.split(",") requested = set(cameras.split(","))
filtered = requested.intersection(allowed_cameras)
if not filtered:
return JSONResponse(content=[])
camera_list = list(filtered)
clauses.append((Recordings.camera << camera_list)) clauses.append((Recordings.camera << camera_list))
else:
clauses.append((Recordings.camera << allowed_cameras))
data: list[Recordings] = ( data: list[Recordings] = (
Recordings.select( Recordings.select(
@ -543,15 +645,13 @@ def motion_activity(params: ReviewActivityMotionQueryParams = Depends()):
@router.get("/review/event/{event_id}", response_model=ReviewSegmentResponse) @router.get("/review/event/{event_id}", response_model=ReviewSegmentResponse)
def get_review_from_event(event_id: str): async def get_review_from_event(request: Request, event_id: str):
try: try:
return JSONResponse( review = ReviewSegment.get(
model_to_dict( ReviewSegment.data["detections"].cast("text") % f'*"{event_id}"*'
ReviewSegment.get(
ReviewSegment.data["detections"].cast("text") % f'*"{event_id}"*'
)
)
) )
await require_camera_access(review.camera, request=request)
return JSONResponse(model_to_dict(review))
except DoesNotExist: except DoesNotExist:
return JSONResponse( return JSONResponse(
content={"success": False, "message": "Review item not found"}, content={"success": False, "message": "Review item not found"},
@ -560,11 +660,11 @@ def get_review_from_event(event_id: str):
@router.get("/review/{review_id}", response_model=ReviewSegmentResponse) @router.get("/review/{review_id}", response_model=ReviewSegmentResponse)
def get_review(review_id: str): async def get_review(request: Request, review_id: str):
try: try:
return JSONResponse( review = ReviewSegment.get(ReviewSegment.id == review_id)
content=model_to_dict(ReviewSegment.get(ReviewSegment.id == review_id)) await require_camera_access(review.camera, request=request)
) return JSONResponse(content=model_to_dict(review))
except DoesNotExist: except DoesNotExist:
return JSONResponse( return JSONResponse(
content={"success": False, "message": "Review item not found"}, content={"success": False, "message": "Review item not found"},
@ -606,3 +706,35 @@ async def set_not_reviewed(
content=({"success": True, "message": f"Set Review {review_id} as not viewed"}), content=({"success": True, "message": f"Set Review {review_id} as not viewed"}),
status_code=200, status_code=200,
) )
@router.post(
"/review/summarize/start/{start_ts}/end/{end_ts}",
description="Use GenAI to summarize review items over a period of time.",
)
def generate_review_summary(request: Request, start_ts: float, end_ts: float):
config: FrigateConfig = request.app.frigate_config
if not config.genai.provider:
return JSONResponse(
content=(
{
"success": False,
"message": "GenAI must be configured to use this feature.",
}
),
status_code=400,
)
context: EmbeddingsContext = request.app.embeddings
summary = context.generate_review_summary(start_ts, end_ts)
if summary:
return JSONResponse(
content=({"success": True, "summary": summary}), status_code=200
)
else:
return JSONResponse(
content=({"success": False, "message": "Failed to create summary."}),
status_code=500,
)
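A hypothetical invocation of the new summarization route, assuming a configured GenAI provider and the usual /api prefix:

import time

import requests

end_ts = time.time()
start_ts = end_ts - 60 * 60  # summarize the last hour of review items
resp = requests.post(
    f"http://frigate.local:5000/api/review/summarize/start/{start_ts}/end/{end_ts}"
)
print(resp.json())  # {"success": True, "summary": "..."} on success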

View File

@ -5,6 +5,7 @@ import os
import secrets import secrets
import shutil import shutil
from multiprocessing import Queue from multiprocessing import Queue
from multiprocessing.managers import DictProxy, SyncManager
from multiprocessing.synchronize import Event as MpEvent from multiprocessing.synchronize import Event as MpEvent
from pathlib import Path from pathlib import Path
from typing import Optional from typing import Optional
@ -14,19 +15,20 @@ import uvicorn
from peewee_migrate import Router from peewee_migrate import Router
from playhouse.sqlite_ext import SqliteExtDatabase from playhouse.sqlite_ext import SqliteExtDatabase
import frigate.util as util
from frigate.api.auth import hash_password from frigate.api.auth import hash_password
from frigate.api.fastapi_app import create_fastapi_app from frigate.api.fastapi_app import create_fastapi_app
from frigate.camera import CameraMetrics, PTZMetrics from frigate.camera import CameraMetrics, PTZMetrics
from frigate.camera.maintainer import CameraMaintainer
from frigate.comms.base_communicator import Communicator from frigate.comms.base_communicator import Communicator
from frigate.comms.config_updater import ConfigPublisher
from frigate.comms.dispatcher import Dispatcher from frigate.comms.dispatcher import Dispatcher
from frigate.comms.event_metadata_updater import EventMetadataPublisher from frigate.comms.event_metadata_updater import EventMetadataPublisher
from frigate.comms.inter_process import InterProcessCommunicator from frigate.comms.inter_process import InterProcessCommunicator
from frigate.comms.mqtt import MqttClient from frigate.comms.mqtt import MqttClient
from frigate.comms.object_detector_signaler import DetectorProxy
from frigate.comms.webpush import WebPushClient from frigate.comms.webpush import WebPushClient
from frigate.comms.ws import WebSocketClient from frigate.comms.ws import WebSocketClient
from frigate.comms.zmq_proxy import ZmqProxy from frigate.comms.zmq_proxy import ZmqProxy
from frigate.config.camera.updater import CameraConfigUpdatePublisher
from frigate.config.config import FrigateConfig from frigate.config.config import FrigateConfig
from frigate.const import ( from frigate.const import (
CACHE_DIR, CACHE_DIR,
@ -36,12 +38,12 @@ from frigate.const import (
FACE_DIR, FACE_DIR,
MODEL_CACHE_DIR, MODEL_CACHE_DIR,
RECORD_DIR, RECORD_DIR,
SHM_FRAMES_VAR,
THUMB_DIR, THUMB_DIR,
TRIGGER_DIR,
) )
from frigate.data_processing.types import DataProcessorMetrics from frigate.data_processing.types import DataProcessorMetrics
from frigate.db.sqlitevecq import SqliteVecQueueDatabase from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.embeddings import EmbeddingsContext, manage_embeddings from frigate.embeddings import EmbeddingProcess, EmbeddingsContext
from frigate.events.audio import AudioProcessor from frigate.events.audio import AudioProcessor
from frigate.events.cleanup import EventCleanup from frigate.events.cleanup import EventCleanup
from frigate.events.maintainer import EventProcessor from frigate.events.maintainer import EventProcessor
@ -55,56 +57,58 @@ from frigate.models import (
Regions, Regions,
ReviewSegment, ReviewSegment,
Timeline, Timeline,
Trigger,
User, User,
) )
from frigate.object_detection.base import ObjectDetectProcess from frigate.object_detection.base import ObjectDetectProcess
from frigate.output.output import output_frames from frigate.output.output import OutputProcess
from frigate.ptz.autotrack import PtzAutoTrackerThread from frigate.ptz.autotrack import PtzAutoTrackerThread
from frigate.ptz.onvif import OnvifController from frigate.ptz.onvif import OnvifController
from frigate.record.cleanup import RecordingCleanup from frigate.record.cleanup import RecordingCleanup
from frigate.record.export import migrate_exports from frigate.record.export import migrate_exports
from frigate.record.record import manage_recordings from frigate.record.record import RecordProcess
from frigate.review.review import manage_review_segments from frigate.review.review import ReviewProcess
from frigate.stats.emitter import StatsEmitter from frigate.stats.emitter import StatsEmitter
from frigate.stats.util import stats_init from frigate.stats.util import stats_init
from frigate.storage import StorageMaintainer from frigate.storage import StorageMaintainer
from frigate.timeline import TimelineProcessor from frigate.timeline import TimelineProcessor
from frigate.track.object_processing import TrackedObjectProcessor from frigate.track.object_processing import TrackedObjectProcessor
from frigate.util.builtin import empty_and_close_queue from frigate.util.builtin import empty_and_close_queue
from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory from frigate.util.image import UntrackedSharedMemory
from frigate.util.object import get_camera_regions_grid
from frigate.util.services import set_file_limit from frigate.util.services import set_file_limit
from frigate.version import VERSION from frigate.version import VERSION
from frigate.video import capture_camera, track_camera
from frigate.watchdog import FrigateWatchdog from frigate.watchdog import FrigateWatchdog
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
class FrigateApp: class FrigateApp:
def __init__(self, config: FrigateConfig) -> None: def __init__(
self, config: FrigateConfig, manager: SyncManager, stop_event: MpEvent
) -> None:
self.metrics_manager = manager
self.audio_process: Optional[mp.Process] = None self.audio_process: Optional[mp.Process] = None
self.stop_event: MpEvent = mp.Event() self.stop_event = stop_event
self.detection_queue: Queue = mp.Queue() self.detection_queue: Queue = mp.Queue()
self.detectors: dict[str, ObjectDetectProcess] = {} self.detectors: dict[str, ObjectDetectProcess] = {}
self.detection_out_events: dict[str, MpEvent] = {}
self.detection_shms: list[mp.shared_memory.SharedMemory] = [] self.detection_shms: list[mp.shared_memory.SharedMemory] = []
self.log_queue: Queue = mp.Queue() self.log_queue: Queue = mp.Queue()
self.camera_metrics: dict[str, CameraMetrics] = {} self.camera_metrics: DictProxy = self.metrics_manager.dict()
self.embeddings_metrics: DataProcessorMetrics | None = ( self.embeddings_metrics: DataProcessorMetrics | None = (
DataProcessorMetrics() DataProcessorMetrics(
self.metrics_manager, list(config.classification.custom.keys())
)
if ( if (
config.semantic_search.enabled config.semantic_search.enabled
or config.lpr.enabled or config.lpr.enabled
or config.face_recognition.enabled or config.face_recognition.enabled
or len(config.classification.custom) > 0
) )
else None else None
) )
self.ptz_metrics: dict[str, PTZMetrics] = {} self.ptz_metrics: dict[str, PTZMetrics] = {}
self.processes: dict[str, int] = {} self.processes: dict[str, int] = {}
self.embeddings: Optional[EmbeddingsContext] = None self.embeddings: Optional[EmbeddingsContext] = None
self.region_grids: dict[str, list[list[dict[str, int]]]] = {}
self.frame_manager = SharedMemoryFrameManager()
self.config = config self.config = config
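Reviewer note: the constructor now receives its SyncManager and stop event from the caller instead of creating them itself, and per-camera metrics move into manager-backed proxies. A minimal sketch (not Frigate code) of why that matters: a DictProxy created by one manager can be handed to child processes and stays visible to all of them, which plain instance attributes cannot under the spawn start method.

import multiprocessing as mp

def worker(metrics):
    # writes through the proxy are visible back in the parent process
    metrics["cam1"] = {"fps": 5.0}

if __name__ == "__main__":
    with mp.Manager() as manager:
        metrics = manager.dict()
        p = mp.Process(target=worker, args=(metrics,))
        p.start()
        p.join()
        print(dict(metrics))  # {'cam1': {'fps': 5.0}}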
def ensure_dirs(self) -> None: def ensure_dirs(self) -> None:
@ -121,6 +125,9 @@ class FrigateApp:
if self.config.face_recognition.enabled: if self.config.face_recognition.enabled:
dirs.append(FACE_DIR) dirs.append(FACE_DIR)
if self.config.semantic_search.enabled:
dirs.append(TRIGGER_DIR)
for d in dirs: for d in dirs:
if not os.path.exists(d) and not os.path.islink(d): if not os.path.exists(d) and not os.path.islink(d):
logger.info(f"Creating directory: {d}") logger.info(f"Creating directory: {d}")
@ -131,7 +138,7 @@ class FrigateApp:
def init_camera_metrics(self) -> None: def init_camera_metrics(self) -> None:
# create camera_metrics # create camera_metrics
for camera_name in self.config.cameras.keys(): for camera_name in self.config.cameras.keys():
self.camera_metrics[camera_name] = CameraMetrics() self.camera_metrics[camera_name] = CameraMetrics(self.metrics_manager)
self.ptz_metrics[camera_name] = PTZMetrics( self.ptz_metrics[camera_name] = PTZMetrics(
autotracker_enabled=self.config.cameras[ autotracker_enabled=self.config.cameras[
camera_name camera_name
@ -140,8 +147,16 @@ class FrigateApp:
def init_queues(self) -> None: def init_queues(self) -> None:
# Queue for cameras to push tracked objects to # Queue for cameras to push tracked objects to
# leaving room for 2 extra cameras to be added
self.detected_frames_queue: Queue = mp.Queue( self.detected_frames_queue: Queue = mp.Queue(
maxsize=sum(camera.enabled for camera in self.config.cameras.values()) * 2 maxsize=(
sum(
camera.enabled_in_config
for camera in self.config.cameras.values()
)
+ 2
)
* 2
) )
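A worked example of the new sizing, assuming four cameras enabled in the config:

enabled = 4                  # cameras with enabled_in_config set
maxsize = (enabled + 2) * 2  # 12 slots; the old formula, enabled * 2, gave 8

The + 2 headroom matches the comment above: cameras added at runtime get queue slots without a restart.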
# Queue for timeline events # Queue for timeline events
@ -217,52 +232,24 @@ class FrigateApp:
self.processes["go2rtc"] = proc.info["pid"] self.processes["go2rtc"] = proc.info["pid"]
def init_recording_manager(self) -> None: def init_recording_manager(self) -> None:
recording_process = util.Process( recording_process = RecordProcess(self.config, self.stop_event)
target=manage_recordings,
name="recording_manager",
args=(self.config,),
)
recording_process.daemon = True
self.recording_process = recording_process self.recording_process = recording_process
recording_process.start() recording_process.start()
self.processes["recording"] = recording_process.pid or 0 self.processes["recording"] = recording_process.pid or 0
logger.info(f"Recording process started: {recording_process.pid}") logger.info(f"Recording process started: {recording_process.pid}")
def init_review_segment_manager(self) -> None: def init_review_segment_manager(self) -> None:
review_segment_process = util.Process( review_segment_process = ReviewProcess(self.config, self.stop_event)
target=manage_review_segments,
name="review_segment_manager",
args=(self.config,),
)
review_segment_process.daemon = True
self.review_segment_process = review_segment_process self.review_segment_process = review_segment_process
review_segment_process.start() review_segment_process.start()
self.processes["review_segment"] = review_segment_process.pid or 0 self.processes["review_segment"] = review_segment_process.pid or 0
logger.info(f"Review process started: {review_segment_process.pid}") logger.info(f"Review process started: {review_segment_process.pid}")
def init_embeddings_manager(self) -> None: def init_embeddings_manager(self) -> None:
genai_cameras = [ # always start the embeddings process
c for c in self.config.cameras.values() if c.enabled and c.genai.enabled embedding_process = EmbeddingProcess(
] self.config, self.embeddings_metrics, self.stop_event
if (
not self.config.semantic_search.enabled
and not genai_cameras
and not self.config.lpr.enabled
and not self.config.face_recognition.enabled
and not self.config.classification.bird.enabled
):
return
embedding_process = util.Process(
target=manage_embeddings,
name="embeddings_manager",
args=(
self.config,
self.embeddings_metrics,
),
) )
embedding_process.daemon = True
self.embedding_process = embedding_process self.embedding_process = embedding_process
embedding_process.start() embedding_process.start()
self.processes["embeddings"] = embedding_process.pid or 0 self.processes["embeddings"] = embedding_process.pid or 0
@ -279,7 +266,9 @@ class FrigateApp:
"synchronous": "NORMAL", # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous "synchronous": "NORMAL", # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous
}, },
timeout=max( timeout=max(
60, 10 * len([c for c in self.config.cameras.values() if c.enabled]) 60,
10
* len([c for c in self.config.cameras.values() if c.enabled_in_config]),
), ),
load_vec_extension=self.config.semantic_search.enabled, load_vec_extension=self.config.semantic_search.enabled,
) )
@ -293,6 +282,7 @@ class FrigateApp:
ReviewSegment, ReviewSegment,
Timeline, Timeline,
User, User,
Trigger,
] ]
self.db.bind(models) self.db.bind(models)
@ -308,24 +298,15 @@ class FrigateApp:
migrate_exports(self.config.ffmpeg, list(self.config.cameras.keys())) migrate_exports(self.config.ffmpeg, list(self.config.cameras.keys()))
def init_embeddings_client(self) -> None: def init_embeddings_client(self) -> None:
genai_cameras = [ # Create a client for other processes to use
c for c in self.config.cameras.values() if c.enabled and c.genai.enabled self.embeddings = EmbeddingsContext(self.db)
]
if (
self.config.semantic_search.enabled
or self.config.lpr.enabled
or genai_cameras
or self.config.face_recognition.enabled
):
# Create a client for other processes to use
self.embeddings = EmbeddingsContext(self.db)
def init_inter_process_communicator(self) -> None: def init_inter_process_communicator(self) -> None:
self.inter_process_communicator = InterProcessCommunicator() self.inter_process_communicator = InterProcessCommunicator()
self.inter_config_updater = ConfigPublisher() self.inter_config_updater = CameraConfigUpdatePublisher()
self.event_metadata_updater = EventMetadataPublisher() self.event_metadata_updater = EventMetadataPublisher()
self.inter_zmq_proxy = ZmqProxy() self.inter_zmq_proxy = ZmqProxy()
self.detection_proxy = DetectorProxy()
def init_onvif(self) -> None: def init_onvif(self) -> None:
self.onvif_controller = OnvifController(self.config, self.ptz_metrics) self.onvif_controller = OnvifController(self.config, self.ptz_metrics)
@ -358,8 +339,6 @@ class FrigateApp:
def start_detectors(self) -> None: def start_detectors(self) -> None:
for name in self.config.cameras.keys(): for name in self.config.cameras.keys():
self.detection_out_events[name] = mp.Event()
try: try:
largest_frame = max( largest_frame = max(
[ [
@ -391,8 +370,10 @@ class FrigateApp:
self.detectors[name] = ObjectDetectProcess( self.detectors[name] = ObjectDetectProcess(
name, name,
self.detection_queue, self.detection_queue,
self.detection_out_events, list(self.config.cameras.keys()),
self.config,
detector_config, detector_config,
self.stop_event,
) )
def start_ptz_autotracker(self) -> None: def start_ptz_autotracker(self) -> None:
@ -416,79 +397,22 @@ class FrigateApp:
self.detected_frames_processor.start() self.detected_frames_processor.start()
def start_video_output_processor(self) -> None: def start_video_output_processor(self) -> None:
output_processor = util.Process( output_processor = OutputProcess(self.config, self.stop_event)
target=output_frames,
name="output_processor",
args=(self.config,),
)
output_processor.daemon = True
self.output_processor = output_processor self.output_processor = output_processor
output_processor.start() output_processor.start()
logger.info(f"Output process started: {output_processor.pid}") logger.info(f"Output process started: {output_processor.pid}")
def init_historical_regions(self) -> None: def start_camera_processor(self) -> None:
# delete region grids for removed or renamed cameras self.camera_maintainer = CameraMaintainer(
cameras = list(self.config.cameras.keys()) self.config,
Regions.delete().where(~(Regions.camera << cameras)).execute() self.detection_queue,
self.detected_frames_queue,
# create or update region grids for each camera self.camera_metrics,
for camera in self.config.cameras.values(): self.ptz_metrics,
assert camera.name is not None self.stop_event,
self.region_grids[camera.name] = get_camera_regions_grid( self.metrics_manager,
camera.name, )
camera.detect, self.camera_maintainer.start()
max(self.config.model.width, self.config.model.height),
)
def start_camera_processors(self) -> None:
for name, config in self.config.cameras.items():
if not self.config.cameras[name].enabled_in_config:
logger.info(f"Camera processor not started for disabled camera {name}")
continue
camera_process = util.Process(
target=track_camera,
name=f"camera_processor:{name}",
args=(
name,
config,
self.config.model,
self.config.model.merged_labelmap,
self.detection_queue,
self.detection_out_events[name],
self.detected_frames_queue,
self.camera_metrics[name],
self.ptz_metrics[name],
self.region_grids[name],
),
daemon=True,
)
self.camera_metrics[name].process = camera_process
camera_process.start()
logger.info(f"Camera processor started for {name}: {camera_process.pid}")
def start_camera_capture_processes(self) -> None:
shm_frame_count = self.shm_frame_count()
for name, config in self.config.cameras.items():
if not self.config.cameras[name].enabled_in_config:
logger.info(f"Capture process not started for disabled camera {name}")
continue
# pre-create shms
for i in range(shm_frame_count):
frame_size = config.frame_shape_yuv[0] * config.frame_shape_yuv[1]
self.frame_manager.create(f"{config.name}_frame{i}", frame_size)
capture_process = util.Process(
target=capture_camera,
name=f"camera_capture:{name}",
args=(name, config, shm_frame_count, self.camera_metrics[name]),
)
capture_process.daemon = True
self.camera_metrics[name].capture_process = capture_process
capture_process.start()
logger.info(f"Capture process started for {name}: {capture_process.pid}")
def start_audio_processor(self) -> None: def start_audio_processor(self) -> None:
audio_cameras = [ audio_cameras = [
@ -498,7 +422,9 @@ class FrigateApp:
] ]
if audio_cameras: if audio_cameras:
self.audio_process = AudioProcessor(audio_cameras, self.camera_metrics) self.audio_process = AudioProcessor(
self.config, audio_cameras, self.camera_metrics, self.stop_event
)
self.audio_process.start() self.audio_process.start()
self.processes["audio_detector"] = self.audio_process.pid or 0 self.processes["audio_detector"] = self.audio_process.pid or 0
@ -546,45 +472,6 @@ class FrigateApp:
self.frigate_watchdog = FrigateWatchdog(self.detectors, self.stop_event) self.frigate_watchdog = FrigateWatchdog(self.detectors, self.stop_event)
self.frigate_watchdog.start() self.frigate_watchdog.start()
def shm_frame_count(self) -> int:
total_shm = round(shutil.disk_usage("/dev/shm").total / pow(2, 20), 1)
# required for log files + nginx cache
min_req_shm = 40 + 10
if self.config.birdseye.restream:
min_req_shm += 8
available_shm = total_shm - min_req_shm
cam_total_frame_size = 0.0
for camera in self.config.cameras.values():
if camera.enabled and camera.detect.width and camera.detect.height:
cam_total_frame_size += round(
(camera.detect.width * camera.detect.height * 1.5 + 270480)
/ 1048576,
1,
)
if cam_total_frame_size == 0.0:
return 0
shm_frame_count = min(
int(os.environ.get(SHM_FRAMES_VAR, "50")),
int(available_shm / (cam_total_frame_size)),
)
logger.debug(
f"Calculated total camera size {available_shm} / {cam_total_frame_size} :: {shm_frame_count} frames for each camera in SHM"
)
if shm_frame_count < 20:
logger.warning(
f"The current SHM size of {total_shm}MB is too small, recommend increasing it to at least {round(min_req_shm + cam_total_frame_size * 20)}MB."
)
return shm_frame_count
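For reference, a worked example of the arithmetic being removed here (it now lives behind calculate_shm_requirements, shown later in this commit): assume one 1920x1080 camera and a 256 MB /dev/shm.

frame_mb = round((1920 * 1080 * 1.5 + 270480) / 1048576, 1)  # 3.2 MB per YUV frame
available = 256 - (40 + 10)                  # total minus the log + nginx cache reservation
frames = min(50, int(available / frame_mb))  # min(50, 64) -> 50 frames per camera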
def init_auth(self) -> None: def init_auth(self) -> None:
if self.config.auth.enabled: if self.config.auth.enabled:
if User.select().count() == 0: if User.select().count() == 0:
@ -601,6 +488,8 @@ class FrigateApp:
} }
).execute() ).execute()
self.config.auth.admin_first_time_login = True
logger.info("********************************************************") logger.info("********************************************************")
logger.info("********************************************************") logger.info("********************************************************")
logger.info("*** Auth is enabled, but no users exist. ***") logger.info("*** Auth is enabled, but no users exist. ***")
@ -645,19 +534,17 @@ class FrigateApp:
self.init_recording_manager() self.init_recording_manager()
self.init_review_segment_manager() self.init_review_segment_manager()
self.init_go2rtc() self.init_go2rtc()
self.start_detectors()
self.init_embeddings_manager() self.init_embeddings_manager()
self.bind_database() self.bind_database()
self.check_db_data_migrations() self.check_db_data_migrations()
self.init_inter_process_communicator() self.init_inter_process_communicator()
self.start_detectors()
self.init_dispatcher() self.init_dispatcher()
self.init_embeddings_client() self.init_embeddings_client()
self.start_video_output_processor() self.start_video_output_processor()
self.start_ptz_autotracker() self.start_ptz_autotracker()
self.init_historical_regions()
self.start_detected_frames_processor() self.start_detected_frames_processor()
self.start_camera_processors() self.start_camera_processor()
self.start_camera_capture_processes()
self.start_audio_processor() self.start_audio_processor()
self.start_storage_maintainer() self.start_storage_maintainer()
self.start_stats_emitter() self.start_stats_emitter()
@ -680,6 +567,7 @@ class FrigateApp:
self.onvif_controller, self.onvif_controller,
self.stats_emitter, self.stats_emitter,
self.event_metadata_updater, self.event_metadata_updater,
self.inter_config_updater,
), ),
host="127.0.0.1", host="127.0.0.1",
port=5001, port=5001,
@ -713,24 +601,6 @@ class FrigateApp:
if self.onvif_controller: if self.onvif_controller:
self.onvif_controller.close() self.onvif_controller.close()
# ensure the capture processes are done
for camera, metrics in self.camera_metrics.items():
capture_process = metrics.capture_process
if capture_process is not None:
logger.info(f"Waiting for capture process for {camera} to stop")
capture_process.terminate()
capture_process.join()
# ensure the camera processors are done
for camera, metrics in self.camera_metrics.items():
camera_process = metrics.process
if camera_process is not None:
logger.info(f"Waiting for process for {camera} to stop")
camera_process.terminate()
camera_process.join()
logger.info(f"Closing frame queue for {camera}")
empty_and_close_queue(metrics.frame_queue)
# ensure the detectors are done # ensure the detectors are done
for detector in self.detectors.values(): for detector in self.detectors.values():
detector.stop() detector.stop()
@ -774,14 +644,12 @@ class FrigateApp:
self.inter_config_updater.stop() self.inter_config_updater.stop()
self.event_metadata_updater.stop() self.event_metadata_updater.stop()
self.inter_zmq_proxy.stop() self.inter_zmq_proxy.stop()
self.detection_proxy.stop()
self.frame_manager.cleanup()
while len(self.detection_shms) > 0: while len(self.detection_shms) > 0:
shm = self.detection_shms.pop() shm = self.detection_shms.pop()
shm.close() shm.close()
shm.unlink() shm.unlink()
# exit the mp Manager process
_stop_logging() _stop_logging()
self.metrics_manager.shutdown()
os._exit(os.EX_OK)

View File

@ -1,7 +1,7 @@
import multiprocessing as mp import multiprocessing as mp
from multiprocessing.managers import SyncManager
from multiprocessing.sharedctypes import Synchronized from multiprocessing.sharedctypes import Synchronized
from multiprocessing.synchronize import Event from multiprocessing.synchronize import Event
from typing import Optional
class CameraMetrics: class CameraMetrics:
@ -16,25 +16,25 @@ class CameraMetrics:
frame_queue: mp.Queue frame_queue: mp.Queue
process: Optional[mp.Process] process_pid: Synchronized
capture_process: Optional[mp.Process] capture_process_pid: Synchronized
ffmpeg_pid: Synchronized ffmpeg_pid: Synchronized
def __init__(self): def __init__(self, manager: SyncManager):
self.camera_fps = mp.Value("d", 0) self.camera_fps = manager.Value("d", 0)
self.detection_fps = mp.Value("d", 0) self.detection_fps = manager.Value("d", 0)
self.detection_frame = mp.Value("d", 0) self.detection_frame = manager.Value("d", 0)
self.process_fps = mp.Value("d", 0) self.process_fps = manager.Value("d", 0)
self.skipped_fps = mp.Value("d", 0) self.skipped_fps = manager.Value("d", 0)
self.read_start = mp.Value("d", 0) self.read_start = manager.Value("d", 0)
self.audio_rms = mp.Value("d", 0) self.audio_rms = manager.Value("d", 0)
self.audio_dBFS = mp.Value("d", 0) self.audio_dBFS = manager.Value("d", 0)
self.frame_queue = mp.Queue(maxsize=2) self.frame_queue = manager.Queue(maxsize=2)
self.process = None self.process_pid = manager.Value("i", 0)
self.capture_process = None self.capture_process_pid = manager.Value("i", 0)
self.ffmpeg_pid = mp.Value("i", 0) self.ffmpeg_pid = manager.Value("i", 0)
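The switch from process handles to PIDs follows from the manager change: an mp.Process object cannot be stored in a manager proxy, but an integer PID can. An illustrative sketch (names here are not Frigate's):

import multiprocessing as mp
import time

if __name__ == "__main__":
    manager = mp.Manager()
    pid = manager.Value("i", 0)
    proc = mp.Process(target=time.sleep, args=(0.1,))
    proc.start()
    pid.value = proc.pid or 0  # share the PID, not the unpicklable handle
    proc.join()
    manager.shutdown()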
class PTZMetrics: class PTZMetrics:

View File

@ -1,9 +1,20 @@
"""Manage camera activity and updating listeners.""" """Manage camera activity and updating listeners."""
import datetime
import json
import logging
import random
import string
from collections import Counter from collections import Counter
from typing import Any, Callable from typing import Any, Callable
from frigate.config.config import FrigateConfig from frigate.comms.event_metadata_updater import (
EventMetadataPublisher,
EventMetadataTypeEnum,
)
from frigate.config import CameraConfig, FrigateConfig
logger = logging.getLogger(__name__)
class CameraActivityManager: class CameraActivityManager:
@ -23,26 +34,33 @@ class CameraActivityManager:
if not camera_config.enabled_in_config: if not camera_config.enabled_in_config:
continue continue
self.last_camera_activity[camera_config.name] = {} self.__init_camera(camera_config)
self.camera_all_object_counts[camera_config.name] = Counter()
self.camera_active_object_counts[camera_config.name] = Counter()
for zone, zone_config in camera_config.zones.items(): def __init_camera(self, camera_config: CameraConfig) -> None:
if zone not in self.all_zone_labels: self.last_camera_activity[camera_config.name] = {}
self.zone_all_object_counts[zone] = Counter() self.camera_all_object_counts[camera_config.name] = Counter()
self.zone_active_object_counts[zone] = Counter() self.camera_active_object_counts[camera_config.name] = Counter()
self.all_zone_labels[zone] = set()
self.all_zone_labels[zone].update( for zone, zone_config in camera_config.zones.items():
zone_config.objects if zone not in self.all_zone_labels:
if zone_config.objects self.zone_all_object_counts[zone] = Counter()
else camera_config.objects.track self.zone_active_object_counts[zone] = Counter()
) self.all_zone_labels[zone] = set()
self.all_zone_labels[zone].update(
zone_config.objects
if zone_config.objects
else camera_config.objects.track
)
def update_activity(self, new_activity: dict[str, dict[str, Any]]) -> None: def update_activity(self, new_activity: dict[str, dict[str, Any]]) -> None:
all_objects: list[dict[str, Any]] = [] all_objects: list[dict[str, Any]] = []
for camera in new_activity.keys(): for camera in new_activity.keys():
# handle cameras that were added dynamically
if camera not in self.camera_all_object_counts:
self.__init_camera(self.config.cameras[camera])
new_objects = new_activity[camera].get("objects", []) new_objects = new_activity[camera].get("objects", [])
all_objects.extend(new_objects) all_objects.extend(new_objects)
@ -132,3 +150,110 @@ class CameraActivityManager:
if any_changed: if any_changed:
self.publish(f"{camera}/all", sum(list(all_objects.values()))) self.publish(f"{camera}/all", sum(list(all_objects.values())))
self.publish(f"{camera}/all/active", sum(list(active_objects.values()))) self.publish(f"{camera}/all/active", sum(list(active_objects.values())))
class AudioActivityManager:
def __init__(
self, config: FrigateConfig, publish: Callable[[str, Any], None]
) -> None:
self.config = config
self.publish = publish
self.current_audio_detections: dict[str, dict[str, dict[str, Any]]] = {}
self.event_metadata_publisher = EventMetadataPublisher()
for camera_config in config.cameras.values():
if not camera_config.audio.enabled_in_config:
continue
self.__init_camera(camera_config)
def __init_camera(self, camera_config: CameraConfig) -> None:
self.current_audio_detections[camera_config.name] = {}
def update_activity(self, new_activity: dict[str, dict[str, Any]]) -> None:
now = datetime.datetime.now().timestamp()
for camera in new_activity.keys():
# handle cameras that were added dynamically
if camera not in self.current_audio_detections:
self.__init_camera(self.config.cameras[camera])
new_detections = new_activity[camera].get("detections", [])
if self.compare_audio_activity(camera, new_detections, now):
logger.debug(f"Audio detections for {camera}: {new_activity}")
self.publish(
f"{camera}/audio/all",
"ON" if len(self.current_audio_detections[camera]) > 0 else "OFF",
)
self.publish(
"audio_detections",
json.dumps(self.current_audio_detections),
)
def compare_audio_activity(
self, camera: str, new_detections: list[tuple[str, float]], now: float
) -> bool:
max_not_heard = self.config.cameras[camera].audio.max_not_heard
current = self.current_audio_detections[camera]
any_changed = False
for label, score in new_detections:
any_changed = True
if label in current:
current[label]["last_detection"] = now
current[label]["score"] = score
else:
rand_id = "".join(
random.choices(string.ascii_lowercase + string.digits, k=6)
)
event_id = f"{now}-{rand_id}"
self.publish(f"{camera}/audio/{label}", "ON")
self.event_metadata_publisher.publish(
(
now,
camera,
label,
event_id,
True,
score,
None,
None,
"audio",
{},
),
EventMetadataTypeEnum.manual_event_create.value,
)
current[label] = {
"id": event_id,
"score": score,
"last_detection": now,
}
# expire detections
for label in list(current.keys()):
if now - current[label]["last_detection"] > max_not_heard:
any_changed = True
self.publish(f"{camera}/audio/{label}", "OFF")
self.event_metadata_publisher.publish(
(current[label]["id"], now),
EventMetadataTypeEnum.manual_event_end.value,
)
del current[label]
return any_changed
def expire_all(self, camera: str) -> None:
now = datetime.datetime.now().timestamp()
current = self.current_audio_detections.get(camera, {})
for label in list(current.keys()):
self.publish(f"{camera}/audio/{label}", "OFF")
self.event_metadata_publisher.publish(
(current[label]["id"], now),
EventMetadataTypeEnum.manual_event_end.value,
)
del current[label]
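The expiry rule above is the core of the class: a label stays active while it keeps being re-heard, and ends once now - last_detection exceeds the camera's max_not_heard. A standalone sketch of just that rule (the 30 s value is an assumed setting, not a stated default):

import time

detections = {"speech": {"id": "abc", "score": 0.8, "last_detection": time.time()}}
max_not_heard = 30  # seconds, per-camera audio.max_not_heard

def expire(now: float) -> list[str]:
    ended = [label for label, d in detections.items()
             if now - d["last_detection"] > max_not_heard]
    for label in ended:
        del detections[label]  # Frigate would also publish .../audio/<label> OFF here
    return ended

print(expire(time.time() + 60))  # ['speech']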

View File

@ -0,0 +1,225 @@
"""Create and maintain camera processes / management."""
import logging
import multiprocessing as mp
import threading
from multiprocessing import Queue
from multiprocessing.managers import DictProxy, SyncManager
from multiprocessing.synchronize import Event as MpEvent
from frigate.camera import CameraMetrics, PTZMetrics
from frigate.config import FrigateConfig
from frigate.config.camera import CameraConfig
from frigate.config.camera.updater import (
CameraConfigUpdateEnum,
CameraConfigUpdateSubscriber,
)
from frigate.models import Regions
from frigate.util.builtin import empty_and_close_queue
from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory
from frigate.util.object import get_camera_regions_grid
from frigate.util.services import calculate_shm_requirements
from frigate.video import CameraCapture, CameraTracker
logger = logging.getLogger(__name__)
class CameraMaintainer(threading.Thread):
def __init__(
self,
config: FrigateConfig,
detection_queue: Queue,
detected_frames_queue: Queue,
camera_metrics: DictProxy,
ptz_metrics: dict[str, PTZMetrics],
stop_event: MpEvent,
metrics_manager: SyncManager,
):
super().__init__(name="camera_processor")
self.config = config
self.detection_queue = detection_queue
self.detected_frames_queue = detected_frames_queue
self.stop_event = stop_event
self.camera_metrics = camera_metrics
self.ptz_metrics = ptz_metrics
self.frame_manager = SharedMemoryFrameManager()
self.region_grids: dict[str, list[list[dict[str, int]]]] = {}
self.update_subscriber = CameraConfigUpdateSubscriber(
self.config,
{},
[
CameraConfigUpdateEnum.add,
CameraConfigUpdateEnum.remove,
],
)
self.shm_count = self.__calculate_shm_frame_count()
self.camera_processes: dict[str, mp.Process] = {}
self.capture_processes: dict[str, mp.Process] = {}
self.metrics_manager = metrics_manager
def __init_historical_regions(self) -> None:
# delete region grids for removed or renamed cameras
cameras = list(self.config.cameras.keys())
Regions.delete().where(~(Regions.camera << cameras)).execute()
# create or update region grids for each camera
for camera in self.config.cameras.values():
assert camera.name is not None
self.region_grids[camera.name] = get_camera_regions_grid(
camera.name,
camera.detect,
max(self.config.model.width, self.config.model.height),
)
def __calculate_shm_frame_count(self) -> int:
shm_stats = calculate_shm_requirements(self.config)
if not shm_stats:
# /dev/shm not available
return 0
logger.debug(
f"Calculated total camera size {shm_stats['available']} / "
f"{shm_stats['camera_frame_size']} :: {shm_stats['shm_frame_count']} "
f"frames for each camera in SHM"
)
if shm_stats["shm_frame_count"] < 20:
logger.warning(
f"The current SHM size of {shm_stats['total']}MB is too small, "
f"recommend increasing it to at least {shm_stats['min_shm']}MB."
)
return shm_stats["shm_frame_count"]
def __start_camera_processor(
self, name: str, config: CameraConfig, runtime: bool = False
) -> None:
if not config.enabled_in_config:
logger.info(f"Camera processor not started for disabled camera {name}")
return
if runtime:
self.camera_metrics[name] = CameraMetrics(self.metrics_manager)
self.ptz_metrics[name] = PTZMetrics(autotracker_enabled=False)
self.region_grids[name] = get_camera_regions_grid(
name,
config.detect,
max(self.config.model.width, self.config.model.height),
)
try:
largest_frame = max(
[
det.model.height * det.model.width * 3
if det.model is not None
else 320
for det in self.config.detectors.values()
]
)
UntrackedSharedMemory(name=f"out-{name}", create=True, size=20 * 6 * 4)
UntrackedSharedMemory(
name=name,
create=True,
size=largest_frame,
)
except FileExistsError:
pass
camera_process = CameraTracker(
config,
self.config.model,
self.config.model.merged_labelmap,
self.detection_queue,
self.detected_frames_queue,
self.camera_metrics[name],
self.ptz_metrics[name],
self.region_grids[name],
self.stop_event,
self.config.logger,
)
self.camera_processes[config.name] = camera_process
camera_process.start()
self.camera_metrics[config.name].process_pid.value = camera_process.pid
logger.info(f"Camera processor started for {config.name}: {camera_process.pid}")
def __start_camera_capture(
self, name: str, config: CameraConfig, runtime: bool = False
) -> None:
if not config.enabled_in_config:
logger.info(f"Capture process not started for disabled camera {name}")
return
# pre-create shms
count = 10 if runtime else self.shm_count
for i in range(count):
frame_size = config.frame_shape_yuv[0] * config.frame_shape_yuv[1]
self.frame_manager.create(f"{config.name}_frame{i}", frame_size)
capture_process = CameraCapture(
config,
count,
self.camera_metrics[name],
self.stop_event,
self.config.logger,
)
capture_process.daemon = True
self.capture_processes[name] = capture_process
capture_process.start()
self.camera_metrics[name].capture_process_pid.value = capture_process.pid
logger.info(f"Capture process started for {name}: {capture_process.pid}")
def __stop_camera_capture_process(self, camera: str) -> None:
capture_process = self.capture_processes[camera]
if capture_process is not None:
logger.info(f"Waiting for capture process for {camera} to stop")
capture_process.terminate()
capture_process.join()
def __stop_camera_process(self, camera: str) -> None:
camera_process = self.camera_processes[camera]
if camera_process is not None:
logger.info(f"Waiting for process for {camera} to stop")
camera_process.terminate()
camera_process.join()
logger.info(f"Closing frame queue for {camera}")
empty_and_close_queue(self.camera_metrics[camera].frame_queue)
def run(self):
self.__init_historical_regions()
# start camera processes
for camera, config in self.config.cameras.items():
self.__start_camera_processor(camera, config)
self.__start_camera_capture(camera, config)
while not self.stop_event.wait(1):
updates = self.update_subscriber.check_for_updates()
for update_type, updated_cameras in updates.items():
if update_type == CameraConfigUpdateEnum.add.name:
for camera in updated_cameras:
self.__start_camera_processor(
camera,
self.update_subscriber.camera_configs[camera],
runtime=True,
)
self.__start_camera_capture(
camera,
self.update_subscriber.camera_configs[camera],
runtime=True,
)
elif update_type == CameraConfigUpdateEnum.remove.name:
for camera in updated_cameras:
self.__stop_camera_capture_process(camera)
self.__stop_camera_process(camera)
# ensure the capture processes are done
for camera in self.capture_processes.keys():
self.__stop_camera_capture_process(camera)
# ensure the camera processors are done
for camera in self.camera_processes.keys():
self.__stop_camera_process(camera)
self.update_subscriber.stop()
self.frame_manager.cleanup()

View File

@ -54,7 +54,7 @@ class CameraState:
self.ptz_autotracker_thread = ptz_autotracker_thread self.ptz_autotracker_thread = ptz_autotracker_thread
self.prev_enabled = self.camera_config.enabled self.prev_enabled = self.camera_config.enabled
def get_current_frame(self, draw_options: dict[str, Any] = {}): def get_current_frame(self, draw_options: dict[str, Any] = {}) -> np.ndarray:
with self.current_frame_lock: with self.current_frame_lock:
frame_copy = np.copy(self._current_frame) frame_copy = np.copy(self._current_frame)
frame_time = self.current_frame_time frame_time = self.current_frame_time
@ -228,12 +228,51 @@ class CameraState:
position=self.camera_config.timestamp_style.position, position=self.camera_config.timestamp_style.position,
) )
if draw_options.get("paths"):
for obj in tracked_objects.values():
if obj["frame_time"] == frame_time and obj["path_data"]:
color = self.config.model.colormap.get(
obj["label"], (255, 255, 255)
)
path_points = [
(
int(point[0][0] * self.camera_config.detect.width),
int(point[0][1] * self.camera_config.detect.height),
)
for point in obj["path_data"]
]
for point in path_points:
cv2.circle(frame_copy, point, 5, color, -1)
for i in range(1, len(path_points)):
cv2.line(
frame_copy,
path_points[i - 1],
path_points[i],
color,
2,
)
bottom_center = (
int((obj["box"][0] + obj["box"][2]) / 2),
int(obj["box"][3]),
)
cv2.line(
frame_copy,
path_points[-1],
bottom_center,
color,
2,
)
return frame_copy return frame_copy
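The path points are stored normalized to [0, 1], so the conversion above scales by the detect resolution; for example at 1280x720:

point = ((0.5, 0.25),)       # one entry of obj["path_data"]
x = int(point[0][0] * 1280)  # 640
y = int(point[0][1] * 720)   # 180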
def finished(self, obj_id): def finished(self, obj_id):
del self.tracked_objects[obj_id] del self.tracked_objects[obj_id]
def on(self, event_type: str, callback: Callable[[dict], None]): def on(self, event_type: str, callback: Callable):
self.callbacks[event_type].append(callback) self.callbacks[event_type].append(callback)
def update( def update(
@ -491,17 +530,19 @@ class CameraState:
# write clean snapshot if enabled # write clean snapshot if enabled
if self.camera_config.snapshots.clean_copy: if self.camera_config.snapshots.clean_copy:
ret, png = cv2.imencode(".png", img_frame) ret, webp = cv2.imencode(
".webp", img_frame, [int(cv2.IMWRITE_WEBP_QUALITY), 80]
)
if ret: if ret:
with open( with open(
os.path.join( os.path.join(
CLIPS_DIR, CLIPS_DIR,
f"{self.camera_config.name}-{event_id}-clean.png", f"{self.camera_config.name}-{event_id}-clean.webp",
), ),
"wb", "wb",
) as p: ) as p:
p.write(png.tobytes()) p.write(webp.tobytes())
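The clean copy moves from lossless PNG to WebP at quality 80, which is typically far smaller for camera frames. The encode call in isolation (frame is any BGR array; the file name is illustrative):

import cv2
import numpy as np

frame = np.zeros((720, 1280, 3), dtype=np.uint8)
ok, webp = cv2.imencode(".webp", frame, [int(cv2.IMWRITE_WEBP_QUALITY), 80])
if ok:
    with open("example-clean.webp", "wb") as f:
        f.write(webp.tobytes())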
# write jpg snapshot with optional annotations # write jpg snapshot with optional annotations
if draw.get("boxes") and isinstance(draw.get("boxes"), list): if draw.get("boxes") and isinstance(draw.get("boxes"), list):

View File

@ -1,8 +1,9 @@
"""Facilitates communication between processes.""" """Facilitates communication between processes."""
import multiprocessing as mp import multiprocessing as mp
from _pickle import UnpicklingError
from multiprocessing.synchronize import Event as MpEvent from multiprocessing.synchronize import Event as MpEvent
from typing import Any, Optional from typing import Any
import zmq import zmq
@ -32,7 +33,7 @@ class ConfigPublisher:
class ConfigSubscriber: class ConfigSubscriber:
"""Simplifies receiving an updated config.""" """Simplifies receiving an updated config."""
def __init__(self, topic: str, exact=False) -> None: def __init__(self, topic: str, exact: bool = False) -> None:
self.topic = topic self.topic = topic
self.exact = exact self.exact = exact
self.context = zmq.Context() self.context = zmq.Context()
@ -40,7 +41,7 @@ class ConfigSubscriber:
self.socket.setsockopt_string(zmq.SUBSCRIBE, topic) self.socket.setsockopt_string(zmq.SUBSCRIBE, topic)
self.socket.connect(SOCKET_PUB_SUB) self.socket.connect(SOCKET_PUB_SUB)
def check_for_update(self) -> Optional[tuple[str, Any]]: def check_for_update(self) -> tuple[str, Any] | tuple[None, None]:
"""Returns updated config or None if no update.""" """Returns updated config or None if no update."""
try: try:
topic = self.socket.recv_string(flags=zmq.NOBLOCK) topic = self.socket.recv_string(flags=zmq.NOBLOCK)
@ -50,7 +51,7 @@ class ConfigSubscriber:
return (topic, obj) return (topic, obj)
else: else:
return (None, None) return (None, None)
except zmq.ZMQError: except (zmq.ZMQError, UnicodeDecodeError, UnpicklingError):
return (None, None) return (None, None)
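Widening the except means one corrupt or non-pickle frame no longer kills the poll loop; it is reported as "no update" just like an empty queue. A self-contained sketch of the same non-blocking pattern (socket endpoint and names are illustrative, not Frigate's):

import pickle
import zmq

ctx = zmq.Context.instance()
pub = ctx.socket(zmq.PUB)
pub.bind("inproc://config-demo")
sub = ctx.socket(zmq.SUB)
sub.setsockopt_string(zmq.SUBSCRIBE, "config/")
sub.connect("inproc://config-demo")

def check_for_update():
    try:
        topic = sub.recv_string(flags=zmq.NOBLOCK)
        obj = pickle.loads(sub.recv(flags=zmq.NOBLOCK))
        return (topic, obj)
    except (zmq.ZMQError, UnicodeDecodeError, pickle.UnpicklingError):
        return (None, None)

print(check_for_update())  # (None, None) until a publisher sends something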
def stop(self) -> None: def stop(self) -> None:

View File

@ -1,7 +1,7 @@
"""Facilitates communication between processes.""" """Facilitates communication between processes."""
from enum import Enum from enum import Enum
from typing import Any, Optional from typing import Any
from .zmq_proxy import Publisher, Subscriber from .zmq_proxy import Publisher, Subscriber
@ -19,8 +19,7 @@ class DetectionPublisher(Publisher):
topic_base = "detection/" topic_base = "detection/"
def __init__(self, topic: DetectionTypeEnum) -> None: def __init__(self, topic: str) -> None:
topic = topic.value
super().__init__(topic) super().__init__(topic)
@ -29,16 +28,15 @@ class DetectionSubscriber(Subscriber):
topic_base = "detection/" topic_base = "detection/"
def __init__(self, topic: DetectionTypeEnum) -> None: def __init__(self, topic: str) -> None:
topic = topic.value
super().__init__(topic) super().__init__(topic)
def check_for_update( def check_for_update(
self, timeout: float = None self, timeout: float | None = None
) -> Optional[tuple[DetectionTypeEnum, Any]]: ) -> tuple[str, Any] | tuple[None, None] | None:
return super().check_for_update(timeout) return super().check_for_update(timeout)
def _return_object(self, topic: str, payload: Any) -> Any: def _return_object(self, topic: str, payload: Any) -> Any:
if payload is None: if payload is None:
return (None, None) return (None, None)
return (DetectionTypeEnum[topic[len(self.topic_base) :]], payload) return (topic[len(self.topic_base) :], payload)
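With the enum removed from the pub/sub signature, callers pass the topic string directly and _return_object hands the raw string back. A sketch of the call-site change (import path assumed from Frigate's comms package):

from frigate.comms.detections_updater import DetectionPublisher, DetectionSubscriber

publisher = DetectionPublisher("video")  # was DetectionPublisher(DetectionTypeEnum.video)
subscriber = DetectionSubscriber("video")
topic, payload = subscriber.check_for_update(timeout=0.1) or (None, None)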

View File

@ -3,24 +3,32 @@
import datetime import datetime
import json import json
import logging import logging
from typing import Any, Callable, Optional from typing import Any, Callable, Optional, cast
from frigate.camera import PTZMetrics from frigate.camera import PTZMetrics
from frigate.camera.activity_manager import CameraActivityManager from frigate.camera.activity_manager import AudioActivityManager, CameraActivityManager
from frigate.comms.base_communicator import Communicator from frigate.comms.base_communicator import Communicator
from frigate.comms.config_updater import ConfigPublisher
from frigate.comms.webpush import WebPushClient from frigate.comms.webpush import WebPushClient
from frigate.config import BirdseyeModeEnum, FrigateConfig from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.config.camera.updater import (
CameraConfigUpdateEnum,
CameraConfigUpdatePublisher,
CameraConfigUpdateTopic,
)
from frigate.const import ( from frigate.const import (
CLEAR_ONGOING_REVIEW_SEGMENTS, CLEAR_ONGOING_REVIEW_SEGMENTS,
EXPIRE_AUDIO_ACTIVITY,
INSERT_MANY_RECORDINGS, INSERT_MANY_RECORDINGS,
INSERT_PREVIEW, INSERT_PREVIEW,
NOTIFICATION_TEST, NOTIFICATION_TEST,
REQUEST_REGION_GRID, REQUEST_REGION_GRID,
UPDATE_AUDIO_ACTIVITY,
UPDATE_BIRDSEYE_LAYOUT,
UPDATE_CAMERA_ACTIVITY, UPDATE_CAMERA_ACTIVITY,
UPDATE_EMBEDDINGS_REINDEX_PROGRESS, UPDATE_EMBEDDINGS_REINDEX_PROGRESS,
UPDATE_EVENT_DESCRIPTION, UPDATE_EVENT_DESCRIPTION,
UPDATE_MODEL_STATE, UPDATE_MODEL_STATE,
UPDATE_REVIEW_DESCRIPTION,
UPSERT_REVIEW_SEGMENT, UPSERT_REVIEW_SEGMENT,
) )
from frigate.models import Event, Previews, Recordings, ReviewSegment from frigate.models import Event, Previews, Recordings, ReviewSegment
@ -38,7 +46,7 @@ class Dispatcher:
def __init__( def __init__(
self, self,
config: FrigateConfig, config: FrigateConfig,
config_updater: ConfigPublisher, config_updater: CameraConfigUpdatePublisher,
onvif: OnvifController, onvif: OnvifController,
ptz_metrics: dict[str, PTZMetrics], ptz_metrics: dict[str, PTZMetrics],
communicators: list[Communicator], communicators: list[Communicator],
@ -49,11 +57,13 @@ class Dispatcher:
self.ptz_metrics = ptz_metrics self.ptz_metrics = ptz_metrics
self.comms = communicators self.comms = communicators
self.camera_activity = CameraActivityManager(config, self.publish) self.camera_activity = CameraActivityManager(config, self.publish)
self.model_state = {} self.audio_activity = AudioActivityManager(config, self.publish)
self.embeddings_reindex = {} self.model_state: dict[str, ModelStatusTypesEnum] = {}
self.embeddings_reindex: dict[str, Any] = {}
self.birdseye_layout: dict[str, Any] = {}
self._camera_settings_handlers: dict[str, Callable] = { self._camera_settings_handlers: dict[str, Callable] = {
"audio": self._on_audio_command, "audio": self._on_audio_command,
"audio_transcription": self._on_audio_transcription_command,
"detect": self._on_detect_command, "detect": self._on_detect_command,
"enabled": self._on_enabled_command, "enabled": self._on_enabled_command,
"improve_contrast": self._on_motion_improve_contrast_command, "improve_contrast": self._on_motion_improve_contrast_command,
@ -68,6 +78,8 @@ class Dispatcher:
"birdseye_mode": self._on_birdseye_mode_command, "birdseye_mode": self._on_birdseye_mode_command,
"review_alerts": self._on_alerts_command, "review_alerts": self._on_alerts_command,
"review_detections": self._on_detections_command, "review_detections": self._on_detections_command,
"object_descriptions": self._on_object_description_command,
"review_descriptions": self._on_review_description_command,
} }
self._global_settings_handlers: dict[str, Callable] = { self._global_settings_handlers: dict[str, Callable] = {
"notifications": self._on_global_notification_command, "notifications": self._on_global_notification_command,
@ -80,10 +92,12 @@ class Dispatcher:
(comm for comm in communicators if isinstance(comm, WebPushClient)), None (comm for comm in communicators if isinstance(comm, WebPushClient)), None
) )
def _receive(self, topic: str, payload: str) -> Optional[Any]: def _receive(self, topic: str, payload: Any) -> Optional[Any]:
"""Handle receiving of payload from communicators.""" """Handle receiving of payload from communicators."""
def handle_camera_command(command_type, camera_name, command, payload): def handle_camera_command(
command_type: str, camera_name: str, command: str, payload: str
) -> None:
try: try:
if command_type == "set": if command_type == "set":
self._camera_settings_handlers[command](camera_name, payload) self._camera_settings_handlers[command](camera_name, payload)
@ -92,13 +106,13 @@ class Dispatcher:
except KeyError: except KeyError:
logger.error(f"Invalid command type or handler: {command_type}") logger.error(f"Invalid command type or handler: {command_type}")
def handle_restart(): def handle_restart() -> None:
restart_frigate() restart_frigate()
def handle_insert_many_recordings(): def handle_insert_many_recordings() -> None:
Recordings.insert_many(payload).execute() Recordings.insert_many(payload).execute()
def handle_request_region_grid(): def handle_request_region_grid() -> Any:
camera = payload camera = payload
grid = get_camera_regions_grid( grid = get_camera_regions_grid(
camera, camera,
@ -107,26 +121,32 @@ class Dispatcher:
) )
return grid return grid
def handle_insert_preview(): def handle_insert_preview() -> None:
Previews.insert(payload).execute() Previews.insert(payload).execute()
def handle_upsert_review_segment(): def handle_upsert_review_segment() -> None:
ReviewSegment.insert(payload).on_conflict( ReviewSegment.insert(payload).on_conflict(
conflict_target=[ReviewSegment.id], conflict_target=[ReviewSegment.id],
update=payload, update=payload,
).execute() ).execute()
def handle_clear_ongoing_review_segments(): def handle_clear_ongoing_review_segments() -> None:
ReviewSegment.update(end_time=datetime.datetime.now().timestamp()).where( ReviewSegment.update(end_time=datetime.datetime.now().timestamp()).where(
ReviewSegment.end_time.is_null(True) ReviewSegment.end_time.is_null(True)
).execute() ).execute()
def handle_update_camera_activity(): def handle_update_camera_activity() -> None:
self.camera_activity.update_activity(payload) self.camera_activity.update_activity(payload)
def handle_update_event_description(): def handle_update_audio_activity() -> None:
self.audio_activity.update_activity(payload)
def handle_expire_audio_activity() -> None:
self.audio_activity.expire_all(payload)
def handle_update_event_description() -> None:
event: Event = Event.get(Event.id == payload["id"]) event: Event = Event.get(Event.id == payload["id"])
event.data["description"] = payload["description"] cast(dict, event.data)["description"] = payload["description"]
event.save() event.save()
self.publish( self.publish(
"tracked_object_update", "tracked_object_update",
@ -140,31 +160,48 @@ class Dispatcher:
), ),
) )
def handle_update_model_state(): def handle_update_review_description() -> None:
final_data = payload["after"]
ReviewSegment.insert(final_data).on_conflict(
conflict_target=[ReviewSegment.id],
update=final_data,
).execute()
self.publish("reviews", json.dumps(payload))
def handle_update_model_state() -> None:
if payload: if payload:
model = payload["model"] model = payload["model"]
state = payload["state"] state = payload["state"]
self.model_state[model] = ModelStatusTypesEnum[state] self.model_state[model] = ModelStatusTypesEnum[state]
self.publish("model_state", json.dumps(self.model_state)) self.publish("model_state", json.dumps(self.model_state))
def handle_model_state(): def handle_model_state() -> None:
self.publish("model_state", json.dumps(self.model_state.copy())) self.publish("model_state", json.dumps(self.model_state.copy()))
def handle_update_embeddings_reindex_progress(): def handle_update_embeddings_reindex_progress() -> None:
self.embeddings_reindex = payload self.embeddings_reindex = payload
self.publish( self.publish(
"embeddings_reindex_progress", "embeddings_reindex_progress",
json.dumps(payload), json.dumps(payload),
) )
def handle_embeddings_reindex_progress(): def handle_embeddings_reindex_progress() -> None:
self.publish( self.publish(
"embeddings_reindex_progress", "embeddings_reindex_progress",
json.dumps(self.embeddings_reindex.copy()), json.dumps(self.embeddings_reindex.copy()),
) )
def handle_on_connect(): def handle_update_birdseye_layout() -> None:
if payload:
self.birdseye_layout = payload
self.publish("birdseye_layout", json.dumps(self.birdseye_layout))
def handle_birdseye_layout() -> None:
self.publish("birdseye_layout", json.dumps(self.birdseye_layout.copy()))
def handle_on_connect() -> None:
camera_status = self.camera_activity.last_camera_activity.copy() camera_status = self.camera_activity.last_camera_activity.copy()
audio_detections = self.audio_activity.current_audio_detections.copy()
cameras_with_status = camera_status.keys() cameras_with_status = camera_status.keys()
for camera in self.config.cameras.keys(): for camera in self.config.cameras.keys():
@ -177,6 +214,9 @@ class Dispatcher:
"snapshots": self.config.cameras[camera].snapshots.enabled, "snapshots": self.config.cameras[camera].snapshots.enabled,
"record": self.config.cameras[camera].record.enabled, "record": self.config.cameras[camera].record.enabled,
"audio": self.config.cameras[camera].audio.enabled, "audio": self.config.cameras[camera].audio.enabled,
"audio_transcription": self.config.cameras[
camera
].audio_transcription.live_enabled,
"notifications": self.config.cameras[camera].notifications.enabled, "notifications": self.config.cameras[camera].notifications.enabled,
"notifications_suspended": int( "notifications_suspended": int(
self.web_push_client.suspended_cameras.get(camera, 0) self.web_push_client.suspended_cameras.get(camera, 0)
@ -189,6 +229,12 @@ class Dispatcher:
].onvif.autotracking.enabled, ].onvif.autotracking.enabled,
"alerts": self.config.cameras[camera].review.alerts.enabled, "alerts": self.config.cameras[camera].review.alerts.enabled,
"detections": self.config.cameras[camera].review.detections.enabled, "detections": self.config.cameras[camera].review.detections.enabled,
"object_descriptions": self.config.cameras[
camera
].objects.genai.enabled,
"review_descriptions": self.config.cameras[
camera
].review.genai.enabled,
} }
self.publish("camera_activity", json.dumps(camera_status)) self.publish("camera_activity", json.dumps(camera_status))
@ -197,8 +243,10 @@ class Dispatcher:
"embeddings_reindex_progress", "embeddings_reindex_progress",
json.dumps(self.embeddings_reindex.copy()), json.dumps(self.embeddings_reindex.copy()),
) )
self.publish("birdseye_layout", json.dumps(self.birdseye_layout.copy()))
self.publish("audio_detections", json.dumps(audio_detections))
def handle_notification_test(): def handle_notification_test() -> None:
self.publish("notification_test", "Test notification") self.publish("notification_test", "Test notification")
# Dictionary mapping topic to handlers # Dictionary mapping topic to handlers
@ -209,13 +257,18 @@ class Dispatcher:
UPSERT_REVIEW_SEGMENT: handle_upsert_review_segment, UPSERT_REVIEW_SEGMENT: handle_upsert_review_segment,
CLEAR_ONGOING_REVIEW_SEGMENTS: handle_clear_ongoing_review_segments, CLEAR_ONGOING_REVIEW_SEGMENTS: handle_clear_ongoing_review_segments,
UPDATE_CAMERA_ACTIVITY: handle_update_camera_activity, UPDATE_CAMERA_ACTIVITY: handle_update_camera_activity,
UPDATE_AUDIO_ACTIVITY: handle_update_audio_activity,
EXPIRE_AUDIO_ACTIVITY: handle_expire_audio_activity,
UPDATE_EVENT_DESCRIPTION: handle_update_event_description, UPDATE_EVENT_DESCRIPTION: handle_update_event_description,
UPDATE_REVIEW_DESCRIPTION: handle_update_review_description,
UPDATE_MODEL_STATE: handle_update_model_state, UPDATE_MODEL_STATE: handle_update_model_state,
UPDATE_EMBEDDINGS_REINDEX_PROGRESS: handle_update_embeddings_reindex_progress, UPDATE_EMBEDDINGS_REINDEX_PROGRESS: handle_update_embeddings_reindex_progress,
UPDATE_BIRDSEYE_LAYOUT: handle_update_birdseye_layout,
NOTIFICATION_TEST: handle_notification_test, NOTIFICATION_TEST: handle_notification_test,
"restart": handle_restart, "restart": handle_restart,
"embeddingsReindexProgress": handle_embeddings_reindex_progress, "embeddingsReindexProgress": handle_embeddings_reindex_progress,
"modelState": handle_model_state, "modelState": handle_model_state,
"birdseyeLayout": handle_birdseye_layout,
"onConnect": handle_on_connect, "onConnect": handle_on_connect,
} }
@ -243,11 +296,12 @@ class Dispatcher:
logger.error( logger.error(
f"Received invalid {topic.split('/')[-1]} command: {topic}" f"Received invalid {topic.split('/')[-1]} command: {topic}"
) )
return return None
elif topic in topic_handlers: elif topic in topic_handlers:
return topic_handlers[topic]() return topic_handlers[topic]()
else: else:
self.publish(topic, payload, retain=False) self.publish(topic, payload, retain=False)
return None
def publish(self, topic: str, payload: Any, retain: bool = False) -> None: def publish(self, topic: str, payload: Any, retain: bool = False) -> None:
"""Handle publishing to communicators.""" """Handle publishing to communicators."""
@ -273,8 +327,11 @@ class Dispatcher:
f"Turning on motion for {camera_name} due to detection being enabled." f"Turning on motion for {camera_name} due to detection being enabled."
) )
motion_settings.enabled = True motion_settings.enabled = True
self.config_updater.publish( self.config_updater.publish_update(
f"config/motion/{camera_name}", motion_settings CameraConfigUpdateTopic(
CameraConfigUpdateEnum.motion, camera_name
),
motion_settings,
) )
self.publish(f"{camera_name}/motion/state", payload, retain=True) self.publish(f"{camera_name}/motion/state", payload, retain=True)
elif payload == "OFF": elif payload == "OFF":
@ -282,7 +339,10 @@ class Dispatcher:
logger.info(f"Turning off detection for {camera_name}") logger.info(f"Turning off detection for {camera_name}")
detect_settings.enabled = False detect_settings.enabled = False
self.config_updater.publish(f"config/detect/{camera_name}", detect_settings) self.config_updater.publish_update(
CameraConfigUpdateTopic(CameraConfigUpdateEnum.detect, camera_name),
detect_settings,
)
self.publish(f"{camera_name}/detect/state", payload, retain=True) self.publish(f"{camera_name}/detect/state", payload, retain=True)
def _on_enabled_command(self, camera_name: str, payload: str) -> None: def _on_enabled_command(self, camera_name: str, payload: str) -> None:
@ -303,7 +363,10 @@ class Dispatcher:
logger.info(f"Turning off camera {camera_name}") logger.info(f"Turning off camera {camera_name}")
camera_settings.enabled = False camera_settings.enabled = False
self.config_updater.publish(f"config/enabled/{camera_name}", camera_settings) self.config_updater.publish_update(
CameraConfigUpdateTopic(CameraConfigUpdateEnum.enabled, camera_name),
camera_settings.enabled,
)
self.publish(f"{camera_name}/enabled/state", payload, retain=True) self.publish(f"{camera_name}/enabled/state", payload, retain=True)
def _on_motion_command(self, camera_name: str, payload: str) -> None: def _on_motion_command(self, camera_name: str, payload: str) -> None:
@ -326,7 +389,10 @@ class Dispatcher:
logger.info(f"Turning off motion for {camera_name}") logger.info(f"Turning off motion for {camera_name}")
motion_settings.enabled = False motion_settings.enabled = False
self.config_updater.publish(f"config/motion/{camera_name}", motion_settings) self.config_updater.publish_update(
CameraConfigUpdateTopic(CameraConfigUpdateEnum.motion, camera_name),
motion_settings,
)
self.publish(f"{camera_name}/motion/state", payload, retain=True) self.publish(f"{camera_name}/motion/state", payload, retain=True)
def _on_motion_improve_contrast_command( def _on_motion_improve_contrast_command(
@ -338,13 +404,16 @@ class Dispatcher:
if payload == "ON": if payload == "ON":
if not motion_settings.improve_contrast: if not motion_settings.improve_contrast:
logger.info(f"Turning on improve contrast for {camera_name}") logger.info(f"Turning on improve contrast for {camera_name}")
motion_settings.improve_contrast = True # type: ignore[union-attr] motion_settings.improve_contrast = True
elif payload == "OFF": elif payload == "OFF":
if motion_settings.improve_contrast: if motion_settings.improve_contrast:
logger.info(f"Turning off improve contrast for {camera_name}") logger.info(f"Turning off improve contrast for {camera_name}")
motion_settings.improve_contrast = False # type: ignore[union-attr] motion_settings.improve_contrast = False
self.config_updater.publish(f"config/motion/{camera_name}", motion_settings) self.config_updater.publish_update(
CameraConfigUpdateTopic(CameraConfigUpdateEnum.motion, camera_name),
motion_settings,
)
self.publish(f"{camera_name}/improve_contrast/state", payload, retain=True) self.publish(f"{camera_name}/improve_contrast/state", payload, retain=True)
def _on_ptz_autotracker_command(self, camera_name: str, payload: str) -> None: def _on_ptz_autotracker_command(self, camera_name: str, payload: str) -> None:
@ -383,8 +452,11 @@ class Dispatcher:
motion_settings = self.config.cameras[camera_name].motion motion_settings = self.config.cameras[camera_name].motion
logger.info(f"Setting motion contour area for {camera_name}: {payload}") logger.info(f"Setting motion contour area for {camera_name}: {payload}")
motion_settings.contour_area = payload # type: ignore[union-attr] motion_settings.contour_area = payload
self.config_updater.publish(f"config/motion/{camera_name}", motion_settings) self.config_updater.publish_update(
CameraConfigUpdateTopic(CameraConfigUpdateEnum.motion, camera_name),
motion_settings,
)
self.publish(f"{camera_name}/motion_contour_area/state", payload, retain=True) self.publish(f"{camera_name}/motion_contour_area/state", payload, retain=True)
def _on_motion_threshold_command(self, camera_name: str, payload: int) -> None: def _on_motion_threshold_command(self, camera_name: str, payload: int) -> None:
@ -397,8 +469,11 @@ class Dispatcher:
motion_settings = self.config.cameras[camera_name].motion motion_settings = self.config.cameras[camera_name].motion
logger.info(f"Setting motion threshold for {camera_name}: {payload}") logger.info(f"Setting motion threshold for {camera_name}: {payload}")
motion_settings.threshold = payload # type: ignore[union-attr] motion_settings.threshold = payload
self.config_updater.publish(f"config/motion/{camera_name}", motion_settings) self.config_updater.publish_update(
CameraConfigUpdateTopic(CameraConfigUpdateEnum.motion, camera_name),
motion_settings,
)
self.publish(f"{camera_name}/motion_threshold/state", payload, retain=True) self.publish(f"{camera_name}/motion_threshold/state", payload, retain=True)
def _on_global_notification_command(self, payload: str) -> None: def _on_global_notification_command(self, payload: str) -> None:
@ -409,9 +484,9 @@ class Dispatcher:
notification_settings = self.config.notifications notification_settings = self.config.notifications
logger.info(f"Setting all notifications: {payload}") logger.info(f"Setting all notifications: {payload}")
notification_settings.enabled = payload == "ON" # type: ignore[union-attr] notification_settings.enabled = payload == "ON"
self.config_updater.publish( self.config_updater.publisher.publish(
"config/notifications", {"_global_notifications": notification_settings} "config/notifications", notification_settings
) )
self.publish("notifications/state", payload, retain=True) self.publish("notifications/state", payload, retain=True)
@@ -434,9 +509,43 @@ class Dispatcher:
                 logger.info(f"Turning off audio detection for {camera_name}")
                 audio_settings.enabled = False
-        self.config_updater.publish(f"config/audio/{camera_name}", audio_settings)
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.audio, camera_name),
+            audio_settings,
+        )
         self.publish(f"{camera_name}/audio/state", payload, retain=True)

+    def _on_audio_transcription_command(self, camera_name: str, payload: str) -> None:
+        """Callback for live audio transcription topic."""
+        audio_transcription_settings = self.config.cameras[
+            camera_name
+        ].audio_transcription
+
+        if payload == "ON":
+            if not self.config.cameras[
+                camera_name
+            ].audio_transcription.enabled_in_config:
+                logger.error(
+                    "Audio transcription must be enabled in the config to be turned on via MQTT."
+                )
+                return
+
+            if not audio_transcription_settings.live_enabled:
+                logger.info(f"Turning on live audio transcription for {camera_name}")
+                audio_transcription_settings.live_enabled = True
+        elif payload == "OFF":
+            if audio_transcription_settings.live_enabled:
+                logger.info(f"Turning off live audio transcription for {camera_name}")
+                audio_transcription_settings.live_enabled = False
+
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(
+                CameraConfigUpdateEnum.audio_transcription, camera_name
+            ),
+            audio_transcription_settings,
+        )
+        self.publish(f"{camera_name}/audio_transcription/state", payload, retain=True)
+
     def _on_recordings_command(self, camera_name: str, payload: str) -> None:
         """Callback for recordings topic."""
         record_settings = self.config.cameras[camera_name].record
@@ -456,7 +565,10 @@ class Dispatcher:
                 logger.info(f"Turning off recordings for {camera_name}")
                 record_settings.enabled = False
-        self.config_updater.publish(f"config/record/{camera_name}", record_settings)
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.record, camera_name),
+            record_settings,
+        )
         self.publish(f"{camera_name}/recordings/state", payload, retain=True)

     def _on_snapshots_command(self, camera_name: str, payload: str) -> None:
@@ -472,6 +584,10 @@ class Dispatcher:
                 logger.info(f"Turning off snapshots for {camera_name}")
                 snapshots_settings.enabled = False
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.snapshots, camera_name),
+            snapshots_settings,
+        )
         self.publish(f"{camera_name}/snapshots/state", payload, retain=True)

     def _on_ptz_command(self, camera_name: str, payload: str) -> None:
@@ -506,7 +622,10 @@ class Dispatcher:
                 logger.info(f"Turning off birdseye for {camera_name}")
                 birdseye_settings.enabled = False
-        self.config_updater.publish(f"config/birdseye/{camera_name}", birdseye_settings)
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.birdseye, camera_name),
+            birdseye_settings,
+        )
         self.publish(f"{camera_name}/birdseye/state", payload, retain=True)

     def _on_birdseye_mode_command(self, camera_name: str, payload: str) -> None:
@@ -527,7 +646,10 @@ class Dispatcher:
             f"Setting birdseye mode for {camera_name} to {birdseye_settings.mode}"
         )
-        self.config_updater.publish(f"config/birdseye/{camera_name}", birdseye_settings)
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.birdseye, camera_name),
+            birdseye_settings,
+        )
         self.publish(f"{camera_name}/birdseye_mode/state", payload, retain=True)

     def _on_camera_notification_command(self, camera_name: str, payload: str) -> None:
@@ -559,8 +681,9 @@ class Dispatcher:
         ):
             self.web_push_client.suspended_cameras[camera_name] = 0

-        self.config_updater.publish(
-            "config/notifications", {camera_name: notification_settings}
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.notifications, camera_name),
+            notification_settings,
         )
         self.publish(f"{camera_name}/notifications/state", payload, retain=True)
         self.publish(f"{camera_name}/notifications/suspended", "0", retain=True)
@@ -617,7 +740,10 @@ class Dispatcher:
                 logger.info(f"Turning off alerts for {camera_name}")
                 review_settings.alerts.enabled = False
-        self.config_updater.publish(f"config/review/{camera_name}", review_settings)
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.review, camera_name),
+            review_settings,
+        )
         self.publish(f"{camera_name}/review_alerts/state", payload, retain=True)

     def _on_detections_command(self, camera_name: str, payload: str) -> None:
@@ -639,5 +765,58 @@ class Dispatcher:
                 logger.info(f"Turning off detections for {camera_name}")
                 review_settings.detections.enabled = False
-        self.config_updater.publish(f"config/review/{camera_name}", review_settings)
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.review, camera_name),
+            review_settings,
+        )
         self.publish(f"{camera_name}/review_detections/state", payload, retain=True)
+
+    def _on_object_description_command(self, camera_name: str, payload: str) -> None:
+        """Callback for object description topic."""
+        genai_settings = self.config.cameras[camera_name].objects.genai
+
+        if payload == "ON":
+            if not self.config.cameras[camera_name].objects.genai.enabled_in_config:
+                logger.error(
+                    "GenAI must be enabled in the config to be turned on via MQTT."
+                )
+                return
+
+            if not genai_settings.enabled:
+                logger.info(f"Turning on object descriptions for {camera_name}")
+                genai_settings.enabled = True
+        elif payload == "OFF":
+            if genai_settings.enabled:
+                logger.info(f"Turning off object descriptions for {camera_name}")
+                genai_settings.enabled = False
+
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.object_genai, camera_name),
+            genai_settings,
+        )
+        self.publish(f"{camera_name}/object_descriptions/state", payload, retain=True)
+
+    def _on_review_description_command(self, camera_name: str, payload: str) -> None:
+        """Callback for review description topic."""
+        genai_settings = self.config.cameras[camera_name].review.genai
+
+        if payload == "ON":
+            if not self.config.cameras[camera_name].review.genai.enabled_in_config:
+                logger.error(
+                    "GenAI Alerts or Detections must be enabled in the config to be turned on via MQTT."
+                )
+                return
+
+            if not genai_settings.enabled:
+                logger.info(f"Turning on review descriptions for {camera_name}")
+                genai_settings.enabled = True
+        elif payload == "OFF":
+            if genai_settings.enabled:
+                logger.info(f"Turning off review descriptions for {camera_name}")
+                genai_settings.enabled = False
+
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.review_genai, camera_name),
+            genai_settings,
+        )
+        self.publish(f"{camera_name}/review_descriptions/state", payload, retain=True)
View File
@@ -1,23 +1,36 @@
 """Facilitates communication between processes."""

+import logging
 from enum import Enum
 from typing import Any, Callable

 import zmq

+logger = logging.getLogger(__name__)
+
 SOCKET_REP_REQ = "ipc:///tmp/cache/embeddings"


 class EmbeddingsRequestEnum(Enum):
+    # audio
+    transcribe_audio = "transcribe_audio"
+    # custom classification
+    reload_classification_model = "reload_classification_model"
+    # face
     clear_face_classifier = "clear_face_classifier"
-    embed_description = "embed_description"
-    embed_thumbnail = "embed_thumbnail"
-    generate_search = "generate_search"
     recognize_face = "recognize_face"
     register_face = "register_face"
     reprocess_face = "reprocess_face"
-    reprocess_plate = "reprocess_plate"
+    # semantic search
+    embed_description = "embed_description"
+    embed_thumbnail = "embed_thumbnail"
+    generate_search = "generate_search"
     reindex = "reindex"
+    # LPR
+    reprocess_plate = "reprocess_plate"
+    # Review Descriptions
+    summarize_review = "summarize_review"


 class EmbeddingsResponder:
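A short sketch (not from the commit) of how a responder might branch on the regrouped enum; the handler bodies are hypothetical, only the member names come from the diff:

from enum import Enum


class EmbeddingsRequestEnum(Enum):
    transcribe_audio = "transcribe_audio"
    summarize_review = "summarize_review"


def handle(topic: str, value: str) -> str | None:
    # dispatch on the wire value of the enum, as the responder loop does
    if topic == EmbeddingsRequestEnum.transcribe_audio.value:
        return f"transcript of {value}"
    if topic == EmbeddingsRequestEnum.summarize_review.value:
        return f"summary of {value}"
    return None  # unknown topics produce no reply


print(handle("transcribe_audio", "clip.wav"))  # transcript of clip.wav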
@@ -34,9 +47,16 @@ class EmbeddingsResponder:
                 break

             try:
-                (topic, value) = self.socket.recv_json(flags=zmq.NOBLOCK)
-                response = process(topic, value)
+                raw = self.socket.recv_json(flags=zmq.NOBLOCK)
+
+                if isinstance(raw, list):
+                    (topic, value) = raw
+                    response = process(topic, value)
+                else:
+                    logging.warning(
+                        f"Received unexpected data type in ZMQ recv_json: {type(raw)}"
+                    )
+                    response = None

                 if response is not None:
                     self.socket.send_json(response)
@@ -58,7 +78,7 @@ class EmbeddingsRequestor:
         self.socket = self.context.socket(zmq.REQ)
         self.socket.connect(SOCKET_REP_REQ)

-    def send_data(self, topic: str, data: Any) -> str:
+    def send_data(self, topic: str, data: Any) -> Any:
         """Sends data and then waits for reply."""
         try:
             self.socket.send_json((topic, data))
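A usage sketch for the widened return type (assumptions: the module path, which is inferred from the class names here, and that a responder is running on the other end of the ipc socket):

from frigate.comms.embeddings_updater import EmbeddingsRequestor  # assumed path

requestor = EmbeddingsRequestor()
# replies are arbitrary JSON decided by the responder, hence -> Any rather than -> str
reply = requestor.send_data("transcribe_audio", {"camera": "front_door"})
print(reply)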
View File
@@ -15,7 +15,7 @@ class EventMetadataTypeEnum(str, Enum):
     manual_event_end = "manual_event_end"
     regenerate_description = "regenerate_description"
     sub_label = "sub_label"
-    recognized_license_plate = "recognized_license_plate"
+    attribute = "attribute"
     lpr_event_create = "lpr_event_create"
     save_lpr_snapshot = "save_lpr_snapshot"
@@ -28,8 +28,8 @@ class EventMetadataPublisher(Publisher):
     def __init__(self) -> None:
         super().__init__()

-    def publish(self, topic: EventMetadataTypeEnum, payload: Any) -> None:
-        super().publish(payload, topic.value)
+    def publish(self, payload: Any, sub_topic: str = "") -> None:
+        super().publish(payload, sub_topic)


 class EventMetadataSubscriber(Subscriber):
@@ -40,9 +40,10 @@ class EventMetadataSubscriber(Subscriber):
     def __init__(self, topic: EventMetadataTypeEnum) -> None:
         super().__init__(topic.value)

-    def _return_object(self, topic: str, payload: tuple) -> tuple:
+    def _return_object(
+        self, topic: str, payload: tuple | None
+    ) -> tuple[str, Any] | tuple[None, None]:
         if payload is None:
             return (None, None)
+
+        topic = EventMetadataTypeEnum[topic[len(self.topic_base) :]]
         return (topic, payload)
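The added lookup line recovers the enum member from the wire topic by stripping the subscriber's topic prefix. A self-contained sketch (the "event_metadata/" prefix is an assumption; only the enum names come from the diff):

from enum import Enum


class EventMetadataTypeEnum(str, Enum):
    sub_label = "sub_label"
    attribute = "attribute"


topic_base = "event_metadata/"  # assumed value of the real topic_base
wire_topic = topic_base + "attribute"

# same expression as the added line in _return_object
recovered = EventMetadataTypeEnum[wire_topic[len(topic_base):]]
print(recovered)  # EventMetadataTypeEnum.attribute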
View File
@@ -7,7 +7,9 @@ from frigate.events.types import EventStateEnum, EventTypeEnum

 from .zmq_proxy import Publisher, Subscriber


-class EventUpdatePublisher(Publisher):
+class EventUpdatePublisher(
+    Publisher[tuple[EventTypeEnum, EventStateEnum, str | None, str, dict[str, Any]]]
+):
     """Publishes events (objects, audio, manual)."""

     topic_base = "event/"
@@ -16,9 +18,11 @@ class EventUpdatePublisher:
         super().__init__("update")

     def publish(
-        self, payload: tuple[EventTypeEnum, EventStateEnum, str, str, dict[str, Any]]
+        self,
+        payload: tuple[EventTypeEnum, EventStateEnum, str | None, str, dict[str, Any]],
+        sub_topic: str = "",
     ) -> None:
-        super().publish(payload)
+        super().publish(payload, sub_topic)


 class EventUpdateSubscriber(Subscriber):
@@ -30,7 +34,9 @@ class EventUpdateSubscriber(Subscriber):
         super().__init__("update")


-class EventEndPublisher(Publisher):
+class EventEndPublisher(
+    Publisher[tuple[EventTypeEnum, EventStateEnum, str, dict[str, Any]]]
+):
     """Publishes events that have ended."""

     topic_base = "event/"
@@ -39,9 +45,11 @@ class EventEndPublisher:
         super().__init__("finalized")

     def publish(
-        self, payload: tuple[EventTypeEnum, EventStateEnum, str, dict[str, Any]]
+        self,
+        payload: tuple[EventTypeEnum, EventStateEnum, str, dict[str, Any]],
+        sub_topic: str = "",
     ) -> None:
-        super().publish(payload)
+        super().publish(payload, sub_topic)


 class EventEndSubscriber(Subscriber):
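Parameterizing Publisher lets a type checker verify payload tuples at every call site. A minimal stub illustrating the pattern (this is not frigate's zmq_proxy, just the typing idea):

from typing import Generic, TypeVar

T = TypeVar("T")


class Publisher(Generic[T]):
    def publish(self, payload: T, sub_topic: str = "") -> None:
        print(f"{sub_topic or '(root)'}: {payload}")


class EventUpdatePublisher(Publisher[tuple[str, str, str | None, str, dict]]):
    """Payload shape mirrors the annotation in the diff."""


p = EventUpdatePublisher()
p.publish(("api", "start", None, "front_door", {}))  # type-checks
# p.publish("wrong")  # mypy flags incompatible type "str"; runtime still accepts it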
View File
@@ -1,5 +1,6 @@
 """Facilitates communication between processes."""

+import logging
 import multiprocessing as mp
 import threading
 from multiprocessing.synchronize import Event as MpEvent
@@ -9,6 +10,8 @@ import zmq

 from frigate.comms.base_communicator import Communicator

+logger = logging.getLogger(__name__)
+
 SOCKET_REP_REQ = "ipc:///tmp/cache/comms"
@@ -19,7 +22,7 @@ class InterProcessCommunicator(Communicator):
         self.socket.bind(SOCKET_REP_REQ)
         self.stop_event: MpEvent = mp.Event()

-    def publish(self, topic: str, payload: str, retain: bool) -> None:
+    def publish(self, topic: str, payload: Any, retain: bool = False) -> None:
         """There is no communication back to the processes."""
         pass
@@ -37,9 +40,16 @@ class InterProcessCommunicator(Communicator):
                 break

             try:
-                (topic, value) = self.socket.recv_json(flags=zmq.NOBLOCK)
-                response = self._dispatcher(topic, value)
+                raw = self.socket.recv_json(flags=zmq.NOBLOCK)
+
+                if isinstance(raw, list):
+                    (topic, value) = raw
+                    response = self._dispatcher(topic, value)
+                else:
+                    logging.warning(
+                        f"Received unexpected data type in ZMQ recv_json: {type(raw)}"
+                    )
+                    response = None

                 if response is not None:
                     self.socket.send_json(response)
View File
@@ -11,7 +11,7 @@ from frigate.config import FrigateConfig

 logger = logging.getLogger(__name__)


-class MqttClient(Communicator):  # type: ignore[misc]
+class MqttClient(Communicator):
     """Frigate wrapper for mqtt client."""

     def __init__(self, config: FrigateConfig) -> None:
@@ -75,7 +75,7 @@ class MqttClient(Communicator):  # type: ignore[misc]
         )
         self.publish(
             f"{camera_name}/improve_contrast/state",
-            "ON" if camera.motion.improve_contrast else "OFF",  # type: ignore[union-attr]
+            "ON" if camera.motion.improve_contrast else "OFF",
             retain=True,
         )
         self.publish(
@@ -85,12 +85,12 @@ class MqttClient(Communicator):  # type: ignore[misc]
         )
         self.publish(
             f"{camera_name}/motion_threshold/state",
-            camera.motion.threshold,  # type: ignore[union-attr]
+            camera.motion.threshold,
             retain=True,
         )
         self.publish(
             f"{camera_name}/motion_contour_area/state",
-            camera.motion.contour_area,  # type: ignore[union-attr]
+            camera.motion.contour_area,
             retain=True,
         )
         self.publish(
@@ -122,6 +122,16 @@ class MqttClient(Communicator):  # type: ignore[misc]
             "ON" if camera.review.detections.enabled_in_config else "OFF",
             retain=True,
         )
+        self.publish(
+            f"{camera_name}/object_descriptions/state",
+            "ON" if camera.objects.genai.enabled_in_config else "OFF",
+            retain=True,
+        )
+        self.publish(
+            f"{camera_name}/review_descriptions/state",
+            "ON" if camera.review.genai.enabled_in_config else "OFF",
+            retain=True,
+        )

         if self.config.notifications.enabled_in_config:
             self.publish(
@@ -145,7 +155,7 @@ class MqttClient(Communicator):  # type: ignore[misc]
         client: mqtt.Client,
         userdata: Any,
         flags: Any,
-        reason_code: mqtt.ReasonCode,
+        reason_code: mqtt.ReasonCode,  # type: ignore[name-defined]
         properties: Any,
     ) -> None:
         """Mqtt connection callback."""
@@ -177,7 +187,7 @@ class MqttClient(Communicator):  # type: ignore[misc]
         client: mqtt.Client,
         userdata: Any,
         flags: Any,
-        reason_code: mqtt.ReasonCode,
+        reason_code: mqtt.ReasonCode,  # type: ignore[name-defined]
         properties: Any,
     ) -> None:
         """Mqtt disconnection callback."""
@@ -215,6 +225,7 @@ class MqttClient(Communicator):  # type: ignore[misc]
             "birdseye_mode",
             "review_alerts",
             "review_detections",
+            "genai",
         ]

         for name in self.config.cameras.keys():
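A hedged observer sketch for the new retained state topics published above (the "frigate" topic prefix and the camera name are assumptions for illustration; payloads are "ON"/"OFF" per the publishes in this diff):

import paho.mqtt.client as mqtt


def on_message(client, userdata, msg):
    # e.g. frigate/front_door/object_descriptions/state b'ON'
    print(msg.topic, msg.payload.decode())


client = mqtt.Client()
client.on_message = on_message
client.connect("localhost", 1883)
client.subscribe("frigate/front_door/object_descriptions/state")
client.subscribe("frigate/front_door/review_descriptions/state")
client.loop_forever()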

Some files were not shown because too many files have changed in this diff.