Merge branch 'dev' into recordings-summary

Josh Hawkins 2025-02-09 16:31:59 -06:00
commit 78cbe0783e
189 changed files with 6898 additions and 1372 deletions

View File

@ -2,6 +2,7 @@ aarch
absdiff
airockchip
Alloc
alpr
Amcrest
amdgpu
analyzeduration
@ -61,6 +62,7 @@ dsize
dtype
ECONNRESET
edgetpu
facenet
fastapi
faststart
fflags
@ -114,6 +116,8 @@ itemsize
Jellyfin
jetson
jetsons
jina
jinaai
joserfc
jsmpeg
jsonify
@ -187,6 +191,7 @@ openai
opencv
openvino
OWASP
paddleocr
paho
passwordless
popleft
@ -308,4 +313,4 @@ yolo
yolonas
yolox
zeep
zerolatency
zerolatency

View File

@ -33,9 +33,9 @@ runs:
with:
string: ${{ github.repository }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v3
- name: Log in to the Container registry
uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc
with:

View File

@ -19,7 +19,7 @@ env:
jobs:
amd64_build:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
name: AMD64 Build
steps:
- name: Check out code
@ -42,7 +42,7 @@ jobs:
tags: ${{ steps.setup.outputs.image-name }}-amd64
cache-from: type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64
arm64_build:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
name: ARM Build
steps:
- name: Check out code
@ -66,8 +66,9 @@ jobs:
${{ steps.setup.outputs.image-name }}-standard-arm64
cache-from: type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
- name: Build and push RPi build
uses: docker/bake-action@v4
uses: docker/bake-action@v6
with:
source: .
push: true
targets: rpi
files: docker/rpi/rpi.hcl
@ -76,7 +77,8 @@ jobs:
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max
jetson_jp4_build:
runs-on: ubuntu-latest
if: false
runs-on: ubuntu-22.04
name: Jetson Jetpack 4
steps:
- name: Check out code
@ -94,8 +96,9 @@ jobs:
BASE_IMAGE: timongentzsch/l4t-ubuntu20-opencv:latest
SLIM_BASE: timongentzsch/l4t-ubuntu20-opencv:latest
TRT_BASE: timongentzsch/l4t-ubuntu20-opencv:latest
uses: docker/bake-action@v4
uses: docker/bake-action@v6
with:
source: .
push: true
targets: tensorrt
files: docker/tensorrt/trt.hcl
@ -104,7 +107,8 @@ jobs:
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp4
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp4,mode=max
jetson_jp5_build:
runs-on: ubuntu-latest
if: false
runs-on: ubuntu-22.04
name: Jetson Jetpack 5
steps:
- name: Check out code
@ -122,8 +126,9 @@ jobs:
BASE_IMAGE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
SLIM_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
TRT_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
uses: docker/bake-action@v4
uses: docker/bake-action@v6
with:
source: .
push: true
targets: tensorrt
files: docker/tensorrt/trt.hcl
@ -132,7 +137,7 @@ jobs:
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5,mode=max
amd64_extra_builds:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
name: AMD64 Extra Build
needs:
- amd64_build
@ -149,8 +154,9 @@ jobs:
- name: Build and push TensorRT (x86 GPU)
env:
COMPUTE_LEVEL: "50 60 70 80 90"
uses: docker/bake-action@v4
uses: docker/bake-action@v6
with:
source: .
push: true
targets: tensorrt
files: docker/tensorrt/trt.hcl
@ -158,8 +164,21 @@ jobs:
tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64,mode=max
- name: AMD/ROCm general build
env:
AMDGPU: gfx
HSA_OVERRIDE: 0
uses: docker/bake-action@v6
with:
source: .
push: true
targets: rocm
files: docker/rocm/rocm.hcl
set: |
rocm.tags=${{ steps.setup.outputs.image-name }}-rocm
*.cache-from=type=gha
arm64_extra_builds:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
name: ARM Extra Build
needs:
- arm64_build
@ -174,8 +193,9 @@ jobs:
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push Rockchip build
uses: docker/bake-action@v3
uses: docker/bake-action@v6
with:
source: .
push: true
targets: rk
files: docker/rockchip/rk.hcl
@ -183,7 +203,7 @@ jobs:
rk.tags=${{ steps.setup.outputs.image-name }}-rk
*.cache-from=type=gha
combined_extra_builds:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
name: Combined Extra Builds
needs:
- amd64_build
@ -199,8 +219,9 @@ jobs:
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push Hailo-8l build
uses: docker/bake-action@v4
uses: docker/bake-action@v6
with:
source: .
push: true
targets: h8l
files: docker/hailo8l/h8l.hcl
@ -208,22 +229,10 @@ jobs:
h8l.tags=${{ steps.setup.outputs.image-name }}-h8l
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-h8l
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-h8l,mode=max
- name: AMD/ROCm general build
env:
AMDGPU: gfx
HSA_OVERRIDE: 0
uses: docker/bake-action@v3
with:
push: true
targets: rocm
files: docker/rocm/rocm.hcl
set: |
rocm.tags=${{ steps.setup.outputs.image-name }}-rocm
*.cache-from=type=gha
# The majority of users running arm64 are rpi users, so the rpi
# build should be the primary arm64 image
assemble_default_build:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
name: Assemble and push default build
needs:
- amd64_build

View File

@ -6,7 +6,7 @@ on:
- "docs/**"
env:
DEFAULT_PYTHON: 3.9
DEFAULT_PYTHON: 3.11
jobs:
build_devcontainer:

View File

@ -1,7 +1,7 @@
default_target: local
COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1)
VERSION = 0.15.0
VERSION = 0.16.0
IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate
GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD)
BOARDS= #Initialized empty

View File

@ -61,7 +61,7 @@ def start(id, num_detections, detection_queue, event):
object_detector.cleanup()
print(f"{id} - Processed for {duration:.2f} seconds.")
print(f"{id} - FPS: {object_detector.fps.eps():.2f}")
print(f"{id} - Average frame processing time: {mean(frame_times)*1000:.2f}ms")
print(f"{id} - Average frame processing time: {mean(frame_times) * 1000:.2f}ms")
######

View File

@ -5,6 +5,7 @@ ARG DEBIAN_FRONTEND=noninteractive
# Build Python wheels
FROM wheels AS h8l-wheels
RUN python3 -m pip config set global.break-system-packages true
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
COPY docker/hailo8l/requirements-wheels-h8l.txt /requirements-wheels-h8l.txt
@ -30,6 +31,7 @@ COPY --from=hailort /hailo-wheels /deps/hailo-wheels
COPY --from=hailort /rootfs/ /
# Install the wheels
RUN python3 -m pip config set global.break-system-packages true
RUN pip3 install -U /deps/h8l-wheels/*.whl
RUN pip3 install -U /deps/hailo-wheels/*.whl

View File

@ -2,7 +2,7 @@
set -euxo pipefail
hailo_version="4.19.0"
hailo_version="4.20.0"
if [[ "${TARGETARCH}" == "amd64" ]]; then
arch="x86_64"
@ -15,5 +15,5 @@ wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_ver
mkdir -p /hailo-wheels
wget -P /hailo-wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp39-cp39-linux_${arch}.whl"
wget -P /hailo-wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp311-cp311-linux_${arch}.whl"

View File

@ -4,6 +4,7 @@
sudo apt-get update
sudo apt-get install -y build-essential cmake git wget
hailo_version="4.20.0"
arch=$(uname -m)
if [[ $arch == "x86_64" ]]; then
@ -13,7 +14,7 @@ else
fi
# Clone the HailoRT driver repository
git clone --depth 1 --branch v4.19.0 https://github.com/hailo-ai/hailort-drivers.git
git clone --depth 1 --branch v${hailo_version} https://github.com/hailo-ai/hailort-drivers.git
# Build and install the HailoRT driver
cd hailort-drivers/linux/pcie

View File

@ -3,12 +3,12 @@
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
ARG BASE_IMAGE=debian:11
ARG SLIM_BASE=debian:11-slim
ARG BASE_IMAGE=debian:12
ARG SLIM_BASE=debian:12-slim
FROM ${BASE_IMAGE} AS base
FROM --platform=${BUILDPLATFORM} debian:11 AS base_host
FROM --platform=${BUILDPLATFORM} debian:12 AS base_host
FROM ${SLIM_BASE} AS slim-base
@ -66,8 +66,8 @@ COPY docker/main/requirements-ov.txt /requirements-ov.txt
RUN apt-get -qq update \
&& apt-get -qq install -y wget python3 python3-dev python3-distutils gcc pkg-config libhdf5-dev \
&& wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& python3 get-pip.py "pip" \
&& pip install -r /requirements-ov.txt
&& python3 get-pip.py "pip" --break-system-packages \
&& pip install --break-system-packages -r /requirements-ov.txt
# Get OpenVino Model
RUN --mount=type=bind,source=docker/main/build_ov_model.py,target=/build_ov_model.py \
@ -139,24 +139,17 @@ ARG TARGETARCH
# Use a separate container to build wheels to prevent build dependencies in final image
RUN apt-get -qq update \
&& apt-get -qq install -y \
apt-transport-https \
gnupg \
wget \
# the key fingerprint can be obtained from https://ftp-master.debian.org/keys.html
&& wget -qO- "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xA4285295FC7B1A81600062A9605C66F00D6C9793" | \
gpg --dearmor > /usr/share/keyrings/debian-archive-bullseye-stable.gpg \
&& echo "deb [signed-by=/usr/share/keyrings/debian-archive-bullseye-stable.gpg] http://deb.debian.org/debian bullseye main contrib non-free" | \
tee /etc/apt/sources.list.d/debian-bullseye-nonfree.list \
apt-transport-https wget \
&& apt-get -qq update \
&& apt-get -qq install -y \
python3.9 \
python3.9-dev \
python3 \
python3-dev \
# opencv dependencies
build-essential cmake git pkg-config libgtk-3-dev \
libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \
libxvidcore-dev libx264-dev libjpeg-dev libpng-dev libtiff-dev \
gfortran openexr libatlas-base-dev libssl-dev\
libtbb2 libtbb-dev libdc1394-22-dev libopenexr-dev \
libtbbmalloc2 libtbb-dev libdc1394-dev libopenexr-dev \
libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \
# sqlite3 dependencies
tclsh \
@ -164,14 +157,11 @@ RUN apt-get -qq update \
gcc gfortran libopenblas-dev liblapack-dev && \
rm -rf /var/lib/apt/lists/*
# Ensure python3 defaults to python3.9
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1
RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& python3 get-pip.py "pip"
&& python3 get-pip.py "pip" --break-system-packages
COPY docker/main/requirements.txt /requirements.txt
RUN pip3 install -r /requirements.txt
RUN pip3 install -r /requirements.txt --break-system-packages
# Build pysqlite3 from source
COPY docker/main/build_pysqlite3.sh /build_pysqlite3.sh
@ -215,15 +205,14 @@ ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1
ENV OPENCV_FFMPEG_LOGLEVEL=8
ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
ENV LIBAVFORMAT_VERSION_MAJOR=60
# Install dependencies
RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \
/deps/install_deps.sh
RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
python3 -m pip install --upgrade pip && \
pip3 install -U /deps/wheels/*.whl
python3 -m pip install --upgrade pip --break-system-packages && \
pip3 install -U /deps/wheels/*.whl --break-system-packages
COPY --from=deps-rootfs / /
@ -270,7 +259,7 @@ RUN apt-get update \
&& rm -rf /var/lib/apt/lists/*
RUN --mount=type=bind,source=./docker/main/requirements-dev.txt,target=/workspace/frigate/requirements-dev.txt \
pip3 install -r requirements-dev.txt
pip3 install -r requirements-dev.txt --break-system-packages
HEALTHCHECK NONE

View File

@ -8,10 +8,16 @@ SECURE_TOKEN_MODULE_VERSION="1.5"
SET_MISC_MODULE_VERSION="v0.33"
NGX_DEVEL_KIT_VERSION="v0.3.3"
cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list
sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list
apt-get update
source /etc/os-release
if [[ "$VERSION_ID" == "12" ]]; then
sed -i '/^Types:/s/deb/& deb-src/' /etc/apt/sources.list.d/debian.sources
else
cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list
sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list
fi
apt-get update
apt-get -yqq build-dep nginx
apt-get -yqq install --no-install-recommends ca-certificates wget

View File

@ -4,7 +4,7 @@ from openvino.tools import mo
ov_model = mo.convert_model(
"/models/ssdlite_mobilenet_v2_coco_2018_05_09/frozen_inference_graph.pb",
compress_to_fp16=True,
transformations_config="/usr/local/lib/python3.9/dist-packages/openvino/tools/mo/front/tf/ssd_v2_support.json",
transformations_config="/usr/local/lib/python3.11/dist-packages/openvino/tools/mo/front/tf/ssd_v2_support.json",
tensorflow_object_detection_api_pipeline_config="/models/ssdlite_mobilenet_v2_coco_2018_05_09/pipeline.config",
reverse_input_channels=True,
)

View File

@ -4,8 +4,15 @@ set -euxo pipefail
SQLITE_VEC_VERSION="0.1.3"
cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list
sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list
source /etc/os-release
if [[ "$VERSION_ID" == "12" ]]; then
sed -i '/^Types:/s/deb/& deb-src/' /etc/apt/sources.list.d/debian.sources
else
cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list
sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list
fi
apt-get update
apt-get -yqq build-dep sqlite3 gettext git

View File

@ -11,33 +11,34 @@ apt-get -qq install --no-install-recommends -y \
lbzip2 \
procps vainfo \
unzip locales tzdata libxml2 xz-utils \
python3.9 \
python3 \
python3-pip \
curl \
lsof \
jq \
nethogs
# ensure python3 defaults to python3.9
update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1
nethogs \
libgl1 \
libglib2.0-0 \
libusb-1.0.0
mkdir -p -m 600 /root/.gnupg
# add coral repo
curl -fsSLo - https://packages.cloud.google.com/apt/doc/apt-key.gpg | \
gpg --dearmor -o /etc/apt/trusted.gpg.d/google-cloud-packages-archive-keyring.gpg
echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | tee /etc/apt/sources.list.d/coral-edgetpu.list
echo "libedgetpu1-max libedgetpu/accepted-eula select true" | debconf-set-selections
# install coral runtime
wget -q -O /tmp/libedgetpu1-max.deb "https://github.com/feranick/libedgetpu/releases/download/16.0TF2.17.0-1/libedgetpu1-max_16.0tf2.17.0-1.bookworm_${TARGETARCH}.deb"
unset DEBIAN_FRONTEND
yes | dpkg -i /tmp/libedgetpu1-max.deb && export DEBIAN_FRONTEND=noninteractive
rm /tmp/libedgetpu1-max.deb
# enable non-free repo in Debian
if grep -q "Debian" /etc/issue; then
sed -i -e's/ main/ main contrib non-free/g' /etc/apt/sources.list
# install python3 & tflite runtime
if [[ "${TARGETARCH}" == "amd64" ]]; then
pip3 install --break-system-packages https://github.com/feranick/TFlite-builds/releases/download/v2.17.0/tflite_runtime-2.17.0-cp311-cp311-linux_x86_64.whl
pip3 install --break-system-packages https://github.com/feranick/pycoral/releases/download/2.0.2TF2.17.0/pycoral-2.0.2-cp311-cp311-linux_x86_64.whl
fi
# coral drivers
apt-get -qq update
apt-get -qq install --no-install-recommends --no-install-suggests -y \
libedgetpu1-max python3-tflite-runtime python3-pycoral
if [[ "${TARGETARCH}" == "arm64" ]]; then
pip3 install --break-system-packages https://github.com/feranick/TFlite-builds/releases/download/v2.17.0/tflite_runtime-2.17.0-cp311-cp311-linux_aarch64.whl
pip3 install --break-system-packages https://github.com/feranick/pycoral/releases/download/2.0.2TF2.17.0/pycoral-2.0.2-cp311-cp311-linux_aarch64.whl
fi
# btbn-ffmpeg -> amd64
if [[ "${TARGETARCH}" == "amd64" ]]; then
@ -65,23 +66,15 @@ fi
# arch specific packages
if [[ "${TARGETARCH}" == "amd64" ]]; then
# use debian bookworm for amd / intel-i965 driver packages
echo 'deb https://deb.debian.org/debian bookworm main contrib non-free' >/etc/apt/sources.list.d/debian-bookworm.list
apt-get -qq update
# install amd / intel-i965 driver packages
apt-get -qq install --no-install-recommends --no-install-suggests -y \
i965-va-driver intel-gpu-tools onevpl-tools \
libva-drm2 \
mesa-va-drivers radeontop
# something about this dependency requires it to be installed in a separate call rather than in the line above
apt-get -qq install --no-install-recommends --no-install-suggests -y \
i965-va-driver-shaders
# intel packages use zst compression so we need to update dpkg
apt-get install -y dpkg
rm -f /etc/apt/sources.list.d/debian-bookworm.list
# use intel apt intel packages
wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list

View File

@ -10,10 +10,10 @@ imutils == 0.5.*
joserfc == 1.0.*
pathvalidate == 3.2.*
markupsafe == 2.1.*
python-multipart == 0.0.12
# General
mypy == 1.6.1
numpy == 1.26.*
onvif_zeep == 0.2.12
opencv-python-headless == 4.9.0.*
onvif-zeep-async == 3.1.*
paho-mqtt == 2.1.*
pandas == 2.2.*
peewee == 3.17.*
@ -27,15 +27,19 @@ ruamel.yaml == 0.18.*
tzlocal == 5.2
requests == 2.32.*
types-requests == 2.32.*
scipy == 1.13.*
norfair == 2.2.*
setproctitle == 1.3.*
ws4py == 0.5.*
unidecode == 1.3.*
# Image Manipulation
numpy == 1.26.*
opencv-python-headless == 4.10.0.*
opencv-contrib-python == 4.9.0.*
scipy == 1.14.*
# OpenVino & ONNX
openvino == 2024.3.*
onnxruntime-openvino == 1.19.* ; platform_machine == 'x86_64'
onnxruntime == 1.19.* ; platform_machine == 'aarch64'
openvino == 2024.4.*
onnxruntime-openvino == 1.20.* ; platform_machine == 'x86_64'
onnxruntime == 1.20.* ; platform_machine == 'aarch64'
# Embeddings
transformers == 4.45.*
# Generative AI
@ -45,3 +49,7 @@ openai == 1.51.*
# push notifications
py-vapid == 1.9.*
pywebpush == 2.0.*
# alpr
pyclipper == 1.3.*
shapely == 2.0.*
prometheus-client == 0.21.*

View File

@ -1,2 +1,2 @@
scikit-build == 0.17.*
scikit-build == 0.18.*
nvidia-pyindex

View File

@ -42,8 +42,14 @@ function migrate_db_path() {
fi
}
function set_libva_version() {
local ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
export LIBAVFORMAT_VERSION_MAJOR=$($ffmpeg_path -version | grep -Po "libavformat\W+\K\d+")
}
echo "[INFO] Preparing Frigate..."
migrate_db_path
set_libva_version
echo "[INFO] Starting Frigate..."
cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate"

View File

@ -43,6 +43,11 @@ function get_ip_and_port_from_supervisor() {
export FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL="${ip_address}:${webrtc_port}"
}
function set_libva_version() {
local ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
export LIBAVFORMAT_VERSION_MAJOR=$($ffmpeg_path -version | grep -Po "libavformat\W+\K\d+")
}
if [[ -f "/dev/shm/go2rtc.yaml" ]]; then
echo "[INFO] Removing stale config from last run..."
rm /dev/shm/go2rtc.yaml
@ -61,6 +66,8 @@ else
echo "[WARNING] Unable to remove existing go2rtc config. Changes made to your frigate config file may not be recognized. Please remove the /dev/shm/go2rtc.yaml from your docker host manually."
fi
set_libva_version
readonly config_path="/config"
if [[ -x "${config_path}/go2rtc" ]]; then

View File

@ -0,0 +1,45 @@
import json
import os
import shutil
import sys
from ruamel.yaml import YAML
sys.path.insert(0, "/opt/frigate")
from frigate.const import (
DEFAULT_FFMPEG_VERSION,
INCLUDED_FFMPEG_VERSIONS,
)
sys.path.remove("/opt/frigate")
yaml = YAML()
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
# Check if we can use .yaml instead of .yml
config_file_yaml = config_file.replace(".yml", ".yaml")
if os.path.isfile(config_file_yaml):
config_file = config_file_yaml
try:
with open(config_file) as f:
raw_config = f.read()
if config_file.endswith((".yaml", ".yml")):
config: dict[str, any] = yaml.load(raw_config)
elif config_file.endswith(".json"):
config: dict[str, any] = json.loads(raw_config)
except FileNotFoundError:
config: dict[str, any] = {}
path = config.get("ffmpeg", {}).get("path", "default")
if path == "default":
if shutil.which("ffmpeg") is None:
print(f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg")
else:
print("ffmpeg")
elif path in INCLUDED_FFMPEG_VERSIONS:
print(f"/usr/lib/ffmpeg/{path}/bin/ffmpeg")
else:
print(f"{path}/bin/ffmpeg")

View File

@ -81,6 +81,9 @@ http {
open_file_cache_errors on;
aio on;
# file upload size
client_max_body_size 10M;
# https://github.com/kaltura/nginx-vod-module#vod_open_file_thread_pool
vod_open_file_thread_pool default;

View File

@ -0,0 +1,20 @@
./subset/000000005001.jpg
./subset/000000038829.jpg
./subset/000000052891.jpg
./subset/000000075612.jpg
./subset/000000098261.jpg
./subset/000000181542.jpg
./subset/000000215245.jpg
./subset/000000277005.jpg
./subset/000000288685.jpg
./subset/000000301421.jpg
./subset/000000334371.jpg
./subset/000000348481.jpg
./subset/000000373353.jpg
./subset/000000397681.jpg
./subset/000000414673.jpg
./subset/000000419312.jpg
./subset/000000465822.jpg
./subset/000000475732.jpg
./subset/000000559707.jpg
./subset/000000574315.jpg

20 binary files not shown (the new COCO subset JPEG images listed above, 14–281 KiB each).

View File

@ -7,21 +7,26 @@ FROM wheels as rk-wheels
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt
RUN sed -i "/https:\/\//d" /requirements-wheels.txt
RUN sed -i "/onnxruntime/d" /requirements-wheels.txt
RUN python3 -m pip config set global.break-system-packages true
RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt
RUN rm -rf /rk-wheels/opencv_python-*
FROM deps AS rk-frigate
ARG TARGETARCH
RUN --mount=type=bind,from=rk-wheels,source=/rk-wheels,target=/deps/rk-wheels \
pip3 install -U /deps/rk-wheels/*.whl
pip3 install --no-deps -U /deps/rk-wheels/*.whl --break-system-packages
WORKDIR /opt/frigate/
COPY --from=rootfs / /
COPY docker/rockchip/COCO /COCO
COPY docker/rockchip/conv2rknn.py /opt/conv2rknn.py
ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.0.0/librknnrt.so /usr/lib/
ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/librknnrt.so /usr/lib/
RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg
RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffmpeg /usr/lib/ffmpeg/6.0/bin/
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffprobe /usr/lib/ffmpeg/6.0/bin/
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-7/ffmpeg /usr/lib/ffmpeg/6.0/bin/
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-7/ffprobe /usr/lib/ffmpeg/6.0/bin/
ENV PATH="/usr/lib/ffmpeg/6.0/bin/:${PATH}"

View File

@ -0,0 +1,82 @@
import os
import rknn
import yaml
from rknn.api import RKNN
try:
with open(rknn.__path__[0] + "/VERSION") as file:
tk_version = file.read().strip()
except FileNotFoundError:
tk_version = "unknown"
try:
with open("/config/conv2rknn.yaml", "r") as config_file:
configuration = yaml.safe_load(config_file)
except FileNotFoundError:
raise Exception("Please place a config.yaml file in /config/conv2rknn.yaml")
if configuration["config"] != None:
rknn_config = configuration["config"]
else:
rknn_config = {}
if not os.path.isdir("/config/model_cache/rknn_cache/onnx"):
raise Exception(
"Place the onnx models you want to convert to rknn format in /config/model_cache/rknn_cache/onnx"
)
if "soc" not in configuration:
try:
with open("/proc/device-tree/compatible") as file:
soc = file.read().split(",")[-1].strip("\x00")
except FileNotFoundError:
raise Exception("Make sure to run docker in privileged mode.")
configuration["soc"] = [
soc,
]
if "quantization" not in configuration:
configuration["quantization"] = False
if "output_name" not in configuration:
configuration["output_name"] = "{{input_basename}}"
for input_filename in os.listdir("/config/model_cache/rknn_cache/onnx"):
for soc in configuration["soc"]:
quant = "i8" if configuration["quantization"] else "fp16"
input_path = "/config/model_cache/rknn_cache/onnx/" + input_filename
input_basename = input_filename[: input_filename.rfind(".")]
output_filename = (
configuration["output_name"].format(
quant=quant,
input_basename=input_basename,
soc=soc,
tk_version=tk_version,
)
+ ".rknn"
)
output_path = "/config/model_cache/rknn_cache/" + output_filename
rknn_config["target_platform"] = soc
rknn = RKNN(verbose=True)
rknn.config(**rknn_config)
if rknn.load_onnx(model=input_path) != 0:
raise Exception("Error loading model.")
if (
rknn.build(
do_quantization=configuration["quantization"],
dataset="/COCO/coco_subset_20.txt",
)
!= 0
):
raise Exception("Error building model.")
if rknn.export_rknn(output_path) != 0:
raise Exception("Error exporting rknn model.")

View File

@ -1 +1,2 @@
rknn-toolkit-lite2 @ https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.0.0/rknn_toolkit_lite2-2.0.0b0-cp39-cp39-linux_aarch64.whl
rknn-toolkit2 == 2.3.0
rknn-toolkit-lite2 == 2.3.0

View File

@ -34,7 +34,7 @@ RUN mkdir -p /opt/rocm-dist/etc/ld.so.conf.d/
RUN echo /opt/rocm/lib|tee /opt/rocm-dist/etc/ld.so.conf.d/rocm.conf
#######################################################################
FROM --platform=linux/amd64 debian:11 as debian-base
FROM --platform=linux/amd64 debian:12 as debian-base
RUN apt-get update && apt-get -y upgrade
RUN apt-get -y install --no-install-recommends libelf1 libdrm2 libdrm-amdgpu1 libnuma1 kmod
@ -51,7 +51,7 @@ COPY --from=rocm /opt/rocm-$ROCM /opt/rocm-$ROCM
RUN ln -s /opt/rocm-$ROCM /opt/rocm
RUN apt-get -y install g++ cmake
RUN apt-get -y install python3-pybind11 python3.9-distutils python3-dev
RUN apt-get -y install python3-pybind11 python3-distutils python3-dev
WORKDIR /opt/build
@ -70,10 +70,11 @@ RUN apt-get -y install libnuma1
WORKDIR /opt/frigate/
COPY --from=rootfs / /
COPY docker/rocm/requirements-wheels-rocm.txt /requirements.txt
RUN python3 -m pip install --upgrade pip \
&& pip3 uninstall -y onnxruntime-openvino \
&& pip3 install -r /requirements.txt
# Temporarily disabled to see if a new wheel can be built to support py3.11
#COPY docker/rocm/requirements-wheels-rocm.txt /requirements.txt
#RUN python3 -m pip install --upgrade pip \
# && pip3 uninstall -y onnxruntime-openvino \
# && pip3 install -r /requirements.txt
#######################################################################
FROM scratch AS rocm-dist
@ -86,12 +87,12 @@ COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*$AMDGPU* /opt/rocm-$ROCM/share
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx908* /opt/rocm-$ROCM/share/miopen/db/
COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*$AMDGPU* /opt/rocm-$ROCM/lib/rocblas/library/
COPY --from=rocm /opt/rocm-dist/ /
COPY --from=debian-build /opt/rocm/lib/migraphx.cpython-39-x86_64-linux-gnu.so /opt/rocm-$ROCM/lib/
COPY --from=debian-build /opt/rocm/lib/migraphx.cpython-311-x86_64-linux-gnu.so /opt/rocm-$ROCM/lib/
#######################################################################
FROM deps-prelim AS rocm-prelim-hsa-override0
ENV HSA_ENABLE_SDMA=0
ENV HSA_ENABLE_SDMA=0
COPY --from=rocm-dist / /

View File

@ -12,7 +12,5 @@ RUN rm -rf /usr/lib/btbn-ffmpeg/
RUN --mount=type=bind,source=docker/rpi/install_deps.sh,target=/deps/install_deps.sh \
/deps/install_deps.sh
ENV LIBAVFORMAT_VERSION_MAJOR=58
WORKDIR /opt/frigate/
COPY --from=rootfs / /

View File

@ -18,13 +18,14 @@ apt-get -qq install --no-install-recommends -y \
mkdir -p -m 600 /root/.gnupg
# enable non-free repo
sed -i -e's/ main/ main contrib non-free/g' /etc/apt/sources.list
echo "deb http://deb.debian.org/debian bookworm main contrib non-free non-free-firmware" | tee -a /etc/apt/sources.list
apt update
# ffmpeg -> arm64
if [[ "${TARGETARCH}" == "arm64" ]]; then
# add raspberry pi repo
gpg --no-default-keyring --keyring /usr/share/keyrings/raspbian.gpg --keyserver keyserver.ubuntu.com --recv-keys 82B129927FA3303E
echo "deb [signed-by=/usr/share/keyrings/raspbian.gpg] https://archive.raspberrypi.org/debian/ bullseye main" | tee /etc/apt/sources.list.d/raspi.list
echo "deb [signed-by=/usr/share/keyrings/raspbian.gpg] https://archive.raspberrypi.org/debian/ bookworm main" | tee /etc/apt/sources.list.d/raspi.list
apt-get -qq update
apt-get -qq install --no-install-recommends --no-install-suggests -y ffmpeg
fi

View File

@ -7,18 +7,19 @@ ARG DEBIAN_FRONTEND=noninteractive
FROM wheels as trt-wheels
ARG DEBIAN_FRONTEND
ARG TARGETARCH
RUN python3 -m pip config set global.break-system-packages true
# Add TensorRT wheels to another folder
COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt
FROM tensorrt-base AS frigate-tensorrt
ENV TRT_VER=8.5.3
ENV TRT_VER=8.6.1
RUN python3 -m pip config set global.break-system-packages true
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
pip3 install -U /deps/trt-wheels/*.whl && \
pip3 install -U /deps/trt-wheels/*.whl --break-system-packages && \
ldconfig
ENV LD_LIBRARY_PATH=/usr/local/lib/python3.9/dist-packages/tensorrt:/usr/local/cuda/lib64:/usr/local/lib/python3.9/dist-packages/nvidia/cufft/lib
WORKDIR /opt/frigate/
COPY --from=rootfs / /
@ -31,4 +32,4 @@ COPY --from=trt-deps /usr/local/cuda-12.1 /usr/local/cuda
COPY docker/tensorrt/detector/rootfs/ /
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
pip3 install -U /deps/trt-wheels/*.whl
pip3 install -U /deps/trt-wheels/*.whl --break-system-packages

View File

@ -41,11 +41,11 @@ RUN --mount=type=bind,source=docker/tensorrt/detector/build_python_tensorrt.sh,t
&& TENSORRT_VER=$(cat /etc/TENSORRT_VER) /deps/build_python_tensorrt.sh
COPY docker/tensorrt/requirements-arm64.txt /requirements-tensorrt.txt
ADD https://nvidia.box.com/shared/static/9aemm4grzbbkfaesg5l7fplgjtmswhj8.whl /tmp/onnxruntime_gpu-1.15.1-cp39-cp39-linux_aarch64.whl
ADD https://nvidia.box.com/shared/static/psl23iw3bh7hlgku0mjo1xekxpego3e3.whl /tmp/onnxruntime_gpu-1.15.1-cp311-cp311-linux_aarch64.whl
RUN pip3 uninstall -y onnxruntime-openvino \
&& pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt \
&& pip3 install --no-deps /tmp/onnxruntime_gpu-1.15.1-cp39-cp39-linux_aarch64.whl
&& pip3 install --no-deps /tmp/onnxruntime_gpu-1.15.1-cp311-cp311-linux_aarch64.whl
FROM build-wheels AS trt-model-wheels
ARG DEBIAN_FRONTEND

View File

@ -3,7 +3,7 @@
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.03-py3
ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.12-py3
# Build TensorRT-specific library
FROM ${TRT_BASE} AS trt-deps

View File

@ -1,6 +1,8 @@
/usr/local/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cudnn/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cuda_runtime/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cublas/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cuda_nvrtc/lib
/usr/local/lib/python3.9/dist-packages/tensorrt
/usr/local/cuda/lib64
/usr/local/lib/python3.11/dist-packages/nvidia/cudnn/lib
/usr/local/lib/python3.11/dist-packages/nvidia/cuda_runtime/lib
/usr/local/lib/python3.11/dist-packages/nvidia/cublas/lib
/usr/local/lib/python3.11/dist-packages/nvidia/cuda_nvrtc/lib
/usr/local/lib/python3.11/dist-packages/tensorrt
/usr/local/lib/python3.11/dist-packages/nvidia/cufft/lib

View File

@ -1,14 +1,14 @@
# NVidia TensorRT Support (amd64 only)
--extra-index-url 'https://pypi.nvidia.com'
numpy < 1.24; platform_machine == 'x86_64'
tensorrt == 8.5.3.*; platform_machine == 'x86_64'
cuda-python == 11.8; platform_machine == 'x86_64'
cython == 0.29.*; platform_machine == 'x86_64'
tensorrt == 8.6.1.*; platform_machine == 'x86_64'
cuda-python == 11.8.*; platform_machine == 'x86_64'
cython == 3.0.*; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64'
nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64'
nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64'
nvidia-cudnn-cu12 == 9.5.0.*; platform_machine == 'x86_64'
nvidia-cufft-cu11==10.*; platform_machine == 'x86_64'
onnx==1.16.*; platform_machine == 'x86_64'
onnxruntime-gpu==1.18.*; platform_machine == 'x86_64'
onnxruntime-gpu==1.20.*; platform_machine == 'x86_64'
protobuf==3.20.3; platform_machine == 'x86_64'

View File

@ -4,7 +4,9 @@ title: Advanced Options
sidebar_label: Advanced Options
---
### `logger`
### Logging
#### Frigate `logger`
Change the default log level for troubleshooting purposes.
@ -28,6 +30,18 @@ Examples of available modules are:
- `watchdog.<camera_name>`
- `ffmpeg.<camera_name>.<sorted_roles>` NOTE: All FFmpeg logs are sent as `error` level.
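As a minimal sketch (module name illustrative), raising the verbosity of one module while keeping the default level:

```yaml
logger:
  # default log level for all modules
  default: info
  logs:
    # raise verbosity for a single module
    frigate.mqtt: debug
```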
#### Go2RTC Logging
See [the go2rtc docs](https://github.com/AlexxIT/go2rtc#module-log) for logging configuration.
```yaml
go2rtc:
streams:
...
log:
exec: trace
```
### `environment_vars`
This section can be used to set environment variables for those unable to modify the environment of the container (i.e. within HassOS).
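A minimal example (variable name and value are placeholders):

```yaml
environment_vars:
  VARIABLE_NAME: variable_value
```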
@ -189,16 +203,16 @@ When frigate starts up, it checks whether your config file is valid, and if it i
### Via API
Frigate can accept a new configuration file as JSON at the `/config/save` endpoint. When updating the config this way, Frigate will validate the config before saving it, and return a `400` if the config is not valid.
Frigate can accept a new configuration file as JSON at the `/api/config/save` endpoint. When updating the config this way, Frigate will validate the config before saving it, and return a `400` if the config is not valid.
```bash
curl -X POST http://frigate_host:5000/config/save -d @config.json
curl -X POST http://frigate_host:5000/api/config/save -d @config.json
```
If you'd like, you can use your yaml config directly by using [`yq`](https://github.com/mikefarah/yq) to convert it to json:
```bash
yq r -j config.yml | curl -X POST http://frigate_host:5000/config/save -d @-
yq r -j config.yml | curl -X POST http://frigate_host:5000/api/config/save -d @-
```
### Via Command Line

View File

@ -24,6 +24,11 @@ On startup, an admin user and password are generated and printed in the logs. It
In the event that you are locked out of your instance, you can tell Frigate to reset the admin password and print it in the logs on next startup using the `reset_admin_password` setting in your config file.
```yaml
auth:
reset_admin_password: true
```
## Login failure rate limiting
In order to limit the risk of brute force attacks, rate limiting is available for login failures. This is implemented with SlowApi, and the string notation for valid values is available in [the documentation](https://limits.readthedocs.io/en/stable/quickstart.html#examples).
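As a sketch, assuming the `failed_login_rate_limit` option and the limits string notation (the rates here are illustrative):

```yaml
auth:
  # lock out a source after 1 failure per second,
  # 5 per minute, or 20 per hour
  failed_login_rate_limit: "1/second;5/minute;20/hour"
```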

View File

@ -167,3 +167,7 @@ To maintain object tracking during PTZ moves, Frigate tracks the motion of your
### Calibration seems to have completed, but the camera is not actually moving to track my object. Why?
Some cameras have firmware that reports that FOV RelativeMove, the ONVIF command that Frigate uses for autotracking, is supported. However, if the camera does not pan or tilt when an object comes into the required zone, your camera's firmware does not actually support FOV RelativeMove. One such camera is the Uniview IPC672LR-AX4DUPK. It actually moves its zoom motor instead of panning and tilting and does not follow the ONVIF standard whatsoever.
### Frigate reports an error saying that calibration has failed. Why?
Calibration measures the amount of time it takes for Frigate to make a series of movements with your PTZ. This error message is recorded in the log if these values are too high for Frigate to support calibrated autotracking. This is often the case when your camera's motor or network connection is too slow or your camera's firmware doesn't report the motor status in a timely manner. You can try running without calibration (just remove the `movement_weights` line from your config and restart), but if calibration fails, this often means that autotracking will behave unpredictably.
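For reference, a minimal sketch of where `movement_weights` sits in the config (camera name and values are hypothetical; the real values are generated by calibration and are not meant to be hand-edited):

```yaml
cameras:
  ptz_camera:
    onvif:
      autotracking:
        enabled: true
        # generated during calibration; remove this line and
        # restart to run without calibration
        movement_weights: "0.12, 0.34, 1.25, 2.50"
```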

View File

@ -65,19 +65,32 @@ ffmpeg:
## Model/vendor specific setup
### Amcrest & Dahua
Amcrest & Dahua cameras should be connected to via RTSP using the following format:
```
rtsp://USERNAME:PASSWORD@CAMERA-IP/cam/realmonitor?channel=1&subtype=0 # this is the main stream
rtsp://USERNAME:PASSWORD@CAMERA-IP/cam/realmonitor?channel=1&subtype=1 # this is the sub stream, typically supporting low resolutions only
rtsp://USERNAME:PASSWORD@CAMERA-IP/cam/realmonitor?channel=1&subtype=2 # higher end cameras support a third stream with a mid resolution (1280x720, 1920x1080)
rtsp://USERNAME:PASSWORD@CAMERA-IP/cam/realmonitor?channel=1&subtype=3 # new higher end cameras support a fourth stream with another mid resolution (1280x720, 1920x1080)
```
### Annke C800
This camera is H.265 only. To be able to play clips on some devices (like MacOs or iPhone) the H.265 stream has to be repackaged and the audio stream has to be converted to aac. Unfortunately direct playback of in the browser is not working (yet), but the downloaded clip can be played locally.
This camera is H.265 only. To be able to play clips on some devices (like macOS or iPhone), the H.265 stream has to be adjusted using the `apple_compatibility` config.
```yaml
cameras:
annkec800: # <------ Name the camera
ffmpeg:
apple_compatibility: true # <- Adds compatibility with MacOS and iPhone
output_args:
record: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v copy -tag:v hvc1 -bsf:v hevc_mp4toannexb -c:a aac
record: preset-record-generic-audio-aac
inputs:
- path: rtsp://user:password@camera-ip:554/H264/ch1/main/av_stream # <----- Update for your camera
- path: rtsp://USERNAME:PASSWORD@CAMERA-IP/H264/ch1/main/av_stream # <----- Update for your camera
roles:
- detect
- record
@ -95,6 +108,29 @@ ffmpeg:
input_args: preset-rtsp-blue-iris
```
### Hikvision Cameras
Hikvision cameras should be connected to via RTSP using the following format:
```
rtsp://USERNAME:PASSWORD@CAMERA-IP/streaming/channels/101 # this is the main stream
rtsp://USERNAME:PASSWORD@CAMERA-IP/streaming/channels/102 # this is the sub stream, typically supporting low resolutions only
rtsp://USERNAME:PASSWORD@CAMERA-IP/streaming/channels/103 # higher end cameras support a third stream with a mid resolution (1280x720, 1920x1080)
```
:::note
[Some users have reported](https://www.reddit.com/r/frigate_nvr/comments/1hg4ze7/hikvision_security_settings) that newer Hikvision cameras require adjustments to the security settings:
```
RTSP Authentication - digest/basic
RTSP Digest Algorithm - MD5
WEB Authentication - digest/basic
WEB Digest Algorithm - MD5
```
:::
### Reolink Cameras
Reolink has older cameras (ex: 410 & 520) as well as newer cameras (ex: 520a & 511wa) which support different subsets of options. In both cases using the http stream is recommended.
@ -156,7 +192,9 @@ cameras:
#### Reolink Doorbell
The reolink doorbell supports 2-way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability, a secondary rtsp stream can be added that will be using for the two way audio only.
The reolink doorbell supports two way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability; a secondary rtsp stream can be added that will be used for the two way audio only.
Ensure HTTP is enabled in the camera's advanced network settings. To use two way talk with Frigate, see the [Live view documentation](/configuration/live#two-way-talk).
```yaml
go2rtc:

View File

@ -0,0 +1,59 @@
---
id: face_recognition
title: Face Recognition
---
Face recognition allows people to be assigned names; when a face is recognized, Frigate will assign the person's name as a sub label. This information is included in the UI, filters, as well as in notifications.
Frigate has support for FaceNet, which runs locally, to create face embeddings. Embeddings are then saved to Frigate's database.
## Minimum System Requirements
Face recognition works by running a large AI model locally on your system. Systems without a GPU will not run Face Recognition reliably or at all.
## Configuration
Face recognition is disabled by default and requires Semantic Search to be enabled. Face recognition must be enabled in your config file before it can be used. Semantic Search and face recognition are global configuration settings.
```yaml
face_recognition:
enabled: true
```
## Dataset
The number of images needed for a sufficient training set for face recognition varies depending on several factors:
- Diversity of the dataset: A dataset with diverse images, including variations in lighting, pose, and facial expressions, will require fewer images per person than a less diverse dataset.
- Desired accuracy: The higher the desired accuracy, the more images are typically needed.
However, here are some general guidelines:
- Minimum: For basic face recognition tasks, a minimum of 10-20 images per person is often recommended.
- Recommended: For more robust and accurate systems, 30-50 images per person is a good starting point.
- Ideal: For optimal performance, especially in challenging conditions, 100 or more images per person can be beneficial.
## Creating a Robust Training Set
The accuracy of face recognition is heavily dependent on the quality of data given to it for training. It is recommended to build the face training library in phases.
:::tip
When choosing images to include in the face training set it is recommended to always follow these recommendations:
- If it is difficult to make out details in a person's face it will not be helpful in training.
- Avoid images with under/over-exposure.
- Avoid blurry / pixelated images.
- Be careful when uploading images of people when they are wearing clothing that covers a lot of their face as this may confuse the training.
- Do not upload too many images at the same time; it is recommended to train 4-6 images for each person each day so it is easier to know whether the previously added images helped or hurt performance.
:::
### Step 1 - Building a Strong Foundation
When first enabling face recognition, it is important to build a foundation of strong images. It is recommended to start by uploading 1-2 photos taken by a smartphone for each person. It is important that the person's face in the photo is straight-on and not turned, which will ensure a good starting point.
Then it is recommended to use the `Face Library` tab in Frigate to select and train images for each person as they are detected. When building a strong foundation it is strongly recommended to only train on images that are straight-on. Ignore images from cameras that recognize faces from an angle. Once a person starts to be consistently recognized correctly on images that are straight-on, it is time to move on to the next step.
### Step 2 - Expanding The Dataset
Once straight-on images are performing well, start choosing slightly off-angle images to include for training. It is important to still choose images where enough face detail is visible to recognize someone.

View File

@ -15,9 +15,9 @@ Semantic Search must be enabled to use Generative AI.
## Configuration
Generative AI can be enabled for all cameras or only for specific cameras. There are currently 3 providers available to integrate with Frigate.
Generative AI can be enabled for all cameras or only for specific cameras. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below.
If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`.
To use Generative AI, you must define a single provider at the global level of your Frigate configuration. If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`.
```yaml
genai:
@ -116,6 +116,12 @@ genai:
model: gpt-4o
```
:::note
To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` environment variable to your provider's API URL.
:::
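For example, in a docker compose file (the endpoint URL is a placeholder for your provider's):

```yaml
services:
  frigate:
    environment:
      - OPENAI_BASE_URL=https://api.example-provider.com/v1
```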
## Azure OpenAI
Microsoft offers several vision models through Azure OpenAI. A subscription is required.

View File

@ -175,6 +175,16 @@ For more information on the various values across different distributions, see h
Depending on your OS and kernel configuration, you may need to change the `/proc/sys/kernel/perf_event_paranoid` kernel tunable. You can test the change by running `sudo sh -c 'echo 2 >/proc/sys/kernel/perf_event_paranoid'` which will persist until a reboot. Make it permanent by running `sudo sh -c 'echo kernel.perf_event_paranoid=2 >> /etc/sysctl.d/local.conf'`
#### Stats for SR-IOV devices
When using virtualized GPUs via SR-IOV, additional args are needed for GPU stats to function. This can be enabled with the following config:
```yaml
telemetry:
stats:
sriov: True
```
## AMD/ATI GPUs (Radeon HD 2000 and newer GPUs) via libva-mesa-driver
VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams.

View File

@ -203,14 +203,13 @@ detectors:
ov:
type: openvino
device: AUTO
model:
path: /openvino-model/ssdlite_mobilenet_v2.xml
model:
width: 300
height: 300
input_tensor: nhwc
input_pixel_format: bgr
path: /openvino-model/ssdlite_mobilenet_v2.xml
labelmap_path: /openvino-model/coco_91cl_bkgr.txt
record:

View File

@ -0,0 +1,45 @@
---
id: license_plate_recognition
title: License Plate Recognition (LPR)
---
Frigate can recognize license plates on vehicles and automatically add the detected characters as a `sub_label` to objects that are of type `car`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street with a dedicated LPR camera.
Users running a Frigate+ model should ensure that `license_plate` is added to the [list of objects to track](https://docs.frigate.video/plus/#available-label-types) either globally or for a specific camera. This will improve the accuracy and performance of the LPR model.
LPR is most effective when the vehicle's license plate is fully visible to the camera. For moving vehicles, Frigate will attempt to read the plate continuously, refining its detection and keeping the most confident result. LPR will not run on stationary vehicles.
## Minimum System Requirements
License plate recognition works by running AI models locally on your system. The models are relatively lightweight and run on your CPU. At least 4GB of RAM is required.
## Configuration
License plate recognition is disabled by default. Enable it in your config file:
```yaml
lpr:
enabled: true
```
## Advanced Configuration
Several options are available to fine-tune the LPR feature. For example, you can adjust the `min_area` setting, which defines the minimum size in pixels a license plate must be before LPR runs. The default is 500 pixels.
Additionally, you can define `known_plates` as strings or regular expressions, allowing Frigate to label tracked vehicles with custom sub_labels when a recognized plate is detected. This information is then accessible in the UI, filters, and notifications.
```yaml
lpr:
enabled: true
min_area: 500
known_plates:
Wife's Car:
- "ABC-1234"
- "ABC-I234"
Johnny:
- "J*N-*234" # Using wildcards for H/M and 1/I
Sally:
- "[S5]LL-1234" # Matches SLL-1234 and 5LL-1234
```
In this example, "Wife's Car" will appear as the label for any vehicle matching the plate "ABC-1234." The model might occasionally interpret the digit 1 as a capital I (e.g., "ABC-I234"), so both variations are listed. Similarly, multiple possible variations are specified for Johnny and Sally.

View File

@ -29,7 +29,7 @@ The default video and audio codec on your camera may not always be compatible wi
### Audio Support
MSE Requires AAC audio, WebRTC requires PCMU/PCMA, or opus audio. If you want to support both MSE and WebRTC then your restream config needs to make sure both are enabled.
MSE requires PCMA/PCMU or AAC audio; WebRTC requires PCMA/PCMU or opus audio. If you want to support both MSE and WebRTC then your restream config needs to make sure both are enabled.
```yaml
go2rtc:
@ -138,3 +138,13 @@ services:
:::
See [go2rtc WebRTC docs](https://github.com/AlexxIT/go2rtc/tree/v1.8.3#module-webrtc) for more information about this.
### Two way talk
For devices that support two way talk, Frigate can be configured to use the feature from the camera's Live view in the Web UI. You should:
- Set up go2rtc with [WebRTC](#webrtc-extra-configuration).
- Ensure you access Frigate via https (may require [opening port 8971](/frigate/installation/#ports)).
- For the Home Assistant Frigate card, [follow the docs](https://github.com/dermotduffy/frigate-hass-card?tab=readme-ov-file#using-2-way-audio) for the correct source.
To use the Reolink Doorbell with two way talk, you should use the [recommended Reolink configuration](/configuration/camera_specific#reolink-doorbell)

View File

@ -0,0 +1,99 @@
---
id: metrics
title: Metrics
---
# Metrics
Frigate exposes Prometheus metrics at the `/api/metrics` endpoint that can be used to monitor the performance and health of your Frigate instance.
## Available Metrics
### System Metrics
- `frigate_cpu_usage_percent{pid="", name="", process="", type="", cmdline=""}` - Process CPU usage percentage
- `frigate_mem_usage_percent{pid="", name="", process="", type="", cmdline=""}` - Process memory usage percentage
- `frigate_gpu_usage_percent{gpu_name=""}` - GPU utilization percentage
- `frigate_gpu_mem_usage_percent{gpu_name=""}` - GPU memory usage percentage
### Camera Metrics
- `frigate_camera_fps{camera_name=""}` - Frames per second being consumed from your camera
- `frigate_detection_fps{camera_name=""}` - Number of times detection is run per second
- `frigate_process_fps{camera_name=""}` - Frames per second being processed
- `frigate_skipped_fps{camera_name=""}` - Frames per second skipped for processing
- `frigate_detection_enabled{camera_name=""}` - Detection enabled status for camera
- `frigate_audio_dBFS{camera_name=""}` - Audio dBFS for camera
- `frigate_audio_rms{camera_name=""}` - Audio RMS for camera
### Detector Metrics
- `frigate_detector_inference_speed_seconds{name=""}` - Time spent running object detection in seconds
- `frigate_detection_start{name=""}` - Detector start time (unix timestamp)
### Storage Metrics
- `frigate_storage_free_bytes{storage=""}` - Storage free bytes
- `frigate_storage_total_bytes{storage=""}` - Storage total bytes
- `frigate_storage_used_bytes{storage=""}` - Storage used bytes
- `frigate_storage_mount_type{mount_type="", storage=""}` - Storage mount type info
### Service Metrics
- `frigate_service_uptime_seconds` - Uptime in seconds
- `frigate_service_last_updated_timestamp` - Stats recorded time (unix timestamp)
- `frigate_device_temperature{device=""}` - Device Temperature
### Event Metrics
- `frigate_camera_events{camera="", label=""}` - Count of camera events since exporter started
## Configuring Prometheus
To scrape metrics from Frigate, add the following to your Prometheus configuration:
```yaml
scrape_configs:
- job_name: 'frigate'
metrics_path: '/api/metrics'
static_configs:
- targets: ['frigate:5000']
scrape_interval: 15s
```
## Example Queries
Here are some example PromQL queries that might be useful:
```promql
# Average CPU usage across all processes
avg(frigate_cpu_usage_percent)
# Total GPU memory usage
sum(frigate_gpu_mem_usage_percent)
# Detection FPS by camera
rate(frigate_detection_fps{camera_name="front_door"}[5m])
# Storage usage percentage
(frigate_storage_used_bytes / frigate_storage_total_bytes) * 100
# Event count by camera in last hour
increase(frigate_camera_events[1h])
```
## Grafana Dashboard
You can use these metrics to create Grafana dashboards to monitor your Frigate instance. Here's an example of metrics you might want to track:
- CPU, Memory and GPU usage over time
- Camera FPS and detection rates
- Storage usage and trends
- Event counts by camera
- System temperatures
A sample Grafana dashboard JSON will be provided in a future update.
## Metric Types
The metrics exposed by Frigate use the following Prometheus metric types:
- **Counter**: Cumulative values that only increase (e.g., `frigate_camera_events`)
- **Gauge**: Values that can go up and down (e.g., `frigate_cpu_usage_percent`)
- **Info**: Key-value pairs for metadata (e.g., `frigate_storage_mount_type`)
For more information about Prometheus metric types, see the [Prometheus documentation](https://prometheus.io/docs/concepts/metric_types/).

View File

@ -33,6 +33,14 @@ Frigate supports multiple different detectors that work on different types of ha
:::
:::note
Multiple detectors cannot be mixed for object detection (ex: OpenVINO and Coral EdgeTPU cannot be used for object detection at the same time).
This does not affect using hardware for accelerating other tasks such as [semantic search](./semantic_search.md).
:::
# Officially Supported Detectors
Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `hailo8l`, `onnx`, `openvino`, `rknn`, `rocm`, and `tensorrt`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras.
@ -116,6 +124,30 @@ detectors:
device: pci
```
## Hailo-8l
This detector is available for use with Hailo-8 AI Acceleration Module.
See the [installation docs](../frigate/installation.md#hailo-8l) for information on configuring the hailo8.
### Configuration
```yaml
detectors:
hailo8l:
type: hailo8l
device: PCIe
model:
width: 300
height: 300
input_tensor: nhwc
input_pixel_format: bgr
model_type: ssd
path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef
```
## OpenVINO Detector
The OpenVINO detector type runs an OpenVINO IR model on AMD and Intel CPUs, Intel GPUs and Intel VPU hardware. To configure an OpenVINO detector, set the `"type"` attribute to `"openvino"`.
@ -144,7 +176,9 @@ detectors:
#### SSDLite MobileNet v2
An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model. Use the model configuration shown below when using the OpenVINO detector with the default model.
An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model.
Use the model configuration shown below when using the OpenVINO detector with the default OpenVINO model:
```yaml
detectors:
@ -254,6 +288,7 @@ yolov4x-mish-640
yolov7-tiny-288
yolov7-tiny-416
yolov7-640
yolov7-416
yolov7-320
yolov7x-640
yolov7x-320
@ -282,6 +317,8 @@ The TensorRT detector can be selected by specifying `tensorrt` as the model type
The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. The model path and dimensions used will depend on which model you have generated.
Use the config below to work with generated TRT models:
```yaml
detectors:
tensorrt:
@ -501,11 +538,12 @@ detectors:
cpu1:
type: cpu
num_threads: 3
model:
path: "/custom_model.tflite"
cpu2:
type: cpu
num_threads: 3
model:
path: "/custom_model.tflite"
```
When using CPU detectors, you can add one CPU detector per camera. Adding more detectors than the number of cameras should not improve performance.
@ -544,7 +582,7 @@ Hardware accelerated object detection is supported on the following SoCs:
- RK3576
- RK3588
This implementation uses the [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.0.0.beta0. Currently, only [Yolo-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) is supported as object detection model.
This implementation uses [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.3.0. Currently, only [Yolo-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) is supported as an object detection model.
### Prerequisites
@ -619,26 +657,36 @@ $ cat /sys/kernel/debug/rknpu/load
- All models are automatically downloaded and stored in the folder `config/model_cache/rknn_cache`. After upgrading Frigate, you should remove older models to free up space.
- You can also provide your own `.rknn` model. You should not save your own models in the `rknn_cache` folder; store them directly in the `model_cache` folder or another subfolder. To convert a model to `.rknn` format, see the conversion section below (this requires an x86 machine). Note that post-processing is only available for the supported models.
## Hailo-8l
This detector is available for use with Hailo-8 AI Acceleration Module.
See the [installation docs](../frigate/installation.md#hailo-8l) for information on configuring the hailo8.
### Configuration
```yaml
detectors:
  hailo8l:
    type: hailo8l
    device: PCIe
    model:
      path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef

model:
  width: 300
  height: 300
  input_tensor: nhwc
  input_pixel_format: bgr
  model_type: ssd
```
### Converting your own onnx model to rknn format
To convert an onnx model to the rknn format using the [rknn-toolkit2](https://github.com/airockchip/rknn-toolkit2/) you have to:
- Place one or more models in onnx format in the directory `config/model_cache/rknn_cache/onnx` on your docker host (this might require `sudo` privileges).
- Save the configuration file under `config/conv2rknn.yaml` (see below for details).
- Run `docker exec <frigate_container_id> python3 /opt/conv2rknn.py`. If the conversion was successful, the rknn models will be placed in `config/model_cache/rknn_cache`.
This is an example configuration file that you need to adjust to your specific onnx model:
```yaml
soc: ["rk3562","rk3566", "rk3568", "rk3576", "rk3588"]
quantization: false

output_name: "{input_basename}"

config:
  mean_values: [[0, 0, 0]]
  std_values: [[255, 255, 255]]
  quant_img_rgb2bgr: true
```
Explanation of the parameters:
- `soc`: A list of all SoCs you want to build the rknn model for. If you don't specify this parameter, the script tries to find out your SoC and builds the rknn model for this one.
- `quantization`: `true` enables 8-bit integer (i8) quantization, `false` uses 16-bit float (fp16). Default: `false`.
- `output_name`: The output name of the model. The following variables are available:
  - `quant`: "i8" or "fp16" depending on the config
  - `input_basename`: the basename of the input model (e.g. "my_model" if the input model is called "my_model.onnx")
  - `soc`: the SoC this model was built for (e.g. "rk3588")
  - `tk_version`: Version of `rknn-toolkit2` (e.g. "2.3.0")
  - **example**: Specifying `output_name = "frigate-{quant}-{input_basename}-{soc}-v{tk_version}"` could result in a model called `frigate-i8-my_model-rk3588-v2.3.0.rknn`.
- `config`: Configuration passed to `rknn-toolkit2` for model conversion. For an explanation of all available parameters, have a look at section "2.2. Model configuration" of [this manual](https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/03_Rockchip_RKNPU_API_Reference_RKNN_Toolkit2_V2.3.0_EN.pdf).

View File

@ -34,7 +34,7 @@ False positives can also be reduced by filtering a detection based on its shape.
### Object Area
`min_area` and `max_area` filter on the area of an objects bounding box in pixels and can be used to reduce false positives that are outside the range of expected sizes. For example when a leaf is detected as a dog or when a large tree is detected as a person, these can be reduced by adding a `min_area` / `max_area` filter.
`min_area` and `max_area` filter on the area of an object's bounding box and can be used to reduce false positives that are outside the range of expected sizes. For example, when a leaf is detected as a dog or when a large tree is detected as a person, these can be reduced by adding a `min_area` / `max_area` filter. These values can either be in pixels or as a percentage of the frame (for example, 0.12 represents 12% of the frame).
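For example, a person filter using frame-percentage values might look like the following sketch; the thresholds are illustrative, not recommendations:
```yaml
objects:
  filters:
    person:
      min_area: 0.005 # ignore boxes smaller than 0.5% of the frame
      max_area: 0.6 # ignore boxes larger than 60% of the frame
```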
### Object Proportions

View File

@ -52,7 +52,7 @@ detectors:
# Required: name of the detector
detector_name:
# Required: type of the detector
# Frigate provided types include 'cpu', 'edgetpu', 'openvino' and 'tensorrt' (default: shown below)
# Frigate provides many types, see https://docs.frigate.video/configuration/object_detectors for more details (default: shown below)
# Additional detector types can also be plugged in.
# Detectors may require additional configuration.
# Refer to the Detectors configuration page for more information.
@ -117,25 +117,27 @@ auth:
hash_iterations: 600000
# Optional: model modifications
# NOTE: The default values are for the EdgeTPU detector.
# Other detectors will require the model config to be set.
model:
# Optional: path to the model (default: automatic based on detector)
# Required: path to the model (default: automatic based on detector)
path: /edgetpu_model.tflite
# Optional: path to the labelmap (default: shown below)
# Required: path to the labelmap (default: shown below)
labelmap_path: /labelmap.txt
# Required: Object detection model input width (default: shown below)
width: 320
# Required: Object detection model input height (default: shown below)
height: 320
# Optional: Object detection model input colorspace
# Required: Object detection model input colorspace
# Valid values are rgb, bgr, or yuv. (default: shown below)
input_pixel_format: rgb
# Optional: Object detection model input tensor format
# Required: Object detection model input tensor format
# Valid values are nhwc or nchw (default: shown below)
input_tensor: nhwc
# Optional: Object detection model type, currently only used with the OpenVINO detector
# Required: Object detection model type, currently only used with the OpenVINO detector
# Valid values are ssd, yolox, yolonas (default: shown below)
model_type: ssd
# Optional: Label name modifications. These are merged into the standard labelmap.
# Required: Label name modifications. These are merged into the standard labelmap.
labelmap:
2: vehicle
# Optional: Map of object labels to their attribute labels (default: depends on model)
@ -242,6 +244,8 @@ ffmpeg:
# If set too high, then if an ffmpeg crash or camera stream timeout occurs, you could potentially lose up to retry_interval second(s) of footage
# NOTE: this can be a useful setting for Wireless / Battery cameras to reduce how much footage is potentially lost during a connection timeout.
retry_interval: 10
# Optional: Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players. (default: shown below)
apple_compatibility: false
# Optional: Detect configuration
# NOTE: Can be overridden at the camera level
@ -308,9 +312,11 @@ objects:
# Optional: filters to reduce false positives for specific object types
filters:
person:
# Optional: minimum width*height of the bounding box for the detected object (default: 0)
# Optional: minimum size of the bounding box for the detected object (default: 0).
# Can be specified as an integer for width*height in pixels or as a decimal representing the percentage of the frame (0.000001 to 0.99).
min_area: 5000
# Optional: maximum width*height of the bounding box for the detected object (default: 24000000)
# Optional: maximum size of the bounding box for the detected object (default: 24000000).
# Can be specified as an integer for width*height in pixels or as a decimal representing the percentage of the frame (0.000001 to 0.99).
max_area: 100000
# Optional: minimum width/height of the bounding box for the detected object (default: 0)
min_ratio: 0.5
@ -522,6 +528,14 @@ semantic_search:
# NOTE: small model runs on CPU and large model runs on GPU
model_size: "small"
# Optional: Configuration for face recognition capability
face_recognition:
# Optional: Enable face recognition (default: shown below)
enabled: False
# Optional: Set the model size used for embeddings. (default: shown below)
# NOTE: small model runs on CPU and large model runs on GPU
model_size: "small"
# Optional: Configuration for AI generated tracked object descriptions
# NOTE: Semantic Search must be enabled for this to do anything.
# WARNING: Depending on the provider, this will send thumbnails over the internet
@ -546,6 +560,8 @@ genai:
# Optional: Restream configuration
# Uses https://github.com/AlexxIT/go2rtc (v1.9.2)
# NOTE: The default go2rtc API port (1984) must be used,
# changing this port for the integrated go2rtc instance is not supported.
go2rtc:
# Optional: Live stream configuration for WebUI.
@ -803,11 +819,13 @@ telemetry:
- lo
# Optional: Configure system stats
stats:
# Enable AMD GPU stats (default: shown below)
# Optional: Enable AMD GPU stats (default: shown below)
amd_gpu_stats: True
# Enable Intel GPU stats (default: shown below)
# Optional: Enable Intel GPU stats (default: shown below)
intel_gpu_stats: True
# Enable network bandwidth stats monitoring for camera ffmpeg processes, go2rtc, and object detectors. (default: shown below)
# Optional: Treat GPU as SR-IOV to fix GPU stats (default: shown below)
sriov: False
# Optional: Enable network bandwidth stats monitoring for camera ffmpeg processes, go2rtc, and object detectors. (default: shown below)
# NOTE: The container must either be privileged or have cap_net_admin, cap_net_raw capabilities enabled.
network_bandwidth: False
# Optional: Enable the latest version outbound check (default: shown below)

View File

@ -1,6 +1,6 @@
---
id: semantic_search
title: Using Semantic Search
title: Semantic Search
---
Semantic Search in Frigate allows you to find tracked objects within your review items using either the image itself, a user-defined text description, or an automatically generated one. This feature works by creating _embeddings_ — numerical vector representations — for both the images and text descriptions of your tracked objects. By comparing these embeddings, Frigate assesses their similarities to deliver relevant search results.

View File

@ -13,20 +13,19 @@ Many users have reported various issues with Reolink cameras, so I do not recomm
Here are some of the cameras I recommend:
- <a href="https://amzn.to/3uFLtxB" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) T5442TM-AS-LED</a> (affiliate link)
- <a href="https://amzn.to/3isJ3gU" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) IPC-T5442TM-AS</a> (affiliate link)
- <a href="https://amzn.to/2ZWNWIA" target="_blank" rel="nofollow noopener sponsored">Amcrest IP5M-T1179EW-28MM</a> (affiliate link)
- <a href="https://amzn.to/4fwoNWA" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) IPC-T549M-ALED-S3</a> (affiliate link)
- <a href="https://amzn.to/3YXpcMw" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) IPC-T54IR-AS</a> (affiliate link)
- <a href="https://amzn.to/3AvBHoY" target="_blank" rel="nofollow noopener sponsored">Amcrest IP5M-T1179EW-AI-V3</a> (affiliate link)
I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.
## Server
My current favorite is the Beelink EQ12 because of the efficient N100 CPU and dual NICs that allow you to setup a dedicated private network for your cameras where they can be blocked from accessing the internet. There are many used workstation options on eBay that work very well. Anything with an Intel CPU and capable of running Debian should work fine. As a bonus, you may want to look for devices with a M.2 or PCIe express slot that is compatible with the Google Coral. I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.
My current favorite is the Beelink EQ13 because of the efficient N100 CPU and dual NICs that allow you to set up a dedicated private network for your cameras where they can be blocked from accessing the internet. There are many used workstation options on eBay that work very well. Anything with an Intel CPU and capable of running Debian should work fine. As a bonus, you may want to look for devices with an M.2 or PCIe slot that is compatible with the Google Coral. I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.
| Name | Coral Inference Speed | Coral Compatibility | Notes |
| ------------------------------------------------------------------------------------------------------------- | --------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------------------- |
| Beelink EQ12 (<a href="https://amzn.to/3OlTMJY" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 5-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
| Intel NUC (<a href="https://amzn.to/3psFlHi" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 5-10ms | USB | Overkill for most, but great performance. Can handle many cameras at 5fps depending on typical amounts of motion. Requires extra parts. |
| Name | Coral Inference Speed | Coral Compatibility | Notes |
| ------------------------------------------------------------------------------------------------------------- | --------------------- | ------------------- | ----------------------------------------------------------------------------------------- |
| Beelink EQ13 (<a href="https://amzn.to/4iQaBKu" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 5-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
## Detectors
@ -52,24 +51,25 @@ The OpenVINO detector type is able to run on:
More information is available [in the detector docs](/configuration/object_detectors#openvino-detector)
Inference speeds vary greatly depending on the CPU, GPU, or VPU used, some known examples are below:
Inference speeds vary greatly depending on the CPU or GPU used, some known examples of GPU inference times are below:
| Name | Inference Speed | Notes |
| -------------------- | --------------- | --------------------------------------------------------------------- |
| Intel NCS2 VPU | 60 - 65 ms | May vary based on host device |
| Intel Celeron J4105 | ~ 25 ms | Inference speeds on CPU were 150 - 200 ms |
| Intel Celeron N3060 | 130 - 150 ms | Inference speeds on CPU were ~ 550 ms |
| Intel Celeron N3205U | ~ 120 ms | Inference speeds on CPU were ~ 380 ms |
| Intel Celeron N4020 | 50 - 200 ms | Inference speeds on CPU were ~ 800 ms, greatly depends on other loads |
| Intel i3 6100T | 15 - 35 ms | Inference speeds on CPU were 60 - 120 ms |
| Intel i3 8100 | ~ 15 ms | Inference speeds on CPU were ~ 65 ms |
| Intel i5 4590 | ~ 20 ms | Inference speeds on CPU were ~ 230 ms |
| Intel i5 6500 | ~ 15 ms | Inference speeds on CPU were ~ 150 ms |
| Intel i5 7200u | 15 - 25 ms | Inference speeds on CPU were ~ 150 ms |
| Intel i5 7500 | ~ 15 ms | Inference speeds on CPU were ~ 260 ms |
| Intel i5 1135G7 | 10 - 15 ms | |
| Intel i5 12600K | ~ 15 ms | Inference speeds on CPU were ~ 35 ms |
| Intel Arc A750 | ~ 4 ms | |
| Name | MobileNetV2 Inference Time | YOLO-NAS Inference Time | Notes |
| -------------------- | -------------------------- | ------------------------- | -------------------------------------- |
| Intel Celeron J4105 | ~ 25 ms | | Can only run one detector instance |
| Intel Celeron N3060 | 130 - 150 ms | | Can only run one detector instance |
| Intel Celeron N3205U | ~ 120 ms | | Can only run one detector instance |
| Intel Celeron N4020 | 50 - 200 ms | | Inference speed depends on other loads |
| Intel i3 6100T | 15 - 35 ms | | Can only run one detector instance |
| Intel i3 8100 | ~ 15 ms | | |
| Intel i5 4590 | ~ 20 ms | | |
| Intel i5 6500 | ~ 15 ms | | |
| Intel i5 7200u | 15 - 25 ms | | |
| Intel i5 7500 | ~ 15 ms | | |
| Intel i5 1135G7 | 10 - 15 ms | | |
| Intel i3 12000 | | 320: ~ 19 ms 640: ~ 54 ms | |
| Intel i5 12600K | ~ 15 ms | 320: ~ 20 ms 640: ~ 46 ms | |
| Intel Arc A380 | ~ 6 ms | 320: ~ 10 ms | |
| Intel Arc A750 | ~ 4 ms | 320: ~ 8 ms | |
### TensorRT - Nvidia GPU
@ -78,29 +78,35 @@ The TensorRT detector is able to run on x86 hosts that have an Nvidia GPU which
Inference speeds will vary greatly depending on the GPU and the model used.
`tiny` variants are faster than the equivalent non-tiny model; some known examples are below:
| Name | Inference Speed |
| --------------- | --------------- |
| GTX 1060 6GB | ~ 7 ms |
| GTX 1070 | ~ 6 ms |
| GTX 1660 SUPER | ~ 4 ms |
| RTX 3050 | 5 - 7 ms |
| RTX 3070 Mobile | ~ 5 ms |
| Quadro P400 2GB | 20 - 25 ms |
| Quadro P2000 | ~ 12 ms |
| Name | YoloV7 Inference Time | YOLO-NAS Inference Time |
| --------------- | --------------------- | ------------------------- |
| GTX 1060 6GB | ~ 7 ms | |
| GTX 1070 | ~ 6 ms | |
| GTX 1660 SUPER | ~ 4 ms | |
| RTX 3050 | 5 - 7 ms | 320: ~ 10 ms 640: ~ 16 ms |
| RTX 3070 Mobile | ~ 5 ms | |
| Quadro P400 2GB | 20 - 25 ms | |
| Quadro P2000 | ~ 12 ms | |
#### AMD GPUs
### AMD GPUs
With the [rocm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many AMD GPUs.
With the [rocm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many discrete AMD GPUs.
### Hailo-8l PCIe
Frigate supports the Hailo-8l M.2 card on any hardware but currently it is only tested on the Raspberry Pi5 PCIe hat from the AI kit.
The inference time for the Hailo-8L chip at time of writing is around 17-21 ms for the SSD MobileNet Version 1 model.
### Community Supported:
## Community Supported Detectors
#### Nvidia Jetson
### Nvidia Jetson
Frigate supports all Jetson boards, from the inexpensive Jetson Nano to the powerful Jetson Orin AGX. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector).
Inference speed will vary depending on the YOLO model, jetson platform and jetson nvpmodel (GPU/DLA/EMC clock speed). It is typically 20-40 ms for most models. The DLA is more efficient than the GPU, but not faster, so using the DLA will reduce power consumption but will slightly increase inference time.
#### Rockchip platform
### Rockchip platform
Frigate supports hardware video processing on all Rockchip boards. However, hardware object detection is only supported on these boards:
@ -112,12 +118,6 @@ Frigate supports hardware video processing on all Rockchip boards. However, hard
The inference time of a rk3588 with all 3 cores enabled is typically 25-30 ms for yolo-nas s.
#### Hailo-8l PCIe
Frigate supports the Hailo-8l M.2 card on any hardware but currently it is only tested on the Raspberry Pi5 PCIe hat from the AI kit.
The inference time for the Hailo-8L chip at time of writing is around 17-21 ms for the SSD MobileNet Version 1 model.
## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version)
This is taken from a [user question on reddit](https://www.reddit.com/r/homeassistant/comments/q8mgau/comment/hgqbxh5/?utm_source=share&utm_medium=web2x&context=3). Modified slightly for clarity.

View File

@ -111,7 +111,7 @@ For Raspberry Pi 5 users with the AI Kit, installation is straightforward. Simpl
For other installations, follow these steps for installation:
1. Install the driver from the [Hailo GitHub repository](https://github.com/hailo-ai/hailort-drivers). A convenient script for Linux is available to clone the repository, build the driver, and install it.
2. Copy or download [this script](https://github.com/blakeblackshear/frigate/blob/41c9b13d2fffce508b32dfc971fa529b49295fbd/docker/hailo8l/user_installation.sh).
2. Copy or download [this script](https://github.com/blakeblackshear/frigate/blob/dev/docker/hailo8l/user_installation.sh).
3. Ensure it has execution permissions with `sudo chmod +x user_installation.sh`
4. Run the script with `./user_installation.sh`
@ -305,8 +305,15 @@ To install make sure you have the [community app plugin here](https://forums.unr
## Proxmox
It is recommended to run Frigate in LXC, rather than in a VM, for maximum performance. The setup can be complex so be prepared to read the Proxmox and LXC documentation. Suggestions include:
[According to Proxmox documentation](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_pct) it is recommended that you run application containers like Frigate inside a Proxmox QEMU VM. This will give you all the advantages of application containerization, while also providing the benefits that VMs offer, such as strong isolation from the host and the ability to live-migrate, which otherwise isn't possible with containers.
:::warning
If you choose to run Frigate via LXC in Proxmox, the setup can be complex, so be prepared to read the Proxmox and LXC documentation; Frigate does not officially support running inside of an LXC.
:::
Suggestions include:
- For Intel-based hardware acceleration, to allow access to the `/dev/dri/renderD128` device with major number 226 and minor number 128, add the following lines to the `/etc/pve/lxc/<id>.conf` LXC configuration:
- `lxc.cgroup2.devices.allow: c 226:128 rwm`
- `lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file`

View File

@ -7,7 +7,7 @@ title: Configuring go2rtc
Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect directly to your cameras. However, adding go2rtc to your configuration is required for the following features:
- WebRTC or MSE for live viewing with higher resolutions and frame rates than the jsmpeg stream which is limited to the detect stream
- WebRTC or MSE for live viewing with audio, higher resolutions and frame rates than the jsmpeg stream which is limited to the detect stream and does not support audio
- Live stream support for cameras in Home Assistant Integration
- RTSP relay for use with other consumers to reduce the number of connections to your camera streams
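A minimal sketch of a bundled go2rtc stream entry is shown below; the stream name and RTSP URL are placeholders:
```yaml
go2rtc:
  streams:
    front_door:
      - rtsp://user:password@192.168.1.10:554/stream1
```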

View File

@ -47,7 +47,7 @@ that card.
## Configuration
When configuring the integration, you will be asked for the `URL` of your Frigate instance which needs to be pointed at the internal unauthenticated port (`5000`) for your instance. This may look like `http://<host>:5000/`.
When configuring the integration, you will be asked for the `URL` of your Frigate instance which can be pointed at the internal unauthenticated port (`5000`) or the authenticated port (`8971`) for your instance. This may look like `http://<host>:5000/`.
### Docker Compose Examples
@ -55,7 +55,7 @@ If you are running Home Assistant Core and Frigate with Docker Compose on the sa
#### Home Assistant running with host networking
It is not recommended to run Frigate in host networking mode. In this example, you would use `http://172.17.0.1:5000` when configuring the integration.
It is not recommended to run Frigate in host networking mode. In this example, you would use `http://172.17.0.1:5000` or `http://172.17.0.1:8971` when configuring the integration.
```yaml
services:
@ -75,7 +75,7 @@ services:
#### Home Assistant _not_ running with host networking or in a separate compose file
In this example, you would use `http://frigate:5000` when configuring the integration. There is no need to map the port for the Frigate container.
In this example, it is recommended to connect to the authenticated port, for example, `http://frigate:8971` when configuring the integration. There is no need to map the port for the Frigate container.
```yaml
services:
@ -97,20 +97,21 @@ services:
If you are using HassOS with the addon, the URL should be one of the following depending on which addon version you are using. Note that if you are using the Proxy Addon, you do NOT point the integration at the proxy URL. Just enter the URL used to access Frigate directly from your network.
| Addon Version | URL |
| ------------------------------ | -------------------------------------- |
| Frigate NVR | `http://ccab4aaf-frigate:5000` |
| Frigate NVR (Full Access) | `http://ccab4aaf-frigate-fa:5000` |
| Frigate NVR Beta | `http://ccab4aaf-frigate-beta:5000` |
| Frigate NVR Beta (Full Access) | `http://ccab4aaf-frigate-fa-beta:5000` |
| Addon Version | URL |
| ------------------------------ | ----------------------------------------- |
| Frigate NVR | `http://ccab4aaf-frigate:5000` |
| Frigate NVR (Full Access) | `http://ccab4aaf-frigate-fa:5000` |
| Frigate NVR Beta | `http://ccab4aaf-frigate-beta:5000` |
| Frigate NVR Beta (Full Access) | `http://ccab4aaf-frigate-fa-beta:5000` |
| Frigate NVR HailoRT Beta | `http://ccab4aaf-frigate-hailo-beta:5000` |
### Frigate running on a separate machine
If you run Frigate on a separate device within your local network, Home Assistant will need access to port 5000.
If you run Frigate on a separate device within your local network, Home Assistant will need access to port 8971.
#### Local network
Use `http://<frigate_device_ip>:5000` as the URL for the integration. If you want to protect access to port 5000, you can use firewall rules to limit access to the device running Home Assistant.
Use `http://<frigate_device_ip>:8971` as the URL for the integration so that authentication is required.
```yaml
services:
@ -118,7 +119,7 @@ services:
image: ghcr.io/blakeblackshear/frigate:stable
...
ports:
- "5000:5000"
- "8971:8971"
...
```
@ -195,12 +196,30 @@ To load a snapshot for a tracked object:
https://HA_URL/api/frigate/notifications/<event-id>/snapshot.jpg
```
To load a video clip of a tracked object:
To load a video clip of a tracked object using an Android device:
```
https://HA_URL/api/frigate/notifications/<event-id>/clip.mp4
```
To load a video clip of a tracked object using an iOS device:
```
https://HA_URL/api/frigate/notifications/<event-id>/master.m3u8
```
To load a preview gif of a tracked object:
```
https://HA_URL/api/frigate/notifications/<event-id>/event_preview.gif
```
To load a preview gif of a review item:
```
https://HA_URL/api/frigate/notifications/<review-id>/review_preview.gif
```
<a name="streams"></a>
## RTSP stream
@ -282,3 +301,7 @@ which server they are referring to.
#### If I am detecting multiple objects, how do I assign the correct `binary_sensor` to the camera in HomeKit?
The [HomeKit integration](https://www.home-assistant.io/integrations/homekit/) randomly links one of the binary sensors (motion sensor entities) grouped with the camera device in Home Assistant. You can specify a `linked_motion_sensor` in the Home Assistant [HomeKit configuration](https://www.home-assistant.io/integrations/homekit/#linked_motion_sensor) for each camera.
#### I have set up automations based on the occupancy sensors. Sometimes the automation runs because the sensors are turned on, but when I look at Frigate I can't find the object that triggered the sensor. Is this a bug?
No. The occupancy sensors have fewer checks in place because they are often used for things like turning the lights on where latency needs to be as low as possible. So false positives can sometimes trigger these sensors. If you want false positive filtering, you should use an mqtt sensor on the `frigate/events` or `frigate/reviews` topic.
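As a sketch, a Home Assistant MQTT sensor keyed off that topic might look like this; the payload fields are assumptions based on Frigate's event JSON, which nests the current object state under `after`:
```yaml
mqtt:
  sensor:
    - name: "Frigate last event label"
      state_topic: "frigate/events"
      value_template: "{{ value_json['after']['label'] }}"
```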

View File

@ -29,7 +29,9 @@ You cannot use the `environment_vars` section of your Frigate configuration file
## Submit examples
Once your API key is configured, you can submit examples directly from the Explore page in Frigate using the `Frigate+` button.
Once your API key is configured, you can submit examples directly from the Explore page in Frigate. From the More Filters menu, select "Has a Snapshot - Yes" and "Submitted to Frigate+ - No", and press Apply at the bottom of the pane. Then, click on a thumbnail and select the Snapshot tab.
You can use your keyboard's left and right arrow keys to quickly navigate between the tracked object snapshots.
:::note
@ -37,8 +39,6 @@ Snapshots must be enabled to be able to submit examples to Frigate+
:::
![Send To Plus](/img/plus/send-to-plus.jpg)
![Submit To Plus](/img/plus/submit-to-plus.jpg)
### Annotate and verify

View File

@ -19,6 +19,10 @@ Please use your own knowledge to assess and vet them before you install anything
It supports automatically setting the sub labels in Frigate for person objects that are detected and recognized.
This is a fork (with fixed errors and new features) of the [original Double Take](https://github.com/jakowenko/double-take) project, which, unfortunately, is no longer maintained by its author.
## [Frigate Notify](https://github.com/0x2142/frigate-notify)
[Frigate Notify](https://github.com/0x2142/frigate-notify) is a simple app designed to send notifications from Frigate NVR to your favorite platforms. It is intended to be used with standalone Frigate installations; Home Assistant is not required, and MQTT is optional but recommended.
## [Frigate telegram](https://github.com/OldTyT/frigate-telegram)
[Frigate telegram](https://github.com/OldTyT/frigate-telegram) makes it possible to send events from Frigate to Telegram. Events are sent as a message with a text description, video, and thumbnail.

View File

@ -5,7 +5,7 @@ title: Requesting your first model
## Step 1: Upload and annotate your images
Before requesting your first model, you will need to upload and verify at least 1 image to Frigate+. The more images you upload, annotate, and verify the better your results will be. Most users start to see very good results once they have at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night. Refer to the [integration docs](../integrations/plus.md#generate-an-api-key) for instructions on how to easily submit images to Frigate+ directly from Frigate.
Before requesting your first model, you will need to upload and verify at least 10 images to Frigate+. The more images you upload, annotate, and verify the better your results will be. Most users start to see very good results once they have at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night. Refer to the [integration docs](../integrations/plus.md#generate-an-api-key) for instructions on how to easily submit images to Frigate+ directly from Frigate.
It is recommended to submit **both** true positives and false positives. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present.

View File

@ -13,7 +13,7 @@ You may find that Frigate+ models result in more false positives initially, but
For the best results, follow the following guidelines.
**Label every object in the image**: It is important that you label all objects in each image before verifying. If you don't label a car for example, the model will be taught that part of the image is _not_ a car and it will start to get confused.
**Label every object in the image**: It is important that you label all objects in each image before verifying. If you don't label a car for example, the model will be taught that part of the image is _not_ a car and it will start to get confused. You can exclude labels that you don't want detected on any of your cameras.
**Make tight bounding boxes**: Tighter bounding boxes improve the recognition and ensure that accurate bounding boxes are predicted at runtime.
@ -21,7 +21,7 @@ For the best results, follow the following guidelines.
**Label objects hard to identify as difficult**: When objects are truly difficult to make out, such as a car barely visible through a bush, or a dog that is hard to distinguish from the background at night, flag it as 'difficult'. This is not used in the model training as of now, but will in the future.
**`amazon`, `ups`, and `fedex` should label the logo**: For a Fedex truck, label the truck as a `car` and make a different bounding box just for the Fedex logo. If there are multiple logos, label each of them.
**Delivery logos such as `amazon`, `ups`, and `fedex` should label the logo**: For a Fedex truck, label the truck as a `car` and make a different bounding box just for the Fedex logo. If there are multiple logos, label each of them.
![Fedex Logo](/img/plus/fedex-logo.jpg)

View File

@ -17,7 +17,7 @@ Information on how to integrate Frigate+ with Frigate can be found in the [integ
## Available model types
There are two model types offered in Frigate+: `mobiledet` and `yolonas`. Both of these models are object detection models and are trained to detect the same set of labels [listed below](#available-label-types).
There are two model types offered in Frigate+, `mobiledet` and `yolonas`. Both of these models are object detection models and are trained to detect the same set of labels [listed below](#available-label-types).
Not all model types are supported by all detectors, so it's important to choose a model type to match your detector as shown in the table under [supported detector types](#supported-detector-types).
@ -32,7 +32,7 @@ Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVi
:::warning
Using Frigate+ models with `onnx` and `rocm` is only available with Frigate 0.15, which is still under development.
Using Frigate+ models with `onnx` and `rocm` is only available with Frigate 0.15 and later.
:::
@ -48,11 +48,19 @@ _\* Requires Frigate 0.15_
## Available label types
Frigate+ models support a more relevant set of objects for security cameras. Currently, only the following objects are supported: `person`, `face`, `car`, `license_plate`, `amazon`, `ups`, `fedex`, `package`, `dog`, `cat`, `deer`. Other object types available in the default Frigate model are not available. Additional object types will be added in future releases.
Frigate+ models support a more relevant set of objects for security cameras. Currently, the following objects are supported:
- **People**: `person`, `face`
- **Vehicles**: `car`, `motorcycle`, `bicycle`, `boat`, `license_plate`
- **Delivery Logos**: `amazon`, `usps`, `ups`, `fedex`, `dhl`, `an_post`, `purolator`, `postnl`, `nzpost`, `postnord`, `gls`, `dpd`
- **Animals**: `dog`, `cat`, `deer`, `horse`, `bird`, `raccoon`, `fox`, `bear`, `cow`, `squirrel`, `goat`, `rabbit`
- **Other**: `package`, `waste_bin`, `bbq_grill`, `robot_lawnmower`, `umbrella`
Other object types available in the default Frigate model are not available. Additional object types will be added in future releases.
### Label attributes
Frigate has special handling for some labels when using Frigate+ models. `face`, `license_plate`, `amazon`, `ups`, and `fedex` are considered attribute labels which are not tracked like regular objects and do not generate review items directly. In addition, the `threshold` filter will have no effect on these labels. You should adjust the `min_score` and other filter values as needed.
Frigate has special handling for some labels when using Frigate+ models. `face`, `license_plate`, and delivery logos such as `amazon`, `ups`, and `fedex` are considered attribute labels which are not tracked like regular objects and do not generate review items directly. In addition, the `threshold` filter will have no effect on these labels. You should adjust the `min_score` and other filter values as needed.
In order to have Frigate start using these attribute labels, you will need to add them to the list of objects to track:
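A minimal sketch of such a list; the label choices are illustrative:
```yaml
objects:
  track:
    - person
    - face
    - car
    - license_plate
```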
@ -75,6 +83,6 @@ When using Frigate+ models, Frigate will choose the snapshot of a person object
![Face Attribute](/img/plus/attribute-example-face.jpg)
`amazon`, `ups`, and `fedex` labels are used to automatically assign a sub label to car objects.
Delivery logos such as `amazon`, `ups`, and `fedex` labels are used to automatically assign a sub label to car objects.
![Fedex Attribute](/img/plus/attribute-example-fedex.jpg)

View File

@ -54,6 +54,17 @@ The most common reason for the PCIe Coral not being detected is that the driver
- In most cases [the Coral docs](https://coral.ai/docs/m2/get-started/#2-install-the-pcie-driver-and-edge-tpu-runtime) show how to install the driver for the PCIe based Coral.
- For Ubuntu 22.04+ https://github.com/jnicolson/gasket-builder can be used to build and install the latest version of the driver.
### Not detected on Raspberry Pi5
A kernel update to the RPi5 means an update to config.txt is required; see [the raspberry pi forum for more info](https://forums.raspberrypi.com/viewtopic.php?t=363682&sid=cb59b026a412f0dc041595951273a9ca&start=25).
Specifically, add the following to config.txt:
```
dtoverlay=pciex1-compat-pi5,no-mip
dtoverlay=pcie-32bit-dma-pi5
```
## Only One PCIe Coral Is Detected With Coral Dual EdgeTPU
Coral Dual EdgeTPU is one card with two identical TPU cores. Each core has its own PCIe interface, and the motherboard needs to have two PCIe buses on the m.2 slot to make them both work.

View File

@ -17,6 +17,10 @@ ffmpeg:
record: preset-record-generic-audio-aac
```
### How can I get sound in live view?
Audio is only supported for live view when go2rtc is configured, see [the live docs](../configuration/live.md) for more information.
### I can't view recordings in the Web UI.
Ensure your cameras send h264 encoded video, or [transcode them](/configuration/restream.md).

View File

@ -3,7 +3,15 @@ id: recordings
title: Troubleshooting Recordings
---
### WARNING : Unable to keep up with recording segments in cache for camera. Keeping the 5 most recent segments out of 6 and discarding the rest...
## I have Frigate configured for motion recording only, but it still seems to be recording even with no motion. Why?
You'll want to:
- Make sure your camera's timestamp is masked out with a motion mask (see the sketch after this list). Even if there is no motion occurring in your scene, your motion settings may be sensitive enough to count your timestamp as motion.
- If you have audio detection enabled, keep in mind that audio that is heard above `min_volume` is considered motion.
- [Tune your motion detection settings](/configuration/motion_detection) either by editing your config file or by using the UI's Motion Tuner.
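A hypothetical timestamp mask might look like the following; the coordinates are illustrative and depend on where your camera draws its overlay:
```yaml
cameras:
  front_door:
    motion:
      mask:
        - 0,0,400,0,400,40,0,40 # rectangle covering a top-left timestamp overlay
```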
## I see the message: WARNING : Unable to keep up with recording segments in cache for camera. Keeping the 5 most recent segments out of 6 and discarding the rest...
This error can be caused by a number of different issues. The first step in troubleshooting is to enable debug logging for recording. This will enable logging showing how long it takes for recordings to be moved from RAM cache to the disk.
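A sketch of such a logger override; the `frigate.record.maintainer` logger name is an assumption, so fall back to `frigate.record` if it produces no output:
```yaml
logger:
  logs:
    frigate.record.maintainer: debug
```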
@ -40,6 +48,7 @@ On linux, some helpful tools/commands in diagnosing would be:
On modern linux kernels, the system will utilize some swap if enabled. Setting vm.swappiness=1 no longer means that the kernel will only swap in order to avoid OOM. To prevent any swapping inside a container, set the memory and memory+swap allocations to the same value and disable swapping by setting the following docker/podman run parameters:
**Compose example**
```yaml
version: "3.9"
services:
@ -54,6 +63,7 @@ services:
```
**Run command example**
```
--memory=<MAXRAM> --memory-swap=<MAXSWAP> --memory-swappiness=0
```

View File

@ -33,9 +33,11 @@ const sidebars: SidebarsConfig = {
'configuration/object_detectors',
'configuration/audio_detectors',
],
'Semantic Search': [
Classifiers: [
'configuration/semantic_search',
'configuration/genai',
'configuration/face_recognition',
'configuration/license_plate_recognition',
],
Cameras: [
'configuration/cameras',
@ -82,6 +84,7 @@ const sidebars: SidebarsConfig = {
items: frigateHttpApiSidebar,
},
'integrations/mqtt',
'configuration/metrics',
'integrations/third_party_extensions',
],
'Frigate+': [

View File

@ -3,12 +3,15 @@ import faulthandler
import signal
import sys
import threading
from typing import Union
import ruamel.yaml
from pydantic import ValidationError
from frigate.app import FrigateApp
from frigate.config import FrigateConfig
from frigate.log import setup_logging
from frigate.util.config import find_config_file
def main() -> None:
@ -42,10 +45,51 @@ def main() -> None:
print("*************************************************************")
print("*************************************************************")
print("*** Config Validation Errors ***")
print("*************************************************************")
print("*************************************************************\n")
# Attempt to get the original config file for line number tracking
config_path = find_config_file()
with open(config_path, "r") as f:
yaml_config = ruamel.yaml.YAML()
yaml_config.preserve_quotes = True
full_config = yaml_config.load(f)
for error in e.errors():
location = ".".join(str(item) for item in error["loc"])
print(f"{location}: {error['msg']}")
error_path = error["loc"]
current = full_config
line_number = "Unknown"
last_line_number = "Unknown"
try:
for i, part in enumerate(error_path):
key: Union[int, str] = (
int(part) if isinstance(part, str) and part.isdigit() else part
)
if isinstance(current, ruamel.yaml.comments.CommentedMap):
current = current[key]
elif isinstance(current, list):
if isinstance(key, int):
current = current[key]
if hasattr(current, "lc"):
last_line_number = current.lc.line
if i == len(error_path) - 1:
if hasattr(current, "lc"):
line_number = current.lc.line
else:
line_number = last_line_number
except Exception as traverse_error:
print(f"Could not determine exact line number: {traverse_error}")
if current != full_config:
print(f"Line # : {line_number}")
print(f"Key : {' -> '.join(map(str, error_path))}")
print(f"Value : {error.get('input', '-')}")
print(f"Message : {error.get('msg', error.get('type', 'Unknown'))}\n")
print("*************************************************************")
print("*** End Config Validation Errors ***")
print("*************************************************************")

View File

@ -7,15 +7,19 @@ import os
import traceback
from datetime import datetime, timedelta
from functools import reduce
from io import StringIO
from typing import Any, Optional
import requests
import ruamel.yaml
from fastapi import APIRouter, Body, Path, Request, Response
from fastapi.encoders import jsonable_encoder
from fastapi.params import Depends
from fastapi.responses import JSONResponse, PlainTextResponse
from markupsafe import escape
from peewee import operator
from prometheus_client import CONTENT_TYPE_LATEST, generate_latest
from pydantic import ValidationError
from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters
from frigate.api.defs.request.app_body import AppConfigSetBody
@ -105,6 +109,12 @@ def stats_history(request: Request, keys: str = None):
return JSONResponse(content=request.app.stats_emitter.get_stats_history(keys))
@router.get("/metrics")
def metrics():
"""Expose Prometheus metrics endpoint"""
return Response(content=generate_latest(), media_type=CONTENT_TYPE_LATEST)
@router.get("/config")
def config(request: Request):
config_obj: FrigateConfig = request.app.frigate_config
@ -153,6 +163,8 @@ def config(request: Request):
config["plus"] = {"enabled": request.app.frigate_config.plus_api.is_active()}
config["model"]["colormap"] = config_obj.model.colormap
config["model"]["all_attributes"] = config_obj.model.all_attributes
config["model"]["non_logo_attributes"] = config_obj.model.non_logo_attributes
# use merged labelmap
for detector_config in config["detectors"].values():
@ -185,7 +197,6 @@ def config_raw():
@router.post("/config/save")
def config_save(save_option: str, body: Any = Body(media_type="text/plain")):
new_config = body.decode()
if not new_config:
return JSONResponse(
content=(
@ -196,13 +207,64 @@ def config_save(save_option: str, body: Any = Body(media_type="text/plain")):
# Validate the config schema
try:
# Use ruamel to parse and preserve line numbers
yaml_config = ruamel.yaml.YAML()
yaml_config.preserve_quotes = True
full_config = yaml_config.load(StringIO(new_config))
FrigateConfig.parse_yaml(new_config)
except ValidationError as e:
error_message = []
for error in e.errors():
error_path = error["loc"]
current = full_config
line_number = "Unknown"
last_line_number = "Unknown"
try:
for i, part in enumerate(error_path):
key = int(part) if isinstance(part, str) and part.isdigit() else part
if isinstance(current, ruamel.yaml.comments.CommentedMap):
current = current[key]
elif isinstance(current, list):
current = current[key]
if hasattr(current, "lc"):
last_line_number = current.lc.line
if i == len(error_path) - 1:
if hasattr(current, "lc"):
line_number = current.lc.line
else:
line_number = last_line_number
except Exception:
line_number = "Unable to determine"
error_message.append(
f"Line {line_number}: {' -> '.join(map(str, error_path))} - {error.get('msg', error.get('type', 'Unknown'))}"
)
return JSONResponse(
content=(
{
"success": False,
"message": "Your configuration is invalid.\nSee the official documentation at docs.frigate.video.\n\n"
+ "\n".join(error_message),
}
),
status_code=400,
)
except Exception:
return JSONResponse(
content=(
{
"success": False,
"message": f"\nConfig Error:\n\n{escape(str(traceback.format_exc()))}",
"message": f"\nYour configuration is invalid.\nSee the official documentation at docs.frigate.video.\n\n{escape(str(traceback.format_exc()))}",
}
),
status_code=400,

View File

@ -0,0 +1,178 @@
"""Object classification APIs."""
import logging
import os
import random
import shutil
import string
from typing import Any
from fastapi import APIRouter, Request, UploadFile
from fastapi.responses import JSONResponse
from pathvalidate import sanitize_filename
from frigate.api.defs.tags import Tags
from frigate.const import FACE_DIR
from frigate.embeddings import EmbeddingsContext
logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.events])
@router.get("/faces")
def get_faces():
face_dict: dict[str, list[str]] = {}
for name in os.listdir(FACE_DIR):
face_dir = os.path.join(FACE_DIR, name)
if not os.path.isdir(face_dir):
continue
face_dict[name] = []
for file in sorted(
os.listdir(face_dir),
key=lambda f: os.path.getctime(os.path.join(face_dir, f)),
reverse=True,
):
face_dict[name].append(file)
return JSONResponse(status_code=200, content=face_dict)
@router.post("/faces/reprocess")
def reclassify_face(request: Request, body: dict = None):
if not request.app.frigate_config.face_recognition.enabled:
return JSONResponse(
status_code=400,
content={"message": "Face recognition is not enabled.", "success": False},
)
json: dict[str, Any] = body or {}
training_file = os.path.join(
FACE_DIR, f"train/{sanitize_filename(json.get('training_file', ''))}"
)
if not training_file or not os.path.isfile(training_file):
return JSONResponse(
content=(
{
"success": False,
"message": f"Invalid filename or no file exists: {training_file}",
}
),
status_code=404,
)
context: EmbeddingsContext = request.app.embeddings
response = context.reprocess_face(training_file)
return JSONResponse(
content=response,
status_code=200,
)
@router.post("/faces/train/{name}/classify")
def train_face(request: Request, name: str, body: dict = None):
if not request.app.frigate_config.face_recognition.enabled:
return JSONResponse(
status_code=400,
content={"message": "Face recognition is not enabled.", "success": False},
)
json: dict[str, Any] = body or {}
training_file = os.path.join(
FACE_DIR, f"train/{sanitize_filename(json.get('training_file', ''))}"
)
if not training_file or not os.path.isfile(training_file):
return JSONResponse(
content=(
{
"success": False,
"message": f"Invalid filename or no file exists: {training_file}",
}
),
status_code=404,
)
sanitized_name = sanitize_filename(name)
rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
new_name = f"{sanitized_name}-{rand_id}.webp"
new_file = os.path.join(FACE_DIR, f"{sanitized_name}/{new_name}")
shutil.move(training_file, new_file)
context: EmbeddingsContext = request.app.embeddings
context.clear_face_classifier()
return JSONResponse(
content=(
{
"success": True,
"message": f"Successfully saved {training_file} as {new_name}.",
}
),
status_code=200,
)
@router.post("/faces/{name}/create")
async def create_face(request: Request, name: str):
if not request.app.frigate_config.face_recognition.enabled:
return JSONResponse(
status_code=400,
content={"message": "Face recognition is not enabled.", "success": False},
)
os.makedirs(
os.path.join(FACE_DIR, sanitize_filename(name.replace(" ", "_"))), exist_ok=True
)
return JSONResponse(
status_code=200,
content={"success": False, "message": "Successfully created face folder."},
)
@router.post("/faces/{name}/register")
async def register_face(request: Request, name: str, file: UploadFile):
if not request.app.frigate_config.face_recognition.enabled:
return JSONResponse(
status_code=400,
content={"message": "Face recognition is not enabled.", "success": False},
)
context: EmbeddingsContext = request.app.embeddings
result = context.register_face(name, await file.read())
return JSONResponse(
status_code=200 if result.get("success", True) else 400,
content=result,
)
@router.post("/faces/{name}/delete")
def deregister_faces(request: Request, name: str, body: dict = None):
if not request.app.frigate_config.face_recognition.enabled:
return JSONResponse(
status_code=400,
content={"message": "Face recognition is not enabled.", "success": False},
)
json: dict[str, Any] = body or {}
list_of_ids = json.get("ids", "")
if not list_of_ids or len(list_of_ids) == 0:
return JSONResponse(
content=({"success": False, "message": "Not a valid list of ids"}),
status_code=404,
)
context: EmbeddingsContext = request.app.embeddings
context.delete_face_ids(
name, map(lambda file: sanitize_filename(file), list_of_ids)
)
return JSONResponse(
content=({"success": True, "message": "Successfully deleted faces."}),
status_code=200,
)

View File

@ -20,6 +20,7 @@ class MediaLatestFrameQueryParams(BaseModel):
regions: Optional[int] = None
quality: Optional[int] = 70
height: Optional[int] = None
store: Optional[int] = None
class MediaEventsSnapshotQueryParams(BaseModel):

View File

@ -8,6 +8,9 @@ class EventsSubLabelBody(BaseModel):
subLabelScore: Optional[float] = Field(
title="Score for sub label", default=None, gt=0.0, le=1.0
)
camera: Optional[str] = Field(
title="Camera this object is detected on.", default=None
)
class EventsDescriptionBody(BaseModel):

View File

@ -10,4 +10,5 @@ class Tags(Enum):
review = "Review"
export = "Export"
events = "Events"
classification = "classification"
auth = "Auth"

View File

@ -909,38 +909,59 @@ def set_sub_label(
try:
event: Event = Event.get(Event.id == event_id)
except DoesNotExist:
if not body.camera:
return JSONResponse(
content=(
{
"success": False,
"message": "Event "
+ event_id
+ " not found and camera is not provided.",
}
),
status_code=404,
)
event = None
if request.app.detected_frames_processor:
tracked_obj: TrackedObject = (
request.app.detected_frames_processor.camera_states[
event.camera if event else body.camera
].tracked_objects.get(event_id)
)
else:
tracked_obj = None
if not event and not tracked_obj:
return JSONResponse(
content=({"success": False, "message": "Event " + event_id + " not found"}),
content=(
{"success": False, "message": "Event " + event_id + " not found."}
),
status_code=404,
)
new_sub_label = body.subLabel
new_score = body.subLabelScore
if not event.end_time:
# update tracked object
tracked_obj: TrackedObject = (
request.app.detected_frames_processor.camera_states[
event.camera
].tracked_objects.get(event.id)
)
if tracked_obj:
tracked_obj.obj_data["sub_label"] = (new_sub_label, new_score)
if tracked_obj:
tracked_obj.obj_data["sub_label"] = (new_sub_label, new_score)
# update timeline items
Timeline.update(
data=Timeline.data.update({"sub_label": (new_sub_label, new_score)})
).where(Timeline.source_id == event_id).execute()
event.sub_label = new_sub_label
if event:
event.sub_label = new_sub_label
if new_score:
data = event.data
data["sub_label_score"] = new_score
event.data = data
if new_score:
data = event.data
data["sub_label_score"] = new_score
event.data = data
event.save()
event.save()
return JSONResponse(
content=(
{

View File

@ -11,7 +11,16 @@ from starlette_context import middleware, plugins
from starlette_context.plugins import Plugin
from frigate.api import app as main_app
from frigate.api import auth, event, export, media, notification, preview, review
from frigate.api import (
auth,
classification,
event,
export,
media,
notification,
preview,
review,
)
from frigate.api.auth import get_jwt_secret, limiter
from frigate.comms.event_metadata_updater import (
EventMetadataPublisher,
@ -26,14 +35,13 @@ from frigate.storage import StorageMaintainer
logger = logging.getLogger(__name__)
def check_csrf(request: Request):
def check_csrf(request: Request) -> bool:
if request.method in ["GET", "HEAD", "OPTIONS", "TRACE"]:
pass
return True
if "origin" in request.headers and "x-csrf-token" not in request.headers:
return JSONResponse(
content={"success": False, "message": "Missing CSRF header"},
status_code=401,
)
return False
return True
# Used to retrieve the remote-user header: https://starlette-context.readthedocs.io/en/latest/plugins.html#easy-mode
@ -71,7 +79,12 @@ def create_fastapi_app(
@app.middleware("http")
async def frigate_middleware(request: Request, call_next):
# Before request
check_csrf(request)
if not check_csrf(request):
return JSONResponse(
content={"success": False, "message": "Missing CSRF header"},
status_code=401,
)
if database.is_closed():
database.connect()
@ -99,6 +112,7 @@ def create_fastapi_app(
# Routes
# Order of include_router matters: https://fastapi.tiangolo.com/tutorial/path-params/#order-matters
app.include_router(auth.router)
app.include_router(classification.router)
app.include_router(review.router)
app.include_router(main_app.router)
app.include_router(preview.router)

View File

@ -134,6 +134,15 @@ def latest_frame(
"regions": params.regions,
}
quality = params.quality
mime_type = extension
if extension == "png":
quality_params = None
elif extension == "webp":
quality_params = [int(cv2.IMWRITE_WEBP_QUALITY), quality]
else:
quality_params = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
mime_type = "jpeg"
if camera_name in request.app.frigate_config.cameras:
frame = frame_processor.get_current_frame(camera_name, draw_options)
@ -174,13 +183,16 @@ def latest_frame(
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
ret, img = cv2.imencode(
f".{extension}", frame, [int(cv2.IMWRITE_WEBP_QUALITY), quality]
)
_, img = cv2.imencode(f".{extension}", frame, quality_params)
return Response(
content=img.tobytes(),
media_type=f"image/{extension}",
headers={"Content-Type": f"image/{extension}", "Cache-Control": "no-store"},
media_type=f"image/{mime_type}",
headers={
"Content-Type": f"image/{mime_type}",
"Cache-Control": "no-store"
if not params.store
else "private, max-age=60",
},
)
    elif camera_name == "birdseye" and request.app.frigate_config.birdseye.restream:
        frame = cv2.cvtColor(
@@ -193,13 +205,16 @@ def latest_frame(
            frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)

        ret, img = cv2.imencode(
            f".{extension}", frame, [int(cv2.IMWRITE_WEBP_QUALITY), quality]
        )
        _, img = cv2.imencode(f".{extension}", frame, quality_params)
        return Response(
            content=img.tobytes(),
            media_type=f"image/{extension}",
            headers={"Content-Type": f"image/{extension}", "Cache-Control": "no-store"},
            media_type=f"image/{mime_type}",
            headers={
                "Content-Type": f"image/{mime_type}",
                "Cache-Control": "no-store"
                if not params.store
                else "private, max-age=60",
            },
        )
    else:
        return JSONResponse(
@@ -242,6 +257,7 @@ def get_snapshot_from_recording(
        recording: Recordings = recording_query.get()
        time_in_segment = frame_time - recording.start_time
        codec = "png" if format == "png" else "mjpeg"
        mime_type = "png" if format == "png" else "jpeg"

        config: FrigateConfig = request.app.frigate_config
        image_data = get_image_from_recording(
@@ -258,7 +274,7 @@ def get_snapshot_from_recording(
            ),
            status_code=404,
        )
        return Response(image_data, headers={"Content-Type": f"image/{format}"})
        return Response(image_data, headers={"Content-Type": f"image/{mime_type}"})
    except DoesNotExist:
        return JSONResponse(
            content={
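
The new quality_params/mime_type handling boils down to a small mapping from extension to encoder settings. Roughly, as a self-contained helper (a sketch: the OpenCV flags are real, the helper itself is illustrative):

# Sketch of the extension -> (encoder params, mime type) selection above.
import cv2
import numpy as np

def encode_frame(frame: np.ndarray, extension: str, quality: int) -> tuple[bytes, str]:
    """Encode a BGR frame, mirroring the extension handling in latest_frame."""
    mime_type = extension
    if extension == "png":
        # PNG ignores the JPEG/WEBP quality scale, so no params are passed
        ok, img = cv2.imencode(".png", frame)
    elif extension == "webp":
        ok, img = cv2.imencode(".webp", frame, [int(cv2.IMWRITE_WEBP_QUALITY), quality])
    else:
        # any other extension falls back to JPEG encoding
        ok, img = cv2.imencode(f".{extension}", frame, [int(cv2.IMWRITE_JPEG_QUALITY), quality])
        mime_type = "jpeg"
    return img.tobytes(), f"image/{mime_type}"

data, mime = encode_frame(np.zeros((480, 640, 3), dtype=np.uint8), "webp", 70)
print(mime, len(data))  # image/webp <encoded size>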

@@ -110,6 +110,28 @@ def review(params: ReviewQueryParams = Depends()):
    return JSONResponse(content=[r for r in review])


@router.get("/review_ids", response_model=list[ReviewSegmentResponse])
def review_ids(ids: str):
    ids = ids.split(",")

    if not ids:
        return JSONResponse(
            content=({"success": False, "message": "Valid list of ids must be sent"}),
            status_code=400,
        )

    try:
        reviews = (
            ReviewSegment.select().where(ReviewSegment.id << ids).dicts().iterator()
        )
        return JSONResponse(list(reviews))
    except Exception:
        return JSONResponse(
            content=({"success": False, "message": "Review segments not found"}),
            status_code=400,
        )
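
The << operator in the new endpoint is Peewee's IN expression. A self-contained sketch of the same query shape (model and data are illustrative, not Frigate's schema):

# Sketch of a Peewee "IN" query like the one in review_ids.
from peewee import CharField, Model, SqliteDatabase

db = SqliteDatabase(":memory:")

class ReviewSegment(Model):
    id = CharField(primary_key=True)
    camera = CharField()

    class Meta:
        database = db

db.create_tables([ReviewSegment])
ReviewSegment.create(id="abc", camera="front")
ReviewSegment.create(id="def", camera="back")

ids = "abc,def".split(",")
# `<<` compiles to SQL: WHERE id IN ('abc', 'def')
rows = list(ReviewSegment.select().where(ReviewSegment.id << ids).dicts())
print(rows)  # e.g. [{'id': 'abc', 'camera': 'front'}, {'id': 'def', 'camera': 'back'}]
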
@router.get("/review/summary", response_model=ReviewSummaryResponse)
def review_summary(params: ReviewSummaryQueryParams = Depends()):
    hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)
@@ -490,8 +512,6 @@ def set_not_reviewed(review_id: str):
    review.save()

    return JSONResponse(
        content=(
            {"success": True, "message": "Set Review " + review_id + " as not viewed"}
        ),
        content=({"success": True, "message": f"Set Review {review_id} as not viewed"}),
        status_code=200,
    )

@@ -34,10 +34,12 @@ from frigate.const import (
    CLIPS_DIR,
    CONFIG_DIR,
    EXPORT_DIR,
    FACE_DIR,
    MODEL_CACHE_DIR,
    RECORD_DIR,
    SHM_FRAMES_VAR,
)
from frigate.data_processing.types import DataProcessorMetrics
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.embeddings import EmbeddingsContext, manage_embeddings
from frigate.events.audio import AudioProcessor
@@ -88,6 +90,9 @@ class FrigateApp:
        self.detection_shms: list[mp.shared_memory.SharedMemory] = []
        self.log_queue: Queue = mp.Queue()
        self.camera_metrics: dict[str, CameraMetrics] = {}
        self.embeddings_metrics: DataProcessorMetrics | None = (
            DataProcessorMetrics() if config.semantic_search.enabled else None
        )
        self.ptz_metrics: dict[str, PTZMetrics] = {}
        self.processes: dict[str, int] = {}
        self.embeddings: Optional[EmbeddingsContext] = None
@@ -96,14 +101,19 @@ class FrigateApp:
        self.config = config

    def ensure_dirs(self) -> None:
        for d in [
        dirs = [
            CONFIG_DIR,
            RECORD_DIR,
            f"{CLIPS_DIR}/cache",
            CACHE_DIR,
            MODEL_CACHE_DIR,
            EXPORT_DIR,
        ]:
        ]

        if self.config.face_recognition.enabled:
            dirs.append(FACE_DIR)

        for d in dirs:
            if not os.path.exists(d) and not os.path.islink(d):
                logger.info(f"Creating directory: {d}")
                os.makedirs(d)
@@ -229,7 +239,10 @@ class FrigateApp:
        embedding_process = util.Process(
            target=manage_embeddings,
            name="embeddings_manager",
            args=(self.config,),
            args=(
                self.config,
                self.embeddings_metrics,
            ),
        )
        embedding_process.daemon = True
        self.embedding_process = embedding_process
@@ -491,7 +504,11 @@ class FrigateApp:
        self.stats_emitter = StatsEmitter(
            self.config,
            stats_init(
                self.config, self.camera_metrics, self.detectors, self.processes
                self.config,
                self.camera_metrics,
                self.embeddings_metrics,
                self.detectors,
                self.processes,
            ),
            self.stop_event,
        )
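
The same embeddings_metrics instance is handed both to the embeddings process and to stats_init, which only works if its counters live in shared memory. A rough stand-in showing why that wiring holds up across multiprocessing (the Metrics class here is illustrative, not Frigate's DataProcessorMetrics):

# Sketch: share a metrics object between the parent and a worker process.
import multiprocessing as mp

class Metrics:
    def __init__(self) -> None:
        # mp.Value gives both processes a view of the same shared memory
        self.processed = mp.Value("i", 0)

def worker(metrics: Metrics) -> None:
    with metrics.processed.get_lock():
        metrics.processed.value += 1

if __name__ == "__main__":
    metrics = Metrics()
    p = mp.Process(target=worker, args=(metrics,), name="embeddings_manager")
    p.start()
    p.join()
    # the parent (e.g. a stats emitter) sees the child's updates
    print(metrics.processed.value)  # 1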

@@ -0,0 +1,130 @@
"""Manage camera activity and updating listeners."""

from collections import Counter
from typing import Callable

from frigate.config.config import FrigateConfig


class CameraActivityManager:
    def __init__(
        self, config: FrigateConfig, publish: Callable[[str, any], None]
    ) -> None:
        self.config = config
        self.publish = publish
        self.last_camera_activity: dict[str, dict[str, any]] = {}
        self.camera_all_object_counts: dict[str, Counter] = {}
        self.camera_active_object_counts: dict[str, Counter] = {}
        self.zone_all_object_counts: dict[str, Counter] = {}
        self.zone_active_object_counts: dict[str, Counter] = {}
        self.all_zone_labels: dict[str, set[str]] = {}

        for camera_config in config.cameras.values():
            if not camera_config.enabled:
                continue

            self.last_camera_activity[camera_config.name] = {}
            self.camera_all_object_counts[camera_config.name] = Counter()
            self.camera_active_object_counts[camera_config.name] = Counter()

            for zone, zone_config in camera_config.zones.items():
                if zone not in self.all_zone_labels:
                    self.zone_all_object_counts[zone] = Counter()
                    self.zone_active_object_counts[zone] = Counter()
                    self.all_zone_labels[zone] = set()

                self.all_zone_labels[zone].update(zone_config.objects)

    def update_activity(self, new_activity: dict[str, dict[str, any]]) -> None:
        all_objects: list[dict[str, any]] = []

        for camera in new_activity.keys():
            new_objects = new_activity[camera].get("objects", [])
            all_objects.extend(new_objects)

            if self.last_camera_activity.get(camera, {}).get("objects") != new_objects:
                self.compare_camera_activity(camera, new_objects)

        # run through every zone, getting a count of objects in that zone right now
        for zone, labels in self.all_zone_labels.items():
            all_zone_objects = Counter(
                obj["label"].replace("-verified", "")
                for obj in all_objects
                if zone in obj["current_zones"]
            )
            active_zone_objects = Counter(
                obj["label"].replace("-verified", "")
                for obj in all_objects
                if zone in obj["current_zones"] and not obj["stationary"]
            )
            any_changed = False

            # run through each object and check what topics need to be updated for this zone
            for label in labels:
                new_count = all_zone_objects[label]
                new_active_count = active_zone_objects[label]

                if (
                    new_count != self.zone_all_object_counts[zone][label]
                    or label not in self.zone_all_object_counts[zone]
                ):
                    any_changed = True
                    self.publish(f"{zone}/{label}", new_count)
                    self.zone_all_object_counts[zone][label] = new_count

                if (
                    new_active_count != self.zone_active_object_counts[zone][label]
                    or label not in self.zone_active_object_counts[zone]
                ):
                    any_changed = True
                    self.publish(f"{zone}/{label}/active", new_active_count)
                    self.zone_active_object_counts[zone][label] = new_active_count

            if any_changed:
                self.publish(f"{zone}/all", sum(list(all_zone_objects.values())))
                self.publish(
                    f"{zone}/all/active", sum(list(active_zone_objects.values()))
                )

        self.last_camera_activity = new_activity

    def compare_camera_activity(
        self, camera: str, new_activity: dict[str, any]
    ) -> None:
        all_objects = Counter(
            obj["label"].replace("-verified", "") for obj in new_activity
        )
        active_objects = Counter(
            obj["label"].replace("-verified", "")
            for obj in new_activity
            if not obj["stationary"]
        )
        any_changed = False

        # run through each object and check what topics need to be updated
        for label in self.config.cameras[camera].objects.track:
            if label in self.config.model.non_logo_attributes:
                continue

            new_count = all_objects[label]
            new_active_count = active_objects[label]

            if (
                new_count != self.camera_all_object_counts[camera][label]
                or label not in self.camera_all_object_counts[camera]
            ):
                any_changed = True
                self.publish(f"{camera}/{label}", new_count)
                self.camera_all_object_counts[camera][label] = new_count

            if (
                new_active_count != self.camera_active_object_counts[camera][label]
                or label not in self.camera_active_object_counts[camera]
            ):
                any_changed = True
                self.publish(f"{camera}/{label}/active", new_active_count)
                self.camera_active_object_counts[camera][label] = new_active_count

        if any_changed:
            self.publish(f"{camera}/all", sum(list(all_objects.values())))
            self.publish(f"{camera}/all/active", sum(list(active_objects.values())))

@@ -7,6 +7,7 @@ from abc import ABC, abstractmethod
from typing import Any, Callable, Optional

from frigate.camera import PTZMetrics
from frigate.camera.activity_manager import CameraActivityManager
from frigate.comms.config_updater import ConfigPublisher
from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.const import (
@@ -64,7 +65,7 @@ class Dispatcher:
        self.onvif = onvif
        self.ptz_metrics = ptz_metrics
        self.comms = communicators
        self.camera_activity = {}
        self.camera_activity = CameraActivityManager(config, self.publish)
        self.model_state = {}
        self.embeddings_reindex = {}
@@ -130,7 +131,7 @@ class Dispatcher:
        ).execute()

        def handle_update_camera_activity():
            self.camera_activity = payload
            self.camera_activity.update_activity(payload)

        def handle_update_event_description():
            event: Event = Event.get(Event.id == payload["id"])
@@ -171,7 +172,7 @@ class Dispatcher:
            )

        def handle_on_connect():
            camera_status = self.camera_activity.copy()
            camera_status = self.camera_activity.last_camera_activity.copy()

            for camera in camera_status.keys():
                camera_status[camera]["config"] = {

@@ -9,9 +9,12 @@ SOCKET_REP_REQ = "ipc:///tmp/cache/embeddings"


class EmbeddingsRequestEnum(Enum):
    clear_face_classifier = "clear_face_classifier"
    embed_description = "embed_description"
    embed_thumbnail = "embed_thumbnail"
    generate_search = "generate_search"
    register_face = "register_face"
    reprocess_face = "reprocess_face"


class EmbeddingsResponder:
@@ -22,7 +25,7 @@ class EmbeddingsResponder:
    def check_for_request(self, process: Callable) -> None:
        while True:  # load all messages that are queued
            has_message, _, _ = zmq.select([self.socket], [], [], 0.1)
            has_message, _, _ = zmq.select([self.socket], [], [], 0.01)

            if not has_message:
                break
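
The shorter zmq.select timeout just makes this drain loop return to its caller sooner when the queue is empty. The loop's shape, standalone (a minimal REP socket; the endpoint and handler are illustrative):

# Sketch of the non-blocking drain loop used by EmbeddingsResponder.
import zmq

context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("ipc:///tmp/example-embeddings")  # illustrative endpoint

def check_for_request(process) -> None:
    while True:  # handle every message that is already queued
        # poll with a short timeout instead of blocking: an idle responder
        # now returns within ~10ms instead of ~100ms per pass
        has_message, _, _ = zmq.select([socket], [], [], 0.01)
        if not has_message:
            break
        socket.send_json(process(socket.recv_json()))

check_for_request(lambda req: {"echo": req})  # returns quickly when nothing is queued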

@@ -151,7 +151,7 @@ class WebPushClient(Communicator):  # type: ignore[misc]
            camera: str = payload["after"]["camera"]
            title = f"{', '.join(sorted_objects).replace('_', ' ').title()}{' was' if state == 'end' else ''} detected in {', '.join(payload['after']['data']['zones']).replace('_', ' ').title()}"
            message = f"Detected on {camera.replace('_', ' ').title()}"
            image = f'{payload["after"]["thumb_path"].replace("/media/frigate", "")}'
            image = f"{payload['after']['thumb_path'].replace('/media/frigate', '')}"

            # if event is ongoing open to live view otherwise open to recordings view
            direct_url = f"/review?id={reviewId}" if state == "end" else f"/#{camera}"

@@ -3,13 +3,13 @@ from frigate.detectors import DetectorConfig, ModelConfig  # noqa: F401
from .auth import * # noqa: F403
from .camera import * # noqa: F403
from .camera_group import * # noqa: F403
from .classification import * # noqa: F403
from .config import * # noqa: F403
from .database import * # noqa: F403
from .logger import * # noqa: F403
from .mqtt import * # noqa: F403
from .notification import * # noqa: F403
from .proxy import * # noqa: F403
from .semantic_search import * # noqa: F403
from .telemetry import * # noqa: F403
from .tls import * # noqa: F403
from .ui import * # noqa: F403

@@ -167,7 +167,7 @@ class CameraConfig(FrigateBaseModel):
        record_args = get_ffmpeg_arg_list(
            parse_preset_output_record(
                self.ffmpeg.output_args.record,
                self.ffmpeg.output_args._force_record_hvc1,
                self.ffmpeg.apple_compatibility,
            )
            or self.ffmpeg.output_args.record
        )

@@ -2,7 +2,7 @@ import shutil
from enum import Enum
from typing import Union

from pydantic import Field, PrivateAttr, field_validator
from pydantic import Field, field_validator

from frigate.const import DEFAULT_FFMPEG_VERSION, INCLUDED_FFMPEG_VERSIONS

@@ -42,7 +42,6 @@ class FfmpegOutputArgsConfig(FrigateBaseModel):
        default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT,
        title="Record role FFmpeg output arguments.",
    )
    _force_record_hvc1: bool = PrivateAttr(default=False)


class FfmpegConfig(FrigateBaseModel):
@@ -64,6 +63,10 @@ class FfmpegConfig(FrigateBaseModel):
        default=10.0,
        title="Time in seconds to wait before FFmpeg retries connecting to the camera.",
    )
    apple_compatibility: bool = Field(
        default=False,
        title="Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players.",
    )

    @property
    def ffmpeg_path(self) -> str:
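
The renamed option controls the HEVC sample-entry tag on recordings: ffmpeg writes hev1 by default when copying H.265 into MP4, while Apple players expect hvc1. A hedged sketch of how a preset might honor the flag (the argument list is abbreviated and illustrative, not Frigate's exact preset code):

# Illustrative sketch: an output-args preset honoring apple_compatibility.
def parse_preset_output_record(preset: str, apple_compatibility: bool) -> list[str]:
    args = ["-f", "segment", "-c", "copy"]  # abbreviated stand-in for the real preset
    if apple_compatibility:
        # ffmpeg tags copied H.265 as hev1 by default; Apple players expect hvc1
        args += ["-tag:v", "hvc1"]
    return args

print(parse_preset_output_record("preset-record-generic", True))
# ['-f', 'segment', '-c', 'copy', '-tag:v', 'hvc1']

In user config this surfaces under ffmpeg as apple_compatibility: true, replacing the old private _force_record_hvc1 switch.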

@@ -1,6 +1,6 @@
from typing import Any, Optional, Union

from pydantic import Field, field_serializer
from pydantic import Field, PrivateAttr, field_serializer

from ..base import FrigateBaseModel

@@ -11,11 +11,13 @@ DEFAULT_TRACKED_OBJECTS = ["person"]

class FilterConfig(FrigateBaseModel):
    min_area: int = Field(
        default=0, title="Minimum area of bounding box for object to be counted."
    min_area: Union[int, float] = Field(
        default=0,
        title="Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99).",
    )
    max_area: int = Field(
        default=24000000, title="Maximum area of bounding box for object to be counted."
    max_area: Union[int, float] = Field(
        default=24000000,
        title="Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99).",
    )
    min_ratio: float = Field(
        default=0,
@@ -53,3 +55,20 @@ class ObjectConfig(FrigateBaseModel):
        default_factory=dict, title="Object filters."
    )
    mask: Union[str, list[str]] = Field(default="", title="Object mask.")
    _all_objects: list[str] = PrivateAttr()

    @property
    def all_objects(self) -> list[str]:
        return self._all_objects

    def parse_all_objects(self, cameras):
        if "_all_objects" in self:
            return

        # get list of unique enabled labels for tracking
        enabled_labels = set(self.track)

        for camera in cameras.values():
            enabled_labels.update(camera.objects.track)

        self._all_objects = list(enabled_labels)
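
Accepting Union[int, float] for the area fields implies a normalization step downstream: presumably a float is scaled by the frame area while an int stays absolute pixels. A sketch of that interpretation (the helper is illustrative, not Frigate's filter code):

# Sketch: interpreting an area filter that may be pixels (int) or a fraction (float).
def area_threshold(value, frame_width: int, frame_height: int) -> float:
    if isinstance(value, float) and 0 < value <= 0.99:
        # floats are treated as a percentage of the total frame area
        return value * frame_width * frame_height
    return float(value)  # ints are absolute pixels

# a 0.1 (10%) minimum on a 1280x720 frame is 92160 px
print(area_threshold(0.1, 1280, 720))  # 92160.0
print(area_threshold(500, 1280, 720))  # 500.0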

@@ -64,7 +64,9 @@ class PtzAutotrackConfig(FrigateBaseModel):
                raise ValueError("Invalid type for movement_weights")

            if len(weights) != 5:
                raise ValueError("movement_weights must have exactly 5 floats")
                raise ValueError(
                    "movement_weights must have exactly 5 floats, remove this line from your config and run autotracking calibration"
                )

        return weights
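
For reference, the surrounding validator pattern in self-contained form (a minimal Pydantic v2 sketch; the model name matches but the parsing details are illustrative):

# Minimal sketch of the length-check validator pattern (Pydantic v2).
from pydantic import BaseModel, field_validator

class PtzAutotrackConfig(BaseModel):
    movement_weights: list[float] = []

    @field_validator("movement_weights", mode="before")
    @classmethod
    def validate_weights(cls, v):
        if not v:
            return []
        weights = [float(w) for w in v]
        if len(weights) != 5:
            raise ValueError(
                "movement_weights must have exactly 5 floats, remove this line "
                "from your config and run autotracking calibration"
            )
        return weights

print(PtzAutotrackConfig(movement_weights=[1, 2, 3, 4, 5]).movement_weights)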

Some files were not shown because too many files have changed in this diff.