Merge branch 'dev' of https://github.com/blakeblackshear/frigate into motion_improvements

commit d628850c87
p-boon 2025-02-21 10:13:58 +01:00
43 changed files with 1460 additions and 558 deletions

View File

@@ -19,7 +19,7 @@ sudo chown -R "$(id -u):$(id -g)" /media/frigate
# When started as a service, LIBAVFORMAT_VERSION_MAJOR is defined in the
# s6 service file. For dev, where frigate is started from an interactive
# shell, we define it in .bashrc instead.
-echo 'export LIBAVFORMAT_VERSION_MAJOR=$(/usr/lib/ffmpeg/7.0/bin/ffmpeg -version | grep -Po "libavformat\W+\K\d+")' >> $HOME/.bashrc
+echo 'export LIBAVFORMAT_VERSION_MAJOR=$("$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)" -version | grep -Po "libavformat\W+\K\d+")' >> "$HOME/.bashrc"
make version

View File

@@ -14,16 +14,18 @@ ARG BASE_HOOK=
FROM ${BASE_IMAGE} AS base
ARG PIP_BREAK_SYSTEM_PACKAGES
-RUN ${BASE_HOOK}
+ARG BASE_HOOK
+RUN sh -c "$BASE_HOOK"

FROM --platform=${BUILDPLATFORM} debian:12 AS base_host
ARG PIP_BREAK_SYSTEM_PACKAGES

FROM ${SLIM_BASE} AS slim-base
ARG PIP_BREAK_SYSTEM_PACKAGES
-RUN ${BASE_HOOK}
+ARG BASE_HOOK
+RUN sh -c "$BASE_HOOK"

FROM slim-base AS wget
ARG DEBIAN_FRONTEND
@@ -229,8 +231,13 @@ ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \
    /deps/install_deps.sh

+ENV DEFAULT_FFMPEG_VERSION="7.0"
+ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:5.0"
+
+RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
+    && python3 get-pip.py "pip"
+
RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
-    python3 -m pip install --upgrade pip && \
    pip3 install -U /deps/wheels/*.whl

COPY --from=deps-rootfs / /

View File

@@ -6,13 +6,13 @@ apt-get -qq update
apt-get -qq install --no-install-recommends -y \
    apt-transport-https \
+    ca-certificates \
    gnupg \
    wget \
    lbzip2 \
    procps vainfo \
    unzip locales tzdata libxml2 xz-utils \
    python3.11 \
-    python3-pip \
    curl \
    lsof \
    jq \
@@ -31,28 +31,28 @@ unset DEBIAN_FRONTEND
yes | dpkg -i /tmp/libedgetpu1-max.deb && export DEBIAN_FRONTEND=noninteractive
rm /tmp/libedgetpu1-max.deb

-# btbn-ffmpeg -> amd64
+# ffmpeg -> amd64
if [[ "${TARGETARCH}" == "amd64" ]]; then
    mkdir -p /usr/lib/ffmpeg/5.0
+    wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linux64-gpl-5.1.tar.xz"
+    tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1 amd64/bin/ffmpeg amd64/bin/ffprobe
+    rm -rf ffmpeg.tar.xz
    mkdir -p /usr/lib/ffmpeg/7.0
-    wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linux64-gpl-5.1.tar.xz"
-    tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1
-    rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/5.0/doc /usr/lib/ffmpeg/5.0/bin/ffplay
-    wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linux64-gpl-7.0.tar.xz"
-    tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1
-    rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/7.0/doc /usr/lib/ffmpeg/7.0/bin/ffplay
+    wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linux64-gpl-7.0.tar.xz"
+    tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1 amd64/bin/ffmpeg amd64/bin/ffprobe
+    rm -rf ffmpeg.tar.xz
fi

# ffmpeg -> arm64
if [[ "${TARGETARCH}" == "arm64" ]]; then
    mkdir -p /usr/lib/ffmpeg/5.0
+    wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linuxarm64-gpl-5.1.tar.xz"
+    tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1 arm64/bin/ffmpeg arm64/bin/ffprobe
+    rm -f ffmpeg.tar.xz
    mkdir -p /usr/lib/ffmpeg/7.0
-    wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linuxarm64-gpl-5.1.tar.xz"
-    tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1
-    rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/5.0/doc /usr/lib/ffmpeg/5.0/bin/ffplay
-    wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linuxarm64-gpl-7.0.tar.xz"
-    tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1
-    rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/7.0/doc /usr/lib/ffmpeg/7.0/bin/ffplay
+    wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linuxarm64-gpl-7.0.tar.xz"
+    tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1 arm64/bin/ffmpeg arm64/bin/ffprobe
+    rm -f ffmpeg.tar.xz
fi

# arch specific packages

View File

@@ -70,5 +70,5 @@ verboselogs==1.7.*
virtualenv==20.17.*
prometheus-client == 0.21.*
# TFLite
-tflite_runtime @ https://github.com/feranick/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_x86_64.whl; platform_machine == 'x86_64'
+tflite_runtime @ https://github.com/frigate-nvr/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_x86_64.whl; platform_machine == 'x86_64'
tflite_runtime @ https://github.com/feranick/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_aarch64.whl; platform_machine == 'aarch64'

View File

@@ -43,8 +43,10 @@ function migrate_db_path() {
}

function set_libva_version() {
-    local ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
-    export LIBAVFORMAT_VERSION_MAJOR=$($ffmpeg_path -version | grep -Po "libavformat\W+\K\d+")
+    local ffmpeg_path
+    ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
+    LIBAVFORMAT_VERSION_MAJOR=$("$ffmpeg_path" -version | grep -Po "libavformat\W+\K\d+")
+    export LIBAVFORMAT_VERSION_MAJOR
}

echo "[INFO] Preparing Frigate..."

View File

@@ -44,10 +44,14 @@ function get_ip_and_port_from_supervisor() {
}

function set_libva_version() {
-    local ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
-    export LIBAVFORMAT_VERSION_MAJOR=$($ffmpeg_path -version | grep -Po "libavformat\W+\K\d+")
+    local ffmpeg_path
+    ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
+    LIBAVFORMAT_VERSION_MAJOR=$("$ffmpeg_path" -version | grep -Po "libavformat\W+\K\d+")
+    export LIBAVFORMAT_VERSION_MAJOR
}

+set_libva_version
+
if [[ -f "/dev/shm/go2rtc.yaml" ]]; then
    echo "[INFO] Removing stale config from last run..."
    rm /dev/shm/go2rtc.yaml

@@ -66,8 +70,6 @@ else
    echo "[WARNING] Unable to remove existing go2rtc config. Changes made to your frigate config file may not be recognized. Please remove the /dev/shm/go2rtc.yaml from your docker host manually."
fi

-set_libva_version
-
readonly config_path="/config"

if [[ -x "${config_path}/go2rtc" ]]; then

View File

@@ -1,6 +1,5 @@
import json
import os
-import shutil
import sys

from ruamel.yaml import YAML

@@ -35,10 +34,7 @@ except FileNotFoundError:
path = config.get("ffmpeg", {}).get("path", "default")
if path == "default":
-    if shutil.which("ffmpeg") is None:
-        print(f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg")
-    else:
-        print("ffmpeg")
+    print(f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg")
elif path in INCLUDED_FFMPEG_VERSIONS:
    print(f"/usr/lib/ffmpeg/{path}/bin/ffmpeg")
else:
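For orientation (this mirrors the `frigate/const.py` and `FfmpegConfig` changes further down): path resolution no longer probes `$PATH` via `shutil.which`; it is driven entirely by environment variables baked into each image variant. A condensed sketch of the behavior, with the amd64 defaults from the Dockerfile above and the custom-path branch inferred from the custom-ffmpeg docs in this commit:

```python
import os

# set by each image variant's Dockerfile, e.g. "7.0" and "7.0:5.0" on amd64
DEFAULT_FFMPEG_VERSION = os.environ.get("DEFAULT_FFMPEG_VERSION", "")
INCLUDED_FFMPEG_VERSIONS = os.environ.get("INCLUDED_FFMPEG_VERSIONS", "").split(":")


def resolve_ffmpeg(path: str = "default") -> str:
    if path == "default":
        return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg"
    if path in INCLUDED_FFMPEG_VERSIONS:
        return f"/usr/lib/ffmpeg/{path}/bin/ffmpeg"
    # assumption: any other value is treated as a user install prefix
    return f"{path}/bin/ffmpeg"
```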

View File

@@ -2,7 +2,6 @@
import json
import os
-import shutil
import sys

from pathlib import Path

@@ -13,6 +12,7 @@ from frigate.const import (
    BIRDSEYE_PIPE,
    DEFAULT_FFMPEG_VERSION,
    INCLUDED_FFMPEG_VERSIONS,
+    LIBAVFORMAT_VERSION_MAJOR,
)
from frigate.ffmpeg_presets import parse_preset_hardware_acceleration_encode

@@ -115,10 +115,7 @@ else:
# ensure ffmpeg path is set correctly
path = config.get("ffmpeg", {}).get("path", "default")
if path == "default":
-    if shutil.which("ffmpeg") is None:
-        ffmpeg_path = f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg"
-    else:
-        ffmpeg_path = "ffmpeg"
+    ffmpeg_path = f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg"
elif path in INCLUDED_FFMPEG_VERSIONS:
    ffmpeg_path = f"/usr/lib/ffmpeg/{path}/bin/ffmpeg"
else:

@@ -130,14 +127,12 @@ elif go2rtc_config["ffmpeg"].get("bin") is None:
    go2rtc_config["ffmpeg"]["bin"] = ffmpeg_path

# need to replace ffmpeg command when using ffmpeg4
-if int(os.environ.get("LIBAVFORMAT_VERSION_MAJOR", "59") or "59") < 59:
-    if go2rtc_config["ffmpeg"].get("rtsp") is None:
-        go2rtc_config["ffmpeg"]["rtsp"] = (
-            "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
-        )
-else:
-    if go2rtc_config.get("ffmpeg") is None:
-        go2rtc_config["ffmpeg"] = {"path": ""}
+if LIBAVFORMAT_VERSION_MAJOR < 59:
+    rtsp_args = "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
+    if go2rtc_config.get("ffmpeg") is None:
+        go2rtc_config["ffmpeg"] = {"rtsp": rtsp_args}
+    elif go2rtc_config["ffmpeg"].get("rtsp") is None:
+        go2rtc_config["ffmpeg"]["rtsp"] = rtsp_args

for name in go2rtc_config.get("streams", {}):
    stream = go2rtc_config["streams"][name]

View File

@@ -3,6 +3,9 @@
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive

+# Globally set pip break-system-packages option to avoid having to specify it every time
+ARG PIP_BREAK_SYSTEM_PACKAGES=1
+
FROM wheels as rk-wheels
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt

@@ -13,6 +16,7 @@ RUN rm -rf /rk-wheels/opencv_python-*

FROM deps AS rk-frigate
ARG TARGETARCH
+ARG PIP_BREAK_SYSTEM_PACKAGES
RUN --mount=type=bind,from=rk-wheels,source=/rk-wheels,target=/deps/rk-wheels \
    pip3 install --no-deps -U /deps/rk-wheels/*.whl

@@ -24,8 +28,7 @@ COPY docker/rockchip/conv2rknn.py /opt/conv2rknn.py
ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/librknnrt.so /usr/lib/

-RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg
-RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-7/ffmpeg /usr/lib/ffmpeg/6.0/bin/
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-7/ffprobe /usr/lib/ffmpeg/6.0/bin/
-ENV PATH="/usr/lib/ffmpeg/6.0/bin/:${PATH}"
+ENV DEFAULT_FFMPEG_VERSION="6.0"
+ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:${INCLUDED_FFMPEG_VERSIONS}"

View File

@@ -6,11 +6,12 @@ ARG DEBIAN_FRONTEND=noninteractive
FROM deps AS rpi-deps
ARG TARGETARCH

-RUN rm -rf /usr/lib/btbn-ffmpeg/
# Install dependencies
RUN --mount=type=bind,source=docker/rpi/install_deps.sh,target=/deps/install_deps.sh \
    /deps/install_deps.sh

+ENV DEFAULT_FFMPEG_VERSION="rpi"
+ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:${INCLUDED_FFMPEG_VERSIONS}"
+
WORKDIR /opt/frigate/
COPY --from=rootfs / /

View File

@@ -28,4 +28,7 @@ if [[ "${TARGETARCH}" == "arm64" ]]; then
    echo "deb [signed-by=/usr/share/keyrings/raspbian.gpg] https://archive.raspberrypi.org/debian/ bookworm main" | tee /etc/apt/sources.list.d/raspi.list
    apt-get -qq update
    apt-get -qq install --no-install-recommends --no-install-suggests -y ffmpeg
+    mkdir -p /usr/lib/ffmpeg/rpi/bin
+    ln -svf /usr/bin/ffmpeg /usr/lib/ffmpeg/rpi/bin/ffmpeg
+    ln -svf /usr/bin/ffprobe /usr/lib/ffmpeg/rpi/bin/ffprobe
fi

View File

@@ -76,8 +76,9 @@ RUN apt-get update \
    && apt-get install -y python-is-python3 libprotobuf23 \
    && rm -rf /var/lib/apt/lists/*

-RUN rm -rf /usr/lib/btbn-ffmpeg/
COPY --from=jetson-ffmpeg /rootfs /
+ENV DEFAULT_FFMPEG_VERSION="jetson"
+ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:${INCLUDED_FFMPEG_VERSIONS}"

# ffmpeg runtime dependencies
RUN apt-get -qq update \

View File

@@ -5,7 +5,7 @@
set -euxo pipefail

-INSTALL_PREFIX=/rootfs/usr/local
+INSTALL_PREFIX=/rootfs/usr/lib/ffmpeg/jetson

apt-get -qq update
apt-get -qq install -y --no-install-recommends build-essential ccache clang cmake pkg-config

View File

@@ -20,7 +20,7 @@ FIRST_MODEL=true
MODEL_DOWNLOAD=""
MODEL_CONVERT=""

-if [ -z "$YOLO_MODELS"]; then
+if [ -z "$YOLO_MODELS" ]; then
    echo "tensorrt model preparation disabled"
    exit 0
fi

@@ -64,7 +64,7 @@ fi
# order to run libyolo here.
# On Jetpack 5.0, these libraries are not mounted by the runtime and are supplied by the image.
if [[ "$(arch)" == "aarch64" ]]; then
-    if [[ ! -e /usr/lib/aarch64-linux-gnu/tegra ]]; then
+    if [[ ! -e /usr/lib/aarch64-linux-gnu/tegra && ! -e /usr/lib/aarch64-linux-gnu/tegra-egl ]]; then
        echo "ERROR: Container must be launched with nvidia runtime"
        exit 1
    elif [[ ! -e /usr/lib/aarch64-linux-gnu/libnvinfer.so.8 ||

View File

@@ -1,12 +1,14 @@
# NVidia TensorRT Support (amd64 only)
--extra-index-url 'https://pypi.nvidia.com'
numpy < 1.24; platform_machine == 'x86_64'
-tensorrt == 8.6.1.*; platform_machine == 'x86_64'
+tensorrt == 8.6.1; platform_machine == 'x86_64'
+tensorrt_bindings == 8.6.1; platform_machine == 'x86_64'
cuda-python == 11.8.*; platform_machine == 'x86_64'
cython == 3.0.*; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64'
nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64'
+nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64'
nvidia-cudnn-cu12 == 9.5.0.*; platform_machine == 'x86_64'
nvidia-cufft-cu11==10.*; platform_machine == 'x86_64'
onnx==1.16.*; platform_machine == 'x86_64'

View File

@@ -14,12 +14,17 @@ variable "COMPUTE_LEVEL" {
  default = ""
}

variable "BASE_HOOK" {
-  # Ensure an up-to-date python 3.11 is available in tensorrt/jetson image
+  # Ensure an up-to-date python 3.11 is available in jetson images
  default = <<EOT
if grep -iq \"ubuntu\" /etc/os-release; then
-    apt-get update &&
-    apt-get install -y software-properties-common &&
-    add-apt-repository ppa:deadsnakes/ppa;
+    . /etc/os-release
+    # Add the deadsnakes PPA repository
+    echo "deb https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu $VERSION_CODENAME main" >> /etc/apt/sources.list.d/deadsnakes.list
+    echo "deb-src https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu $VERSION_CODENAME main" >> /etc/apt/sources.list.d/deadsnakes.list
+    # Add deadsnakes signing key
+    apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F23C5A6CF475977595C89F51BA6932366A755776
fi
EOT
}

View File

@@ -37,7 +37,7 @@ See [the go2rtc docs](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#modul

```yaml
go2rtc:
  streams:
-    ...
+    # ...
  log:
    exec: trace
```
@@ -176,15 +176,13 @@ listen [::]:5000 ipv6only=off;

### Custom ffmpeg build

-Included with Frigate is a build of ffmpeg that works for the vast majority of users. However, there exists some hardware setups which have incompatibilities with the included build. In this case, statically built ffmpeg binary can be downloaded to /config and used.
+Included with Frigate is a build of ffmpeg that works for the vast majority of users. However, there exists some hardware setups which have incompatibilities with the included build. In this case, statically built `ffmpeg` and `ffprobe` binaries can be placed in `/config/custom-ffmpeg/bin` for Frigate to use.

To do this:

-1. Download your ffmpeg build and uncompress to the Frigate config folder.
-2. Update your docker-compose or docker CLI to include `'/home/appdata/frigate/custom-ffmpeg':'/usr/lib/btbn-ffmpeg':'ro'` in the volume mappings.
-3. Restart Frigate and the custom version will be used if the mapping was done correctly.
+1. Download your ffmpeg build and uncompress it to the `/config/custom-ffmpeg` folder. Verify that both the `ffmpeg` and `ffprobe` binaries are located in `/config/custom-ffmpeg/bin`.
+2. Update the `ffmpeg.path` in your Frigate config to `/config/custom-ffmpeg`.
+3. Restart Frigate and the custom version will be used if the steps above were done correctly.

-NOTE: The folder that is set for the config needs to be the folder that contains `/bin`. So if the full structure is `/home/appdata/frigate/custom-ffmpeg/bin/ffmpeg` then the `ffmpeg -> path` field should be `/config/custom-ffmpeg/bin`.
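For reference, the resulting config entry from step 2 would look like this (a minimal sketch; the comment reflects the path resolution described above, where Frigate looks under `<path>/bin`):

```yaml
ffmpeg:
  path: /config/custom-ffmpeg # Frigate expects bin/ffmpeg and bin/ffprobe under this prefix
```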
### Custom go2rtc version

@@ -192,7 +190,7 @@ Frigate currently includes go2rtc v1.9.2, there may be certain cases where you w

To do this:

-1. Download the go2rtc build to the /config folder.
+1. Download the go2rtc build to the `/config` folder.
2. Rename the build to `go2rtc`.
3. Give `go2rtc` execute permission.
4. Restart Frigate and the custom version will be used, you can verify by checking go2rtc logs.
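As an illustration, the steps might look like this on a typical install (the release URL and amd64 asset name are assumptions; pick the asset matching your platform from the go2rtc releases page):

```bash
# download a go2rtc build into the config volume (amd64 assumed)
wget -qO /config/go2rtc "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.2/go2rtc_linux_amd64"
# the file must be named `go2rtc` and be executable
chmod +x /config/go2rtc
# restart Frigate, then check the go2rtc logs to confirm the version
```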

View File

@@ -3,13 +3,28 @@ id: license_plate_recognition
title: License Plate Recognition (LPR)
---

-Frigate can recognize license plates on vehicles and automatically add the detected characters as a `sub_label` to objects that are of type `car`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street with a dedicated LPR camera.
+Frigate can recognize license plates on vehicles and automatically add the detected characters or recognized name as a `sub_label` to objects that are of type `car`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street.
+
+LPR works best when the license plate is clearly visible to the camera. For moving vehicles, Frigate continuously refines the recognition process, keeping the most confident result. However, LPR does not run on stationary vehicles.
+
+When a plate is recognized, the detected characters or recognized name is:
+
+- Added as a `sub_label` to the `car` tracked object.
+- Viewable in the Review Item Details pane in Review and the Tracked Object Details pane in Explore.
+- Filterable through the More Filters menu in Explore.
+- Published via the `frigate/events` MQTT topic as a `sub_label` for the tracked object.
+
+## Model Requirements

Users running a Frigate+ model (or any custom model that natively detects license plates) should ensure that `license_plate` is added to the [list of objects to track](https://docs.frigate.video/plus/#available-label-types) either globally or for a specific camera. This will improve the accuracy and performance of the LPR model.

-Users without a model that detects license plates can still run LPR. A small, CPU inference, YOLOv9 license plate detection model will be used instead. You should _not_ define `license_plate` in your list of objects to track.
+Users without a model that detects license plates can still run LPR. Frigate uses a lightweight YOLOv9 license plate detection model that runs on your CPU. In this case, you should _not_ define `license_plate` in your list of objects to track.

-LPR is most effective when the vehicles license plate is fully visible to the camera. For moving vehicles, Frigate will attempt to read the plate continuously, refining recognition and keeping the most confident result. LPR will not run on stationary vehicles.
+:::note
+
+Frigate needs to first detect a `car` before it can recognize a license plate. If you're using a dedicated LPR camera or have a zoomed-in view, make sure the camera captures enough of the `car` for Frigate to detect it reliably.
+
+:::
## Minimum System Requirements

@@ -24,6 +39,8 @@ lpr:
  enabled: True
```

+Ensure that your camera is configured to detect objects of type `car`, and that a car is actually being detected by Frigate. Otherwise, LPR will not run.
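For instance, a minimal sketch of a camera that tracks `car` (the camera name is illustrative):

```yaml
cameras:
  driveway: # hypothetical camera name
    objects:
      track:
        - car
```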
## Advanced Configuration

Fine-tune the LPR feature using these optional parameters:
@@ -41,11 +58,12 @@ Fine-tune the LPR feature using these optional parameters:
- **`recognition_threshold`**: Recognition confidence score required to add the plate to the object as a sub label.
  - Default: `0.9`.
-- **`min_plate_length`**: Specifies the minimum number of characters a detected license plate must have to be added as a sub-label to an object.
+- **`min_plate_length`**: Specifies the minimum number of characters a detected license plate must have to be added as a sub label to an object.
  - Use this to filter out short, incomplete, or incorrect detections.
- **`format`**: A regular expression defining the expected format of detected plates. Plates that do not match this format will be discarded.
  - `"^[A-Z]{1,3} [A-Z]{1,2} [0-9]{1,4}$"` matches plates like "B AB 1234" or "M X 7"
  - `"^[A-Z]{2}[0-9]{2} [A-Z]{3}$"` matches plates like "AB12 XYZ" or "XY68 ABC"
+  - Websites like https://regex101.com/ can help test regular expressions for your plates.
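A quick local sanity check of a `format` expression, using the second pattern above (illustrative only):

```python
import re

# UK-style pattern from the docs above
fmt = re.compile(r"^[A-Z]{2}[0-9]{2} [A-Z]{3}$")

for plate in ["AB12 XYZ", "XY68 ABC", "B AB 1234"]:
    # Frigate discards detected plates that do not match the configured format
    print(plate, "matches" if fmt.fullmatch(plate) else "discarded")
```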
### Matching ### Matching
@@ -53,9 +71,9 @@ Fine-tune the LPR feature using these optional parameters:
  - These labels appear in the UI, filters, and notifications.
- **`match_distance`**: Allows for minor variations (missing/incorrect characters) when matching a detected plate to a known plate.
  - For example, setting `match_distance: 1` allows a plate `ABCDE` to match `ABCBE` or `ABCD`.
-  - This parameter will not operate on known plates that are defined as regular expressions. You should define the full string of your plate in `known_plates` in order to use `match_distance`.
+  - This parameter will _not_ operate on known plates that are defined as regular expressions. You should define the full string of your plate in `known_plates` in order to use `match_distance`.

-### Examples
+## Configuration Examples
```yaml
lpr:

@@ -69,7 +87,9 @@ lpr:
    Johnny:
      - "J*N-*234" # Matches JHN-1234 and JMN-I234, but also note that "*" matches any number of characters
    Sally:
-      - "[S5]LL-1234" # Matches both SLL-1234 and 5LL-1234
+      - "[S5]LL 1234" # Matches both SLL 1234 and 5LL 1234
+    Work Trucks:
+      - "EMP-[0-9]{3}[A-Z]" # Matches plates like EMP-123A, EMP-456Z
```
```yaml
@@ -77,12 +97,54 @@ lpr:
  enabled: True
  min_area: 4000 # Run recognition on larger plates only
  recognition_threshold: 0.85
-  format: "^[A-Z]{3}-[0-9]{4}$" # Only recognize plates that are three letters, followed by a dash, followed by 4 numbers
+  format: "^[A-Z]{2} [A-Z][0-9]{4}$" # Only recognize plates that are two letters, followed by a space, followed by a single letter and 4 numbers
  match_distance: 1 # Allow one character variation in plate matching
  known_plates:
    Delivery Van:
-      - "RJK-5678"
-      - "UPS-1234"
+      - "RJ K5678"
+      - "UP A1234"
-    Employee Parking:
-      - "EMP-[0-9]{3}[A-Z]" # Matches plates like EMP-123A, EMP-456Z
+    Supervisor:
+      - "MN D3163"
```
## FAQ
### Why isn't my license plate being detected and recognized?
Ensure that:
- Your camera has a clear, well-lit view of the plate.
- The plate is large enough in the image (try adjusting `min_area`).
- A `car` is detected first, as LPR only runs on recognized vehicles.
If you are using a Frigate+ model or a custom model that detects license plates, ensure that `license_plate` is added to your list of objects to track.
If you are using the free model that ships with Frigate, you should _not_ add `license_plate` to the list of objects to track.
### Can I run LPR without detecting `car` objects?
No, Frigate requires a `car` to be detected first before recognizing a license plate.
### How can I improve detection accuracy?
- Use high-quality cameras with good resolution.
- Adjust `detection_threshold` and `recognition_threshold` values.
- Define a `format` regex to filter out invalid detections.
### Does LPR work at night?
Yes, but performance depends on camera quality, lighting, and infrared capabilities. Make sure your camera can capture clear images of plates at night.
### How can I match known plates with minor variations?
Use `match_distance` to allow small character mismatches. Alternatively, define multiple variations in `known_plates`.
### How do I debug LPR issues?
- View MQTT messages for `frigate/events` to verify detected plates.
- Adjust `detection_threshold` and `recognition_threshold` settings.
- If you are using a Frigate+ model or a model that detects license plates, watch the debug view (Settings --> Debug) to ensure that `license_plate` is being detected with a `car`.
- Enable debug logs for LPR by adding `frigate.data_processing.real_time.license_plate_processor: debug` to your `logger` configuration. These logs are _very_ verbose, so only enable this when necessary.
### Will LPR slow down my system?
LPR runs on the CPU, so performance impact depends on your hardware. Ensure you have at least 4GB RAM and a capable CPU for optimal results.
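For the MQTT debugging step in the FAQ above, a one-liner like this can confirm that plates are being attached to events (broker details and the `mosquitto-clients`/`jq` tools are assumptions about your setup, as is the exact payload shape):

```bash
# watch frigate/events and print only updates that carry a sub_label (e.g. a plate)
mosquitto_sub -h <your-broker> -t frigate/events \
  | jq 'select(.after.sub_label != null) | {camera: .after.camera, plate: .after.sub_label}'
```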

View File

@@ -140,12 +140,12 @@ cameras:
    zones:
      street:
        coordinates: 0.033,0.306,0.324,0.138,0.439,0.185,0.042,0.428
-        distances: 10,12,11,13.5
+        distances: 10,12,11,13.5 # in meters or feet
```

Each number in the `distance` field represents the real-world distance between the points in the `coordinates` list. So in the example above, the distance between the first two points ([0.033,0.306] and [0.324,0.138]) is 10. The distance between the second and third set of points ([0.324,0.138] and [0.439,0.185]) is 12, and so on. The fastest and most accurate way to configure this is through the Zone Editor in the Frigate UI.

-The `distance` values are measured in meters or feet, depending on how `unit_system` is configured in your `ui` config:
+The `distance` values are measured in meters (metric) or feet (imperial), depending on how `unit_system` is configured in your `ui` config:
```yaml
ui:
@@ -153,7 +153,9 @@ ui:
  unit_system: metric
```

-The average speed of your object as it moved through your zone is saved in Frigate's database and can be seen in the UI in the Tracked Object Details pane in Explore. Current estimated speed can also be seen on the debug view as the third value in the object label (see the caveats below). Current estimated speed, average estimated speed, and velocity angle (the angle of the direction the object is moving relative to the frame) of tracked objects is also sent through the `events` MQTT topic. See the [MQTT docs](../integrations/mqtt.md#frigateevents). These speed values are output as a number in miles per hour (mph) or kilometers per hour (kph), depending on how `unit_system` is configured in your `ui` config.
+The average speed of your object as it moved through your zone is saved in Frigate's database and can be seen in the UI in the Tracked Object Details pane in Explore. Current estimated speed can also be seen on the debug view as the third value in the object label (see the caveats below). Current estimated speed, average estimated speed, and velocity angle (the angle of the direction the object is moving relative to the frame) of tracked objects is also sent through the `events` MQTT topic. See the [MQTT docs](../integrations/mqtt.md#frigateevents).
+
+These speed values are output as a number in miles per hour (mph) or kilometers per hour (kph). For miles per hour, set `unit_system` to `imperial`. For kilometers per hour, set `unit_system` to `metric`.

#### Best practices and caveats

View File

@@ -12,7 +12,7 @@ class EventResponse(BaseModel):
    end_time: Optional[float]
    false_positive: Optional[bool]
    zones: list[str]
-    thumbnail: str
+    thumbnail: Optional[str]
    has_clip: bool
    has_snapshot: bool
    retain_indefinitely: bool

View File

@@ -33,7 +33,11 @@ class CameraActivityManager:
                self.zone_active_object_counts[zone] = Counter()
                self.all_zone_labels[zone] = set()

-            self.all_zone_labels[zone].update(zone_config.objects)
+            self.all_zone_labels[zone].update(
+                zone_config.objects
+                if zone_config.objects
+                else camera_config.objects.track
+            )

    def update_activity(self, new_activity: dict[str, dict[str, any]]) -> None:
        all_objects: list[dict[str, any]] = []

View File

@@ -32,7 +32,9 @@ class ConfigPublisher:
class ConfigSubscriber:
    """Simplifies receiving an updated config."""

-    def __init__(self, topic: str) -> None:
+    def __init__(self, topic: str, exact=False) -> None:
+        self.topic = topic
+        self.exact = exact
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.SUB)
        self.socket.setsockopt_string(zmq.SUBSCRIBE, topic)
@@ -42,7 +44,12 @@ class ConfigSubscriber:
        """Returns updated config or None if no update."""
        try:
            topic = self.socket.recv_string(flags=zmq.NOBLOCK)
-            return (topic, self.socket.recv_pyobj())
+            obj = self.socket.recv_pyobj()
+
+            if not self.exact or self.topic == topic:
+                return (topic, obj)
+            else:
+                return (None, None)
        except zmq.ZMQError:
            return (None, None)
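Context for the `exact` flag (not in the diff): ZMQ SUB sockets match subscriptions by prefix, so a subscriber on `config/record` would also receive `config/record/retention` and the like; `exact=True` drops those. A hypothetical polling loop:

```python
# sketch only: the topic name and handler are illustrative
sub = ConfigSubscriber("config/record", exact=True)

while True:
    topic, updated = sub.check_for_update()
    if topic:
        # with exact=True, prefix matches such as "config/record/cam1"
        # now yield (None, None) instead of being returned
        handle_record_config(updated)  # hypothetical handler
```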

View File

@@ -1,4 +1,3 @@
-import shutil
from enum import Enum
from typing import Union

@@ -71,10 +70,7 @@ class FfmpegConfig(FrigateBaseModel):
    @property
    def ffmpeg_path(self) -> str:
        if self.path == "default":
-            if shutil.which("ffmpeg") is None:
-                return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg"
-            else:
-                return "ffmpeg"
+            return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg"
        elif self.path in INCLUDED_FFMPEG_VERSIONS:
            return f"/usr/lib/ffmpeg/{self.path}/bin/ffmpeg"
        else:

@@ -83,10 +79,7 @@ class FfmpegConfig(FrigateBaseModel):
    @property
    def ffprobe_path(self) -> str:
        if self.path == "default":
-            if shutil.which("ffprobe") is None:
-                return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffprobe"
-            else:
-                return "ffprobe"
+            return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffprobe"
        elif self.path in INCLUDED_FFMPEG_VERSIONS:
            return f"/usr/lib/ffmpeg/{self.path}/bin/ffprobe"
        else:

View File

@@ -1,3 +1,4 @@
+import os
import re

CONFIG_DIR = "/config"

@@ -61,8 +62,9 @@ MAX_WAL_SIZE = 10 # MB

# Ffmpeg constants
-DEFAULT_FFMPEG_VERSION = "7.0"
-INCLUDED_FFMPEG_VERSIONS = ["7.0", "5.0"]
+DEFAULT_FFMPEG_VERSION = os.environ.get("DEFAULT_FFMPEG_VERSION", "")
+INCLUDED_FFMPEG_VERSIONS = os.environ.get("INCLUDED_FFMPEG_VERSIONS", "").split(":")
+LIBAVFORMAT_VERSION_MAJOR = int(os.environ.get("LIBAVFORMAT_VERSION_MAJOR", "59"))

FFMPEG_HWACCEL_NVIDIA = "preset-nvidia"
FFMPEG_HWACCEL_VAAPI = "preset-vaapi"
FFMPEG_HWACCEL_VULKAN = "preset-vulkan"

View File

@@ -16,7 +16,12 @@ from shapely.geometry import Polygon
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import FrigateConfig
from frigate.const import FRIGATE_LOCALHOST
-from frigate.embeddings.functions.onnx import GenericONNXEmbedding, ModelTypeEnum
+from frigate.embeddings.onnx.lpr_embedding import (
+    LicensePlateDetector,
+    PaddleOCRClassification,
+    PaddleOCRDetection,
+    PaddleOCRRecognition,
+)
from frigate.util.image import area

from ..types import DataProcessorMetrics

@@ -52,49 +57,26 @@ class LicensePlateProcessor(RealTimeProcessorApi):
        self.lpr_recognition_model = None

        if self.config.lpr.enabled:
-            self.detection_model = GenericONNXEmbedding(
-                model_name="paddleocr-onnx",
-                model_file="detection.onnx",
-                download_urls={
-                    "detection.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/detection.onnx"
-                },
+            self.detection_model = PaddleOCRDetection(
                model_size="large",
-                model_type=ModelTypeEnum.lpr_detect,
                requestor=self.requestor,
                device="CPU",
            )
-            self.classification_model = GenericONNXEmbedding(
-                model_name="paddleocr-onnx",
-                model_file="classification.onnx",
-                download_urls={
-                    "classification.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/classification.onnx"
-                },
+            self.classification_model = PaddleOCRClassification(
                model_size="large",
-                model_type=ModelTypeEnum.lpr_classify,
                requestor=self.requestor,
                device="CPU",
            )
-            self.recognition_model = GenericONNXEmbedding(
-                model_name="paddleocr-onnx",
-                model_file="recognition.onnx",
-                download_urls={
-                    "recognition.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/recognition.onnx"
-                },
+            self.recognition_model = PaddleOCRRecognition(
                model_size="large",
-                model_type=ModelTypeEnum.lpr_recognize,
                requestor=self.requestor,
                device="CPU",
            )
-            self.yolov9_detection_model = GenericONNXEmbedding(
-                model_name="yolov9_license_plate",
-                model_file="yolov9-256-license-plates.onnx",
-                download_urls={
-                    "yolov9-256-license-plates.onnx": "https://github.com/hawkeye217/yolov9-license-plates/raw/refs/heads/master/models/yolov9-256-license-plates.onnx"
-                },
+            self.yolov9_detection_model = LicensePlateDetector(
                model_size="large",
-                model_type=ModelTypeEnum.yolov9_lpr_detect,
                requestor=self.requestor,
                device="CPU",
            )
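The `lpr_embedding` module itself is not shown in this diff; judging from the JinaV1 classes added below, each wrapper presumably bakes the model name, file, and download URL (taken here from the removed code above) into its constructor. A hypothetical sketch:

```python
class PaddleOCRDetection(BaseEmbedding):  # hypothetical, mirroring jina_v1_embedding.py
    def __init__(self, model_size: str, requestor, device: str = "AUTO"):
        super().__init__(
            model_name="paddleocr-onnx",
            model_file="detection.onnx",
            download_urls={
                "detection.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/detection.onnx"
            },
        )
        self.requestor = requestor
        self.model_size = model_size
        self.device = device
```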

View File

@@ -22,7 +22,7 @@ from frigate.types import ModelStatusTypesEnum
from frigate.util.builtin import serialize
from frigate.util.path import get_event_thumbnail_bytes

-from .functions.onnx import GenericONNXEmbedding, ModelTypeEnum
+from .onnx.jina_v1_embedding import JinaV1ImageEmbedding, JinaV1TextEmbedding

logger = logging.getLogger(__name__)

@@ -97,36 +97,14 @@ class Embeddings:
            },
        )

-        self.text_embedding = GenericONNXEmbedding(
-            model_name="jinaai/jina-clip-v1",
-            model_file="text_model_fp16.onnx",
-            tokenizer_file="tokenizer",
-            download_urls={
-                "text_model_fp16.onnx": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/text_model_fp16.onnx",
-            },
+        self.text_embedding = JinaV1TextEmbedding(
            model_size=config.semantic_search.model_size,
-            model_type=ModelTypeEnum.text,
            requestor=self.requestor,
            device="CPU",
        )

-        model_file = (
-            "vision_model_fp16.onnx"
-            if self.config.semantic_search.model_size == "large"
-            else "vision_model_quantized.onnx"
-        )
-
-        download_urls = {
-            model_file: f"https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/{model_file}",
-            "preprocessor_config.json": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/preprocessor_config.json",
-        }
-
-        self.vision_embedding = GenericONNXEmbedding(
-            model_name="jinaai/jina-clip-v1",
-            model_file=model_file,
-            download_urls=download_urls,
+        self.vision_embedding = JinaV1ImageEmbedding(
            model_size=config.semantic_search.model_size,
-            model_type=ModelTypeEnum.vision,
            requestor=self.requestor,
            device="GPU" if config.semantic_search.model_size == "large" else "CPU",
        )

View File

@@ -1,325 +0,0 @@
import logging
import os
import warnings
from enum import Enum
from io import BytesIO
from typing import Dict, List, Optional, Union
import cv2
import numpy as np
import requests
from PIL import Image
# importing this without pytorch or others causes a warning
# https://github.com/huggingface/transformers/issues/27214
# suppressed by setting env TRANSFORMERS_NO_ADVISORY_WARNINGS=1
from transformers import AutoFeatureExtractor, AutoTokenizer
from transformers.utils.logging import disable_progress_bar
from frigate.comms.inter_process import InterProcessRequestor
from frigate.const import MODEL_CACHE_DIR, UPDATE_MODEL_STATE
from frigate.types import ModelStatusTypesEnum
from frigate.util.downloader import ModelDownloader
from frigate.util.model import ONNXModelRunner
warnings.filterwarnings(
"ignore",
category=FutureWarning,
message="The class CLIPFeatureExtractor is deprecated",
)
# disables the progress bar for downloading tokenizers and feature extractors
disable_progress_bar()
logger = logging.getLogger(__name__)
FACE_EMBEDDING_SIZE = 160
LPR_EMBEDDING_SIZE = 256
class ModelTypeEnum(str, Enum):
face = "face"
vision = "vision"
text = "text"
lpr_detect = "lpr_detect"
lpr_classify = "lpr_classify"
lpr_recognize = "lpr_recognize"
yolov9_lpr_detect = "yolov9_lpr_detect"
class GenericONNXEmbedding:
"""Generic embedding function for ONNX models (text and vision)."""
def __init__(
self,
model_name: str,
model_file: str,
download_urls: Dict[str, str],
model_size: str,
model_type: ModelTypeEnum,
requestor: InterProcessRequestor,
tokenizer_file: Optional[str] = None,
device: str = "AUTO",
):
self.model_name = model_name
self.model_file = model_file
self.tokenizer_file = tokenizer_file
self.requestor = requestor
self.download_urls = download_urls
self.model_type = model_type
self.model_size = model_size
self.device = device
self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
self.tokenizer = None
self.feature_extractor = None
self.runner = None
files_names = list(self.download_urls.keys()) + (
[self.tokenizer_file] if self.tokenizer_file else []
)
if not all(
os.path.exists(os.path.join(self.download_path, n)) for n in files_names
):
logger.debug(f"starting model download for {self.model_name}")
self.downloader = ModelDownloader(
model_name=self.model_name,
download_path=self.download_path,
file_names=files_names,
download_func=self._download_model,
)
self.downloader.ensure_model_files()
else:
self.downloader = None
ModelDownloader.mark_files_state(
self.requestor,
self.model_name,
files_names,
ModelStatusTypesEnum.downloaded,
)
self._load_model_and_utils()
logger.debug(f"models are already downloaded for {self.model_name}")
def _download_model(self, path: str):
try:
file_name = os.path.basename(path)
if file_name in self.download_urls:
ModelDownloader.download_from_url(self.download_urls[file_name], path)
elif (
file_name == self.tokenizer_file
and self.model_type == ModelTypeEnum.text
):
if not os.path.exists(path + "/" + self.model_name):
logger.info(f"Downloading {self.model_name} tokenizer")
tokenizer = AutoTokenizer.from_pretrained(
self.model_name,
trust_remote_code=True,
cache_dir=f"{MODEL_CACHE_DIR}/{self.model_name}/tokenizer",
clean_up_tokenization_spaces=True,
)
tokenizer.save_pretrained(path)
self.downloader.requestor.send_data(
UPDATE_MODEL_STATE,
{
"model": f"{self.model_name}-{file_name}",
"state": ModelStatusTypesEnum.downloaded,
},
)
except Exception:
self.downloader.requestor.send_data(
UPDATE_MODEL_STATE,
{
"model": f"{self.model_name}-{file_name}",
"state": ModelStatusTypesEnum.error,
},
)
def _load_model_and_utils(self):
if self.runner is None:
if self.downloader:
self.downloader.wait_for_download()
if self.model_type == ModelTypeEnum.text:
self.tokenizer = self._load_tokenizer()
elif self.model_type == ModelTypeEnum.vision:
self.feature_extractor = self._load_feature_extractor()
elif self.model_type == ModelTypeEnum.face:
self.feature_extractor = []
elif self.model_type == ModelTypeEnum.lpr_detect:
self.feature_extractor = []
elif self.model_type == ModelTypeEnum.lpr_classify:
self.feature_extractor = []
elif self.model_type == ModelTypeEnum.lpr_recognize:
self.feature_extractor = []
elif self.model_type == ModelTypeEnum.yolov9_lpr_detect:
self.feature_extractor = []
self.runner = ONNXModelRunner(
os.path.join(self.download_path, self.model_file),
self.device,
self.model_size,
)
def _load_tokenizer(self):
tokenizer_path = os.path.join(f"{MODEL_CACHE_DIR}/{self.model_name}/tokenizer")
return AutoTokenizer.from_pretrained(
self.model_name,
cache_dir=tokenizer_path,
trust_remote_code=True,
clean_up_tokenization_spaces=True,
)
def _load_feature_extractor(self):
return AutoFeatureExtractor.from_pretrained(
f"{MODEL_CACHE_DIR}/{self.model_name}",
)
def _preprocess_inputs(self, raw_inputs: any) -> any:
if self.model_type == ModelTypeEnum.text:
max_length = max(len(self.tokenizer.encode(text)) for text in raw_inputs)
return [
self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=max_length,
return_tensors="np",
)
for text in raw_inputs
]
elif self.model_type == ModelTypeEnum.vision:
processed_images = [self._process_image(img) for img in raw_inputs]
return [
self.feature_extractor(images=image, return_tensors="np")
for image in processed_images
]
elif self.model_type == ModelTypeEnum.face:
if isinstance(raw_inputs, list):
raise ValueError("Face embedding does not support batch inputs.")
pil = self._process_image(raw_inputs)
# handle images larger than input size
width, height = pil.size
if width != FACE_EMBEDDING_SIZE or height != FACE_EMBEDDING_SIZE:
if width > height:
new_height = int(((height / width) * FACE_EMBEDDING_SIZE) // 4 * 4)
pil = pil.resize((FACE_EMBEDDING_SIZE, new_height))
else:
new_width = int(((width / height) * FACE_EMBEDDING_SIZE) // 4 * 4)
pil = pil.resize((new_width, FACE_EMBEDDING_SIZE))
og = np.array(pil).astype(np.float32)
# Image must be FACE_EMBEDDING_SIZExFACE_EMBEDDING_SIZE
og_h, og_w, channels = og.shape
frame = np.full(
(FACE_EMBEDDING_SIZE, FACE_EMBEDDING_SIZE, channels),
(0, 0, 0),
dtype=np.float32,
)
# compute center offset
x_center = (FACE_EMBEDDING_SIZE - og_w) // 2
y_center = (FACE_EMBEDDING_SIZE - og_h) // 2
# copy img image into center of result image
frame[y_center : y_center + og_h, x_center : x_center + og_w] = og
frame = np.expand_dims(frame, axis=0)
return [{"input_2": frame}]
elif self.model_type == ModelTypeEnum.lpr_detect:
preprocessed = []
for x in raw_inputs:
preprocessed.append(x)
return [{"x": preprocessed[0]}]
elif self.model_type == ModelTypeEnum.lpr_classify:
processed = []
for img in raw_inputs:
processed.append({"x": img})
return processed
elif self.model_type == ModelTypeEnum.lpr_recognize:
processed = []
for img in raw_inputs:
processed.append({"x": img})
return processed
elif self.model_type == ModelTypeEnum.yolov9_lpr_detect:
if isinstance(raw_inputs, list):
raise ValueError(
"License plate embedding does not support batch inputs."
)
# Get image as numpy array
img = self._process_image(raw_inputs)
height, width, channels = img.shape
# Resize maintaining aspect ratio
if width > height:
new_height = int(((height / width) * LPR_EMBEDDING_SIZE) // 4 * 4)
img = cv2.resize(img, (LPR_EMBEDDING_SIZE, new_height))
else:
new_width = int(((width / height) * LPR_EMBEDDING_SIZE) // 4 * 4)
img = cv2.resize(img, (new_width, LPR_EMBEDDING_SIZE))
# Get new dimensions after resize
og_h, og_w, channels = img.shape
# Create black square frame
frame = np.full(
(LPR_EMBEDDING_SIZE, LPR_EMBEDDING_SIZE, channels),
(0, 0, 0),
dtype=np.float32,
)
# Center the resized image in the square frame
x_center = (LPR_EMBEDDING_SIZE - og_w) // 2
y_center = (LPR_EMBEDDING_SIZE - og_h) // 2
frame[y_center : y_center + og_h, x_center : x_center + og_w] = img
# Normalize to 0-1
frame = frame / 255.0
# Convert from HWC to CHW format and add batch dimension
frame = np.transpose(frame, (2, 0, 1))
frame = np.expand_dims(frame, axis=0)
return [{"images": frame}]
else:
raise ValueError(f"Unable to preprocess inputs for {self.model_type}")
def _process_image(self, image, output: str = "RGB") -> Image.Image:
if isinstance(image, str):
if image.startswith("http"):
response = requests.get(image)
image = Image.open(BytesIO(response.content)).convert(output)
elif isinstance(image, bytes):
image = Image.open(BytesIO(image)).convert(output)
return image
def __call__(
self, inputs: Union[List[str], List[Image.Image], List[str]]
) -> List[np.ndarray]:
self._load_model_and_utils()
if self.runner is None or (
self.tokenizer is None and self.feature_extractor is None
):
logger.error(
f"{self.model_name} model or tokenizer/feature extractor is not loaded."
)
return []
processed_inputs = self._preprocess_inputs(inputs)
input_names = self.runner.get_input_names()
onnx_inputs = {name: [] for name in input_names}
input: dict[str, any]
for input in processed_inputs:
for key, value in input.items():
if key in input_names:
onnx_inputs[key].append(value[0])
for key in input_names:
if onnx_inputs.get(key):
onnx_inputs[key] = np.stack(onnx_inputs[key])
else:
logger.warning(f"Expected input '{key}' not found in onnx_inputs")
embeddings = self.runner.run(onnx_inputs)[0]
return [embedding for embedding in embeddings]

View File

@@ -0,0 +1,95 @@
"""Base class for onnx embedding implementations."""
import logging
import os
from abc import ABC, abstractmethod
from enum import Enum
from io import BytesIO
import numpy as np
import requests
from PIL import Image
from frigate.const import UPDATE_MODEL_STATE
from frigate.types import ModelStatusTypesEnum
from frigate.util.downloader import ModelDownloader
logger = logging.getLogger(__name__)
class EmbeddingTypeEnum(str, Enum):
thumbnail = "thumbnail"
description = "description"
class BaseEmbedding(ABC):
"""Base embedding class."""
def __init__(self, model_name: str, model_file: str, download_urls: dict[str, str]):
self.model_name = model_name
self.model_file = model_file
self.download_urls = download_urls
self.downloader: ModelDownloader = None
def _download_model(self, path: str):
try:
file_name = os.path.basename(path)
if file_name in self.download_urls:
ModelDownloader.download_from_url(self.download_urls[file_name], path)
self.downloader.requestor.send_data(
UPDATE_MODEL_STATE,
{
"model": f"{self.model_name}-{file_name}",
"state": ModelStatusTypesEnum.downloaded,
},
)
except Exception:
self.downloader.requestor.send_data(
UPDATE_MODEL_STATE,
{
"model": f"{self.model_name}-{file_name}",
"state": ModelStatusTypesEnum.error,
},
)
@abstractmethod
def _load_model_and_utils(self):
pass
@abstractmethod
def _preprocess_inputs(self, raw_inputs: any) -> any:
pass
def _process_image(self, image, output: str = "RGB") -> Image.Image:
if isinstance(image, str):
if image.startswith("http"):
response = requests.get(image)
image = Image.open(BytesIO(response.content)).convert(output)
elif isinstance(image, bytes):
image = Image.open(BytesIO(image)).convert(output)
return image
def __call__(
self, inputs: list[str] | list[Image.Image] | list[str]
) -> list[np.ndarray]:
self._load_model_and_utils()
processed = self._preprocess_inputs(inputs)
input_names = self.runner.get_input_names()
onnx_inputs = {name: [] for name in input_names}
input: dict[str, any]
for input in processed:
for key, value in input.items():
if key in input_names:
onnx_inputs[key].append(value[0])
for key in input_names:
if onnx_inputs.get(key):
onnx_inputs[key] = np.stack(onnx_inputs[key])
else:
logger.warning(f"Expected input '{key}' not found in onnx_inputs")
embeddings = self.runner.run(onnx_inputs)[0]
return [embedding for embedding in embeddings]
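To illustrate the contract (not part of the commit): a concrete embedding supplies its model files, loads a runner, and shapes inputs for the shared `__call__`. A minimal hypothetical subclass, assuming the `ONNXModelRunner` from the sibling `runner` module that the JinaV1 classes below use:

```python
import numpy as np

from .runner import ONNXModelRunner  # assumption: same runner as jina_v1_embedding.py


class ToyImageEmbedding(BaseEmbedding):
    """Hypothetical example, not shipped with Frigate."""

    def __init__(self):
        super().__init__(
            model_name="toy-model",  # illustrative name
            model_file="toy.onnx",
            download_urls={"toy.onnx": "https://example.com/toy.onnx"},
        )
        self.runner = None

    def _load_model_and_utils(self):
        if self.runner is None:
            # a real implementation would download/wait first (see JinaV1 below)
            self.runner = ONNXModelRunner("/config/model_cache/toy.onnx", "CPU", "small")

    def _preprocess_inputs(self, raw_inputs):
        # one dict per input, keyed by the model's input tensor name;
        # __call__ stacks value[0] across inputs, hence the batch dim here
        return [
            {"image": np.expand_dims(np.asarray(self._process_image(i), np.float32), 0)}
            for i in raw_inputs
        ]
```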

View File

@@ -0,0 +1,216 @@
"""JinaV1 Embeddings."""
import logging
import os
import warnings
# importing this without pytorch or others causes a warning
# https://github.com/huggingface/transformers/issues/27214
# suppressed by setting env TRANSFORMERS_NO_ADVISORY_WARNINGS=1
from transformers import AutoFeatureExtractor, AutoTokenizer
from transformers.utils.logging import disable_progress_bar
from frigate.comms.inter_process import InterProcessRequestor
from frigate.const import MODEL_CACHE_DIR, UPDATE_MODEL_STATE
from frigate.types import ModelStatusTypesEnum
from frigate.util.downloader import ModelDownloader
from .base_embedding import BaseEmbedding
from .runner import ONNXModelRunner
warnings.filterwarnings(
"ignore",
category=FutureWarning,
message="The class CLIPFeatureExtractor is deprecated",
)
# disables the progress bar for downloading tokenizers and feature extractors
disable_progress_bar()
logger = logging.getLogger(__name__)
class JinaV1TextEmbedding(BaseEmbedding):
def __init__(
self,
model_size: str,
requestor: InterProcessRequestor,
device: str = "AUTO",
):
super().__init__(
model_name="jinaai/jina-clip-v1",
model_file="text_model_fp16.onnx",
download_urls={
"text_model_fp16.onnx": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/text_model_fp16.onnx",
},
)
self.tokenizer_file = "tokenizer"
self.requestor = requestor
self.model_size = model_size
self.device = device
self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
self.tokenizer = None
self.feature_extractor = None
self.runner = None
files_names = list(self.download_urls.keys()) + [self.tokenizer_file]
if not all(
os.path.exists(os.path.join(self.download_path, n)) for n in files_names
):
logger.debug(f"starting model download for {self.model_name}")
self.downloader = ModelDownloader(
model_name=self.model_name,
download_path=self.download_path,
file_names=files_names,
download_func=self._download_model,
)
self.downloader.ensure_model_files()
else:
self.downloader = None
ModelDownloader.mark_files_state(
self.requestor,
self.model_name,
files_names,
ModelStatusTypesEnum.downloaded,
)
self._load_model_and_utils()
logger.debug(f"models are already downloaded for {self.model_name}")
def _download_model(self, path: str):
try:
file_name = os.path.basename(path)
if file_name in self.download_urls:
ModelDownloader.download_from_url(self.download_urls[file_name], path)
elif file_name == self.tokenizer_file:
if not os.path.exists(path + "/" + self.model_name):
logger.info(f"Downloading {self.model_name} tokenizer")
tokenizer = AutoTokenizer.from_pretrained(
self.model_name,
trust_remote_code=True,
cache_dir=f"{MODEL_CACHE_DIR}/{self.model_name}/tokenizer",
clean_up_tokenization_spaces=True,
)
tokenizer.save_pretrained(path)
self.downloader.requestor.send_data(
UPDATE_MODEL_STATE,
{
"model": f"{self.model_name}-{file_name}",
"state": ModelStatusTypesEnum.downloaded,
},
)
except Exception:
self.downloader.requestor.send_data(
UPDATE_MODEL_STATE,
{
"model": f"{self.model_name}-{file_name}",
"state": ModelStatusTypesEnum.error,
},
)
def _load_model_and_utils(self):
if self.runner is None:
if self.downloader:
self.downloader.wait_for_download()
tokenizer_path = os.path.join(
f"{MODEL_CACHE_DIR}/{self.model_name}/tokenizer"
)
self.tokenizer = AutoTokenizer.from_pretrained(
self.model_name,
cache_dir=tokenizer_path,
trust_remote_code=True,
clean_up_tokenization_spaces=True,
)
self.runner = ONNXModelRunner(
os.path.join(self.download_path, self.model_file),
self.device,
self.model_size,
)
def _preprocess_inputs(self, raw_inputs):
max_length = max(len(self.tokenizer.encode(text)) for text in raw_inputs)
return [
self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=max_length,
return_tensors="np",
)
for text in raw_inputs
]
class JinaV1ImageEmbedding(BaseEmbedding):
def __init__(
self,
model_size: str,
requestor: InterProcessRequestor,
device: str = "AUTO",
):
model_file = (
"vision_model_fp16.onnx"
if model_size == "large"
else "vision_model_quantized.onnx"
)
super().__init__(
model_name="jinaai/jina-clip-v1",
model_file=model_file,
download_urls={
model_file: f"https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/{model_file}",
"preprocessor_config.json": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/preprocessor_config.json",
},
)
self.requestor = requestor
self.model_size = model_size
self.device = device
self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
self.feature_extractor = None
self.runner: ONNXModelRunner | None = None
files_names = list(self.download_urls.keys())
if not all(
os.path.exists(os.path.join(self.download_path, n)) for n in files_names
):
logger.debug(f"starting model download for {self.model_name}")
self.downloader = ModelDownloader(
model_name=self.model_name,
download_path=self.download_path,
file_names=files_names,
download_func=self._download_model,
)
self.downloader.ensure_model_files()
else:
self.downloader = None
ModelDownloader.mark_files_state(
self.requestor,
self.model_name,
files_names,
ModelStatusTypesEnum.downloaded,
)
self._load_model_and_utils()
logger.debug(f"models are already downloaded for {self.model_name}")
def _load_model_and_utils(self):
if self.runner is None:
if self.downloader:
self.downloader.wait_for_download()
self.feature_extractor = AutoFeatureExtractor.from_pretrained(
f"{MODEL_CACHE_DIR}/{self.model_name}",
)
self.runner = ONNXModelRunner(
os.path.join(self.download_path, self.model_file),
self.device,
self.model_size,
)
def _preprocess_inputs(self, raw_inputs):
processed_images = [self._process_image(img) for img in raw_inputs]
return [
self.feature_extractor(images=image, return_tensors="np")
for image in processed_images
]
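
A rough usage sketch (not part of this commit): embed a caption and an image with the two classes above and score cosine similarity. Model download and ONNX session setup happen inside the constructors; the image path and "small" sizing are illustrative.

import numpy as np
from PIL import Image

from frigate.comms.inter_process import InterProcessRequestor

requestor = InterProcessRequestor()
text_model = JinaV1TextEmbedding(model_size="small", requestor=requestor)
image_model = JinaV1ImageEmbedding(model_size="small", requestor=requestor)

text_vec = text_model(["a red car in the driveway"])[0]
image_vec = image_model([Image.open("/path/to/snapshot.jpg")])[0]

# cosine similarity between the two CLIP-style embeddings
score = float(
    np.dot(text_vec, image_vec)
    / (np.linalg.norm(text_vec) * np.linalg.norm(image_vec))
)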


@@ -0,0 +1,297 @@
import logging
import os
import warnings
import cv2
import numpy as np
from frigate.comms.inter_process import InterProcessRequestor
from frigate.const import MODEL_CACHE_DIR
from frigate.types import ModelStatusTypesEnum
from frigate.util.downloader import ModelDownloader
from .base_embedding import BaseEmbedding
from .runner import ONNXModelRunner
warnings.filterwarnings(
"ignore",
category=FutureWarning,
message="The class CLIPFeatureExtractor is deprecated",
)
logger = logging.getLogger(__name__)
LPR_EMBEDDING_SIZE = 256
class PaddleOCRDetection(BaseEmbedding):
def __init__(
self,
model_size: str,
requestor: InterProcessRequestor,
device: str = "AUTO",
):
super().__init__(
model_name="paddleocr-onnx",
model_file="detection.onnx",
download_urls={
"detection.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/detection.onnx"
},
)
self.requestor = requestor
self.model_size = model_size
self.device = device
self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
self.runner: ONNXModelRunner | None = None
files_names = list(self.download_urls.keys())
if not all(
os.path.exists(os.path.join(self.download_path, n)) for n in files_names
):
logger.debug(f"starting model download for {self.model_name}")
self.downloader = ModelDownloader(
model_name=self.model_name,
download_path=self.download_path,
file_names=files_names,
download_func=self._download_model,
)
self.downloader.ensure_model_files()
else:
self.downloader = None
ModelDownloader.mark_files_state(
self.requestor,
self.model_name,
files_names,
ModelStatusTypesEnum.downloaded,
)
self._load_model_and_utils()
logger.debug(f"models are already downloaded for {self.model_name}")
def _load_model_and_utils(self):
if self.runner is None:
if self.downloader:
self.downloader.wait_for_download()
self.runner = ONNXModelRunner(
os.path.join(self.download_path, self.model_file),
self.device,
self.model_size,
)
def _preprocess_inputs(self, raw_inputs):
preprocessed = []
for x in raw_inputs:
preprocessed.append(x)
return [{"x": preprocessed[0]}]
class PaddleOCRClassification(BaseEmbedding):
def __init__(
self,
model_size: str,
requestor: InterProcessRequestor,
device: str = "AUTO",
):
super().__init__(
model_name="paddleocr-onnx",
model_file="classification.onnx",
download_urls={
"classification.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/classification.onnx"
},
)
self.requestor = requestor
self.model_size = model_size
self.device = device
self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
self.runner: ONNXModelRunner | None = None
files_names = list(self.download_urls.keys())
if not all(
os.path.exists(os.path.join(self.download_path, n)) for n in files_names
):
logger.debug(f"starting model download for {self.model_name}")
self.downloader = ModelDownloader(
model_name=self.model_name,
download_path=self.download_path,
file_names=files_names,
download_func=self._download_model,
)
self.downloader.ensure_model_files()
else:
self.downloader = None
ModelDownloader.mark_files_state(
self.requestor,
self.model_name,
files_names,
ModelStatusTypesEnum.downloaded,
)
self._load_model_and_utils()
logger.debug(f"models are already downloaded for {self.model_name}")
def _load_model_and_utils(self):
if self.runner is None:
if self.downloader:
self.downloader.wait_for_download()
self.runner = ONNXModelRunner(
os.path.join(self.download_path, self.model_file),
self.device,
self.model_size,
)
def _preprocess_inputs(self, raw_inputs):
processed = []
for img in raw_inputs:
processed.append({"x": img})
return processed
class PaddleOCRRecognition(BaseEmbedding):
def __init__(
self,
model_size: str,
requestor: InterProcessRequestor,
device: str = "AUTO",
):
super().__init__(
model_name="paddleocr-onnx",
model_file="recognition.onnx",
download_urls={
"recognition.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/recognition.onnx"
},
)
self.requestor = requestor
self.model_size = model_size
self.device = device
self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
self.runner: ONNXModelRunner | None = None
files_names = list(self.download_urls.keys())
if not all(
os.path.exists(os.path.join(self.download_path, n)) for n in files_names
):
logger.debug(f"starting model download for {self.model_name}")
self.downloader = ModelDownloader(
model_name=self.model_name,
download_path=self.download_path,
file_names=files_names,
download_func=self._download_model,
)
self.downloader.ensure_model_files()
else:
self.downloader = None
ModelDownloader.mark_files_state(
self.requestor,
self.model_name,
files_names,
ModelStatusTypesEnum.downloaded,
)
self._load_model_and_utils()
logger.debug(f"models are already downloaded for {self.model_name}")
def _load_model_and_utils(self):
if self.runner is None:
if self.downloader:
self.downloader.wait_for_download()
self.runner = ONNXModelRunner(
os.path.join(self.download_path, self.model_file),
self.device,
self.model_size,
)
def _preprocess_inputs(self, raw_inputs):
processed = []
for img in raw_inputs:
processed.append({"x": img})
return processed
class LicensePlateDetector(BaseEmbedding):
def __init__(
self,
model_size: str,
requestor: InterProcessRequestor,
device: str = "AUTO",
):
super().__init__(
model_name="yolov9_license_plate",
model_file="yolov9-256-license-plates.onnx",
download_urls={
"yolov9-256-license-plates.onnx": "https://github.com/hawkeye217/yolov9-license-plates/raw/refs/heads/master/models/yolov9-256-license-plates.onnx"
},
)
self.requestor = requestor
self.model_size = model_size
self.device = device
self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
self.runner: ONNXModelRunner | None = None
files_names = list(self.download_urls.keys())
if not all(
os.path.exists(os.path.join(self.download_path, n)) for n in files_names
):
logger.debug(f"starting model download for {self.model_name}")
self.downloader = ModelDownloader(
model_name=self.model_name,
download_path=self.download_path,
file_names=files_names,
download_func=self._download_model,
)
self.downloader.ensure_model_files()
else:
self.downloader = None
ModelDownloader.mark_files_state(
self.requestor,
self.model_name,
files_names,
ModelStatusTypesEnum.downloaded,
)
self._load_model_and_utils()
logger.debug(f"models are already downloaded for {self.model_name}")
def _load_model_and_utils(self):
if self.runner is None:
if self.downloader:
self.downloader.wait_for_download()
self.runner = ONNXModelRunner(
os.path.join(self.download_path, self.model_file),
self.device,
self.model_size,
)
def _preprocess_inputs(self, raw_inputs):
if isinstance(raw_inputs, list):
raise ValueError("License plate embedding does not support batch inputs.")
# Get image as numpy array
img = self._process_image(raw_inputs)
height, width, channels = img.shape
# Resize maintaining aspect ratio
if width > height:
new_height = int(((height / width) * LPR_EMBEDDING_SIZE) // 4 * 4)
img = cv2.resize(img, (LPR_EMBEDDING_SIZE, new_height))
else:
new_width = int(((width / height) * LPR_EMBEDDING_SIZE) // 4 * 4)
img = cv2.resize(img, (new_width, LPR_EMBEDDING_SIZE))
# Get new dimensions after resize
og_h, og_w, channels = img.shape
# Create black square frame
frame = np.full(
(LPR_EMBEDDING_SIZE, LPR_EMBEDDING_SIZE, channels),
(0, 0, 0),
dtype=np.float32,
)
# Center the resized image in the square frame
x_center = (LPR_EMBEDDING_SIZE - og_w) // 2
y_center = (LPR_EMBEDDING_SIZE - og_h) // 2
frame[y_center : y_center + og_h, x_center : x_center + og_w] = img
# Normalize to 0-1
frame = frame / 255.0
# Convert from HWC to CHW format and add batch dimension
frame = np.transpose(frame, (2, 0, 1))
frame = np.expand_dims(frame, axis=0)
return [{"images": frame}]


@@ -0,0 +1,79 @@
"""Convenience runner for onnx models."""
import logging
from typing import Any
import onnxruntime as ort
from frigate.util.model import get_ort_providers
try:
import openvino as ov
except ImportError:
# openvino is not included
pass
logger = logging.getLogger(__name__)
class ONNXModelRunner:
"""Run onnx models optimally based on available hardware."""
def __init__(self, model_path: str, device: str, requires_fp16: bool = False):
self.model_path = model_path
self.ort: ort.InferenceSession = None
self.ov: ov.Core = None
providers, options = get_ort_providers(device == "CPU", device, requires_fp16)
self.interpreter = None
if "OpenVINOExecutionProvider" in providers:
try:
# use OpenVINO directly
self.type = "ov"
self.ov = ov.Core()
self.ov.set_property(
{ov.properties.cache_dir: "/config/model_cache/openvino"}
)
self.interpreter = self.ov.compile_model(
model=model_path, device_name=device
)
except Exception as e:
logger.warning(
f"OpenVINO failed to build model, using CPU instead: {e}"
)
self.interpreter = None
# Use ONNXRuntime
if self.interpreter is None:
self.type = "ort"
self.ort = ort.InferenceSession(
model_path,
providers=providers,
provider_options=options,
)
def get_input_names(self) -> list[str]:
if self.type == "ov":
input_names = []
for input in self.interpreter.inputs:
input_names.extend(input.names)
return input_names
elif self.type == "ort":
return [input.name for input in self.ort.get_inputs()]
def run(self, input: dict[str, Any]) -> Any:
if self.type == "ov":
infer_request = self.interpreter.create_infer_request()
input_tensor = list(input.values())
if len(input_tensor) == 1:
input_tensor = ov.Tensor(array=input_tensor[0])
else:
input_tensor = ov.Tensor(array=input_tensor)
infer_request.infer(input_tensor)
return [infer_request.get_output_tensor().data]
elif self.type == "ort":
return self.ort.run(None, input)
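
Minimal usage sketch for the runner above; the model path and input shape are hypothetical, and "AUTO" defers provider selection to get_ort_providers:

import numpy as np

runner = ONNXModelRunner("/config/model_cache/example/model.onnx", device="AUTO")
input_name = runner.get_input_names()[0]
outputs = runner.run({input_name: np.zeros((1, 3, 256, 256), dtype=np.float32)})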


@@ -10,6 +10,7 @@ from frigate.const import (
     FFMPEG_HWACCEL_NVIDIA,
     FFMPEG_HWACCEL_VAAPI,
     FFMPEG_HWACCEL_VULKAN,
+    LIBAVFORMAT_VERSION_MAJOR,
 )
 from frigate.util.services import vainfo_hwaccel
 from frigate.version import VERSION
@@ -51,9 +52,8 @@ class LibvaGpuSelector:
         return ""
-LIBAV_VERSION = int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59") or "59")
-FPS_VFR_PARAM = "-fps_mode vfr" if LIBAV_VERSION >= 59 else "-vsync 2"
-TIMEOUT_PARAM = "-timeout" if LIBAV_VERSION >= 59 else "-stimeout"
+FPS_VFR_PARAM = "-fps_mode vfr" if LIBAVFORMAT_VERSION_MAJOR >= 59 else "-vsync 2"
+TIMEOUT_PARAM = "-timeout" if LIBAVFORMAT_VERSION_MAJOR >= 59 else "-stimeout"
 _gpu_selector = LibvaGpuSelector()
 _user_agent_args = [
@@ -65,8 +65,8 @@ PRESETS_HW_ACCEL_DECODE = {
     "preset-rpi-64-h264": "-c:v:1 h264_v4l2m2m",
     "preset-rpi-64-h265": "-c:v:1 hevc_v4l2m2m",
     FFMPEG_HWACCEL_VAAPI: f"-hwaccel_flags allow_profile_mismatch -hwaccel vaapi -hwaccel_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format vaapi",
-    "preset-intel-qsv-h264": f"-hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v h264_qsv{' -bsf:v dump_extra' if LIBAV_VERSION >= 61 else ''}",  # https://trac.ffmpeg.org/ticket/9766#comment:17
-    "preset-intel-qsv-h265": f"-load_plugin hevc_hw -hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv{' -bsf:v dump_extra' if LIBAV_VERSION >= 61 else ''}",  # https://trac.ffmpeg.org/ticket/9766#comment:17
+    "preset-intel-qsv-h264": f"-hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v h264_qsv{' -bsf:v dump_extra' if LIBAVFORMAT_VERSION_MAJOR >= 61 else ''}",  # https://trac.ffmpeg.org/ticket/9766#comment:17
+    "preset-intel-qsv-h265": f"-load_plugin hevc_hw -hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv{' -bsf:v dump_extra' if LIBAVFORMAT_VERSION_MAJOR >= 61 else ''}",  # https://trac.ffmpeg.org/ticket/9766#comment:17
     FFMPEG_HWACCEL_NVIDIA: "-hwaccel cuda -hwaccel_output_format cuda",
     "preset-jetson-h264": "-c:v h264_nvmpi -resize {1}x{2}",
     "preset-jetson-h265": "-c:v hevc_nvmpi -resize {1}x{2}",

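The hunk above replaces local env parsing with the shared LIBAVFORMAT_VERSION_MAJOR constant from frigate.const. A self-contained sketch of how the two version gates behave at representative libavformat majors:

def vfr_and_timeout(libavformat_major: int) -> tuple[str, str]:
    # mirrors FPS_VFR_PARAM / TIMEOUT_PARAM above
    fps = "-fps_mode vfr" if libavformat_major >= 59 else "-vsync 2"
    timeout = "-timeout" if libavformat_major >= 59 else "-stimeout"
    return fps, timeout

assert vfr_and_timeout(60) == ("-fps_mode vfr", "-timeout")  # ffmpeg 5.x and newer
assert vfr_and_timeout(58) == ("-vsync 2", "-stimeout")      # ffmpeg 4.x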

@@ -49,7 +49,7 @@ class ImprovedMotionDetector(MotionDetector):
         self.contrast_values = np.zeros((contrast_frame_history, 2), np.uint8)
         self.contrast_values[:, 1:2] = 255
         self.contrast_values_index = 0
-        self.config_subscriber = ConfigSubscriber(f"config/motion/{name}")
+        self.config_subscriber = ConfigSubscriber(f"config/motion/{name}", True)
         self.ptz_metrics = ptz_metrics
         self.last_stop_time = None

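For context, a sketch of how these subscribers are polled elsewhere in Frigate; the import path and the (topic, config) tuple shape are assumed from other call sites, and the camera name is made up:

from frigate.comms.config_updater import ConfigSubscriber

sub = ConfigSubscriber("config/motion/front_door", True)
topic, updated_config = sub.check_for_update()  # assumed API
if updated_config:
    pass  # swap in the new motion config for subsequent frames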

@@ -172,7 +172,9 @@ class PreviewRecorder:
         # create communication for finished previews
         self.requestor = InterProcessRequestor()
-        self.config_subscriber = ConfigSubscriber(f"config/record/{self.config.name}")
+        self.config_subscriber = ConfigSubscriber(
+            f"config/record/{self.config.name}", True
+        )
         y, u1, u2, v1, v2 = get_yuv_crop(
             self.config.frame_shape_yuv,


@@ -2,18 +2,11 @@
 import logging
 import os
-from typing import Any
 import cv2
 import numpy as np
 import onnxruntime as ort
-try:
-    import openvino as ov
-except ImportError:
-    # openvino is not included
-    pass
 logger = logging.getLogger(__name__)
 ### Post Processing
@@ -124,66 +117,3 @@ def get_ort_providers(
         options.append({})
     return (providers, options)
-class ONNXModelRunner:
-    """Run onnx models optimally based on available hardware."""
-    def __init__(self, model_path: str, device: str, requires_fp16: bool = False):
-        self.model_path = model_path
-        self.ort: ort.InferenceSession = None
-        self.ov: ov.Core = None
-        providers, options = get_ort_providers(device == "CPU", device, requires_fp16)
-        self.interpreter = None
-        if "OpenVINOExecutionProvider" in providers:
-            try:
-                # use OpenVINO directly
-                self.type = "ov"
-                self.ov = ov.Core()
-                self.ov.set_property(
-                    {ov.properties.cache_dir: "/config/model_cache/openvino"}
-                )
-                self.interpreter = self.ov.compile_model(
-                    model=model_path, device_name=device
-                )
-            except Exception as e:
-                logger.warning(
-                    f"OpenVINO failed to build model, using CPU instead: {e}"
-                )
-                self.interpreter = None
-        # Use ONNXRuntime
-        if self.interpreter is None:
-            self.type = "ort"
-            self.ort = ort.InferenceSession(
-                model_path,
-                providers=providers,
-                provider_options=options,
-            )
-    def get_input_names(self) -> list[str]:
-        if self.type == "ov":
-            input_names = []
-            for input in self.interpreter.inputs:
-                input_names.extend(input.names)
-            return input_names
-        elif self.type == "ort":
-            return [input.name for input in self.ort.get_inputs()]
-    def run(self, input: dict[str, Any]) -> Any:
-        if self.type == "ov":
-            infer_request = self.interpreter.create_infer_request()
-            input_tensor = list(input.values())
-            if len(input_tensor) == 1:
-                input_tensor = ov.Tensor(array=input_tensor[0])
-            else:
-                input_tensor = ov.Tensor(array=input_tensor)
-            infer_request.infer(input_tensor)
-            return [infer_request.get_output_tensor().data]
-        elif self.type == "ort":
-            return self.ort.run(None, input)


@@ -539,7 +539,7 @@ def process_frames(
     exit_on_empty: bool = False,
 ):
     next_region_update = get_tomorrow_at_time(2)
-    config_subscriber = ConfigSubscriber(f"config/detect/{camera_name}")
+    config_subscriber = ConfigSubscriber(f"config/detect/{camera_name}", True)
     fps_tracker = EventsPerSecond()
     fps_tracker.start()

web/package-lock.json (generated; 169 lines changed)

@@ -25,7 +25,7 @@
         "@radix-ui/react-select": "^2.1.2",
         "@radix-ui/react-separator": "^1.1.0",
         "@radix-ui/react-slider": "^1.2.1",
-        "@radix-ui/react-slot": "^1.1.0",
+        "@radix-ui/react-slot": "^1.1.2",
         "@radix-ui/react-switch": "^1.1.1",
         "@radix-ui/react-tabs": "^1.1.1",
         "@radix-ui/react-toggle": "^1.1.0",
@@ -1176,6 +1176,24 @@
         }
       }
     },
+    "node_modules/@radix-ui/react-alert-dialog/node_modules/@radix-ui/react-slot": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz",
+      "integrity": "sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==",
+      "license": "MIT",
+      "dependencies": {
+        "@radix-ui/react-compose-refs": "1.1.0"
+      },
+      "peerDependencies": {
+        "@types/react": "*",
+        "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+      },
+      "peerDependenciesMeta": {
+        "@types/react": {
+          "optional": true
+        }
+      }
+    },
     "node_modules/@radix-ui/react-arrow": {
       "version": "1.1.0",
       "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.0.tgz",
@@ -1293,6 +1311,24 @@
         }
       }
     },
+    "node_modules/@radix-ui/react-collection/node_modules/@radix-ui/react-slot": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz",
+      "integrity": "sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==",
+      "license": "MIT",
+      "dependencies": {
+        "@radix-ui/react-compose-refs": "1.1.0"
+      },
+      "peerDependencies": {
+        "@types/react": "*",
+        "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+      },
+      "peerDependenciesMeta": {
+        "@types/react": {
+          "optional": true
+        }
+      }
+    },
     "node_modules/@radix-ui/react-compose-refs": {
       "version": "1.1.0",
       "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.0.tgz",
@@ -1417,6 +1453,24 @@
         }
       }
     },
+    "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-slot": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz",
+      "integrity": "sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==",
+      "license": "MIT",
+      "dependencies": {
+        "@radix-ui/react-compose-refs": "1.1.0"
+      },
+      "peerDependencies": {
+        "@types/react": "*",
+        "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+      },
+      "peerDependenciesMeta": {
+        "@types/react": {
+          "optional": true
+        }
+      }
+    },
     "node_modules/@radix-ui/react-direction": {
       "version": "1.1.0",
       "resolved": "https://registry.npmjs.org/@radix-ui/react-direction/-/react-direction-1.1.0.tgz",
@@ -1685,6 +1739,24 @@
         }
       }
     },
+    "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-slot": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz",
+      "integrity": "sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==",
+      "license": "MIT",
+      "dependencies": {
+        "@radix-ui/react-compose-refs": "1.1.0"
+      },
+      "peerDependencies": {
+        "@types/react": "*",
+        "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+      },
+      "peerDependenciesMeta": {
+        "@types/react": {
+          "optional": true
+        }
+      }
+    },
     "node_modules/@radix-ui/react-popover": {
       "version": "1.1.2",
       "resolved": "https://registry.npmjs.org/@radix-ui/react-popover/-/react-popover-1.1.2.tgz",
@@ -1737,6 +1809,24 @@
         }
       }
     },
+    "node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-slot": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz",
+      "integrity": "sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==",
+      "license": "MIT",
+      "dependencies": {
+        "@radix-ui/react-compose-refs": "1.1.0"
+      },
+      "peerDependencies": {
+        "@types/react": "*",
+        "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+      },
+      "peerDependenciesMeta": {
+        "@types/react": {
+          "optional": true
+        }
+      }
+    },
     "node_modules/@radix-ui/react-popper": {
       "version": "1.2.0",
       "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.0.tgz",
@@ -1840,6 +1930,24 @@
         }
       }
     },
+    "node_modules/@radix-ui/react-primitive/node_modules/@radix-ui/react-slot": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz",
+      "integrity": "sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==",
+      "license": "MIT",
+      "dependencies": {
+        "@radix-ui/react-compose-refs": "1.1.0"
+      },
+      "peerDependencies": {
+        "@types/react": "*",
+        "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+      },
+      "peerDependenciesMeta": {
+        "@types/react": {
+          "optional": true
+        }
+      }
+    },
     "node_modules/@radix-ui/react-radio-group": {
       "version": "1.2.1",
       "resolved": "https://registry.npmjs.org/@radix-ui/react-radio-group/-/react-radio-group-1.2.1.tgz",
@@ -2022,6 +2130,24 @@
         }
       }
     },
+    "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-slot": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz",
+      "integrity": "sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==",
+      "license": "MIT",
+      "dependencies": {
+        "@radix-ui/react-compose-refs": "1.1.0"
+      },
+      "peerDependencies": {
+        "@types/react": "*",
+        "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+      },
+      "peerDependenciesMeta": {
+        "@types/react": {
+          "optional": true
+        }
+      }
+    },
     "node_modules/@radix-ui/react-separator": {
       "version": "1.1.0",
       "resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.0.tgz",
@@ -2094,12 +2220,12 @@
       }
     },
     "node_modules/@radix-ui/react-slot": {
-      "version": "1.1.0",
-      "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz",
-      "integrity": "sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==",
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.2.tgz",
+      "integrity": "sha512-YAKxaiGsSQJ38VzKH86/BPRC4rh+b1Jpa+JneA5LRE7skmLPNAyeG8kPJj/oo4STLvlrs8vkf/iYyc3A5stYCQ==",
       "license": "MIT",
       "dependencies": {
-        "@radix-ui/react-compose-refs": "1.1.0"
+        "@radix-ui/react-compose-refs": "1.1.1"
       },
       "peerDependencies": {
         "@types/react": "*",
@@ -2111,6 +2237,21 @@
         }
       }
     },
+    "node_modules/@radix-ui/react-slot/node_modules/@radix-ui/react-compose-refs": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.1.tgz",
+      "integrity": "sha512-Y9VzoRDSJtgFMUCoiZBDVo084VQ5hfpXxVE+NgkdNsjiDBByiImMZKKhxMwCbdHvhlENG6a833CbFkOQvTricw==",
+      "license": "MIT",
+      "peerDependencies": {
+        "@types/react": "*",
+        "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+      },
+      "peerDependenciesMeta": {
+        "@types/react": {
+          "optional": true
+        }
+      }
+    },
     "node_modules/@radix-ui/react-switch": {
       "version": "1.1.1",
       "resolved": "https://registry.npmjs.org/@radix-ui/react-switch/-/react-switch-1.1.1.tgz",
@@ -2303,6 +2444,24 @@
         }
       }
     },
+    "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-slot": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz",
+      "integrity": "sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==",
+      "license": "MIT",
+      "dependencies": {
+        "@radix-ui/react-compose-refs": "1.1.0"
+      },
+      "peerDependencies": {
+        "@types/react": "*",
+        "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+      },
+      "peerDependenciesMeta": {
+        "@types/react": {
+          "optional": true
+        }
+      }
+    },
     "node_modules/@radix-ui/react-use-callback-ref": {
       "version": "1.1.0",
       "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.0.tgz",


@@ -31,7 +31,7 @@
     "@radix-ui/react-select": "^2.1.2",
     "@radix-ui/react-separator": "^1.1.0",
     "@radix-ui/react-slider": "^1.2.1",
-    "@radix-ui/react-slot": "^1.1.0",
+    "@radix-ui/react-slot": "^1.1.2",
     "@radix-ui/react-switch": "^1.1.1",
     "@radix-ui/react-tabs": "^1.1.1",
     "@radix-ui/react-toggle": "^1.1.0",


@@ -15,6 +15,7 @@ type ObjectPathProps = {
   pointRadius?: number;
   imgRef: React.RefObject<HTMLImageElement>;
   onPointClick?: (index: number) => void;
+  visible?: boolean;
 };
 const typeColorMap: Partial<
@@ -37,6 +38,7 @@ export function ObjectPath({
   pointRadius = 4,
   imgRef,
   onPointClick,
+  visible = true,
 }: ObjectPathProps) {
   const getAbsolutePositions = useCallback(() => {
     if (!imgRef.current || !positions) return [];
@@ -69,7 +71,7 @@ export function ObjectPath({
     return `rgb(${baseColor.map((c) => Math.max(0, c - 10)).join(",")})`;
   };
-  if (!imgRef.current) return null;
+  if (!imgRef.current || !visible) return null;
   const absolutePositions = getAbsolutePositions();
   const lineColor = `rgb(${color.join(",")})`;


@@ -0,0 +1,281 @@
import { useState, useEffect, useMemo, useRef } from "react";
import useSWR from "swr";
import { useApiHost } from "@/api";
import type { SearchResult } from "@/types/search";
import { ObjectPath } from "./ObjectPath";
import type { FrigateConfig } from "@/types/frigateConfig";
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from "@/components/ui/select";
import { Card, CardContent } from "@/components/ui/card";
import { formatUnixTimestampToDateTime } from "@/utils/dateUtil";
import { useTimezone } from "@/hooks/use-date-utils";
import { Button } from "@/components/ui/button";
import { LuX } from "react-icons/lu";
import {
Pagination,
PaginationContent,
PaginationItem,
PaginationLink,
PaginationNext,
PaginationPrevious,
} from "@/components/ui/pagination";
export default function ObjectPathPlotter() {
const apiHost = useApiHost();
const [timeRange, setTimeRange] = useState("1d");
const { data: config } = useSWR<FrigateConfig>("config");
const imgRef = useRef<HTMLImageElement>(null);
const timezone = useTimezone(config);
const [selectedCamera, setSelectedCamera] = useState<string>("");
const [selectedEvent, setSelectedEvent] = useState<SearchResult | null>(null);
const [currentPage, setCurrentPage] = useState(1);
const eventsPerPage = 20;
useEffect(() => {
if (config && !selectedCamera) {
setSelectedCamera(Object.keys(config.cameras)[0]);
}
}, [config, selectedCamera]);
const searchQuery = useMemo(() => {
if (!selectedCamera) return null;
return [
"events",
{
cameras: selectedCamera,
after: Math.floor(Date.now() / 1000) - getTimeRangeInSeconds(timeRange),
before: Math.floor(Date.now() / 1000),
has_clip: 1,
include_thumbnails: 0,
limit: 1000,
timezone,
},
];
}, [selectedCamera, timeRange, timezone]);
const { data: events } = useSWR<SearchResult[]>(searchQuery);
const aspectRatio = useMemo(() => {
if (!config || !selectedCamera) return 16 / 9;
return (
config.cameras[selectedCamera].detect.width /
config.cameras[selectedCamera].detect.height
);
}, [config, selectedCamera]);
const pathPoints = useMemo(() => {
if (!events) return [];
return events.flatMap(
(event) =>
event.data.path_data?.map(
([coords, timestamp]: [number[], number]) => ({
x: coords[0],
y: coords[1],
timestamp,
event,
}),
) || [],
);
}, [events]);
const getRandomColor = () => {
return [
Math.floor(Math.random() * 256),
Math.floor(Math.random() * 256),
Math.floor(Math.random() * 256),
];
};
const eventColors = useMemo(() => {
if (!events) return {};
return events.reduce(
(acc, event) => {
acc[event.id] = getRandomColor();
return acc;
},
{} as Record<string, number[]>,
);
}, [events]);
const [imageLoaded, setImageLoaded] = useState(false);
useEffect(() => {
if (!selectedCamera) return;
const img = new Image();
img.src = selectedEvent
? `${apiHost}api/${selectedCamera}/recordings/${selectedEvent.start_time}/snapshot.jpg`
: `${apiHost}api/${selectedCamera}/latest.jpg?h=500`;
img.onload = () => {
if (imgRef.current) {
imgRef.current.src = img.src;
setImageLoaded(true);
}
};
}, [apiHost, selectedCamera, selectedEvent]);
const handleEventClick = (event: SearchResult) => {
setSelectedEvent(event.id === selectedEvent?.id ? null : event);
};
const clearSelectedEvent = () => {
setSelectedEvent(null);
};
const totalPages = Math.ceil((events?.length || 0) / eventsPerPage);
const paginatedEvents = events?.slice(
(currentPage - 1) * eventsPerPage,
currentPage * eventsPerPage,
);
return (
<Card className="p-4">
<CardContent>
<div className="mb-4 flex items-center justify-between">
<h2 className="text-2xl font-bold">Tracked Object Paths</h2>
<div className="flex space-x-2">
<Select value={selectedCamera} onValueChange={setSelectedCamera}>
<SelectTrigger className="w-[180px]">
<SelectValue placeholder="Select camera" />
</SelectTrigger>
<SelectContent>
{config &&
Object.keys(config.cameras).map((cameraName) => (
<SelectItem key={cameraName} value={cameraName}>
{cameraName}
</SelectItem>
))}
</SelectContent>
</Select>
<Select value={timeRange} onValueChange={setTimeRange}>
<SelectTrigger className="w-[180px]">
<SelectValue placeholder="Select time range" />
</SelectTrigger>
<SelectContent>
<SelectItem value="1h">Last 1 hour</SelectItem>
<SelectItem value="6h">Last 6 hours</SelectItem>
<SelectItem value="12h">Last 12 hours</SelectItem>
<SelectItem value="1d">Last 24 hours</SelectItem>
</SelectContent>
</Select>
</div>
</div>
<div className="relative" style={{ aspectRatio }}>
<img
ref={imgRef}
src="/placeholder.svg"
alt={`Latest from ${selectedCamera}`}
className="h-auto w-full"
/>
{imgRef.current && imageLoaded && (
<svg
viewBox={`0 0 ${imgRef.current.width} ${imgRef.current.height}`}
className="absolute inset-0"
>
{events?.map((event) => (
<ObjectPath
key={event.id}
positions={pathPoints.filter(
(point) => point.event.id === event.id,
)}
color={eventColors[event.id]}
width={2}
imgRef={imgRef}
visible={
selectedEvent === null || selectedEvent.id === event.id
}
/>
))}
</svg>
)}
</div>
<div className="mt-4">
<div className="mb-2 flex items-center justify-between">
<h3 className="text-xl font-semibold">Legend</h3>
{selectedEvent && (
<Button
variant="outline"
size="sm"
onClick={clearSelectedEvent}
className="flex items-center"
>
<LuX className="mr-1" /> Clear Selection
</Button>
)}
</div>
<div className="mb-4 grid grid-cols-2 gap-1">
{paginatedEvents?.map((event) => (
<div
key={event.id}
className={`flex cursor-pointer items-center rounded p-1 ${
selectedEvent?.id === event.id ? "bg-secondary" : ""
}`}
onClick={() => handleEventClick(event)}
>
<div
className="mr-2 h-4 w-4 flex-shrink-0"
style={{
backgroundColor: `rgb(${eventColors[event.id].join(",")})`,
}}
/>
<span className="text-sm">
<strong className="mr-1 capitalize">{event.label}</strong>
{formatUnixTimestampToDateTime(event.start_time, {
timezone: config?.ui.timezone,
})}
</span>
</div>
))}
</div>
<Pagination>
<PaginationContent className="cursor-pointer">
<PaginationItem>
<PaginationPrevious
onClick={() =>
setCurrentPage((prev) => Math.max(prev - 1, 1))
}
/>
</PaginationItem>
{[...Array(totalPages)].map((_, index) => (
<PaginationItem key={index}>
<PaginationLink
onClick={() => setCurrentPage(index + 1)}
isActive={currentPage === index + 1}
>
{index + 1}
</PaginationLink>
</PaginationItem>
))}
<PaginationItem>
<PaginationNext
onClick={() =>
setCurrentPage((prev) => Math.min(prev + 1, totalPages))
}
/>
</PaginationItem>
</PaginationContent>
</Pagination>
</div>
</CardContent>
</Card>
);
}
function getTimeRangeInSeconds(range: string): number {
switch (range) {
case "1h":
return 60 * 60;
case "6h":
return 6 * 60 * 60;
case "12h":
return 12 * 60 * 60;
case "1d":
return 24 * 60 * 60;
default:
return 24 * 60 * 60;
}
}
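
A language-neutral sketch (in Python, the backend language of this commit) of the legend's pagination arithmetic above: page N shows events[(N - 1) * 20 : N * 20].

events = list(range(45))
events_per_page = 20
total_pages = -(-len(events) // events_per_page)  # ceil(45 / 20) == 3
page_2 = events[(2 - 1) * events_per_page : 2 * events_per_page]
assert total_pages == 3 and page_2 == list(range(20, 40))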


@@ -0,0 +1,117 @@
import * as React from "react"
import { ChevronLeft, ChevronRight, MoreHorizontal } from "lucide-react"
import { cn } from "@/lib/utils"
import { ButtonProps, buttonVariants } from "@/components/ui/button"
const Pagination = ({ className, ...props }: React.ComponentProps<"nav">) => (
<nav
role="navigation"
aria-label="pagination"
className={cn("mx-auto flex w-full justify-center", className)}
{...props}
/>
)
Pagination.displayName = "Pagination"
const PaginationContent = React.forwardRef<
HTMLUListElement,
React.ComponentProps<"ul">
>(({ className, ...props }, ref) => (
<ul
ref={ref}
className={cn("flex flex-row items-center gap-1", className)}
{...props}
/>
))
PaginationContent.displayName = "PaginationContent"
const PaginationItem = React.forwardRef<
HTMLLIElement,
React.ComponentProps<"li">
>(({ className, ...props }, ref) => (
<li ref={ref} className={cn("", className)} {...props} />
))
PaginationItem.displayName = "PaginationItem"
type PaginationLinkProps = {
isActive?: boolean
} & Pick<ButtonProps, "size"> &
React.ComponentProps<"a">
const PaginationLink = ({
className,
isActive,
size = "icon",
...props
}: PaginationLinkProps) => (
<a
aria-current={isActive ? "page" : undefined}
className={cn(
buttonVariants({
variant: isActive ? "outline" : "ghost",
size,
}),
className
)}
{...props}
/>
)
PaginationLink.displayName = "PaginationLink"
const PaginationPrevious = ({
className,
...props
}: React.ComponentProps<typeof PaginationLink>) => (
<PaginationLink
aria-label="Go to previous page"
size="default"
className={cn("gap-1 pl-2.5", className)}
{...props}
>
<ChevronLeft className="h-4 w-4" />
<span>Previous</span>
</PaginationLink>
)
PaginationPrevious.displayName = "PaginationPrevious"
const PaginationNext = ({
className,
...props
}: React.ComponentProps<typeof PaginationLink>) => (
<PaginationLink
aria-label="Go to next page"
size="default"
className={cn("gap-1 pr-2.5", className)}
{...props}
>
<span>Next</span>
<ChevronRight className="h-4 w-4" />
</PaginationLink>
)
PaginationNext.displayName = "PaginationNext"
const PaginationEllipsis = ({
className,
...props
}: React.ComponentProps<"span">) => (
<span
aria-hidden
className={cn("flex h-9 w-9 items-center justify-center", className)}
{...props}
>
<MoreHorizontal className="h-4 w-4" />
<span className="sr-only">More pages</span>
</span>
)
PaginationEllipsis.displayName = "PaginationEllipsis"
export {
Pagination,
PaginationContent,
PaginationEllipsis,
PaginationItem,
PaginationLink,
PaginationNext,
PaginationPrevious,
}


@@ -30,6 +30,7 @@ import SummaryTimeline from "@/components/timeline/SummaryTimeline";
 import { isMobile } from "react-device-detect";
 import IconPicker, { IconElement } from "@/components/icons/IconPicker";
 import { useTimelineZoom } from "@/hooks/use-timeline-zoom";
+import ObjectPathPlotter from "@/components/overlay/detail/ObjectPathPlotter";
 // Color data
 const colors = [
@@ -224,6 +225,8 @@ function UIPlayground() {
       <div className="no-scrollbar mr-5 mt-4 flex-1 content-start gap-2 overflow-y-auto">
         <Heading as="h2">UI Playground</Heading>
+        <ObjectPathPlotter />
+
         <IconPicker
           selectedIcon={selectedIcon}
           setSelectedIcon={setSelectedIcon}


@@ -57,6 +57,7 @@ export type SearchResult = {
     description?: string;
     average_estimated_speed: number;
     velocity_angle: number;
+    path_data: [number[], number][];
   };
 };
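
Sketch of the path_data shape added here, as consumed by ObjectPathPlotter above: a list of ([x, y] in relative image coordinates, unix timestamp) pairs. The values below are illustrative.

path_data = [([0.41, 0.73], 1732200000.0), ([0.44, 0.70], 1732200001.2)]
points = [{"x": c[0], "y": c[1], "timestamp": t} for c, t in path_data]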