Merge branch 'dev' of https://github.com/blakeblackshear/frigate into motion_improvements

This commit is contained in:
p-boon 2025-02-18 20:20:20 +01:00
commit 5789cff757
60 changed files with 1589 additions and 866 deletions

View File

@ -44,6 +44,7 @@ codeproject
colormap
colorspace
comms
cooldown
coro
ctypeslib
CUDA

View File

@ -1,5 +1,11 @@
## Proposed change
<!--
Thank you!
If you're introducing a new feature or significantly refactoring existing functionality,
we encourage you to start a discussion first. This helps ensure your idea aligns with
Frigate's development goals.
Describe what this pull request does and how it will benefit users of Frigate.
Please describe in detail any considerations, breaking changes, etc. that are
made in this pull request.

View File

@ -76,36 +76,6 @@ jobs:
rpi.tags=${{ steps.setup.outputs.image-name }}-rpi
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max
jetson_jp4_build:
if: false
runs-on: ubuntu-22.04
name: Jetson Jetpack 4
steps:
- name: Check out code
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Set up QEMU and Buildx
id: setup
uses: ./.github/actions/setup
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push TensorRT (Jetson, Jetpack 4)
env:
ARCH: arm64
BASE_IMAGE: timongentzsch/l4t-ubuntu20-opencv:latest
SLIM_BASE: timongentzsch/l4t-ubuntu20-opencv:latest
TRT_BASE: timongentzsch/l4t-ubuntu20-opencv:latest
uses: docker/bake-action@v6
with:
source: .
push: true
targets: tensorrt
files: docker/tensorrt/trt.hcl
set: |
tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt-jp4
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp4
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp4,mode=max
jetson_jp5_build:
if: false
runs-on: ubuntu-22.04
@ -136,6 +106,35 @@ jobs:
tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt-jp5
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5,mode=max
jetson_jp6_build:
runs-on: ubuntu-22.04
name: Jetson Jetpack 6
steps:
- name: Check out code
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Set up QEMU and Buildx
id: setup
uses: ./.github/actions/setup
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push TensorRT (Jetson, Jetpack 6)
env:
ARCH: arm64
BASE_IMAGE: nvcr.io/nvidia/tensorrt:23.12-py3-igpu
SLIM_BASE: nvcr.io/nvidia/tensorrt:23.12-py3-igpu
TRT_BASE: nvcr.io/nvidia/tensorrt:23.12-py3-igpu
uses: docker/bake-action@v6
with:
source: .
push: true
targets: tensorrt
files: docker/tensorrt/trt.hcl
set: |
tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt-jp6
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp6
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp6,mode=max
amd64_extra_builds:
runs-on: ubuntu-22.04
name: AMD64 Extra Build

View File

@ -4,6 +4,7 @@ on:
pull_request:
paths-ignore:
- "docs/**"
- ".github/**"
env:
DEFAULT_PYTHON: 3.11

View File

@ -39,14 +39,14 @@ jobs:
STABLE_TAG=${BASE}:stable
PULL_TAG=${BASE}:${BUILD_TAG}
docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${VERSION_TAG}
for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk h8l rocm; do
for variant in standard-arm64 tensorrt tensorrt-jp5 tensorrt-jp6 rk h8l rocm; do
docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${VERSION_TAG}-${variant}
done
# stable tag
if [[ "${BUILD_TYPE}" == "stable" ]]; then
docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${STABLE_TAG}
for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk h8l rocm; do
for variant in standard-arm64 tensorrt tensorrt-jp5 tensorrt-jp6 rk h8l rocm; do
docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${STABLE_TAG}-${variant}
done
fi

View File

@ -38,4 +38,4 @@ services:
container_name: mqtt
image: eclipse-mosquitto:1.6
ports:
- "1883:1883"
- "1883:1883"

View File

@ -3,14 +3,27 @@
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
# Globally set pip break-system-packages option to avoid having to specify it every time
ARG PIP_BREAK_SYSTEM_PACKAGES=1
ARG BASE_IMAGE=debian:12
ARG SLIM_BASE=debian:12-slim
# A hook that allows us to inject commands right after the base images
ARG BASE_HOOK=
FROM ${BASE_IMAGE} AS base
ARG PIP_BREAK_SYSTEM_PACKAGES
RUN ${BASE_HOOK}
FROM --platform=${BUILDPLATFORM} debian:12 AS base_host
ARG PIP_BREAK_SYSTEM_PACKAGES
FROM ${SLIM_BASE} AS slim-base
ARG PIP_BREAK_SYSTEM_PACKAGES
RUN ${BASE_HOOK}
FROM slim-base AS wget
ARG DEBIAN_FRONTEND
@ -66,8 +79,8 @@ COPY docker/main/requirements-ov.txt /requirements-ov.txt
RUN apt-get -qq update \
&& apt-get -qq install -y wget python3 python3-dev python3-distutils gcc pkg-config libhdf5-dev \
&& wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& python3 get-pip.py "pip" --break-system-packages \
&& pip install --break-system-packages -r /requirements-ov.txt
&& python3 get-pip.py "pip" \
&& pip install -r /requirements-ov.txt
# Get OpenVino Model
RUN --mount=type=bind,source=docker/main/build_ov_model.py,target=/build_ov_model.py \
@ -142,8 +155,8 @@ RUN apt-get -qq update \
apt-transport-https wget \
&& apt-get -qq update \
&& apt-get -qq install -y \
python3 \
python3-dev \
python3.11 \
python3.11-dev \
# opencv dependencies
build-essential cmake git pkg-config libgtk-3-dev \
libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \
@ -157,11 +170,13 @@ RUN apt-get -qq update \
gcc gfortran libopenblas-dev liblapack-dev && \
rm -rf /var/lib/apt/lists/*
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1
RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& python3 get-pip.py "pip" --break-system-packages
&& python3 get-pip.py "pip"
COPY docker/main/requirements.txt /requirements.txt
RUN pip3 install -r /requirements.txt --break-system-packages
RUN pip3 install -r /requirements.txt
# Build pysqlite3 from source
COPY docker/main/build_pysqlite3.sh /build_pysqlite3.sh
@ -215,8 +230,8 @@ RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_de
/deps/install_deps.sh
RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
python3 -m pip install --upgrade pip --break-system-packages && \
pip3 install -U /deps/wheels/*.whl --break-system-packages
python3 -m pip install --upgrade pip && \
pip3 install -U /deps/wheels/*.whl
COPY --from=deps-rootfs / /
@ -263,7 +278,7 @@ RUN apt-get update \
&& rm -rf /var/lib/apt/lists/*
RUN --mount=type=bind,source=./docker/main/requirements-dev.txt,target=/workspace/frigate/requirements-dev.txt \
pip3 install -r requirements-dev.txt --break-system-packages
pip3 install -r requirements-dev.txt
HEALTHCHECK NONE

View File

@ -11,7 +11,7 @@ apt-get -qq install --no-install-recommends -y \
lbzip2 \
procps vainfo \
unzip locales tzdata libxml2 xz-utils \
python3 \
python3.11 \
python3-pip \
curl \
lsof \
@ -21,25 +21,16 @@ apt-get -qq install --no-install-recommends -y \
libglib2.0-0 \
libusb-1.0.0
update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1
mkdir -p -m 600 /root/.gnupg
# install coral runtime
wget -q -O /tmp/libedgetpu1-max.deb "https://github.com/feranick/libedgetpu/releases/download/16.0TF2.17.0-1/libedgetpu1-max_16.0tf2.17.0-1.bookworm_${TARGETARCH}.deb"
wget -q -O /tmp/libedgetpu1-max.deb "https://github.com/feranick/libedgetpu/releases/download/16.0TF2.17.1-1/libedgetpu1-max_16.0tf2.17.1-1.bookworm_${TARGETARCH}.deb"
unset DEBIAN_FRONTEND
yes | dpkg -i /tmp/libedgetpu1-max.deb && export DEBIAN_FRONTEND=noninteractive
rm /tmp/libedgetpu1-max.deb
# install python3 & tflite runtime
if [[ "${TARGETARCH}" == "amd64" ]]; then
pip3 install --break-system-packages https://github.com/feranick/TFlite-builds/releases/download/v2.17.0/tflite_runtime-2.17.0-cp311-cp311-linux_x86_64.whl
pip3 install --break-system-packages https://github.com/feranick/pycoral/releases/download/2.0.2TF2.17.0/pycoral-2.0.2-cp311-cp311-linux_x86_64.whl
fi
if [[ "${TARGETARCH}" == "arm64" ]]; then
pip3 install --break-system-packages https://github.com/feranick/TFlite-builds/releases/download/v2.17.0/tflite_runtime-2.17.0-cp311-cp311-linux_aarch64.whl
pip3 install --break-system-packages https://github.com/feranick/pycoral/releases/download/2.0.2TF2.17.0/pycoral-2.0.2-cp311-cp311-linux_aarch64.whl
fi
# btbn-ffmpeg -> amd64
if [[ "${TARGETARCH}" == "amd64" ]]; then
mkdir -p /usr/lib/ffmpeg/5.0

View File

@ -68,3 +68,7 @@ netaddr==0.8.*
netifaces==0.10.*
verboselogs==1.7.*
virtualenv==20.17.*
prometheus-client == 0.21.*
# TFLite
tflite_runtime @ https://github.com/feranick/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_x86_64.whl; platform_machine == 'x86_64'
tflite_runtime @ https://github.com/feranick/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_aarch64.whl; platform_machine == 'aarch64'

View File

@ -8,7 +8,6 @@ COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt
RUN sed -i "/https:\/\//d" /requirements-wheels.txt
RUN sed -i "/onnxruntime/d" /requirements-wheels.txt
RUN python3 -m pip config set global.break-system-packages true
RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt
RUN rm -rf /rk-wheels/opencv_python-*
@ -16,7 +15,7 @@ FROM deps AS rk-frigate
ARG TARGETARCH
RUN --mount=type=bind,from=rk-wheels,source=/rk-wheels,target=/deps/rk-wheels \
pip3 install --no-deps -U /deps/rk-wheels/*.whl --break-system-packages
pip3 install --no-deps -U /deps/rk-wheels/*.whl
WORKDIR /opt/frigate/
COPY --from=rootfs / /

View File

@ -17,7 +17,7 @@ FROM tensorrt-base AS frigate-tensorrt
ENV TRT_VER=8.6.1
RUN python3 -m pip config set global.break-system-packages true
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
pip3 install -U /deps/trt-wheels/*.whl --break-system-packages && \
pip3 install -U /deps/trt-wheels/*.whl && \
ldconfig
WORKDIR /opt/frigate/
@ -32,4 +32,4 @@ COPY --from=trt-deps /usr/local/cuda-12.1 /usr/local/cuda
COPY docker/tensorrt/detector/rootfs/ /
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
pip3 install -U /deps/trt-wheels/*.whl --break-system-packages
pip3 install -U /deps/trt-wheels/*.whl

View File

@ -7,20 +7,25 @@ ARG BASE_IMAGE
FROM ${BASE_IMAGE} AS build-wheels
ARG DEBIAN_FRONTEND
# Add deadsnakes PPA for python3.11
RUN apt-get -qq update && \
apt-get -qq install -y --no-install-recommends \
software-properties-common \
&& add-apt-repository ppa:deadsnakes/ppa
# Use a separate container to build wheels to prevent build dependencies in final image
RUN apt-get -qq update \
&& apt-get -qq install -y --no-install-recommends \
python3.9 python3.9-dev \
python3.11 python3.11-dev \
wget build-essential cmake git \
&& rm -rf /var/lib/apt/lists/*
# Ensure python3 defaults to python3.9
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1
# Ensure python3 defaults to python3.11
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1
RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& python3 get-pip.py "pip"
FROM build-wheels AS trt-wheels
ARG DEBIAN_FRONTEND
ARG TARGETARCH
@ -41,11 +46,12 @@ RUN --mount=type=bind,source=docker/tensorrt/detector/build_python_tensorrt.sh,t
&& TENSORRT_VER=$(cat /etc/TENSORRT_VER) /deps/build_python_tensorrt.sh
COPY docker/tensorrt/requirements-arm64.txt /requirements-tensorrt.txt
ADD https://nvidia.box.com/shared/static/psl23iw3bh7hlgku0mjo1xekxpego3e3.whl /tmp/onnxruntime_gpu-1.15.1-cp311-cp311-linux_aarch64.whl
# See https://elinux.org/Jetson_Zoo#ONNX_Runtime
ADD https://nvidia.box.com/shared/static/9yvw05k6u343qfnkhdv2x6xhygze0aq1.whl /tmp/onnxruntime_gpu-1.19.0-cp311-cp311-linux_aarch64.whl
RUN pip3 uninstall -y onnxruntime-openvino \
&& pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt \
&& pip3 install --no-deps /tmp/onnxruntime_gpu-1.15.1-cp311-cp311-linux_aarch64.whl
&& pip3 install --no-deps /tmp/onnxruntime_gpu-1.19.0-cp311-cp311-linux_aarch64.whl
FROM build-wheels AS trt-model-wheels
ARG DEBIAN_FRONTEND
@ -67,12 +73,18 @@ RUN --mount=type=bind,source=docker/tensorrt/build_jetson_ffmpeg.sh,target=/deps
# Frigate w/ TensorRT for NVIDIA Jetson platforms
FROM tensorrt-base AS frigate-tensorrt
RUN apt-get update \
&& apt-get install -y python-is-python3 libprotobuf17 \
&& apt-get install -y python-is-python3 libprotobuf23 \
&& rm -rf /var/lib/apt/lists/*
RUN rm -rf /usr/lib/btbn-ffmpeg/
COPY --from=jetson-ffmpeg /rootfs /
# ffmpeg runtime dependencies
RUN apt-get -qq update \
&& apt-get -qq install -y --no-install-recommends \
libx264-163 libx265-199 libegl1 \
&& rm -rf /var/lib/apt/lists/*
COPY --from=trt-wheels /etc/TENSORRT_VER /etc/TENSORRT_VER
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
--mount=type=bind,from=trt-model-wheels,source=/trt-model-wheels,target=/deps/trt-model-wheels \
@ -81,3 +93,6 @@ RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels
WORKDIR /opt/frigate/
COPY --from=rootfs / /
# Fixes "Error importing detector runtime: /usr/lib/aarch64-linux-gnu/libstdc++.so.6: cannot allocate memory in static TLS block"
ENV LD_PRELOAD /usr/lib/aarch64-linux-gnu/libstdc++.so.6

View File

@ -14,14 +14,27 @@ apt-get -qq install -y --no-install-recommends libx264-dev libx265-dev
pushd /tmp
# Install libnvmpi to enable nvmpi decoders (h264_nvmpi, hevc_nvmpi)
if [ -e /usr/local/cuda-10.2 ]; then
if [ -e /usr/local/cuda-12 ]; then
# assume Jetpack 6.2
apt-key adv --fetch-key https://repo.download.nvidia.com/jetson/jetson-ota-public.asc
echo "deb https://repo.download.nvidia.com/jetson/common r36.4 main" >> /etc/apt/sources.list.d/nvidia-l4t-apt-source.list
echo "deb https://repo.download.nvidia.com/jetson/t234 r36.4 main" >> /etc/apt/sources.list.d/nvidia-l4t-apt-source.list
echo "deb https://repo.download.nvidia.com/jetson/ffmpeg r36.4 main" >> /etc/apt/sources.list.d/nvidia-l4t-apt-source.list
mkdir -p /opt/nvidia/l4t-packages/
touch /opt/nvidia/l4t-packages/.nv-l4t-disable-boot-fw-update-in-preinstall
apt-get update
apt-get -qq install -y --no-install-recommends -o Dpkg::Options::="--force-confold" nvidia-l4t-jetson-multimedia-api
elif [ -e /usr/local/cuda-10.2 ]; then
# assume Jetpack 4.X
wget -q https://developer.nvidia.com/embedded/L4T/r32_Release_v5.0/T186/Jetson_Multimedia_API_R32.5.0_aarch64.tbz2 -O jetson_multimedia_api.tbz2
tar xaf jetson_multimedia_api.tbz2 -C / && rm jetson_multimedia_api.tbz2
else
# assume Jetpack 5.X
wget -q https://developer.nvidia.com/downloads/embedded/l4t/r35_release_v3.1/release/jetson_multimedia_api_r35.3.1_aarch64.tbz2 -O jetson_multimedia_api.tbz2
tar xaf jetson_multimedia_api.tbz2 -C / && rm jetson_multimedia_api.tbz2
fi
tar xaf jetson_multimedia_api.tbz2 -C / && rm jetson_multimedia_api.tbz2
wget -q https://github.com/AndBobsYourUncle/jetson-ffmpeg/archive/9c17b09.zip -O jetson-ffmpeg.zip
unzip jetson-ffmpeg.zip && rm jetson-ffmpeg.zip && mv jetson-ffmpeg-* jetson-ffmpeg && cd jetson-ffmpeg

View File

@ -6,23 +6,23 @@ mkdir -p /trt-wheels
if [[ "${TARGETARCH}" == "arm64" ]]; then
# NVIDIA supplies python-tensorrt for python3.8, but frigate uses python3.9,
# NVIDIA supplies python-tensorrt for python3.10, but frigate uses python3.11,
# so we must build python-tensorrt ourselves.
# Get python-tensorrt source
mkdir /workspace
mkdir -p /workspace
cd /workspace
git clone -b ${TENSORRT_VER} https://github.com/NVIDIA/TensorRT.git --depth=1
git clone -b release/8.6 https://github.com/NVIDIA/TensorRT.git --depth=1
# Collect dependencies
EXT_PATH=/workspace/external && mkdir -p $EXT_PATH
pip3 install pybind11 && ln -s /usr/local/lib/python3.9/dist-packages/pybind11 $EXT_PATH/pybind11
ln -s /usr/include/python3.9 $EXT_PATH/python3.9
pip3 install pybind11 && ln -s /usr/local/lib/python3.11/dist-packages/pybind11 $EXT_PATH/pybind11
ln -s /usr/include/python3.11 $EXT_PATH/python3.11
ln -s /usr/include/aarch64-linux-gnu/NvOnnxParser.h /workspace/TensorRT/parsers/onnx/
# Build wheel
cd /workspace/TensorRT/python
EXT_PATH=$EXT_PATH PYTHON_MAJOR_VERSION=3 PYTHON_MINOR_VERSION=9 TARGET_ARCHITECTURE=aarch64 /bin/bash ./build.sh
mv build/dist/*.whl /trt-wheels/
EXT_PATH=$EXT_PATH PYTHON_MAJOR_VERSION=3 PYTHON_MINOR_VERSION=11 TARGET_ARCHITECTURE=aarch64 TENSORRT_MODULE=tensorrt /bin/bash ./build.sh
mv build/bindings_wheel/dist/*.whl /trt-wheels/
fi

View File

@ -1 +1 @@
cuda-python == 11.7; platform_machine == 'aarch64'
cuda-python == 12.6.*; platform_machine == 'aarch64'

View File

@ -13,13 +13,24 @@ variable "TRT_BASE" {
variable "COMPUTE_LEVEL" {
default = ""
}
variable "BASE_HOOK" {
# Ensure an up-to-date python 3.11 is available in tensorrt/jetson image
default = <<EOT
if grep -iq \"ubuntu\" /etc/os-release; then
apt-get update &&
apt-get install -y software-properties-common &&
add-apt-repository ppa:deadsnakes/ppa;
fi
EOT
}
target "_build_args" {
args = {
BASE_IMAGE = BASE_IMAGE,
SLIM_BASE = SLIM_BASE,
TRT_BASE = TRT_BASE,
COMPUTE_LEVEL = COMPUTE_LEVEL
COMPUTE_LEVEL = COMPUTE_LEVEL,
BASE_HOOK = BASE_HOOK
}
platforms = ["linux/${ARCH}"]
}

View File

@ -1,41 +1,41 @@
BOARDS += trt
JETPACK4_BASE ?= timongentzsch/l4t-ubuntu20-opencv:latest # L4T 32.7.1 JetPack 4.6.1
JETPACK5_BASE ?= nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime # L4T 35.3.1 JetPack 5.1.1
JETPACK6_BASE ?= nvcr.io/nvidia/tensorrt:23.12-py3-igpu
X86_DGPU_ARGS := ARCH=amd64 COMPUTE_LEVEL="50 60 70 80 90"
JETPACK4_ARGS := ARCH=arm64 BASE_IMAGE=$(JETPACK4_BASE) SLIM_BASE=$(JETPACK4_BASE) TRT_BASE=$(JETPACK4_BASE)
JETPACK5_ARGS := ARCH=arm64 BASE_IMAGE=$(JETPACK5_BASE) SLIM_BASE=$(JETPACK5_BASE) TRT_BASE=$(JETPACK5_BASE)
JETPACK6_ARGS := ARCH=arm64 BASE_IMAGE=$(JETPACK6_BASE) SLIM_BASE=$(JETPACK6_BASE) TRT_BASE=$(JETPACK6_BASE)
local-trt: version
$(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=frigate:latest-tensorrt \
--load
local-trt-jp4: version
$(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=frigate:latest-tensorrt-jp4 \
--load
local-trt-jp5: version
$(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=frigate:latest-tensorrt-jp5 \
--load
local-trt-jp6: version
$(JETPACK6_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=frigate:latest-tensorrt-jp6 \
--load
build-trt:
$(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt
$(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4
$(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5
$(JETPACK6_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp6
push-trt: build-trt
$(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt \
--push
$(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4 \
--push
$(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5 \
--push
$(JETPACK6_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp6 \
--push

View File

@ -5,11 +5,7 @@ title: Face Recognition
Face recognition allows people to be assigned names and when their face is recognized Frigate will assign the person's name as a sub label. This information is included in the UI, filters, as well as in notifications.
Frigate has support for FaceNet to create face embeddings, which runs locally. Embeddings are then saved to Frigate's database.
## Minimum System Requirements
Face recognition works by running a large AI model locally on your system. Systems without a GPU will not run Face Recognition reliably or at all.
Frigate has support for CV2 Local Binary Pattern Face Recognizer to recognize faces, which runs locally. A lightweight face landmark detection model is also used to align faces before running them through the face recognizer.
## Configuration

View File

@ -295,10 +295,8 @@ These instructions were originally based on the [Jellyfin documentation](https:/
## NVIDIA Jetson (Orin AGX, Orin NX, Orin Nano\*, Xavier AGX, Xavier NX, TX2, TX1, Nano)
A separate set of docker images is available that is based on Jetpack/L4T. They come with an `ffmpeg` build
with codecs that use the Jetson's dedicated media engine. If your Jetson host is running Jetpack 4.6, use the
`stable-tensorrt-jp4` tagged image, or if your Jetson host is running Jetpack 5.0+, use the `stable-tensorrt-jp5`
tagged image. Note that the Orin Nano has no video encoder, so frigate will use software encoding on this platform,
but the image will still allow hardware decoding and tensorrt object detection.
with codecs that use the Jetson's dedicated media engine. If your Jetson host is running Jetpack 5.0+ use the `stable-tensorrt-jp5`
tagged image, or if your Jetson host is running Jetpack 6.0+ use the `stable-tensorrt-jp6` tagged image. Note that the Orin Nano has no video encoder, so frigate will use software encoding on this platform, but the image will still allow hardware decoding and tensorrt object detection.
You will need to use the image with the nvidia container runtime:

View File

@ -11,14 +11,37 @@ Frigate offers native notifications using the [WebPush Protocol](https://web.dev
In order to use notifications the following requirements must be met:
- Frigate must be accessed via a secure https connection
- Frigate must be accessed via a secure `https` connection ([see the authorization docs](/configuration/authentication)).
- A supported browser must be used. Currently Chrome, Firefox, and Safari are known to be supported.
- In order for notifications to be usable externally, Frigate must be accessible externally
- In order for notifications to be usable externally, Frigate must be accessible externally.
### Configuration
To configure notifications, go to the Frigate WebUI -> Settings -> Notifications and enable, then fill out the fields and save.
Optionally, you can change the default cooldown period for notifications through the `cooldown` parameter in your config file. This parameter can also be overridden at the camera level.
Notifications will be prevented if either:
- The global cooldown period hasn't elapsed since any camera's last notification
- The camera-specific cooldown period hasn't elapsed for the specific camera
```yaml
notifications:
enabled: True
email: "johndoe@gmail.com"
cooldown: 10 # wait 10 seconds before sending another notification from any camera
```
```yaml
cameras:
doorbell:
...
notifications:
enabled: True
cooldown: 30 # wait 30 seconds before sending another notification from the doorbell camera
```
### Registration
Once notifications are enabled, press the `Register for Notifications` button on all devices that you would like to receive notifications on. This will register the background worker. After this Frigate must be restarted and then notifications will begin to be sent.
@ -39,4 +62,4 @@ Different platforms handle notifications differently, some settings changes may
### Android
Most Android phones have battery optimization settings. To get reliable Notification delivery the browser (Chrome, Firefox) should have battery optimizations disabled. If Frigate is running as a PWA then the Frigate app should have battery optimizations disabled as well.
Most Android phones have battery optimization settings. To get reliable Notification delivery the browser (Chrome, Firefox) should have battery optimizations disabled. If Frigate is running as a PWA then the Frigate app should have battery optimizations disabled as well.

View File

@ -420,6 +420,8 @@ notifications:
# Optional: Email for push service to reach out to
# NOTE: This is required to use notifications
email: "admin@example.com"
# Optional: Cooldown time for notifications in seconds (default: shown below)
cooldown: 0
# Optional: Record configuration
# NOTE: Can be overridden at the camera level

View File

@ -34,7 +34,7 @@ Fork [blakeblackshear/frigate-hass-integration](https://github.com/blakeblackshe
### Prerequisites
- GNU make
- Docker
- Docker (including buildx plugin)
- An extra detector (Coral, OpenVINO, etc.) is optional but recommended to simulate real world performance.
:::note

View File

@ -250,7 +250,7 @@ The official docker image tags for the current stable version are:
The community supported docker image tags for the current stable version are:
- `stable-tensorrt-jp5` - Frigate build optimized for nvidia Jetson devices running Jetpack 5
- `stable-tensorrt-jp4` - Frigate build optimized for nvidia Jetson devices running Jetpack 4.6
- `stable-tensorrt-jp6` - Frigate build optimized for nvidia Jetson devices running Jetpack 6
- `stable-rk` - Frigate build for SBCs with Rockchip SoC
- `stable-rocm` - Frigate build for [AMD GPUs](../configuration/object_detectors.md#amdrocm-gpu-detector)
- `stable-h8l` - Frigate build for the Hailo-8L M.2 PICe Raspberry Pi 5 hat

View File

@ -20,7 +20,6 @@ from fastapi.params import Depends
from fastapi.responses import JSONResponse, PlainTextResponse, StreamingResponse
from markupsafe import escape
from peewee import operator
from prometheus_client import CONTENT_TYPE_LATEST, generate_latest
from pydantic import ValidationError
from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters
@ -28,6 +27,7 @@ from frigate.api.defs.request.app_body import AppConfigSetBody
from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig
from frigate.models import Event, Timeline
from frigate.stats.prometheus import get_metrics, update_metrics
from frigate.util.builtin import (
clean_camera_user_pass,
get_tz_modifiers,
@ -113,9 +113,13 @@ def stats_history(request: Request, keys: str = None):
@router.get("/metrics")
def metrics():
"""Expose Prometheus metrics endpoint"""
return Response(content=generate_latest(), media_type=CONTENT_TYPE_LATEST)
def metrics(request: Request):
"""Expose Prometheus metrics endpoint and update metrics with latest stats"""
# Retrieve the latest statistics and update the Prometheus metrics
stats = request.app.stats_emitter.get_latest_stats()
update_metrics(stats)
content, content_type = get_metrics()
return Response(content=content, media_type=content_type)
@router.get("/config")

View File

@ -336,6 +336,7 @@ def events_explore(limit: int = 10):
"sub_label_score",
"average_estimated_speed",
"velocity_angle",
"path_data",
]
},
"event_count": label_counts[event.label],
@ -622,6 +623,7 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
"sub_label_score",
"average_estimated_speed",
"velocity_angle",
"path_data",
]
}

View File

@ -1,6 +1,5 @@
"""Image and video apis."""
import base64
import glob
import logging
import os
@ -40,6 +39,7 @@ from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
from frigate.object_processing import TrackedObjectProcessor
from frigate.util.builtin import get_tz_modifiers
from frigate.util.image import get_image_from_recording
from frigate.util.path import get_event_thumbnail_bytes
logger = logging.getLogger(__name__)
@ -804,10 +804,11 @@ def event_snapshot(
)
@router.get("/events/{event_id}/thumbnail.jpg")
@router.get("/events/{event_id}/thumbnail.{extension}")
def event_thumbnail(
request: Request,
event_id: str,
extension: str,
max_cache_age: int = Query(
2592000, description="Max cache age in seconds. Default 30 days in seconds."
),
@ -816,11 +817,15 @@ def event_thumbnail(
thumbnail_bytes = None
event_complete = False
try:
event = Event.get(Event.id == event_id)
event: Event = Event.get(Event.id == event_id)
if event.end_time is not None:
event_complete = True
thumbnail_bytes = base64.b64decode(event.thumbnail)
thumbnail_bytes = get_event_thumbnail_bytes(event)
except DoesNotExist:
thumbnail_bytes = None
if thumbnail_bytes is None:
# see if the object is currently being tracked
try:
camera_states = request.app.detected_frames_processor.camera_states.values()
@ -828,7 +833,7 @@ def event_thumbnail(
if event_id in camera_state.tracked_objects:
tracked_obj = camera_state.tracked_objects.get(event_id)
if tracked_obj is not None:
thumbnail_bytes = tracked_obj.get_thumbnail()
thumbnail_bytes = tracked_obj.get_thumbnail(extension)
except Exception:
return JSONResponse(
content={"success": False, "message": "Event not found"},
@ -843,8 +848,8 @@ def event_thumbnail(
# android notifications prefer a 2:1 ratio
if format == "android":
jpg_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8)
img = cv2.imdecode(jpg_as_np, flags=1)
img_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8)
img = cv2.imdecode(img_as_np, flags=1)
thumbnail = cv2.copyMakeBorder(
img,
0,
@ -854,17 +859,25 @@ def event_thumbnail(
cv2.BORDER_CONSTANT,
(0, 0, 0),
)
ret, jpg = cv2.imencode(".jpg", thumbnail, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
thumbnail_bytes = jpg.tobytes()
quality_params = None
if extension == "jpg" or extension == "jpeg":
quality_params = [int(cv2.IMWRITE_JPEG_QUALITY), 70]
elif extension == "webp":
quality_params = [int(cv2.IMWRITE_WEBP_QUALITY), 60]
_, img = cv2.imencode(f".{img}", thumbnail, quality_params)
thumbnail_bytes = img.tobytes()
return Response(
thumbnail_bytes,
media_type="image/jpeg",
media_type=f"image/{extension}",
headers={
"Cache-Control": f"private, max-age={max_cache_age}"
if event_complete
else "no-store",
"Content-Type": "image/jpeg",
"Content-Type": f"image/{extension}",
},
)

View File

@ -39,6 +39,7 @@ from frigate.const import (
MODEL_CACHE_DIR,
RECORD_DIR,
SHM_FRAMES_VAR,
THUMB_DIR,
)
from frigate.data_processing.types import DataProcessorMetrics
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
@ -105,6 +106,7 @@ class FrigateApp:
dirs = [
CONFIG_DIR,
RECORD_DIR,
THUMB_DIR,
f"{CLIPS_DIR}/cache",
CACHE_DIR,
MODEL_CACHE_DIR,

View File

@ -47,6 +47,10 @@ class WebPushClient(Communicator): # type: ignore[misc]
self.suspended_cameras: dict[str, int] = {
c.name: 0 for c in self.config.cameras.values()
}
self.last_camera_notification_time: dict[str, float] = {
c.name: 0 for c in self.config.cameras.values()
}
self.last_notification_time: float = 0
self.notification_queue: queue.Queue[PushNotification] = queue.Queue()
self.notification_thread = threading.Thread(
target=self._process_notifications, daemon=True
@ -264,6 +268,29 @@ class WebPushClient(Communicator): # type: ignore[misc]
):
return
camera: str = payload["after"]["camera"]
current_time = datetime.datetime.now().timestamp()
# Check global cooldown period
if (
current_time - self.last_notification_time
< self.config.notifications.cooldown
):
logger.debug(
f"Skipping notification for {camera} - in global cooldown period"
)
return
# Check camera-specific cooldown period
if (
current_time - self.last_camera_notification_time[camera]
< self.config.cameras[camera].notifications.cooldown
):
logger.debug(
f"Skipping notification for {camera} - in camera-specific cooldown period"
)
return
self.check_registrations()
state = payload["type"]
@ -278,6 +305,9 @@ class WebPushClient(Communicator): # type: ignore[misc]
):
return
self.last_camera_notification_time[camera] = current_time
self.last_notification_time = current_time
reviewId = payload["after"]["id"]
sorted_objects: set[str] = set()
@ -287,7 +317,6 @@ class WebPushClient(Communicator): # type: ignore[misc]
sorted_objects.update(payload["after"]["data"]["sub_labels"])
camera: str = payload["after"]["camera"]
title = f"{', '.join(sorted_objects).replace('_', ' ').title()}{' was' if state == 'end' else ''} detected in {', '.join(payload['after']['data']['zones']).replace('_', ' ').title()}"
message = f"Detected on {camera.replace('_', ' ').title()}"
image = f"{payload['after']['thumb_path'].replace('/media/frigate', '')}"

View File

@ -10,6 +10,9 @@ __all__ = ["NotificationConfig"]
class NotificationConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable notifications")
email: Optional[str] = Field(default=None, title="Email required for push.")
cooldown: Optional[int] = Field(
default=0, ge=0, title="Cooldown period for notifications (time in seconds)."
)
enabled_in_config: Optional[bool] = Field(
default=None, title="Keep track of original state of notifications."
)

View File

@ -7,6 +7,7 @@ BASE_DIR = "/media/frigate"
CLIPS_DIR = f"{BASE_DIR}/clips"
EXPORT_DIR = f"{BASE_DIR}/exports"
FACE_DIR = f"{CLIPS_DIR}/faces"
THUMB_DIR = f"{CLIPS_DIR}/thumbs"
RECORD_DIR = f"{BASE_DIR}/recordings"
BIRDSEYE_PIPE = "/tmp/cache/birdseye"
CACHE_DIR = "/tmp/cache"

View File

@ -1,6 +1,5 @@
"""SQLite-vec embeddings database."""
import base64
import datetime
import logging
import os
@ -21,6 +20,7 @@ from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.models import Event
from frigate.types import ModelStatusTypesEnum
from frigate.util.builtin import serialize
from frigate.util.path import get_event_thumbnail_bytes
from .functions.onnx import GenericONNXEmbedding, ModelTypeEnum
@ -264,14 +264,7 @@ class Embeddings:
st = time.time()
# Get total count of events to process
total_events = (
Event.select()
.where(
(Event.has_clip == True | Event.has_snapshot == True)
& Event.thumbnail.is_null(False)
)
.count()
)
total_events = Event.select().count()
batch_size = 32
current_page = 1
@ -289,10 +282,6 @@ class Embeddings:
events = (
Event.select()
.where(
(Event.has_clip == True | Event.has_snapshot == True)
& Event.thumbnail.is_null(False)
)
.order_by(Event.start_time.desc())
.paginate(current_page, batch_size)
)
@ -302,7 +291,12 @@ class Embeddings:
batch_thumbs = {}
batch_descs = {}
for event in events:
batch_thumbs[event.id] = base64.b64decode(event.thumbnail)
thumbnail = get_event_thumbnail_bytes(event)
if thumbnail is None:
continue
batch_thumbs[event.id] = thumbnail
totals["thumbnails"] += 1
if description := event.data.get("description", "").strip():
@ -341,10 +335,6 @@ class Embeddings:
current_page += 1
events = (
Event.select()
.where(
(Event.has_clip == True | Event.has_snapshot == True)
& Event.thumbnail.is_null(False)
)
.order_by(Event.start_time.desc())
.paginate(current_page, batch_size)
)

View File

@ -38,6 +38,7 @@ from frigate.models import Event
from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import serialize
from frigate.util.image import SharedMemoryFrameManager, calculate_region
from frigate.util.path import get_event_thumbnail_bytes
from .embeddings import Embeddings
@ -215,7 +216,7 @@ class EmbeddingMaintainer(threading.Thread):
continue
# Extract valid thumbnail
thumbnail = base64.b64decode(event.thumbnail)
thumbnail = get_event_thumbnail_bytes(event)
# Embed the thumbnail
self._embed_thumbnail(event_id, thumbnail)
@ -390,7 +391,7 @@ class EmbeddingMaintainer(threading.Thread):
logger.error(f"GenAI not enabled for camera {event.camera}")
return
thumbnail = base64.b64decode(event.thumbnail)
thumbnail = get_event_thumbnail_bytes(event)
logger.debug(
f"Trying {source} regeneration for {event}, has_snapshot: {event.has_snapshot}"

View File

@ -11,6 +11,7 @@ from frigate.config import FrigateConfig
from frigate.const import CLIPS_DIR
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.models import Event, Timeline
from frigate.util.path import delete_event_images
logger = logging.getLogger(__name__)
@ -64,7 +65,6 @@ class EventCleanup(threading.Thread):
def expire_snapshots(self) -> list[str]:
## Expire events from unlisted cameras based on the global config
retain_config = self.config.snapshots.retain
file_extension = "jpg"
update_params = {"has_snapshot": False}
distinct_labels = self.get_removed_camera_labels()
@ -83,6 +83,7 @@ class EventCleanup(threading.Thread):
Event.select(
Event.id,
Event.camera,
Event.thumbnail,
)
.where(
Event.camera.not_in(self.camera_keys),
@ -94,22 +95,15 @@ class EventCleanup(threading.Thread):
.iterator()
)
logger.debug(f"{len(list(expired_events))} events can be expired")
# delete the media from disk
for expired in expired_events:
media_name = f"{expired.camera}-{expired.id}"
media_path = Path(
f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}"
)
deleted = delete_event_images(expired)
try:
media_path.unlink(missing_ok=True)
if file_extension == "jpg":
media_path = Path(
f"{os.path.join(CLIPS_DIR, media_name)}-clean.png"
)
media_path.unlink(missing_ok=True)
except OSError as e:
logger.warning(f"Unable to delete event images: {e}")
if not deleted:
logger.warning(
f"Unable to delete event images for {expired.camera}: {expired.id}"
)
# update the clips attribute for the db entry
query = Event.select(Event.id).where(
@ -165,6 +159,7 @@ class EventCleanup(threading.Thread):
Event.select(
Event.id,
Event.camera,
Event.thumbnail,
)
.where(
Event.camera == name,
@ -181,19 +176,12 @@ class EventCleanup(threading.Thread):
# so no need to delete mp4 files
for event in expired_events:
events_to_update.append(event.id)
deleted = delete_event_images(event)
try:
media_name = f"{event.camera}-{event.id}"
media_path = Path(
f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}"
if not deleted:
logger.warning(
f"Unable to delete event images for {event.camera}: {event.id}"
)
media_path.unlink(missing_ok=True)
media_path = Path(
f"{os.path.join(CLIPS_DIR, media_name)}-clean.png"
)
media_path.unlink(missing_ok=True)
except OSError as e:
logger.warning(f"Unable to delete event images: {e}")
# update the clips attribute for the db entry
for i in range(0, len(events_to_update), CHUNK_SIZE):

View File

@ -1,6 +1,5 @@
"""Handle external events created by the user."""
import base64
import datetime
import logging
import os
@ -15,7 +14,7 @@ from numpy import ndarray
from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum
from frigate.comms.events_updater import EventUpdatePublisher
from frigate.config import CameraConfig, FrigateConfig
from frigate.const import CLIPS_DIR
from frigate.const import CLIPS_DIR, THUMB_DIR
from frigate.events.types import EventStateEnum, EventTypeEnum
from frigate.util.image import draw_box_with_label
@ -55,9 +54,7 @@ class ExternalEventProcessor:
rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
event_id = f"{now}-{rand_id}"
thumbnail = self._write_images(
camera_config, label, event_id, draw, snapshot_frame
)
self._write_images(camera_config, label, event_id, draw, snapshot_frame)
end = now + duration if duration is not None else None
self.event_sender.publish(
@ -74,7 +71,6 @@ class ExternalEventProcessor:
"camera": camera,
"start_time": now - camera_config.record.event_pre_capture,
"end_time": end,
"thumbnail": thumbnail,
"has_clip": camera_config.record.enabled and include_recording,
"has_snapshot": True,
"type": source_type,
@ -134,9 +130,9 @@ class ExternalEventProcessor:
event_id: str,
draw: dict[str, any],
img_frame: Optional[ndarray],
) -> Optional[str]:
) -> None:
if img_frame is None:
return None
return
# write clean snapshot if enabled
if camera_config.snapshots.clean_copy:
@ -182,8 +178,9 @@ class ExternalEventProcessor:
# create thumbnail with max height of 175 and save
width = int(175 * img_frame.shape[1] / img_frame.shape[0])
thumb = cv2.resize(img_frame, dsize=(width, 175), interpolation=cv2.INTER_AREA)
ret, jpg = cv2.imencode(".jpg", thumb)
return base64.b64encode(jpg.tobytes()).decode("utf-8")
cv2.imwrite(
os.path.join(THUMB_DIR, camera_config.name, f"{event_id}.webp"), thumb
)
def stop(self):
self.event_sender.stop()

View File

@ -23,11 +23,11 @@ def should_update_db(prev_event: Event, current_event: Event) -> bool:
if (
prev_event["top_score"] != current_event["top_score"]
or prev_event["entered_zones"] != current_event["entered_zones"]
or prev_event["thumbnail"] != current_event["thumbnail"]
or prev_event["end_time"] != current_event["end_time"]
or prev_event["average_estimated_speed"]
!= current_event["average_estimated_speed"]
or prev_event["velocity_angle"] != current_event["velocity_angle"]
or prev_event["path_data"] != current_event["path_data"]
):
return True
return False
@ -201,7 +201,7 @@ class EventProcessor(threading.Thread):
Event.start_time: start_time,
Event.end_time: end_time,
Event.zones: list(event_data["entered_zones"]),
Event.thumbnail: event_data["thumbnail"],
Event.thumbnail: event_data.get("thumbnail"),
Event.has_clip: event_data["has_clip"],
Event.has_snapshot: event_data["has_snapshot"],
Event.model_hash: first_detector.model.model_hash,
@ -217,6 +217,7 @@ class EventProcessor(threading.Thread):
"velocity_angle": event_data["velocity_angle"],
"type": "object",
"max_severity": event_data.get("max_severity"),
"path_data": event_data.get("path_data"),
},
}
@ -256,7 +257,7 @@ class EventProcessor(threading.Thread):
Event.camera: event_data["camera"],
Event.start_time: event_data["start_time"],
Event.end_time: event_data["end_time"],
Event.thumbnail: event_data["thumbnail"],
Event.thumbnail: event_data.get("thumbnail"),
Event.has_clip: event_data["has_clip"],
Event.has_snapshot: event_data["has_snapshot"],
Event.zones: [],

View File

@ -1,7 +1,6 @@
import datetime
import json
import logging
import os
import queue
import threading
from collections import defaultdict
@ -16,13 +15,13 @@ from frigate.comms.dispatcher import Dispatcher
from frigate.comms.events_updater import EventEndSubscriber, EventUpdatePublisher
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import (
CameraMqttConfig,
FrigateConfig,
MqttConfig,
RecordConfig,
SnapshotsConfig,
ZoomingModeEnum,
)
from frigate.const import CLIPS_DIR, UPDATE_CAMERA_ACTIVITY
from frigate.const import UPDATE_CAMERA_ACTIVITY
from frigate.events.types import EventStateEnum, EventTypeEnum
from frigate.ptz.autotrack import PtzAutoTrackerThread
from frigate.track.tracked_object import TrackedObject
@ -413,6 +412,11 @@ class CameraState:
self.previous_frame_id = frame_name
def shutdown(self) -> None:
for obj in self.tracked_objects.values():
if not obj.obj_data.get("end_time"):
obj.write_thumbnail_to_disk()
class TrackedObjectProcessor(threading.Thread):
def __init__(
@ -479,7 +483,7 @@ class TrackedObjectProcessor(threading.Thread):
EventStateEnum.update,
camera,
frame_name,
obj.to_dict(include_thumbnail=True),
obj.to_dict(),
)
)
@ -491,41 +495,13 @@ class TrackedObjectProcessor(threading.Thread):
obj.has_snapshot = self.should_save_snapshot(camera, obj)
obj.has_clip = self.should_retain_recording(camera, obj)
# write thumbnail to disk if it will be saved as an event
if obj.has_snapshot or obj.has_clip:
obj.write_thumbnail_to_disk()
# write the snapshot to disk
if obj.has_snapshot:
snapshot_config: SnapshotsConfig = self.config.cameras[camera].snapshots
jpg_bytes = obj.get_jpg_bytes(
timestamp=snapshot_config.timestamp,
bounding_box=snapshot_config.bounding_box,
crop=snapshot_config.crop,
height=snapshot_config.height,
quality=snapshot_config.quality,
)
if jpg_bytes is None:
logger.warning(f"Unable to save snapshot for {obj.obj_data['id']}.")
else:
with open(
os.path.join(CLIPS_DIR, f"{camera}-{obj.obj_data['id']}.jpg"),
"wb",
) as j:
j.write(jpg_bytes)
# write clean snapshot if enabled
if snapshot_config.clean_copy:
png_bytes = obj.get_clean_png()
if png_bytes is None:
logger.warning(
f"Unable to save clean snapshot for {obj.obj_data['id']}."
)
else:
with open(
os.path.join(
CLIPS_DIR,
f"{camera}-{obj.obj_data['id']}-clean.png",
),
"wb",
) as p:
p.write(png_bytes)
obj.write_snapshot_to_disk()
if not obj.false_positive:
message = {
@ -542,14 +518,15 @@ class TrackedObjectProcessor(threading.Thread):
EventStateEnum.end,
camera,
frame_name,
obj.to_dict(include_thumbnail=True),
obj.to_dict(),
)
)
def snapshot(camera, obj: TrackedObject, frame_name: str):
mqtt_config: MqttConfig = self.config.cameras[camera].mqtt
mqtt_config: CameraMqttConfig = self.config.cameras[camera].mqtt
if mqtt_config.enabled and self.should_mqtt_snapshot(camera, obj):
jpg_bytes = obj.get_jpg_bytes(
jpg_bytes = obj.get_img_bytes(
ext="jpg",
timestamp=mqtt_config.timestamp,
bounding_box=mqtt_config.bounding_box,
crop=mqtt_config.crop,
@ -750,6 +727,10 @@ class TrackedObjectProcessor(threading.Thread):
event_id, camera, _ = update
self.camera_states[camera].finished(event_id)
# shut down camera states
for state in self.camera_states.values():
state.shutdown()
self.requestor.stop()
self.detection_publisher.stop()
self.event_sender.stop()

View File

@ -80,8 +80,8 @@ class RecordingExporter(threading.Thread):
Path(os.path.join(CLIPS_DIR, "export")).mkdir(exist_ok=True)
def get_datetime_from_timestamp(self, timestamp: int) -> str:
"""Convenience fun to get a simple date time from timestamp."""
return datetime.datetime.fromtimestamp(timestamp).strftime("%Y/%m/%d %H:%M")
# return in iso format
return datetime.datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S")
def save_thumbnail(self, id: str) -> str:
thumb_path = os.path.join(CLIPS_DIR, f"export/{id}.webp")
@ -236,6 +236,10 @@ class RecordingExporter(threading.Thread):
if self.config.ffmpeg.apple_compatibility:
ffmpeg_cmd += FFMPEG_HVC1_ARGS
# add metadata
title = f"Frigate Recording for {self.camera}, {self.get_datetime_from_timestamp(self.start_time)} - {self.get_datetime_from_timestamp(self.end_time)}"
ffmpeg_cmd.extend(["-metadata", f"title={title}"])
ffmpeg_cmd.append(video_path)
return ffmpeg_cmd, playlist_lines
@ -323,6 +327,10 @@ class RecordingExporter(threading.Thread):
)
).split(" ")
# add metadata
title = f"Frigate Preview for {self.camera}, {self.get_datetime_from_timestamp(self.start_time)} - {self.get_datetime_from_timestamp(self.end_time)}"
ffmpeg_cmd.extend(["-metadata", f"title={title}"])
return ffmpeg_cmd, playlist_lines
def run(self) -> None:

View File

@ -1,207 +1,495 @@
from typing import Dict
import logging
import re
from prometheus_client import (
CONTENT_TYPE_LATEST,
Counter,
Gauge,
Info,
generate_latest,
)
# System metrics
SYSTEM_INFO = Info("frigate_system", "System information")
CPU_USAGE = Gauge(
"frigate_cpu_usage_percent",
"Process CPU usage %",
["pid", "name", "process", "type", "cmdline"],
)
MEMORY_USAGE = Gauge(
"frigate_mem_usage_percent",
"Process memory usage %",
["pid", "name", "process", "type", "cmdline"],
)
# Camera metrics
CAMERA_FPS = Gauge(
"frigate_camera_fps",
"Frames per second being consumed from your camera",
["camera_name"],
)
DETECTION_FPS = Gauge(
"frigate_detection_fps",
"Number of times detection is run per second",
["camera_name"],
)
PROCESS_FPS = Gauge(
"frigate_process_fps",
"Frames per second being processed by frigate",
["camera_name"],
)
SKIPPED_FPS = Gauge(
"frigate_skipped_fps", "Frames per second skipped for processing", ["camera_name"]
)
DETECTION_ENABLED = Gauge(
"frigate_detection_enabled", "Detection enabled for camera", ["camera_name"]
)
AUDIO_DBFS = Gauge("frigate_audio_dBFS", "Audio dBFS for camera", ["camera_name"])
AUDIO_RMS = Gauge("frigate_audio_rms", "Audio RMS for camera", ["camera_name"])
# Detector metrics
DETECTOR_INFERENCE = Gauge(
"frigate_detector_inference_speed_seconds",
"Time spent running object detection in seconds",
["name"],
)
DETECTOR_START = Gauge(
"frigate_detection_start", "Detector start time (unix timestamp)", ["name"]
)
# GPU metrics
GPU_USAGE = Gauge("frigate_gpu_usage_percent", "GPU utilisation %", ["gpu_name"])
GPU_MEMORY = Gauge("frigate_gpu_mem_usage_percent", "GPU memory usage %", ["gpu_name"])
# Storage metrics
STORAGE_FREE = Gauge("frigate_storage_free_bytes", "Storage free bytes", ["storage"])
STORAGE_TOTAL = Gauge("frigate_storage_total_bytes", "Storage total bytes", ["storage"])
STORAGE_USED = Gauge("frigate_storage_used_bytes", "Storage used bytes", ["storage"])
STORAGE_MOUNT = Info(
"frigate_storage_mount_type", "Storage mount type", ["mount_type", "storage"]
)
# Service metrics
UPTIME = Gauge("frigate_service_uptime_seconds", "Uptime seconds")
LAST_UPDATE = Gauge(
"frigate_service_last_updated_timestamp", "Stats recorded time (unix timestamp)"
)
TEMPERATURE = Gauge("frigate_device_temperature", "Device Temperature", ["device"])
# Event metrics
CAMERA_EVENTS = Counter(
"frigate_camera_events",
"Count of camera events since exporter started",
["camera", "label"],
from prometheus_client import CONTENT_TYPE_LATEST, generate_latest
from prometheus_client.core import (
REGISTRY,
CounterMetricFamily,
GaugeMetricFamily,
InfoMetricFamily,
)
def update_metrics(stats: Dict) -> None:
"""Update Prometheus metrics based on Frigate stats"""
try:
# Update process metrics
if "cpu_usages" in stats:
for pid, proc_stats in stats["cpu_usages"].items():
cmdline = proc_stats.get("cmdline", "")
process_type = "Other"
process_name = cmdline
class CustomCollector(object):
def __init__(self, _url):
self.process_stats = {}
self.previous_event_id = None
self.previous_event_start_time = None
self.all_events = {}
CPU_USAGE.labels(
pid=pid,
name=process_name,
process=process_name,
type=process_type,
cmdline=cmdline,
).set(float(proc_stats["cpu"]))
def add_metric(self, metric, label, stats, key, multiplier=1.0): # Now a method
try:
string = str(stats[key])
value = float(re.findall(r"-?\d*\.?\d*", string)[0])
metric.add_metric(label, value * multiplier)
except (KeyError, TypeError, IndexError, ValueError):
pass
MEMORY_USAGE.labels(
pid=pid,
name=process_name,
process=process_name,
type=process_type,
cmdline=cmdline,
).set(float(proc_stats["mem"]))
def add_metric_process(
self,
metric,
camera_stats,
camera_name,
pid_name,
process_name,
cpu_or_memory,
process_type,
):
try:
pid = str(camera_stats[pid_name])
label_values = [pid, camera_name, process_name, process_type]
try:
# new frigate:0.13.0-beta3 stat 'cmdline'
label_values.append(self.process_stats[pid]["cmdline"])
except KeyError:
pass
metric.add_metric(label_values, self.process_stats[pid][cpu_or_memory])
del self.process_stats[pid][cpu_or_memory]
except (KeyError, TypeError, IndexError):
pass
# Update camera metrics
if "cameras" in stats:
for camera_name, camera_stats in stats["cameras"].items():
if "camera_fps" in camera_stats:
CAMERA_FPS.labels(camera_name=camera_name).set(
camera_stats["camera_fps"]
)
if "detection_fps" in camera_stats:
DETECTION_FPS.labels(camera_name=camera_name).set(
camera_stats["detection_fps"]
)
if "process_fps" in camera_stats:
PROCESS_FPS.labels(camera_name=camera_name).set(
camera_stats["process_fps"]
)
if "skipped_fps" in camera_stats:
SKIPPED_FPS.labels(camera_name=camera_name).set(
camera_stats["skipped_fps"]
)
if "detection_enabled" in camera_stats:
DETECTION_ENABLED.labels(camera_name=camera_name).set(
camera_stats["detection_enabled"]
)
if "audio_dBFS" in camera_stats:
AUDIO_DBFS.labels(camera_name=camera_name).set(
camera_stats["audio_dBFS"]
)
if "audio_rms" in camera_stats:
AUDIO_RMS.labels(camera_name=camera_name).set(
camera_stats["audio_rms"]
)
def collect(self):
stats = self.process_stats # Assign self.process_stats to local variable stats
# Update detector metrics
if "detectors" in stats:
for name, detector in stats["detectors"].items():
if "inference_speed" in detector:
DETECTOR_INFERENCE.labels(name=name).set(
detector["inference_speed"] * 0.001
) # ms to seconds
if "detection_start" in detector:
DETECTOR_START.labels(name=name).set(detector["detection_start"])
try:
self.process_stats = stats["cpu_usages"]
except KeyError:
pass
# Update GPU metrics
if "gpu_usages" in stats:
for gpu_name, gpu_stats in stats["gpu_usages"].items():
if "gpu" in gpu_stats:
GPU_USAGE.labels(gpu_name=gpu_name).set(float(gpu_stats["gpu"]))
if "mem" in gpu_stats:
GPU_MEMORY.labels(gpu_name=gpu_name).set(float(gpu_stats["mem"]))
# process stats for cameras, detectors and other
cpu_usages = GaugeMetricFamily(
"frigate_cpu_usage_percent",
"Process CPU usage %",
labels=["pid", "name", "process", "type", "cmdline"],
)
mem_usages = GaugeMetricFamily(
"frigate_mem_usage_percent",
"Process memory usage %",
labels=["pid", "name", "process", "type", "cmdline"],
)
# Update service metrics
if "service" in stats:
service = stats["service"]
# camera stats
audio_dBFS = GaugeMetricFamily(
"frigate_audio_dBFS", "Audio dBFS for camera", labels=["camera_name"]
)
audio_rms = GaugeMetricFamily(
"frigate_audio_rms", "Audio RMS for camera", labels=["camera_name"]
)
camera_fps = GaugeMetricFamily(
"frigate_camera_fps",
"Frames per second being consumed from your camera.",
labels=["camera_name"],
)
detection_enabled = GaugeMetricFamily(
"frigate_detection_enabled",
"Detection enabled for camera",
labels=["camera_name"],
)
detection_fps = GaugeMetricFamily(
"frigate_detection_fps",
"Number of times detection is run per second.",
labels=["camera_name"],
)
process_fps = GaugeMetricFamily(
"frigate_process_fps",
"Frames per second being processed by frigate.",
labels=["camera_name"],
)
skipped_fps = GaugeMetricFamily(
"frigate_skipped_fps",
"Frames per second skip for processing by frigate.",
labels=["camera_name"],
)
if "uptime" in service:
UPTIME.set(service["uptime"])
if "last_updated" in service:
LAST_UPDATE.set(service["last_updated"])
# read camera stats assuming version < frigate:0.13.0-beta3
cameras = stats
try:
# try to read camera stats in case >= frigate:0.13.0-beta3
cameras = stats["cameras"]
except KeyError:
pass
# Storage metrics
if "storage" in service:
for path, storage in service["storage"].items():
if "free" in storage:
STORAGE_FREE.labels(storage=path).set(
storage["free"] * 1e6
) # MB to bytes
if "total" in storage:
STORAGE_TOTAL.labels(storage=path).set(storage["total"] * 1e6)
if "used" in storage:
STORAGE_USED.labels(storage=path).set(storage["used"] * 1e6)
if "mount_type" in storage:
STORAGE_MOUNT.labels(storage=path).info(
{"mount_type": storage["mount_type"], "storage": path}
)
for camera_name, camera_stats in cameras.items():
self.add_metric(audio_dBFS, [camera_name], camera_stats, "audio_dBFS")
self.add_metric(audio_rms, [camera_name], camera_stats, "audio_rms")
self.add_metric(camera_fps, [camera_name], camera_stats, "camera_fps")
self.add_metric(
detection_enabled, [camera_name], camera_stats, "detection_enabled"
)
self.add_metric(detection_fps, [camera_name], camera_stats, "detection_fps")
self.add_metric(process_fps, [camera_name], camera_stats, "process_fps")
self.add_metric(skipped_fps, [camera_name], camera_stats, "skipped_fps")
# Temperature metrics
if "temperatures" in service:
for device, temp in service["temperatures"].items():
TEMPERATURE.labels(device=device).set(temp)
self.add_metric_process(
cpu_usages,
camera_stats,
camera_name,
"ffmpeg_pid",
"ffmpeg",
"cpu",
"Camera",
)
self.add_metric_process(
cpu_usages,
camera_stats,
camera_name,
"capture_pid",
"capture",
"cpu",
"Camera",
)
self.add_metric_process(
cpu_usages, camera_stats, camera_name, "pid", "detect", "cpu", "Camera"
)
# Version info
if "version" in service and "latest_version" in service:
SYSTEM_INFO.info(
{
"version": service["version"],
"latest_version": service["latest_version"],
}
self.add_metric_process(
mem_usages,
camera_stats,
camera_name,
"ffmpeg_pid",
"ffmpeg",
"mem",
"Camera",
)
self.add_metric_process(
mem_usages,
camera_stats,
camera_name,
"capture_pid",
"capture",
"mem",
"Camera",
)
self.add_metric_process(
mem_usages, camera_stats, camera_name, "pid", "detect", "mem", "Camera"
)
yield audio_dBFS
yield audio_rms
yield camera_fps
yield detection_enabled
yield detection_fps
yield process_fps
yield skipped_fps
# bandwidth stats
bandwidth_usages = GaugeMetricFamily(
"frigate_bandwidth_usages_kBps",
"bandwidth usages kilobytes per second",
labels=["pid", "name", "process", "cmdline"],
)
try:
for b_pid, b_stats in stats["bandwidth_usages"].items():
label = [b_pid] # pid label
try:
n = stats["cpu_usages"][b_pid]["cmdline"]
for p_name, p_stats in stats["processes"].items():
if str(p_stats["pid"]) == b_pid:
n = p_name
break
# new frigate:0.13.0-beta3 stat 'cmdline'
label.append(n) # name label
label.append(stats["cpu_usages"][b_pid]["cmdline"]) # process label
label.append(stats["cpu_usages"][b_pid]["cmdline"]) # cmdline label
self.add_metric(bandwidth_usages, label, b_stats, "bandwidth")
except KeyError:
pass
except KeyError:
pass
yield bandwidth_usages
# detector stats
try:
yield GaugeMetricFamily(
"frigate_detection_total_fps",
"Sum of detection_fps across all cameras and detectors.",
value=stats["detection_fps"],
)
except KeyError:
pass
detector_inference_speed = GaugeMetricFamily(
"frigate_detector_inference_speed_seconds",
"Time spent running object detection in seconds.",
labels=["name"],
)
detector_detection_start = GaugeMetricFamily(
"frigate_detection_start",
"Detector start time (unix timestamp)",
labels=["name"],
)
try:
for detector_name, detector_stats in stats["detectors"].items():
self.add_metric(
detector_inference_speed,
[detector_name],
detector_stats,
"inference_speed",
0.001,
) # ms to seconds
self.add_metric(
detector_detection_start,
[detector_name],
detector_stats,
"detection_start",
)
self.add_metric_process(
cpu_usages,
stats["detectors"],
detector_name,
"pid",
"detect",
"cpu",
"Detector",
)
self.add_metric_process(
mem_usages,
stats["detectors"],
detector_name,
"pid",
"detect",
"mem",
"Detector",
)
except KeyError:
pass
yield detector_inference_speed
yield detector_detection_start
# detector process stats
try:
for detector_name, detector_stats in stats["detectors"].items():
p_pid = str(detector_stats["pid"])
label = [p_pid] # pid label
try:
# new frigate:0.13.0-beta3 stat 'cmdline'
label.append(detector_name) # name label
label.append(detector_name) # process label
label.append("detectors") # type label
label.append(self.process_stats[p_pid]["cmdline"]) # cmdline label
self.add_metric(cpu_usages, label, self.process_stats[p_pid], "cpu")
self.add_metric(mem_usages, label, self.process_stats[p_pid], "mem")
del self.process_stats[p_pid]
except KeyError:
pass
except KeyError:
pass
# other named process stats
try:
for process_name, process_stats in stats["processes"].items():
p_pid = str(process_stats["pid"])
label = [p_pid] # pid label
try:
# new frigate:0.13.0-beta3 stat 'cmdline'
label.append(process_name) # name label
label.append(process_name) # process label
label.append(process_name) # type label
label.append(self.process_stats[p_pid]["cmdline"]) # cmdline label
self.add_metric(cpu_usages, label, self.process_stats[p_pid], "cpu")
self.add_metric(mem_usages, label, self.process_stats[p_pid], "mem")
del self.process_stats[p_pid]
except KeyError:
pass
except KeyError:
pass
# remaining process stats
try:
for process_id, pid_stats in self.process_stats.items():
label = [process_id] # pid label
try:
# new frigate:0.13.0-beta3 stat 'cmdline'
label.append(pid_stats["cmdline"]) # name label
label.append(pid_stats["cmdline"]) # process label
label.append("Other") # type label
label.append(pid_stats["cmdline"]) # cmdline label
except KeyError:
pass
self.add_metric(cpu_usages, label, pid_stats, "cpu")
self.add_metric(mem_usages, label, pid_stats, "mem")
except KeyError:
pass
yield cpu_usages
yield mem_usages
# gpu stats
gpu_usages = GaugeMetricFamily(
"frigate_gpu_usage_percent", "GPU utilisation %", labels=["gpu_name"]
)
gpu_mem_usages = GaugeMetricFamily(
"frigate_gpu_mem_usage_percent", "GPU memory usage %", labels=["gpu_name"]
)
try:
for gpu_name, gpu_stats in stats["gpu_usages"].items():
self.add_metric(gpu_usages, [gpu_name], gpu_stats, "gpu")
self.add_metric(gpu_mem_usages, [gpu_name], gpu_stats, "mem")
except KeyError:
pass
yield gpu_usages
yield gpu_mem_usages
# service stats
uptime_seconds = GaugeMetricFamily(
"frigate_service_uptime_seconds", "Uptime seconds"
)
last_updated_timestamp = GaugeMetricFamily(
"frigate_service_last_updated_timestamp",
"Stats recorded time (unix timestamp)",
)
try:
service_stats = stats["service"]
self.add_metric(uptime_seconds, [""], service_stats, "uptime")
self.add_metric(last_updated_timestamp, [""], service_stats, "last_updated")
info = {
"latest_version": stats["service"]["latest_version"],
"version": stats["service"]["version"],
}
yield InfoMetricFamily(
"frigate_service", "Frigate version info", value=info
)
except KeyError:
pass
yield uptime_seconds
yield last_updated_timestamp
temperatures = GaugeMetricFamily(
"frigate_device_temperature", "Device Temperature", labels=["device"]
)
try:
for device_name in stats["service"]["temperatures"]:
self.add_metric(
temperatures,
[device_name],
stats["service"]["temperatures"],
device_name,
)
except KeyError:
pass
yield temperatures
storage_free = GaugeMetricFamily(
"frigate_storage_free_bytes", "Storage free bytes", labels=["storage"]
)
storage_mount_type = InfoMetricFamily(
"frigate_storage_mount_type",
"Storage mount type",
labels=["mount_type", "storage"],
)
storage_total = GaugeMetricFamily(
"frigate_storage_total_bytes", "Storage total bytes", labels=["storage"]
)
storage_used = GaugeMetricFamily(
"frigate_storage_used_bytes", "Storage used bytes", labels=["storage"]
)
try:
for storage_path, storage_stats in stats["service"]["storage"].items():
self.add_metric(
storage_free, [storage_path], storage_stats, "free", 1e6
) # MB to bytes
self.add_metric(
storage_total, [storage_path], storage_stats, "total", 1e6
) # MB to bytes
self.add_metric(
storage_used, [storage_path], storage_stats, "used", 1e6
) # MB to bytes
storage_mount_type.add_metric(
storage_path,
{
"mount_type": storage_stats["mount_type"],
"storage": storage_path,
},
)
except KeyError:
pass
yield storage_free
yield storage_mount_type
yield storage_total
yield storage_used
# count events
events = []
if len(events) > 0:
# events[0] is newest event, last element is oldest, don't need to sort
if not self.previous_event_id:
# ignore all previous events on startup, prometheus might have already counted them
self.previous_event_id = events[0]["id"]
self.previous_event_start_time = int(events[0]["start_time"])
for event in events:
# break if event already counted
if event["id"] == self.previous_event_id:
break
# break if event starts before previous event
if event["start_time"] < self.previous_event_start_time:
break
# store counted events in a dict
try:
cam = self.all_events[event["camera"]]
try:
cam[event["label"]] += 1
except KeyError:
# create label dict if not exists
cam.update({event["label"]: 1})
except KeyError:
# create camera and label dict if not exists
self.all_events.update({event["camera"]: {event["label"]: 1}})
# don't recount events next time
self.previous_event_id = events[0]["id"]
self.previous_event_start_time = int(events[0]["start_time"])
camera_events = CounterMetricFamily(
"frigate_camera_events",
"Count of camera events since exporter started",
labels=["camera", "label"],
)
for camera, cam_dict in self.all_events.items():
for label, label_value in cam_dict.items():
camera_events.add_metric([camera, label], label_value)
yield camera_events
collector = CustomCollector(None)
REGISTRY.register(collector)
def update_metrics(stats):
    """Update the Prometheus metrics with the given stats data.

    Assigns the stats dict to the registered collector and forces a
    collect() pass so the metric families are rebuilt from the new data.

    Args:
        stats: stats payload consumed by the collector (shape defined by
            the collector's process_stats handling — confirm at call site).
    """
    try:
        collector.process_stats = stats  # Directly assign the stats data
        # Important: since we are not fetching from a URL, we must manually
        # drive a collect pass so the new stats are folded into the metrics.
        for _ in collector.collect():
            pass
    except Exception as e:
        # Report through logging only; the previous duplicate print() to
        # stdout repeated the same message and cluttered container logs.
        logging.error(f"Error updating metrics: {e}")
def get_metrics() -> tuple[bytes, str]:
    """Return the latest Prometheus metrics in text exposition format.

    Note: a merge left two back-to-back definitions of this function (the
    second silently shadowed the first); they are consolidated here. The
    annotation is corrected to bytes, which is what generate_latest()
    actually returns.

    Returns:
        Tuple of (metrics payload, Content-Type header value).
    """
    return generate_latest(REGISTRY), CONTENT_TYPE_LATEST

View File

@ -263,12 +263,13 @@ class NorfairTracker(ObjectTracker):
# Get the correct tracker for this object's label
tracker = self.get_tracker(obj["label"])
obj["score_history"] = [
p.data["score"]
for p in next(
(o for o in tracker.tracked_objects if o.global_id == track_id)
).past_detections
]
obj_match = next(
(o for o in tracker.tracked_objects if o.global_id == track_id), None
)
# if we don't have a match, we have a new object
obj["score_history"] = (
[p.data["score"] for p in obj_match.past_detections] if obj_match else []
)
self.tracked_objects[id] = obj
self.disappeared[id] = 0
self.positions[id] = {
@ -519,7 +520,11 @@ class NorfairTracker(ObjectTracker):
default_detections.extend(dets)
# Update default tracker with untracked detections
mode = "ptz" if self.ptz_metrics.autotracker_enabled.value else "static"
mode = (
"ptz"
if self.camera_config.onvif.autotracking.enabled_in_config
else "static"
)
tracked_objects = self.default_tracker[mode].update(
detections=default_detections, coord_transformations=coord_transformations
)

View File

@ -1,7 +1,8 @@
"""Object attribute."""
import base64
import logging
import math
import os
from collections import defaultdict
from statistics import median
from typing import Optional
@ -12,8 +13,10 @@ import numpy as np
from frigate.config import (
CameraConfig,
ModelConfig,
SnapshotsConfig,
UIConfig,
)
from frigate.const import CLIPS_DIR, THUMB_DIR
from frigate.review.types import SeverityEnum
from frigate.util.image import (
area,
@ -66,6 +69,7 @@ class TrackedObject:
self.current_estimated_speed = 0
self.average_estimated_speed = 0
self.velocity_angle = 0
self.path_data = []
self.previous = self.to_dict()
@property
@ -148,6 +152,7 @@ class TrackedObject:
"attributes": obj_data["attributes"],
"current_estimated_speed": self.current_estimated_speed,
"velocity_angle": self.velocity_angle,
"path_data": self.path_data,
}
thumb_update = True
@ -300,11 +305,34 @@ class TrackedObject:
if self.obj_data["frame_time"] - self.previous["frame_time"] >= (1 / 3):
autotracker_update = True
# update path
width = self.camera_config.detect.width
height = self.camera_config.detect.height
bottom_center = (
round(obj_data["centroid"][0] / width, 4),
round(obj_data["box"][3] / height, 4),
)
# calculate a reasonable movement threshold (e.g., 5% of the frame diagonal)
threshold = 0.05 * math.sqrt(width**2 + height**2) / max(width, height)
if not self.path_data:
self.path_data.append((bottom_center, obj_data["frame_time"]))
elif (
math.dist(self.path_data[-1][0], bottom_center) >= threshold
or len(self.path_data) == 1
):
# check Euclidean distance before appending
self.path_data.append((bottom_center, obj_data["frame_time"]))
logger.debug(
f"Point tracking: {obj_data['id']}, {bottom_center}, {obj_data['frame_time']}"
)
self.obj_data.update(obj_data)
self.current_zones = current_zones
return (thumb_update, significant_change, autotracker_update)
def to_dict(self, include_thumbnail: bool = False):
def to_dict(self):
event = {
"id": self.obj_data["id"],
"camera": self.camera_config.name,
@ -336,11 +364,9 @@ class TrackedObject:
"current_estimated_speed": self.current_estimated_speed,
"average_estimated_speed": self.average_estimated_speed,
"velocity_angle": self.velocity_angle,
"path_data": self.path_data,
}
if include_thumbnail:
event["thumbnail"] = base64.b64encode(self.get_thumbnail()).decode("utf-8")
return event
def is_active(self):
@ -352,22 +378,16 @@ class TrackedObject:
> self.camera_config.detect.stationary.threshold
)
def get_thumbnail(self):
if (
self.thumbnail_data is None
or self.thumbnail_data["frame_time"] not in self.frame_cache
):
ret, jpg = cv2.imencode(".jpg", np.zeros((175, 175, 3), np.uint8))
jpg_bytes = self.get_jpg_bytes(
timestamp=False, bounding_box=False, crop=True, height=175
def get_thumbnail(self, ext: str):
img_bytes = self.get_img_bytes(
ext, timestamp=False, bounding_box=False, crop=True, height=175
)
if jpg_bytes:
return jpg_bytes
if img_bytes:
return img_bytes
else:
ret, jpg = cv2.imencode(".jpg", np.zeros((175, 175, 3), np.uint8))
return jpg.tobytes()
_, img = cv2.imencode(f".{ext}", np.zeros((175, 175, 3), np.uint8))
return img.tobytes()
def get_clean_png(self):
if self.thumbnail_data is None:
@ -390,8 +410,14 @@ class TrackedObject:
else:
return None
def get_jpg_bytes(
self, timestamp=False, bounding_box=False, crop=False, height=None, quality=70
def get_img_bytes(
self,
ext: str,
timestamp=False,
bounding_box=False,
crop=False,
height: int | None = None,
quality: int | None = None,
):
if self.thumbnail_data is None:
return None
@ -476,14 +502,69 @@ class TrackedObject:
position=self.camera_config.timestamp_style.position,
)
ret, jpg = cv2.imencode(
".jpg", best_frame, [int(cv2.IMWRITE_JPEG_QUALITY), quality]
)
quality_params = None
if ext == "jpg":
quality_params = [int(cv2.IMWRITE_JPEG_QUALITY), quality or 70]
elif ext == "webp":
quality_params = [int(cv2.IMWRITE_WEBP_QUALITY), quality or 60]
ret, jpg = cv2.imencode(f".{ext}", best_frame, quality_params)
if ret:
return jpg.tobytes()
else:
return None
def write_snapshot_to_disk(self) -> None:
    """Write this object's snapshot jpg (and optional clean png) to CLIPS_DIR.

    Rendering options (timestamp, bounding box, crop, height, quality) come
    from the camera's snapshot config. Logs a warning instead of raising
    when no frame is available to encode.
    """
    snapshot_config: SnapshotsConfig = self.camera_config.snapshots
    jpg_bytes = self.get_img_bytes(
        ext="jpg",
        timestamp=snapshot_config.timestamp,
        bounding_box=snapshot_config.bounding_box,
        crop=snapshot_config.crop,
        height=snapshot_config.height,
        quality=snapshot_config.quality,
    )

    # get_img_bytes returns None when no thumbnail frame data is cached
    if jpg_bytes is None:
        logger.warning(f"Unable to save snapshot for {self.obj_data['id']}.")
    else:
        with open(
            os.path.join(
                CLIPS_DIR, f"{self.camera_config.name}-{self.obj_data['id']}.jpg"
            ),
            "wb",
        ) as j:
            j.write(jpg_bytes)

    # write clean snapshot if enabled
    if snapshot_config.clean_copy:
        png_bytes = self.get_clean_png()

        if png_bytes is None:
            logger.warning(
                f"Unable to save clean snapshot for {self.obj_data['id']}."
            )
        else:
            with open(
                os.path.join(
                    CLIPS_DIR,
                    f"{self.camera_config.name}-{self.obj_data['id']}-clean.png",
                ),
                "wb",
            ) as p:
                p.write(png_bytes)
def write_thumbnail_to_disk(self) -> None:
    """Persist this object's webp thumbnail under the camera's THUMB_DIR folder."""
    directory = os.path.join(THUMB_DIR, self.camera_config.name)
    # exist_ok avoids the TOCTOU race of the previous exists()/makedirs()
    # pair when multiple writers create the directory concurrently.
    os.makedirs(directory, exist_ok=True)

    thumb_bytes = self.get_thumbnail("webp")

    with open(os.path.join(directory, f"{self.obj_data['id']}.webp"), "wb") as f:
        f.write(thumb_bytes)
def zone_filtered(obj: TrackedObject, object_config):
object_name = obj.obj_data["label"]

51
frigate/util/path.py Normal file
View File

@ -0,0 +1,51 @@
"""Path utilities."""
import base64
import os
from pathlib import Path
from frigate.const import CLIPS_DIR, THUMB_DIR
from frigate.models import Event
def get_event_thumbnail_bytes(event: Event) -> bytes | None:
    """Return the thumbnail image bytes for an event.

    Prefers the base64-encoded thumbnail stored inline on the event row and
    falls back to the on-disk webp thumbnail. Returns None when neither
    source is available.
    """
    if event.thumbnail:
        return base64.b64decode(event.thumbnail)

    thumb_path = os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp")
    try:
        with open(thumb_path, "rb") as f:
            return f.read()
    except Exception:
        # best-effort: a missing or unreadable file simply yields no thumbnail
        return None
### Deletion
def delete_event_images(event: Event) -> bool:
    """Delete both the snapshot and the thumbnail images for an event.

    Both deletions are always attempted: the previous short-circuiting
    `and` skipped the thumbnail whenever the snapshot deletion failed,
    leaving an orphaned thumbnail on disk.

    Returns:
        True only when both deletions succeeded.
    """
    snapshot_ok = delete_event_snapshot(event)
    thumbnail_ok = delete_event_thumbnail(event)
    return snapshot_ok and thumbnail_ok
def delete_event_snapshot(event: Event) -> bool:
    """Remove an event's snapshot jpg and its clean png copy from CLIPS_DIR.

    Missing files are not an error (unlink uses missing_ok); returns False
    only when the OS refuses a deletion.
    """
    base = os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}")

    try:
        for snapshot_file in (Path(f"{base}.jpg"), Path(f"{base}-clean.png")):
            snapshot_file.unlink(missing_ok=True)
        return True
    except OSError:
        return False
def delete_event_thumbnail(event: Event) -> bool:
    """Remove an event's on-disk thumbnail, if it has one.

    Events with an inline (database) thumbnail have no file on disk, so
    there is nothing to delete in that case. Always returns True.
    """
    if not event.thumbnail:
        thumb_file = Path(
            os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp")
        )
        thumb_file.unlink(missing_ok=True)

    return True

View File

@ -0,0 +1,36 @@
"""Peewee migrations -- 028_optional_event_thumbnail.py.
Some examples (model - class or model name)::
> Model = migrator.orm['model_name'] # Return model in current state by name
> migrator.sql(sql) # Run custom SQL
> migrator.python(func, *args, **kwargs) # Run python code
> migrator.create_model(Model) # Create a model (could be used as decorator)
> migrator.remove_model(model, cascade=True) # Remove a model
> migrator.add_fields(model, **fields) # Add fields to a model
> migrator.change_fields(model, **fields) # Change fields
> migrator.remove_fields(model, *field_names, cascade=True)
> migrator.rename_field(model, old_field_name, new_field_name)
> migrator.rename_table(model, new_table_name)
> migrator.add_index(model, *col_names, unique=False)
> migrator.drop_index(model, *col_names)
> migrator.add_not_null(model, *field_names)
> migrator.drop_not_null(model, *field_names)
> migrator.add_default(model, field_name, default)
"""
import peewee as pw
from frigate.models import Event
SQL = pw.SQL
def migrate(migrator, database, fake=False, **kwargs):
    # Allow NULL thumbnails: events may now keep their thumbnail on disk
    # instead of inline in the database row.
    migrator.drop_not_null(Event, "thumbnail")
def rollback(migrator, database, fake=False, **kwargs):
    # Reverse of migrate(): restore the NOT NULL constraint on Event.thumbnail.
    migrator.add_not_null(Event, "thumbnail")

View File

@ -11,6 +11,9 @@
},
"aliases": {
"components": "@/components",
"utils": "@/lib/utils"
"utils": "@/lib/utils",
"ui": "@/components/ui",
"lib": "@/lib",
"hooks": "@/hooks"
}
}
}

View File

@ -80,7 +80,7 @@ export default function SearchThumbnail({
: undefined
}
draggable={false}
src={`${apiHost}api/events/${searchResult.id}/thumbnail.jpg`}
src={`${apiHost}api/events/${searchResult.id}/thumbnail.webp`}
loading={isSafari ? "eager" : "lazy"}
onLoad={() => {
onImgLoad();

View File

@ -11,6 +11,9 @@ import {
ContextMenuContent,
ContextMenuItem,
ContextMenuSeparator,
ContextMenuSub,
ContextMenuSubContent,
ContextMenuSubTrigger,
ContextMenuTrigger,
} from "@/components/ui/context-menu";
import {
@ -24,12 +27,19 @@ import { VolumeSlider } from "@/components/ui/slider";
import { CameraStreamingDialog } from "../settings/CameraStreamingDialog";
import {
AllGroupsStreamingSettings,
FrigateConfig,
GroupStreamingSettings,
} from "@/types/frigateConfig";
import { useStreamingSettings } from "@/context/streaming-settings-provider";
import { IoIosWarning } from "react-icons/io";
import {
IoIosNotifications,
IoIosNotificationsOff,
IoIosWarning,
} from "react-icons/io";
import { cn } from "@/lib/utils";
import { useNavigate } from "react-router-dom";
import { formatUnixTimestampToDateTime } from "@/utils/dateUtil";
import { useNotifications, useNotificationSuspend } from "@/api/ws";
type LiveContextMenuProps = {
className?: string;
@ -48,6 +58,7 @@ type LiveContextMenuProps = {
statsState: boolean;
toggleStats: () => void;
resetPreferredLiveMode: () => void;
config?: FrigateConfig;
children?: ReactNode;
};
export default function LiveContextMenu({
@ -67,6 +78,7 @@ export default function LiveContextMenu({
statsState,
toggleStats,
resetPreferredLiveMode,
config,
children,
}: LiveContextMenuProps) {
const [showSettings, setShowSettings] = useState(false);
@ -185,6 +197,44 @@ export default function LiveContextMenu({
const navigate = useNavigate();
// notifications
const notificationsEnabledInConfig =
config?.cameras[camera].notifications.enabled_in_config;
const { payload: notificationState, send: sendNotification } =
useNotifications(camera);
const { payload: notificationSuspendUntil, send: sendNotificationSuspend } =
useNotificationSuspend(camera);
const [isSuspended, setIsSuspended] = useState<boolean>(false);
useEffect(() => {
if (notificationSuspendUntil) {
setIsSuspended(
notificationSuspendUntil !== "0" || notificationState === "OFF",
);
}
}, [notificationSuspendUntil, notificationState]);
const handleSuspend = (duration: string) => {
if (duration === "off") {
sendNotification("OFF");
} else {
sendNotificationSuspend(Number.parseInt(duration));
}
};
const formatSuspendedUntil = (timestamp: string) => {
if (timestamp === "0") return "Frigate restarts.";
return formatUnixTimestampToDateTime(Number.parseInt(timestamp), {
time_style: "medium",
date_style: "medium",
timezone: config?.ui.timezone,
strftime_fmt: `%b %d, ${config?.ui.time_format == "24hour" ? "%H:%M" : "%I:%M %p"}`,
});
};
return (
<div className={cn("w-full", className)}>
<ContextMenu key={camera} onOpenChange={handleOpenChange}>
@ -288,6 +338,115 @@ export default function LiveContextMenu({
</ContextMenuItem>
</>
)}
{notificationsEnabledInConfig && (
<>
<ContextMenuSeparator />
<ContextMenuSub>
<ContextMenuSubTrigger>
<div className="flex items-center gap-2">
<span>Notifications</span>
</div>
</ContextMenuSubTrigger>
<ContextMenuSubContent>
<div className="flex flex-col gap-0.5 px-2 py-1.5 text-sm font-medium">
<div className="flex w-full items-center gap-1">
{notificationState === "ON" ? (
<>
{isSuspended ? (
<>
<IoIosNotificationsOff className="size-5 text-muted-foreground" />
<span>Suspended</span>
</>
) : (
<>
<IoIosNotifications className="size-5 text-muted-foreground" />
<span>Enabled</span>
</>
)}
</>
) : (
<>
<IoIosNotificationsOff className="size-5 text-danger" />
<span>Disabled</span>
</>
)}
</div>
{isSuspended && (
<span className="text-xs text-primary-variant">
Until {formatSuspendedUntil(notificationSuspendUntil)}
</span>
)}
</div>
{isSuspended ? (
<>
<ContextMenuSeparator />
<ContextMenuItem
onClick={() => {
sendNotification("ON");
sendNotificationSuspend(0);
}}
>
<div className="flex w-full flex-col gap-2">
{notificationState === "ON" ? (
<span>Unsuspend</span>
) : (
<span>Enable</span>
)}
</div>
</ContextMenuItem>
</>
) : (
notificationState === "ON" && (
<>
<ContextMenuSeparator />
<div className="px-2 py-1.5">
<p className="mb-2 text-sm font-medium text-muted-foreground">
Suspend for:
</p>
<div className="space-y-1">
<ContextMenuItem onClick={() => handleSuspend("5")}>
5 minutes
</ContextMenuItem>
<ContextMenuItem
onClick={() => handleSuspend("10")}
>
10 minutes
</ContextMenuItem>
<ContextMenuItem
onClick={() => handleSuspend("30")}
>
30 minutes
</ContextMenuItem>
<ContextMenuItem
onClick={() => handleSuspend("60")}
>
1 hour
</ContextMenuItem>
<ContextMenuItem
onClick={() => handleSuspend("840")}
>
12 hours
</ContextMenuItem>
<ContextMenuItem
onClick={() => handleSuspend("1440")}
>
24 hours
</ContextMenuItem>
<ContextMenuItem
onClick={() => handleSuspend("off")}
>
Until restart
</ContextMenuItem>
</div>
</div>
</>
)
)}
</ContextMenuSubContent>
</ContextMenuSub>
</>
)}
</ContextMenuContent>
</ContextMenu>

View File

@ -112,6 +112,12 @@ export default function ExportDialog({
});
}, [camera, name, range, setRange, setName, setMode]);
const handleCancel = useCallback(() => {
setName("");
setMode("none");
setRange(undefined);
}, [setMode, setRange]);
const Overlay = isDesktop ? Dialog : Drawer;
const Trigger = isDesktop ? DialogTrigger : DrawerTrigger;
const Content = isDesktop ? DialogContent : DrawerContent;
@ -129,7 +135,7 @@ export default function ExportDialog({
show={mode == "timeline"}
onPreview={() => setShowPreview(true)}
onSave={() => onStartExport()}
onCancel={() => setMode("none")}
onCancel={handleCancel}
/>
<Overlay
open={mode == "select"}
@ -176,7 +182,7 @@ export default function ExportDialog({
setName={setName}
setRange={setRange}
setMode={setMode}
onCancel={() => setMode("none")}
onCancel={handleCancel}
/>
</Content>
</Overlay>

View File

@ -52,6 +52,8 @@ import {
ContextMenuTrigger,
} from "@/components/ui/context-menu";
import { useNavigate } from "react-router-dom";
import { ObjectPath } from "./ObjectPath";
import { getLifecycleItemDescription } from "@/utils/lifecycleUtil";
type ObjectLifecycleProps = {
className?: string;
@ -108,6 +110,17 @@ export default function ObjectLifecycle({
[config, event],
);
const getObjectColor = useCallback(
(label: string) => {
const objectColor = config?.model?.colormap[label];
if (objectColor) {
const reversed = [...objectColor].reverse();
return reversed;
}
},
[config],
);
const getZonePolygon = useCallback(
(zoneName: string) => {
if (!imgRef.current || !config) {
@ -120,7 +133,7 @@ export default function ObjectLifecycle({
return zonePoints
.split(",")
.map(parseFloat)
.map(Number.parseFloat)
.reduce((acc, value, index) => {
const isXCoordinate = index % 2 === 0;
const coordinate = isXCoordinate
@ -158,6 +171,47 @@ export default function ObjectLifecycle({
);
}, [config, event.camera]);
const savedPathPoints = useMemo(() => {
return (
event.data.path_data?.map(([coords, timestamp]: [number[], number]) => ({
x: coords[0],
y: coords[1],
timestamp,
lifecycle_item: undefined,
})) || []
);
}, [event.data.path_data]);
const eventSequencePoints = useMemo(() => {
return (
eventSequence
?.filter((event) => event.data.box !== undefined)
.map((event) => {
const [left, top, width, height] = event.data.box!;
return {
x: left + width / 2, // Center x-coordinate
y: top + height, // Bottom y-coordinate
timestamp: event.timestamp,
lifecycle_item: event,
};
}) || []
);
}, [eventSequence]);
// final object path with timeline points included
const pathPoints = useMemo(() => {
// don't display a path if we don't have any saved path points
if (
savedPathPoints.length === 0 ||
config?.cameras[event.camera]?.onvif.autotracking.enabled_in_config
)
return [];
return [...savedPathPoints, ...eventSequencePoints].sort(
(a, b) => a.timestamp - b.timestamp,
);
}, [savedPathPoints, eventSequencePoints, config, event]);
const [timeIndex, setTimeIndex] = useState(0);
const handleSetBox = useCallback(
@ -171,12 +225,13 @@ export default function ObjectLifecycle({
top: `${box[1] * imgRect.height}px`,
width: `${box[2] * imgRect.width}px`,
height: `${box[3] * imgRect.height}px`,
borderColor: `rgb(${getObjectColor(event.label)?.join(",")})`,
};
setBoxStyle(style);
}
},
[imgRef],
[imgRef, event, getObjectColor],
);
// image
@ -254,6 +309,21 @@ export default function ObjectLifecycle({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [mainApi, thumbnailApi]);
const handlePathPointClick = useCallback(
(index: number) => {
if (!mainApi || !thumbnailApi || !eventSequence) return;
const sequenceIndex = eventSequence.findIndex(
(item) => item.timestamp === pathPoints[index].timestamp,
);
if (sequenceIndex !== -1) {
mainApi.scrollTo(sequenceIndex);
thumbnailApi.scrollTo(sequenceIndex);
setCurrent(sequenceIndex);
}
},
[mainApi, thumbnailApi, eventSequence, pathPoints],
);
if (!event.id || !eventSequence || !config || !timeIndex) {
return <ActivityIndicator />;
}
@ -325,6 +395,8 @@ export default function ObjectLifecycle({
/>
{showZones &&
imgRef.current?.width &&
imgRef.current?.height &&
lifecycleZones?.map((zone) => (
<div
className="absolute inset-0 flex items-center justify-center"
@ -355,13 +427,36 @@ export default function ObjectLifecycle({
))}
{boxStyle && (
<div
className="absolute border-2 border-red-600"
style={boxStyle}
>
<div className="absolute border-2" style={boxStyle}>
<div className="absolute bottom-[-3px] left-1/2 h-[5px] w-[5px] -translate-x-1/2 transform bg-yellow-500" />
</div>
)}
{imgRef.current?.width &&
imgRef.current?.height &&
pathPoints &&
pathPoints.length > 0 && (
<div
className="absolute inset-0 flex items-center justify-center"
style={{
width: imgRef.current?.clientWidth,
height: imgRef.current?.clientHeight,
}}
key="path"
>
<svg
viewBox={`0 0 ${imgRef.current?.width} ${imgRef.current?.height}`}
className="absolute inset-0"
>
<ObjectPath
positions={pathPoints}
color={getObjectColor(event.label)}
width={2}
imgRef={imgRef}
onPointClick={handlePathPointClick}
/>
</svg>
</div>
)}
</ContextMenuTrigger>
<ContextMenuContent>
<ContextMenuItem>
@ -412,6 +507,11 @@ export default function ObjectLifecycle({
{current + 1} of {eventSequence.length}
</div>
</div>
{config?.cameras[event.camera]?.onvif.autotracking.enabled_in_config && (
<div className="-mt-2 mb-2 text-sm text-danger">
Bounding box positions will be inaccurate for autotracking cameras.
</div>
)}
{showControls && (
<AnnotationSettingsPane
event={event}
@ -655,47 +755,3 @@ export function LifecycleIcon({
return null;
}
}
// Build a human-readable, one-line description for a lifecycle timeline entry.
// Prefers the sub label (first entry when it is an array) over the raw label,
// and renders underscores as spaces for display.
function getLifecycleItemDescription(lifecycleItem: ObjectLifecycleSequence) {
  const subLabel = Array.isArray(lifecycleItem.data.sub_label)
    ? lifecycleItem.data.sub_label[0]
    : lifecycleItem.data.sub_label;
  const label = (subLabel || lifecycleItem.data.label).replaceAll("_", " ");

  switch (lifecycleItem.class_type) {
    case "visible":
      return `${label} detected`;
    case "entered_zone":
      return `${label} entered ${lifecycleItem.data.zones
        .join(" and ")
        .replaceAll("_", " ")}`;
    case "active":
      return `${label} became active`;
    case "stationary":
      return `${label} became stationary`;
    case "attribute": {
      const attribute = lifecycleItem.data.attribute.replaceAll("_", " ");

      // face / license plate attributes read as detections; everything else
      // reads as a recognition of the raw label
      if (
        lifecycleItem.data.attribute == "face" ||
        lifecycleItem.data.attribute == "license_plate"
      ) {
        return `${attribute} detected for ${label}`;
      }

      return `${lifecycleItem.data.label} recognized as ${attribute}`;
    }
    case "gone":
      return `${label} left`;
    case "heard":
      return `${label} heard`;
    case "external":
      return `${label} detected`;
  }
}

View File

@ -0,0 +1,113 @@
import { useCallback } from "react";
import { LifecycleClassType, Position } from "@/types/timeline";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/ui/tooltip";
import { TooltipPortal } from "@radix-ui/react-tooltip";
import { getLifecycleItemDescription } from "@/utils/lifecycleUtil";
type ObjectPathProps = {
positions?: Position[];
color?: number[];
width?: number;
pointRadius?: number;
imgRef: React.RefObject<HTMLImageElement>;
onPointClick?: (index: number) => void;
};
// Fixed RGB marker colors for path points that carry a lifecycle event.
// Types not listed here (and plain tracked points) fall back to a slightly
// darkened version of the base path color (see getPointColor below).
const typeColorMap: Partial<
  Record<LifecycleClassType, [number, number, number]>
> = {
  [LifecycleClassType.VISIBLE]: [0, 255, 0], // Green
  [LifecycleClassType.GONE]: [255, 0, 0], // Red
  [LifecycleClassType.ENTERED_ZONE]: [255, 165, 0], // Orange
  [LifecycleClassType.ATTRIBUTE]: [128, 0, 128], // Purple
  [LifecycleClassType.ACTIVE]: [255, 255, 0], // Yellow
  [LifecycleClassType.STATIONARY]: [128, 128, 128], // Gray
  [LifecycleClassType.HEARD]: [0, 255, 255], // Cyan
  [LifecycleClassType.EXTERNAL]: [165, 42, 42], // Brown
};
// Renders a tracked object's movement path as an SVG overlay: a polyline
// through all points plus a clickable circle per point. Points that carry a
// lifecycle event are colored per typeColorMap and forward clicks to
// onPointClick; plain tracked points are informational only.
export function ObjectPath({
  positions,
  color = [0, 0, 255],
  width = 2,
  pointRadius = 4,
  imgRef,
  onPointClick,
}: ObjectPathProps) {
  // Scale normalized (0-1) coordinates into the rendered image's pixel space.
  const getAbsolutePositions = useCallback(() => {
    if (!imgRef.current || !positions) return [];
    const imgRect = imgRef.current.getBoundingClientRect();

    return positions.map((pos) => ({
      x: pos.x * imgRect.width,
      y: pos.y * imgRect.height,
      timestamp: pos.timestamp,
      lifecycle_item: pos.lifecycle_item,
    }));
  }, [positions, imgRef]);

  // Build an SVG path string of straight line segments through the points.
  const generateStraightPath = useCallback((points: Position[]) => {
    if (!points || points.length < 2) return "";

    let path = `M ${points[0].x} ${points[0].y}`;
    for (let i = 1; i < points.length; i++) {
      path += ` L ${points[i].x} ${points[i].y}`;
    }
    return path;
  }, []);

  // Lifecycle points use their fixed per-type color; plain tracked points use
  // a slightly darkened version of the base path color.
  const getPointColor = (baseColor: number[], type?: LifecycleClassType) => {
    if (type) {
      const typeColor = typeColorMap[type];
      if (typeColor) {
        return `rgb(${typeColor.join(",")})`;
      }
    }
    // normal path point
    return `rgb(${baseColor.map((c) => Math.max(0, c - 10)).join(",")})`;
  };

  // Nothing to draw until the image element has rendered.
  if (!imgRef.current) return null;

  const absolutePositions = getAbsolutePositions();
  const lineColor = `rgb(${color.join(",")})`;

  return (
    <g>
      {/* connecting line through every point */}
      <path
        d={generateStraightPath(absolutePositions)}
        fill="none"
        stroke={lineColor}
        strokeWidth={width}
        strokeLinecap="round"
        strokeLinejoin="round"
      />
      {absolutePositions.map((pos, index) => (
        <Tooltip key={`point-${index}`}>
          <TooltipTrigger asChild>
            <circle
              cx={pos.x}
              cy={pos.y}
              r={pointRadius}
              fill={getPointColor(color, pos.lifecycle_item?.class_type)}
              stroke="white"
              strokeWidth={width / 2}
              onClick={() =>
                pos.lifecycle_item && onPointClick && onPointClick(index)
              }
              style={{ cursor: pos.lifecycle_item ? "pointer" : "default" }}
            />
          </TooltipTrigger>
          <TooltipPortal>
            <TooltipContent side="top" className="capitalize">
              {pos.lifecycle_item
                ? getLifecycleItemDescription(pos.lifecycle_item)
                : "Tracked point"}
            </TooltipContent>
          </TooltipPortal>
        </Tooltip>
      ))}
    </g>
  );
}

View File

@ -385,7 +385,7 @@ function EventItem({
src={
event.has_snapshot
? `${apiHost}api/events/${event.id}/snapshot.jpg`
: `${apiHost}api/events/${event.id}/thumbnail.jpg`
: `${apiHost}api/events/${event.id}/thumbnail.webp`
}
/>
{hovered && (
@ -400,7 +400,7 @@ function EventItem({
href={
event.has_snapshot
? `${apiHost}api/events/${event.id}/snapshot.jpg`
: `${apiHost}api/events/${event.id}/thumbnail.jpg`
: `${apiHost}api/events/${event.id}/thumbnail.webp`
}
>
<Chip className="cursor-pointer rounded-md bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500">

View File

@ -511,7 +511,7 @@ function ObjectDetailsTab({
: undefined
}
draggable={false}
src={`${apiHost}api/events/${search.id}/thumbnail.jpg`}
src={`${apiHost}api/events/${search.id}/thumbnail.webp`}
/>
{config?.semantic_search.enabled && search.data.type == "object" && (
<Button

View File

@ -0,0 +1,59 @@
import * as React from "react"
import { cva, type VariantProps } from "class-variance-authority"
import { cn } from "@/lib/utils"
// Variant-driven class builder for the Alert container (class-variance-
// authority). The [&>svg] selectors position a leading icon absolutely and
// pad sibling content so text clears it.
const alertVariants = cva(
  "relative w-full rounded-lg border p-4 [&>svg~*]:pl-7 [&>svg+div]:translate-y-[-3px] [&>svg]:absolute [&>svg]:left-4 [&>svg]:top-4 [&>svg]:text-foreground",
  {
    variants: {
      variant: {
        default: "bg-background text-foreground",
        destructive:
          "border-destructive/50 text-destructive dark:border-destructive [&>svg]:text-destructive",
      },
    },
    defaultVariants: {
      variant: "default",
    },
  }
)
// Alert container: a div with role="alert" for assistive technology,
// styled by alertVariants and forwarding its ref to the underlying div.
const Alert = React.forwardRef<
  HTMLDivElement,
  React.HTMLAttributes<HTMLDivElement> & VariantProps<typeof alertVariants>
>(({ className, variant, ...props }, ref) => (
  <div
    ref={ref}
    role="alert"
    className={cn(alertVariants({ variant }), className)}
    {...props}
  />
))
Alert.displayName = "Alert"
// Heading line of an alert, rendered as an <h5>. The forwarded ref is typed
// as HTMLHeadingElement to match the actual element (it was previously
// HTMLParagraphElement, which mismatched the rendered <h5>).
const AlertTitle = React.forwardRef<
  HTMLHeadingElement,
  React.HTMLAttributes<HTMLHeadingElement>
>(({ className, ...props }, ref) => (
  <h5
    ref={ref}
    className={cn("mb-1 font-medium leading-none tracking-tight", className)}
    {...props}
  />
))
AlertTitle.displayName = "AlertTitle"
// Body copy of an alert, rendered as a <div> so block content can nest. The
// forwarded ref is typed as HTMLDivElement to match the actual element (it
// was previously HTMLParagraphElement, which mismatched the rendered <div>).
const AlertDescription = React.forwardRef<
  HTMLDivElement,
  React.HTMLAttributes<HTMLParagraphElement>
>(({ className, ...props }, ref) => (
  <div
    ref={ref}
    className={cn("text-sm [&_p]:leading-relaxed", className)}
    {...props}
  />
))
AlertDescription.displayName = "AlertDescription"
export { Alert, AlertTitle, AlertDescription }

View File

@ -327,7 +327,7 @@ function FaceAttempt({
.post(`/faces/reprocess`, { training_file: image })
.then((resp) => {
if (resp.status == 200) {
toast.success(`Successfully trained face.`, {
toast.success(`Successfully updated face score.`, {
position: "top-center",
});
onRefresh();
@ -335,11 +335,14 @@ function FaceAttempt({
})
.catch((error) => {
if (error.response?.data?.message) {
toast.error(`Failed to train: ${error.response.data.message}`, {
position: "top-center",
});
toast.error(
`Failed to update score: ${error.response.data.message}`,
{
position: "top-center",
},
);
} else {
toast.error(`Failed to train: ${error.message}`, {
toast.error(`Failed to update score: ${error.message}`, {
position: "top-center",
});
}
@ -419,7 +422,7 @@ function FaceAttempt({
onClick={() => onReprocess()}
/>
</TooltipTrigger>
<TooltipContent>Delete Face Attempt</TooltipContent>
<TooltipContent>Reprocess Face</TooltipContent>
</Tooltip>
<Tooltip>
<TooltipTrigger>

View File

@ -1,353 +0,0 @@
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuLabel,
DropdownMenuSeparator,
DropdownMenuTrigger,
} from "@/components/ui/dropdown-menu";
import {
AlertDialog,
AlertDialogAction,
AlertDialogCancel,
AlertDialogContent,
AlertDialogDescription,
AlertDialogFooter,
AlertDialogHeader,
AlertDialogTitle,
} from "@/components/ui/alert-dialog";
import { ToggleGroup, ToggleGroupItem } from "@/components/ui/toggle-group";
import { Drawer, DrawerContent, DrawerTrigger } from "@/components/ui/drawer";
import { Button } from "@/components/ui/button";
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import useOptimisticState from "@/hooks/use-optimistic-state";
import { isMobile } from "react-device-detect";
import { FaVideo } from "react-icons/fa";
import { CameraConfig, FrigateConfig } from "@/types/frigateConfig";
import useSWR from "swr";
import FilterSwitch from "@/components/filter/FilterSwitch";
import { ZoneMaskFilterButton } from "@/components/filter/ZoneMaskFilter";
import { PolygonType } from "@/types/canvas";
import { ScrollArea, ScrollBar } from "@/components/ui/scroll-area";
import scrollIntoView from "scroll-into-view-if-needed";
import CameraSettingsView from "@/views/settings/CameraSettingsView";
import ObjectSettingsView from "@/views/settings/ObjectSettingsView";
import MotionTunerView from "@/views/settings/MotionTunerView";
import MasksAndZonesView from "@/views/settings/MasksAndZonesView";
import AuthenticationView from "@/views/settings/AuthenticationView";
import NotificationView from "@/views/settings/NotificationsSettingsView";
import SearchSettingsView from "@/views/settings/SearchSettingsView";
import UiSettingsView from "@/views/settings/UiSettingsView";
import { useSearchEffect } from "@/hooks/use-overlay-state";
import { useSearchParams } from "react-router-dom";
// Canonical, ordered list of every settings view the page can show;
// `as const` keeps the entries as string literals so SettingsType below can
// be derived as a literal union.
const allSettingsViews = [
  "UI settings",
  "explore settings",
  "camera settings",
  "masks / zones",
  "motion tuner",
  "debug",
  "users",
  "notifications",
] as const;

// Union of valid settings view names, derived from allSettingsViews.
type SettingsType = (typeof allSettingsViews)[number];
export default function Settings() {
// Selected settings page; pageToggle mirrors it optimistically (100ms delay)
// so the tab UI responds instantly while `page` catches up.
const [page, setPage] = useState<SettingsType>("UI settings");
const [pageToggle, setPageToggle] = useOptimisticState(page, setPage, 100);
const tabsRef = useRef<HTMLDivElement | null>(null);
const { data: config } = useSWR<FrigateConfig>("config");
const [searchParams] = useSearchParams();
// available settings views
// The notifications tab requires the browser Notification API and a secure
// context; remove it from the list when either is unavailable.
const settingsViews = useMemo(() => {
const views = [...allSettingsViews];
if (!("Notification" in window) || !window.isSecureContext) {
const index = views.indexOf("notifications");
views.splice(index, 1);
}
return views;
}, []);
// TODO: confirm leave page
const [unsavedChanges, setUnsavedChanges] = useState(false);
const [confirmationDialogOpen, setConfirmationDialogOpen] = useState(false);
// Cameras offered in the camera selector: enabled, dashboard-visible,
// sorted by the configured UI order. Empty until config loads.
const cameras = useMemo(() => {
if (!config) {
return [];
}
return Object.values(config.cameras)
.filter((conf) => conf.ui.dashboard && conf.enabled)
.sort((aConf, bConf) => aConf.ui.order - bConf.ui.order);
}, [config]);
const [selectedCamera, setSelectedCamera] = useState<string>("");
const [filterZoneMask, setFilterZoneMask] = useState<PolygonType[]>();
// Close the unsaved-changes dialog. Saving is not implemented yet (TODO),
// so both the save and cancel paths only clear the dirty flag.
const handleDialog = useCallback(
(save: boolean) => {
if (unsavedChanges && save) {
// TODO
}
setConfirmationDialogOpen(false);
setUnsavedChanges(false);
},
[unsavedChanges],
);
// Default the camera selection to the first available camera once loaded.
useEffect(() => {
if (cameras.length > 0 && selectedCamera === "") {
setSelectedCamera(cameras[0].name);
}
}, [cameras, selectedCamera]);
// Keep the active tab visible inside the horizontally scrollable tab strip.
useEffect(() => {
if (tabsRef.current) {
const element = tabsRef.current.querySelector(
`[data-nav-item="${pageToggle}"]`,
);
if (element instanceof HTMLElement) {
scrollIntoView(element, {
behavior: "smooth",
inline: "start",
});
}
}
}, [tabsRef, pageToggle]);
// Deep-link support: ?page=<view> selects a settings view when valid.
useSearchEffect("page", (page: string) => {
if (allSettingsViews.includes(page as SettingsType)) {
setPage(page as SettingsType);
}
// don't clear url params if we're creating a new object mask
return !searchParams.has("object_mask");
});
// Deep-link support: ?camera=<name> preselects a camera when it exists.
useSearchEffect("camera", (camera: string) => {
const cameraNames = cameras.map((c) => c.name);
if (cameraNames.includes(camera)) {
setSelectedCamera(camera);
}
// don't clear url params if we're creating a new object mask
return !searchParams.has("object_mask");
});
useEffect(() => {
document.title = "Settings - Frigate";
}, []);
return (
<div className="flex size-full flex-col p-2">
<div className="relative flex h-11 w-full items-center justify-between">
<ScrollArea className="w-full whitespace-nowrap">
<div ref={tabsRef} className="flex flex-row">
<ToggleGroup
className="*:rounded-md *:px-3 *:py-4"
type="single"
size="sm"
value={pageToggle}
onValueChange={(value: SettingsType) => {
if (value) {
setPageToggle(value);
}
}}
>
{Object.values(settingsViews).map((item) => (
<ToggleGroupItem
key={item}
className={`flex scroll-mx-10 items-center justify-between gap-2 ${page == "UI settings" ? "last:mr-20" : ""} ${pageToggle == item ? "" : "*:text-muted-foreground"}`}
value={item}
data-nav-item={item}
aria-label={`Select ${item}`}
>
<div className="capitalize">{item}</div>
</ToggleGroupItem>
))}
</ToggleGroup>
<ScrollBar orientation="horizontal" className="h-0" />
</div>
</ScrollArea>
{(page == "debug" ||
page == "camera settings" ||
page == "masks / zones" ||
page == "motion tuner") && (
<div className="ml-2 flex flex-shrink-0 items-center gap-2">
{page == "masks / zones" && (
<ZoneMaskFilterButton
selectedZoneMask={filterZoneMask}
updateZoneMaskFilter={setFilterZoneMask}
/>
)}
<CameraSelectButton
allCameras={cameras}
selectedCamera={selectedCamera}
setSelectedCamera={setSelectedCamera}
/>
</div>
)}
</div>
<div className="mt-2 flex h-full w-full flex-col items-start md:h-dvh md:pb-24">
{page == "UI settings" && <UiSettingsView />}
{page == "explore settings" && (
<SearchSettingsView setUnsavedChanges={setUnsavedChanges} />
)}
{page == "debug" && (
<ObjectSettingsView selectedCamera={selectedCamera} />
)}
{page == "camera settings" && (
<CameraSettingsView
selectedCamera={selectedCamera}
setUnsavedChanges={setUnsavedChanges}
/>
)}
{page == "masks / zones" && (
<MasksAndZonesView
selectedCamera={selectedCamera}
selectedZoneMask={filterZoneMask}
setUnsavedChanges={setUnsavedChanges}
/>
)}
{page == "motion tuner" && (
<MotionTunerView
selectedCamera={selectedCamera}
setUnsavedChanges={setUnsavedChanges}
/>
)}
{page == "users" && <AuthenticationView />}
{page == "notifications" && (
<NotificationView setUnsavedChanges={setUnsavedChanges} />
)}
</div>
{confirmationDialogOpen && (
<AlertDialog
open={confirmationDialogOpen}
onOpenChange={() => setConfirmationDialogOpen(false)}
>
<AlertDialogContent>
<AlertDialogHeader>
<AlertDialogTitle>You have unsaved changes.</AlertDialogTitle>
<AlertDialogDescription>
Do you want to save your changes before continuing?
</AlertDialogDescription>
</AlertDialogHeader>
<AlertDialogFooter>
<AlertDialogCancel onClick={() => handleDialog(false)}>
Cancel
</AlertDialogCancel>
<AlertDialogAction onClick={() => handleDialog(true)}>
Save
</AlertDialogAction>
</AlertDialogFooter>
</AlertDialogContent>
</AlertDialog>
)}
</div>
);
}
// Props for CameraSelectButton.
type CameraSelectButtonProps = {
  // Cameras offered in the picker.
  allCameras: CameraConfig[];
  // Name of the currently selected camera ("" until one is chosen).
  selectedCamera: string;
  // State setter used to change the selection.
  setSelectedCamera: React.Dispatch<React.SetStateAction<string>>;
};
/**
 * Camera picker used by the camera-scoped settings views (debug, camera
 * settings, masks/zones, motion tuner).
 *
 * Shows a trigger button with the current camera name and, when opened,
 * the camera list as switches — inside a bottom drawer on mobile and a
 * dropdown menu on desktop. Renders nothing when no cameras are available.
 */
function CameraSelectButton({
  allCameras,
  selectedCamera,
  setSelectedCamera,
}: CameraSelectButtonProps) {
  const [open, setOpen] = useState(false);

  if (!allCameras.length) {
    // Explicit `null` instead of a bare `return`: returning `undefined`
    // from a component is rejected by React versions before 18.
    return null;
  }

  const trigger = (
    <Button
      className="flex items-center gap-2 bg-selected capitalize hover:bg-selected"
      aria-label="Select a camera"
      size="sm"
    >
      <FaVideo className="text-background dark:text-primary" />
      <div className="hidden text-background dark:text-primary md:block">
        {/* selectedCamera is a string and starts as "", so use a falsy check
            (the previous `== undefined` could never match) to show the
            placeholder before the first camera is selected */}
        {!selectedCamera ? "No Camera" : selectedCamera.replaceAll("_", " ")}
      </div>
    </Button>
  );
  const content = (
    <>
      {isMobile && (
        <>
          <DropdownMenuLabel className="flex justify-center">
            Camera
          </DropdownMenuLabel>
          <DropdownMenuSeparator />
        </>
      )}
      <div className="scrollbar-container mb-5 h-auto max-h-[80dvh] overflow-y-auto overflow-x-hidden p-4 md:mb-1">
        <div className="flex flex-col gap-2.5">
          {allCameras.map((item) => (
            <FilterSwitch
              key={item.name}
              isChecked={item.name === selectedCamera}
              label={item.name.replaceAll("_", " ")}
              onCheckedChange={(isChecked) => {
                if (isChecked) {
                  setSelectedCamera(item.name);
                  setOpen(false);
                }
              }}
            />
          ))}
        </div>
      </div>
    </>
  );

  if (isMobile) {
    return (
      <Drawer
        open={open}
        onOpenChange={(open: boolean) => {
          if (!open) {
            // NOTE(review): re-setting the current selection on close is a
            // no-op; presumably kept for parity with similar pickers — confirm.
            setSelectedCamera(selectedCamera);
          }
          setOpen(open);
        }}
      >
        <DrawerTrigger asChild>{trigger}</DrawerTrigger>
        <DrawerContent className="max-h-[75dvh] overflow-hidden">
          {content}
        </DrawerContent>
      </Drawer>
    );
  }

  return (
    <DropdownMenu
      modal={false}
      open={open}
      onOpenChange={(open: boolean) => {
        if (!open) {
          setSelectedCamera(selectedCamera);
        }
        setOpen(open);
      }}
    >
      <DropdownMenuTrigger asChild>{trigger}</DropdownMenuTrigger>
      <DropdownMenuContent>{content}</DropdownMenuContent>
    </DropdownMenu>
  );
}

View File

@ -22,5 +22,6 @@ export interface Event {
area: number;
ratio: number;
type: "object" | "audio" | "manual";
path_data: [number[], number][];
};
}

View File

@ -1,3 +1,15 @@
// Kinds of entries that can appear in an object's lifecycle timeline
// (used as ObjectLifecycleSequence.class_type).
export enum LifecycleClassType {
  VISIBLE = "visible",
  GONE = "gone",
  ENTERED_ZONE = "entered_zone",
  ATTRIBUTE = "attribute",
  ACTIVE = "active",
  STATIONARY = "stationary",
  HEARD = "heard",
  EXTERNAL = "external",
  PATH_POINT = "path_point",
}
export type ObjectLifecycleSequence = {
camera: string;
timestamp: number;
@ -10,15 +22,7 @@ export type ObjectLifecycleSequence = {
attribute: string;
zones: string[];
};
class_type:
| "visible"
| "gone"
| "entered_zone"
| "attribute"
| "active"
| "stationary"
| "heard"
| "external";
class_type: LifecycleClassType;
source_id: string;
source: string;
};
@ -28,3 +32,10 @@ export type TimeRange = { before: number; after: number };
export type TimelineType = "timeline" | "events";
export type TimelineScrubMode = "auto" | "drag" | "hover" | "compat";
// A point on an object's tracked path, optionally tied to the lifecycle
// entry that produced it.
// NOTE(review): assumes x/y are in the consumer's expected coordinate space
// (normalized vs. pixel is not evident here) — confirm against the producer.
export type Position = {
  x: number;
  y: number;
  timestamp: number;
  lifecycle_item?: ObjectLifecycleSequence;
};

View File

@ -0,0 +1,47 @@
import { ObjectLifecycleSequence } from "@/types/timeline";
export function getLifecycleItemDescription(
lifecycleItem: ObjectLifecycleSequence,
) {
const label = (
(Array.isArray(lifecycleItem.data.sub_label)
? lifecycleItem.data.sub_label[0]
: lifecycleItem.data.sub_label) || lifecycleItem.data.label
).replaceAll("_", " ");
switch (lifecycleItem.class_type) {
case "visible":
return `${label} detected`;
case "entered_zone":
return `${label} entered ${lifecycleItem.data.zones
.join(" and ")
.replaceAll("_", " ")}`;
case "active":
return `${label} became active`;
case "stationary":
return `${label} became stationary`;
case "attribute": {
let title = "";
if (
lifecycleItem.data.attribute == "face" ||
lifecycleItem.data.attribute == "license_plate"
) {
title = `${lifecycleItem.data.attribute.replaceAll(
"_",
" ",
)} detected for ${label}`;
} else {
title = `${
lifecycleItem.data.label
} recognized as ${lifecycleItem.data.attribute.replaceAll("_", " ")}`;
}
return title;
}
case "gone":
return `${label} left`;
case "heard":
return `${label} heard`;
case "external":
return `${label} detected`;
}
}

View File

@ -262,7 +262,7 @@ function ExploreThumbnailImage({
}
loading={isSafari ? "eager" : "lazy"}
draggable={false}
src={`${apiHost}api/events/${event.id}/thumbnail.jpg`}
src={`${apiHost}api/events/${event.id}/thumbnail.webp`}
onClick={() => setSearchDetail(event)}
onLoad={onImgLoad}
alt={`${event.label} thumbnail`}

View File

@ -584,6 +584,7 @@ export default function DraggableGridLayout({
resetPreferredLiveMode={() =>
resetPreferredLiveMode(camera.name)
}
config={config}
>
<LivePlayer
key={camera.name}
@ -790,6 +791,7 @@ type GridLiveContextMenuProps = {
muteAll: () => void;
unmuteAll: () => void;
resetPreferredLiveMode: () => void;
config?: FrigateConfig;
};
const GridLiveContextMenu = React.forwardRef<
@ -819,6 +821,7 @@ const GridLiveContextMenu = React.forwardRef<
muteAll,
unmuteAll,
resetPreferredLiveMode,
config,
...props
},
ref,
@ -849,6 +852,7 @@ const GridLiveContextMenu = React.forwardRef<
muteAll={muteAll}
unmuteAll={unmuteAll}
resetPreferredLiveMode={resetPreferredLiveMode}
config={config}
>
{children}
</LiveContextMenu>

View File

@ -507,6 +507,7 @@ export default function LiveDashboardView({
resetPreferredLiveMode={() =>
resetPreferredLiveMode(camera.name)
}
config={config}
>
<LivePlayer
cameraRef={cameraRef}

View File

@ -20,7 +20,7 @@ import { zodResolver } from "@hookform/resolvers/zod";
import axios from "axios";
import { useCallback, useContext, useEffect, useMemo, useState } from "react";
import { useForm } from "react-hook-form";
import { LuCheck, LuExternalLink, LuX } from "react-icons/lu";
import { LuAlertCircle, LuCheck, LuExternalLink, LuX } from "react-icons/lu";
import { Link } from "react-router-dom";
import { toast } from "sonner";
import useSWR from "swr";
@ -39,6 +39,7 @@ import {
} from "@/components/ui/select";
import { formatUnixTimestampToDateTime } from "@/utils/dateUtil";
import FilterSwitch from "@/components/filter/FilterSwitch";
import { Alert, AlertDescription, AlertTitle } from "@/components/ui/alert";
const NOTIFICATION_SERVICE_WORKER = "notifications-worker.js";
@ -161,6 +162,9 @@ export default function NotificationView({
useState<ServiceWorkerRegistration | null>();
useEffect(() => {
if (!("Notification" in window) || !window.isSecureContext) {
return;
}
navigator.serviceWorker
.getRegistration(NOTIFICATION_SERVICE_WORKER)
.then((worker) => {
@ -279,6 +283,60 @@ export default function NotificationView({
saveToConfig(values as NotificationSettingsValueType);
}
if (!("Notification" in window) || !window.isSecureContext) {
return (
<div className="scrollbar-container order-last mb-10 mt-2 flex h-full w-full flex-col overflow-y-auto rounded-lg border-[1px] border-secondary-foreground bg-background_alt p-2 md:order-none md:mb-0 md:mr-2 md:mt-0">
<div className="grid w-full grid-cols-1 gap-4 md:grid-cols-2">
<div className="col-span-1">
<Heading as="h3" className="my-2">
Notification Settings
</Heading>
<div className="max-w-6xl">
<div className="mb-5 mt-2 flex max-w-5xl flex-col gap-2 text-sm text-primary-variant">
<p>
Frigate can natively send push notifications to your device
when it is running in the browser or installed as a PWA.
</p>
<div className="flex items-center text-primary">
<Link
to="https://docs.frigate.video/configuration/notifications"
target="_blank"
rel="noopener noreferrer"
className="inline"
>
Read the Documentation{" "}
<LuExternalLink className="ml-2 inline-flex size-3" />
</Link>
</div>
</div>
</div>
<Alert variant="destructive">
<LuAlertCircle className="size-5" />
<AlertTitle>Notifications Unavailable</AlertTitle>
<AlertDescription>
Web push notifications require a secure context (
<code>https://...</code>). This is a browser limitation. Access
Frigate securely to use notifications.
<div className="mt-3 flex items-center">
<Link
to="https://docs.frigate.video/configuration/authentication"
target="_blank"
rel="noopener noreferrer"
className="inline"
>
Read the Documentation{" "}
<LuExternalLink className="ml-2 inline-flex size-3" />
</Link>
</div>
</AlertDescription>
</Alert>
</div>
</div>
</div>
);
}
return (
<>
<div className="flex size-full flex-col md:flex-row">