mirror of https://github.com/blakeblackshear/frigate.git

Merge remote-tracking branch 'upstream/dev' into Synaptics-SL1680

commit 4fb5a22817
.github/DISCUSSION_TEMPLATE/report-a-bug.yml | 5

@@ -6,7 +6,7 @@ body:
       value: |
         Use this form to submit a reproducible bug in Frigate or Frigate's UI.

-        Before submitting your bug report, please [search the discussions][discussions], look at recent open and closed [pull requests][prs], read the [official Frigate documentation][docs], and read the [Frigate FAQ][faq] pinned at the Discussion page to see if your bug has already been fixed by the developers or reported by the community.
+        Before submitting your bug report, please ask the AI with the "Ask AI" button on the [official documentation site][ai] about your issue, [search the discussions][discussions], look at recent open and closed [pull requests][prs], read the [official Frigate documentation][docs], and read the [Frigate FAQ][faq] pinned at the Discussion page to see if your bug has already been fixed by the developers or reported by the community.

         **If you are unsure if your issue is actually a bug or not, please submit a support request first.**

@@ -14,6 +14,7 @@ body:
         [prs]: https://www.github.com/blakeblackshear/frigate/pulls
         [docs]: https://docs.frigate.video
         [faq]: https://github.com/blakeblackshear/frigate/discussions/12724
+        [ai]: https://docs.frigate.video
   - type: checkboxes
     attributes:
       label: Checklist
@@ -26,6 +27,8 @@ body:
         - label: I have tried a different browser to see if it is related to my browser.
           required: true
         - label: I have tried reproducing the issue in [incognito mode](https://www.computerworld.com/article/1719851/how-to-go-incognito-in-chrome-firefox-safari-and-edge.html) to rule out problems with any third party extensions or plugins I have installed.
+        - label: I have asked the AI at https://docs.frigate.video about my issue.
+          required: true
   - type: textarea
     id: description
     attributes:
.github/workflows/ci.yml | 40

@@ -23,7 +23,7 @@ jobs:
     name: AMD64 Build
     steps:
       - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          persist-credentials: false
      - name: Set up QEMU and Buildx
@@ -47,7 +47,7 @@ jobs:
    name: ARM Build
    steps:
      - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          persist-credentials: false
      - name: Set up QEMU and Buildx
@@ -77,42 +77,12 @@ jobs:
            rpi.tags=${{ steps.setup.outputs.image-name }}-rpi
            *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
            *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max
-  jetson_jp5_build:
-    if: false
-    runs-on: ubuntu-22.04
-    name: Jetson Jetpack 5
-    steps:
-      - name: Check out code
-        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - name: Set up QEMU and Buildx
-        id: setup
-        uses: ./.github/actions/setup
-        with:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      - name: Build and push TensorRT (Jetson, Jetpack 5)
-        env:
-          ARCH: arm64
-          BASE_IMAGE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
-          SLIM_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
-          TRT_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
-        uses: docker/bake-action@v6
-        with:
-          source: .
-          push: true
-          targets: tensorrt
-          files: docker/tensorrt/trt.hcl
-          set: |
-            tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt-jp5
-            *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5
-            *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5,mode=max
  jetson_jp6_build:
    runs-on: ubuntu-22.04-arm
    name: Jetson Jetpack 6
    steps:
      - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          persist-credentials: false
      - name: Set up QEMU and Buildx
@@ -143,7 +113,7 @@ jobs:
      - amd64_build
    steps:
      - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          persist-credentials: false
      - name: Set up QEMU and Buildx
@@ -185,7 +155,7 @@ jobs:
      - arm64_build
    steps:
      - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          persist-credentials: false
      - name: Set up QEMU and Buildx
.github/workflows/pull_request.yml | 12

@@ -19,7 +19,7 @@ jobs:
    env:
      DOCKER_BUILDKIT: "1"
    steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          persist-credentials: false
      - uses: actions/setup-node@master
@@ -40,7 +40,7 @@ jobs:
    name: Web - Lint
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          persist-credentials: false
      - uses: actions/setup-node@master
@@ -56,7 +56,7 @@ jobs:
    name: Web - Test
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          persist-credentials: false
      - uses: actions/setup-node@master
@@ -76,7 +76,7 @@ jobs:
    name: Python Checks
    steps:
      - name: Check out the repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          persist-credentials: false
      - name: Set up Python ${{ env.DEFAULT_PYTHON }}
@@ -99,7 +99,7 @@ jobs:
    name: Python Tests
    steps:
      - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          persist-credentials: false
      - name: Set up QEMU
@@ -107,7 +107,7 @@ jobs:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Build
-        run: make
+        run: make debug
      - name: Run mypy
        run: docker run --rm --entrypoint=python3 frigate:latest -u -m mypy --config-file frigate/mypy.ini frigate
      - name: Run tests
.github/workflows/release.yml | 2

@@ -10,7 +10,7 @@ jobs:
    runs-on: ubuntu-latest

    steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          persist-credentials: false
      - id: lowercaseRepo
Makefile | 8

@@ -1,7 +1,7 @@
 default_target: local

 COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1)
-VERSION = 0.16.0
+VERSION = 0.17.0
 IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate
 GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD)
 BOARDS= #Initialized empty
@@ -20,6 +20,12 @@ local: version
 		--tag frigate:latest \
 		--load

+debug: version
+	docker buildx build --target=frigate --file docker/main/Dockerfile . \
+		--build-arg DEBUG=true \
+		--tag frigate:latest \
+		--load
+
 amd64:
 	docker buildx build --target=frigate --file docker/main/Dockerfile . \
 		--tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) \
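Taken together, the new `debug` target, the `DEBUG=true` build arg in the Dockerfile below, and the new `requirements-dev.txt` give CI a type-checkable image. A minimal local sketch of what the updated `Python Tests` job now does (both commands are copied from the workflow above; the `frigate:latest` tag comes from the Makefile):

```bash
# Build the debug image; DEBUG=true also bakes the dev wheels
# (ruff, types-peewee) into the wheels stage.
make debug

# Run mypy inside the freshly built image, exactly as the
# pull_request workflow does after `make debug`.
docker run --rm --entrypoint=python3 frigate:latest \
  -u -m mypy --config-file frigate/mypy.ini frigate
```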
benchmark.py (object detection benchmark)

@@ -4,13 +4,13 @@ from statistics import mean

 import numpy as np

-import frigate.util as util
 from frigate.config import DetectorTypeEnum
 from frigate.object_detection.base import (
     ObjectDetectProcess,
     RemoteObjectDetector,
     load_labels,
 )
+from frigate.util.process import FrigateProcess

 my_frame = np.expand_dims(np.full((300, 300, 3), 1, np.uint8), axis=0)
 labels = load_labels("/labelmap.txt")
@@ -91,7 +91,7 @@ edgetpu_process_2 = ObjectDetectProcess(
 )

 for x in range(0, 10):
-    camera_process = util.Process(
+    camera_process = FrigateProcess(
         target=start, args=(x, 300, detection_queue, events[str(x)])
     )
     camera_process.daemon = True
docker/main/Dockerfile

@@ -148,11 +148,12 @@ RUN --mount=type=bind,source=docker/main/install_s6_overlay.sh,target=/deps/inst
 FROM base AS wheels
 ARG DEBIAN_FRONTEND
 ARG TARGETARCH
+ARG DEBUG=false

 # Use a separate container to build wheels to prevent build dependencies in final image
 RUN apt-get -qq update \
     && apt-get -qq install -y \
-    apt-transport-https wget \
+    apt-transport-https wget unzip \
     && apt-get -qq update \
     && apt-get -qq install -y \
     python3.11 \
@@ -177,6 +178,8 @@ RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
     && python3 get-pip.py "pip"

 COPY docker/main/requirements.txt /requirements.txt
+COPY docker/main/requirements-dev.txt /requirements-dev.txt
+
 RUN pip3 install -r /requirements.txt

 # Build pysqlite3 from source
@@ -184,7 +187,10 @@ COPY docker/main/build_pysqlite3.sh /build_pysqlite3.sh
 RUN /build_pysqlite3.sh

 COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
-RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt
+RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt && \
+    if [ "$DEBUG" = "true" ]; then \
+        pip3 wheel --wheel-dir=/wheels -r /requirements-dev.txt; \
+    fi

 # Install HailoRT & Wheels
 RUN --mount=type=bind,source=docker/main/install_hailort.sh,target=/deps/install_hailort.sh \
@@ -206,6 +212,7 @@ COPY docker/main/rootfs/ /
 # Frigate deps (ffmpeg, python, nginx, go2rtc, s6-overlay, etc)
 FROM slim-base AS deps
 ARG TARGETARCH
+ARG BASE_IMAGE

 ARG DEBIAN_FRONTEND
 # http://stackoverflow.com/questions/48162574/ddg#49462622
@@ -224,9 +231,15 @@ ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1
 # Set OpenCV ffmpeg loglevel to fatal: https://ffmpeg.org/doxygen/trunk/log_8h.html
 ENV OPENCV_FFMPEG_LOGLEVEL=8

+# Set NumPy to ignore getlimits warning
+ENV PYTHONWARNINGS="ignore:::numpy.core.getlimits"
+
 # Set HailoRT to disable logging
 ENV HAILORT_LOGGER_PATH=NONE

+# TensorFlow error only
+ENV TF_CPP_MIN_LOG_LEVEL=3
+
 ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"

 # Install dependencies
docker/main/build_pysqlite3.sh

@@ -2,18 +2,25 @@

 set -euxo pipefail

-SQLITE3_VERSION="96c92aba00c8375bc32fafcdf12429c58bd8aabfcadab6683e35bbb9cdebf19e" # 3.46.0
+SQLITE3_VERSION="3.46.1"
 PYSQLITE3_VERSION="0.5.3"

-# Fetch the source code for the latest release of Sqlite.
+# Fetch the pre-built sqlite amalgamation instead of building from source
 if [[ ! -d "sqlite" ]]; then
-  wget https://www.sqlite.org/src/tarball/sqlite.tar.gz?r=${SQLITE3_VERSION} -O sqlite.tar.gz
-  tar xzf sqlite.tar.gz
-  cd sqlite/
-  LIBS="-lm" ./configure --disable-tcl --enable-tempstore=always
-  make sqlite3.c
+  mkdir sqlite
+  cd sqlite
+
+  # Download the pre-built amalgamation from sqlite.org
+  # For SQLite 3.46.1, the amalgamation version is 3460100
+  SQLITE_AMALGAMATION_VERSION="3460100"
+
+  wget https://www.sqlite.org/2024/sqlite-amalgamation-${SQLITE_AMALGAMATION_VERSION}.zip -O sqlite-amalgamation.zip
+  unzip sqlite-amalgamation.zip
+  mv sqlite-amalgamation-${SQLITE_AMALGAMATION_VERSION}/* .
+  rmdir sqlite-amalgamation-${SQLITE_AMALGAMATION_VERSION}
+  rm sqlite-amalgamation.zip
+
   cd ../
-  rm sqlite.tar.gz
 fi

 # Grab the pysqlite3 source code.
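Since the script now vendors the amalgamation instead of running `./configure && make sqlite3.c`, the resulting wheel can be sanity-checked by asking pysqlite3 which SQLite it statically linked. A hedged sketch, assuming the wheel built above is installed in the image (`pysqlite3` mirrors the stdlib `sqlite3` API, including `dbapi2.sqlite_version`):

```bash
# Print the embedded SQLite version; after this change it should
# report 3.46.1 rather than 3.46.0.
python3 -c 'from pysqlite3 import dbapi2; print(dbapi2.sqlite_version)'
```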
docker/main/install_deps.sh

@@ -31,6 +31,18 @@ unset DEBIAN_FRONTEND
 yes | dpkg -i /tmp/libedgetpu1-max.deb && export DEBIAN_FRONTEND=noninteractive
 rm /tmp/libedgetpu1-max.deb

+# install mesa-teflon-delegate from bookworm-backports
+# Only available for arm64 at the moment
+if [[ "${TARGETARCH}" == "arm64" ]]; then
+    if [[ "${BASE_IMAGE}" == *"nvcr.io/nvidia/tensorrt"* ]]; then
+        echo "Info: Skipping apt-get commands because BASE_IMAGE includes 'nvcr.io/nvidia/tensorrt' for arm64."
+    else
+        echo "deb http://deb.debian.org/debian bookworm-backports main" | tee /etc/apt/sources.list.d/bookworm-backbacks.list
+        apt-get -qq update
+        apt-get -qq install --no-install-recommends --no-install-suggests -y mesa-teflon-delegate/bookworm-backports
+    fi
+fi
+
 # ffmpeg -> amd64
 if [[ "${TARGETARCH}" == "amd64" ]]; then
     mkdir -p /usr/lib/ffmpeg/5.0
@@ -71,11 +83,33 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
     echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list
     apt-get -qq update
     apt-get -qq install --no-install-recommends --no-install-suggests -y \
-        intel-opencl-icd=24.35.30872.31-996~22.04 intel-level-zero-gpu=1.3.29735.27-914~22.04 intel-media-va-driver-non-free=24.3.3-996~22.04 \
-        libmfx1=23.2.2-880~22.04 libmfxgen1=24.2.4-914~22.04 libvpl2=1:2.13.0.0-996~22.04
+        intel-media-va-driver-non-free libmfx1 libmfxgen1 libvpl2
+
+    apt-get -qq install -y ocl-icd-libopencl1

     rm -f /usr/share/keyrings/intel-graphics.gpg
     rm -f /etc/apt/sources.list.d/intel-gpu-jammy.list
+
+    # install legacy and standard intel icd and level-zero-gpu
+    # see https://github.com/intel/compute-runtime/blob/master/LEGACY_PLATFORMS.md for more info
+    # needed core package
+    wget https://github.com/intel/compute-runtime/releases/download/24.52.32224.5/libigdgmm12_22.5.5_amd64.deb
+    dpkg -i libigdgmm12_22.5.5_amd64.deb
+    rm libigdgmm12_22.5.5_amd64.deb
+
+    # legacy packages
+    wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.22/intel-opencl-icd-legacy1_24.35.30872.22_amd64.deb
+    wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.22/intel-level-zero-gpu-legacy1_1.3.30872.22_amd64.deb
+    wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.20/intel-igc-opencl_1.0.17537.20_amd64.deb
+    wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.20/intel-igc-core_1.0.17537.20_amd64.deb
+    # standard packages
+    wget https://github.com/intel/compute-runtime/releases/download/24.52.32224.5/intel-opencl-icd_24.52.32224.5_amd64.deb
+    wget https://github.com/intel/compute-runtime/releases/download/24.52.32224.5/intel-level-zero-gpu_1.6.32224.5_amd64.deb
+    wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.5.6/intel-igc-opencl-2_2.5.6+18417_amd64.deb
+    wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.5.6/intel-igc-core-2_2.5.6+18417_amd64.deb
+
+    dpkg -i *.deb
+    rm *.deb
 fi

 if [[ "${TARGETARCH}" == "arm64" ]]; then
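On an arm64 image (outside the NVIDIA TensorRT base) the new Teflon delegate comes from bookworm-backports. A hedged way to verify the pin resolved as intended, assuming the apt metadata is still present in the container:

```bash
# The candidate version should come from the bookworm-backports suite.
apt-cache policy mesa-teflon-delegate

# List the installed files; the delegate shared object name is an
# assumption based on Debian's packaging of mesa-teflon-delegate.
dpkg -L mesa-teflon-delegate | grep -i teflon
```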
docker/main/requirements-dev.txt

@@ -1 +1,4 @@
 ruff
+
+# types
+types-peewee == 3.17.*
docker/main/requirements-wheels.txt

@@ -1,19 +1,23 @@
 aiofiles == 24.1.*
 click == 8.1.*
 # FastAPI
-aiohttp == 3.11.3
-starlette == 0.41.2
-starlette-context == 0.3.6
-fastapi == 0.115.*
-uvicorn == 0.30.*
+aiohttp == 3.12.*
+starlette == 0.47.*
+starlette-context == 0.4.*
+fastapi[standard-no-fastapi-cloud-cli] == 0.116.*
+uvicorn == 0.35.*
 slowapi == 0.1.*
-joserfc == 1.0.*
-pathvalidate == 3.2.*
+joserfc == 1.2.*
+cryptography == 44.0.*
+pathvalidate == 3.3.*
 markupsafe == 3.0.*
-python-multipart == 0.0.12
+python-multipart == 0.0.20
+# Classification Model Training
+tensorflow == 2.19.* ; platform_machine == 'aarch64'
+tensorflow-cpu == 2.19.* ; platform_machine == 'x86_64'
 # General
 mypy == 1.6.1
-onvif-zeep-async == 3.1.*
+onvif-zeep-async == 4.0.*
 paho-mqtt == 2.1.*
 pandas == 2.2.*
 peewee == 3.17.*
@@ -27,7 +31,7 @@ ruamel.yaml == 0.18.*
 tzlocal == 5.2
 requests == 2.32.*
 types-requests == 2.32.*
-norfair == 2.2.*
+norfair == 2.3.*
 setproctitle == 1.3.*
 ws4py == 0.5.*
 unidecode == 1.3.*
@@ -38,14 +42,14 @@ opencv-python-headless == 4.11.0.*
 opencv-contrib-python == 4.11.0.*
 scipy == 1.14.*
 # OpenVino & ONNX
-openvino == 2024.4.*
-onnxruntime-openvino == 1.20.* ; platform_machine == 'x86_64'
-onnxruntime == 1.20.* ; platform_machine == 'aarch64'
+openvino == 2025.1.*
+onnxruntime-openvino == 1.22.* ; platform_machine == 'x86_64'
+onnxruntime == 1.22.* ; platform_machine == 'aarch64'
 # Embeddings
 transformers == 4.45.*
 # Generative AI
 google-generativeai == 0.8.*
-ollama == 0.3.*
+ollama == 0.5.*
 openai == 1.65.*
 # push notifications
 py-vapid == 1.9.*
@@ -71,3 +75,8 @@ prometheus-client == 0.21.*
 # TFLite
 tflite_runtime @ https://github.com/frigate-nvr/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_x86_64.whl; platform_machine == 'x86_64'
 tflite_runtime @ https://github.com/feranick/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_aarch64.whl; platform_machine == 'aarch64'
+# audio transcription
+sherpa-onnx==1.12.*
+faster-whisper==1.1.*
+librosa==0.11.*
+soundfile==0.13.*
certsync run script (under docker/main/rootfs)

@@ -10,7 +10,7 @@ echo "[INFO] Starting certsync..."

 lefile="/etc/letsencrypt/live/frigate/fullchain.pem"

-tls_enabled=`python3 /usr/local/nginx/get_tls_settings.py | jq -r .enabled`
+tls_enabled=`python3 /usr/local/nginx/get_listen_settings.py | jq -r .enabled`

 while true
 do
nginx run script (under docker/main/rootfs)

@@ -85,7 +85,7 @@ python3 /usr/local/nginx/get_base_path.py | \
     -out /usr/local/nginx/conf/base_path.conf

 # build templates for optional TLS support
-python3 /usr/local/nginx/get_tls_settings.py | \
+python3 /usr/local/nginx/get_listen_settings.py | \
     tempio -template /usr/local/nginx/templates/listen.gotmpl \
            -out /usr/local/nginx/conf/listen.conf
get_listen_settings.py (the renamed get_tls_settings.py)

@@ -26,6 +26,10 @@ try:
 except FileNotFoundError:
     config: dict[str, Any] = {}

-tls_config: dict[str, Any] = config.get("tls", {"enabled": True})
+tls_config: dict[str, any] = config.get("tls", {"enabled": True})
+networking_config = config.get("networking", {})
+ipv6_config = networking_config.get("ipv6", {"enabled": False})

-print(json.dumps(tls_config))
+output = {"tls": tls_config, "ipv6": ipv6_config}
+
+print(json.dumps(output))
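The script's JSON payload now carries both sections, which is what the updated certsync and nginx run scripts consume. A sketch of inspecting it by hand inside the container (output shown for a config with neither `tls` nor `networking` set, per the defaults in the code above):

```bash
# Dump the combined listen settings as JSON.
python3 /usr/local/nginx/get_listen_settings.py | jq .
# Expected shape with an empty config:
# {
#   "tls": { "enabled": true },
#   "ipv6": { "enabled": false }
# }
```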
listen.gotmpl nginx template (under docker/main/rootfs)

@@ -1,33 +1,45 @@
-# intended for internal traffic, not protected by auth
+# Internal (IPv4 always; IPv6 optional)
 listen 5000;
+{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:5000;{{ end }}{{ end }}
+

-{{ if not .enabled }}
 # intended for external traffic, protected by auth
-listen 8971;
+{{ if .tls }}
+{{ if .tls.enabled }}
+# external HTTPS (IPv4 always; IPv6 optional)
+listen 8971 ssl;
+{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971 ssl;{{ end }}{{ end }}
+
+ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem;
+ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem;
+
+# generated 2024-06-01, Mozilla Guideline v5.7, nginx 1.25.3, OpenSSL 1.1.1w, modern configuration, no OCSP
+# https://ssl-config.mozilla.org/#server=nginx&version=1.25.3&config=modern&openssl=1.1.1w&ocsp=false&guideline=5.7
+ssl_session_timeout 1d;
+ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
+ssl_session_tickets off;
+
+# modern configuration
+ssl_protocols TLSv1.3;
+ssl_prefer_server_ciphers off;
+
+# HSTS (ngx_http_headers_module is required) (63072000 seconds)
+add_header Strict-Transport-Security "max-age=63072000" always;
+
+# ACME challenge location
+location /.well-known/acme-challenge/ {
+    default_type "text/plain";
+    root /etc/letsencrypt/www;
+}
+{{ else }}
+# external HTTP (IPv4 always; IPv6 optional)
+listen 8971;
+{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }}
+{{ end }}
 {{ else }}
-# intended for external traffic, protected by auth
-listen 8971 ssl;
-
-ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem;
-ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem;
-
-# generated 2024-06-01, Mozilla Guideline v5.7, nginx 1.25.3, OpenSSL 1.1.1w, modern configuration, no OCSP
-# https://ssl-config.mozilla.org/#server=nginx&version=1.25.3&config=modern&openssl=1.1.1w&ocsp=false&guideline=5.7
-ssl_session_timeout 1d;
-ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
-ssl_session_tickets off;
-
-# modern configuration
-ssl_protocols TLSv1.3;
-ssl_prefer_server_ciphers off;
-
-# HSTS (ngx_http_headers_module is required) (63072000 seconds)
-add_header Strict-Transport-Security "max-age=63072000" always;
-
-# ACME challenge location
-location /.well-known/acme-challenge/ {
-    default_type "text/plain";
-    root /etc/letsencrypt/www;
-}
+# (No tls section) default to HTTP (IPv4 always; IPv6 optional)
+listen 8971;
+{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }}
+
 {{ end }}
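The nginx run script above pipes the settings JSON into `tempio` to render this template. A hedged dry run outside the container, assuming `tempio` is on PATH and substituting a hand-written payload for the script output:

```bash
# Render the listen config the same way the container's run script does,
# but with an explicit payload enabling both TLS and IPv6.
echo '{"tls": {"enabled": true}, "ipv6": {"enabled": true}}' | \
    tempio -template ./listen.gotmpl -out ./listen.conf

# listen.conf should now contain both IPv4 and IPv6 listeners:
#   listen 8971 ssl;
#   listen [::]:8971 ssl;
grep 'listen' ./listen.conf
```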
docker/rockchip/Dockerfile

@@ -11,8 +11,10 @@ COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
 COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt
 RUN sed -i "/https:\/\//d" /requirements-wheels.txt
 RUN sed -i "/onnxruntime/d" /requirements-wheels.txt
-RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt
+RUN sed -i '/\[.*\]/d' /requirements-wheels.txt \
+    && pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt
 RUN rm -rf /rk-wheels/opencv_python-*
+RUN rm -rf /rk-wheels/torch-*

 FROM deps AS rk-frigate
 ARG TARGETARCH
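The new `sed '/\[.*\]/d'` step (repeated in the TensorRT Dockerfile below) exists because `requirements-wheels.txt` is reused here as a pip constraints file via `-c`, and after this commit it contains `fastapi[standard-no-fastapi-cloud-cli]`; pip's resolver rejects constraint entries that carry extras. A quick sketch of what the filter does:

```bash
# Any requirement line carrying an extras bracket is dropped before the
# file is passed to pip via -c; plain pins pass through untouched.
printf '%s\n' \
  'fastapi[standard-no-fastapi-cloud-cli] == 0.116.*' \
  'uvicorn == 0.35.*' | sed '/\[.*\]/d'
# -> uvicorn == 0.35.*
```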
docker/rocm/Dockerfile

@@ -2,7 +2,7 @@

 # https://askubuntu.com/questions/972516/debian-frontend-environment-variable
 ARG DEBIAN_FRONTEND=noninteractive
-ARG ROCM=6.3.3
+ARG ROCM=1
 ARG AMDGPU=gfx900
 ARG HSA_OVERRIDE_GFX_VERSION
 ARG HSA_OVERRIDE
@@ -13,12 +13,12 @@ FROM wget AS rocm
 ARG ROCM
 ARG AMDGPU

-RUN apt update && \
+RUN apt update -qq && \
     apt install -y wget gpg && \
-    wget -O rocm.deb https://repo.radeon.com/amdgpu-install/$ROCM/ubuntu/jammy/amdgpu-install_6.3.60303-1_all.deb && \
+    wget -O rocm.deb https://repo.radeon.com/amdgpu-install/6.4.1/ubuntu/jammy/amdgpu-install_6.4.60401-1_all.deb && \
     apt install -y ./rocm.deb && \
     apt update && \
-    apt install -y rocm
+    apt install -qq -y rocm

 RUN mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib
 RUN cd /opt/rocm-$ROCM/lib && \
@@ -33,7 +33,10 @@ RUN echo /opt/rocm/lib|tee /opt/rocm-dist/etc/ld.so.conf.d/rocm.conf
 #######################################################################
 FROM deps AS deps-prelim

-RUN apt-get update && apt-get install -y libnuma1
+COPY docker/rocm/debian-backports.sources /etc/apt/sources.list.d/debian-backports.sources
+RUN apt-get update && \
+    apt-get install -y libnuma1 && \
+    apt-get install -qq -y -t bookworm-backports mesa-va-drivers mesa-vulkan-drivers

 WORKDIR /opt/frigate
 COPY --from=rootfs / /
@@ -62,7 +65,6 @@ COPY --from=rocm /opt/rocm-dist/ /
 #######################################################################
 FROM deps-prelim AS rocm-prelim-hsa-override0
 ENV HSA_ENABLE_SDMA=0
-ENV MIGRAPHX_ENABLE_NHWC=1
 ENV TF_ROCM_USE_IMMEDIATE_MODE=1

 COPY --from=rocm-dist / /
docker/rocm/debian-backports.sources | 6 (new file)

@@ -0,0 +1,6 @@
+Types: deb
+URIs: http://deb.debian.org/debian
+Suites: bookworm-backports
+Components: main
+Enabled: yes
+Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg
ROCm requirements (onnxruntime wheel)

@@ -1 +1 @@
-onnxruntime-rocm @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v6.3.3/onnxruntime_rocm-1.20.1-cp311-cp311-linux_x86_64.whl
+onnxruntime-rocm @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v6.4.1/onnxruntime_rocm-1.21.1-cp311-cp311-linux_x86_64.whl
ROCm bake variables (HCL)

@@ -2,7 +2,7 @@ variable "AMDGPU" {
   default = "gfx900"
 }
 variable "ROCM" {
-  default = "6.3.3"
+  default = "6.4.1"
 }
 variable "HSA_OVERRIDE_GFX_VERSION" {
   default = ""
TensorRT amd64 Dockerfile

@@ -12,7 +12,10 @@ ARG PIP_BREAK_SYSTEM_PACKAGES
 # Install TensorRT wheels
 COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
 COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
-RUN pip3 wheel --wheel-dir=/trt-wheels -c /requirements-wheels.txt -r /requirements-tensorrt.txt
+
+# remove dependencies from the requirements that have type constraints
+RUN sed -i '/\[.*\]/d' /requirements-wheels.txt \
+    && pip3 wheel --wheel-dir=/trt-wheels -c /requirements-wheels.txt -r /requirements-tensorrt.txt

 FROM deps AS frigate-tensorrt
 ARG PIP_BREAK_SYSTEM_PACKAGES
docker/tensorrt/requirements-amd64.txt

@@ -13,6 +13,7 @@ nvidia_cusolver_cu12==11.6.3.*; platform_machine == 'x86_64'
 nvidia_cusparse_cu12==12.5.1.*; platform_machine == 'x86_64'
 nvidia_nccl_cu12==2.23.4; platform_machine == 'x86_64'
 nvidia_nvjitlink_cu12==12.5.82; platform_machine == 'x86_64'
+tensorflow==2.19.*; platform_machine == 'x86_64'
 onnx==1.16.*; platform_machine == 'x86_64'
-onnxruntime-gpu==1.20.*; platform_machine == 'x86_64'
+onnxruntime-gpu==1.22.*; platform_machine == 'x86_64'
 protobuf==3.20.3; platform_machine == 'x86_64'
docs: Audio Detectors

@@ -72,3 +72,77 @@ audio:
       - speech
       - yell
 ```
+
+### Audio Transcription
+
+Frigate supports fully local audio transcription using either `sherpa-onnx` or OpenAI’s open-source Whisper models via `faster-whisper`. To enable transcription, it is recommended to only configure the features at the global level, and enable it at the individual camera level.
+
+```yaml
+audio_transcription:
+  enabled: False
+  device: ...
+  model_size: ...
+```
+
+Enable audio transcription for select cameras at the camera level:
+
+```yaml
+cameras:
+  back_yard:
+    ...
+    audio_transcription:
+      enabled: True
+```
+
+:::note
+
+Audio detection must be enabled and configured as described above in order to use audio transcription features.
+
+:::
+
+The optional config parameters that can be set at the global level include:
+
+- **`enabled`**: Enable or disable the audio transcription feature.
+  - Default: `False`
+  - It is recommended to only configure the features at the global level, and enable it at the individual camera level.
+- **`device`**: Device to use to run transcription and translation models.
+  - Default: `CPU`
+  - This can be `CPU` or `GPU`. The `sherpa-onnx` models are lightweight and run on the CPU only. The `whisper` models can run on GPU but are only supported on CUDA hardware.
+- **`model_size`**: The size of the model used for live transcription.
+  - Default: `small`
+  - This can be `small` or `large`. The `small` setting uses `sherpa-onnx` models that are fast, lightweight, and always run on the CPU but are not as accurate as the `whisper` model.
+  - This config option applies to **live transcription only**. Recorded `speech` events will always use a different `whisper` model (and can be accelerated for CUDA hardware if available with `device: GPU`).
+- **`language`**: Defines the language used by `whisper` to translate `speech` audio events (and live audio only if using the `large` model).
+  - Default: `en`
+  - You must use a valid [language code](https://github.com/openai/whisper/blob/main/whisper/tokenizer.py#L10).
+  - Transcriptions for `speech` events are translated.
+  - Live audio is translated only if you are using the `large` model. The `small` `sherpa-onnx` model is English-only.
+
+The only field that is valid at the camera level is `enabled`.
+
+#### Live transcription
+
+The single camera Live view in the Frigate UI supports live transcription of audio for streams defined with the `audio` role. Use the Enable/Disable Live Audio Transcription button/switch to toggle transcription processing. When speech is heard, the UI will display a black box over the top of the camera stream with text. The MQTT topic `frigate/<camera_name>/audio/transcription` will also be updated in real-time with transcribed text.
+
+Results can be error-prone due to a number of factors, including:
+
+- Poor quality camera microphone
+- Distance of the audio source to the camera microphone
+- Low audio bitrate setting in the camera
+- Background noise
+- Using the `small` model - it's fast, but not accurate for poor quality audio
+
+For speech sources close to the camera with minimal background noise, use the `small` model.
+
+If you have CUDA hardware, you can experiment with the `large` `whisper` model on GPU. Performance is not quite as fast as the `sherpa-onnx` `small` model, but live transcription is far more accurate. Using the `large` model with CPU will likely be too slow for real-time transcription.
+
+#### Transcription and translation of `speech` audio events
+
+Any `speech` events in Explore can be transcribed and/or translated through the Transcribe button in the Tracked Object Details pane.
+
+In order to use transcription and translation for past events, you must enable audio detection and define `speech` as an audio type to listen for in your config. To have `speech` events translated into the language of your choice, set the `language` config parameter with the correct [language code](https://github.com/openai/whisper/blob/main/whisper/tokenizer.py#L10).
+
+The transcribed/translated speech will appear in the description box in the Tracked Object Details pane. If Semantic Search is enabled, embeddings are generated for the transcription text and are fully searchable using the description search type.
+
+Recorded `speech` events will always use a `whisper` model, regardless of the `model_size` config setting. Without a GPU, generating transcriptions for longer `speech` events may take a fair amount of time, so be patient.
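The live-transcription MQTT topic added above can be watched without the UI. A minimal sketch using the stock mosquitto client (the broker host, credentials, and `front_door` camera name are placeholders):

```bash
# Stream transcribed text as Frigate publishes it; the topic pattern
# comes from the docs above: frigate/<camera_name>/audio/transcription
mosquitto_sub -h mqtt.local -u user -P pass \
  -t 'frigate/front_door/audio/transcription' -v
```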
docs: Face Recognition

@@ -24,7 +24,7 @@ Frigate needs to first detect a `person` before it can detect and recognize a fa
 Frigate has support for two face recognition model types:

 - **small**: Frigate will run a FaceNet embedding model to recognize faces, which runs locally on the CPU. This model is optimized for efficiency and is not as accurate.
-- **large**: Frigate will run a large ArcFace embedding model that is optimized for accuracy. It is only recommended to be run when an integrated or dedicated GPU is available.
+- **large**: Frigate will run a large ArcFace embedding model that is optimized for accuracy. It is only recommended to be run when an integrated or dedicated GPU / NPU is available.

 In both cases, a lightweight face landmark detection model is also used to align faces before running recognition.

@@ -34,7 +34,7 @@ All of these features run locally on your system.

 The `small` model is optimized for efficiency and runs on the CPU, most CPUs should run the model efficiently.

-The `large` model is optimized for accuracy, an integrated or discrete GPU is required. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.
+The `large` model is optimized for accuracy, an integrated or discrete GPU / NPU is required. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.

 ## Configuration

@@ -73,6 +73,9 @@ Fine-tune face recognition with these optional parameters at the global level of
   - Default: `100`.
 - `blur_confidence_filter`: Enables a filter that calculates how blurry the face is and adjusts the confidence based on this.
   - Default: `True`.
+- `device`: Target a specific device to run the face recognition model on (multi-GPU installation).
+  - Default: `None`.
+  - Note: This setting is only applicable when using the `large` model. See [onnxruntime's provider options](https://onnxruntime.ai/docs/execution-providers/)

 ## Usage
docs: Generative AI

@@ -9,35 +9,38 @@ Requests for a description are sent off automatically to your AI provider at the

 ## Configuration

-Generative AI can be enabled for all cameras or only for specific cameras. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below.
+Generative AI can be enabled for all cameras or only for specific cameras. If GenAI is disabled for a camera, you can still manually generate descriptions for events using the HTTP API. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below.

 To use Generative AI, you must define a single provider at the global level of your Frigate configuration. If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`.

 ```yaml
 genai:
-  enabled: True
   provider: gemini
   api_key: "{FRIGATE_GEMINI_API_KEY}"
   model: gemini-1.5-flash

 cameras:
   front_camera:
+    objects:
       genai:
         enabled: True # <- enable GenAI for your front camera
         use_snapshot: True
         objects:
           - person
         required_zones:
           - steps
   indoor_camera:
-    genai:
-      enabled: False # <- disable GenAI for your indoor camera
+    objects:
+      genai:
+        enabled: False # <- disable GenAI for your indoor camera
 ```

 By default, descriptions will be generated for all tracked objects and all zones. But you can also optionally specify `objects` and `required_zones` to only generate descriptions for certain tracked objects or zones.

 Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction.
+
+Generative AI can also be toggled dynamically for a camera via MQTT with the topic `frigate/<camera_name>/object_descriptions/set`. See the [MQTT documentation](/integrations/mqtt/#frigatecamera_nameobjectdescriptionsset).

 ## Ollama

 :::warning
@@ -66,7 +69,6 @@ You should have at least 8 GB of RAM available (or VRAM if running on GPU) to ru

 ```yaml
 genai:
-  enabled: True
   provider: ollama
   base_url: http://localhost:11434
   model: llava:7b
@@ -93,12 +95,17 @@ To start using Gemini, you must first get an API key from [Google AI Studio](htt

 ```yaml
 genai:
-  enabled: True
   provider: gemini
   api_key: "{FRIGATE_GEMINI_API_KEY}"
   model: gemini-1.5-flash
 ```

+:::note
+
+To use a different Gemini-compatible API endpoint, set the `GEMINI_BASE_URL` environment variable to your provider's API URL.
+
+:::
+
 ## OpenAI

 OpenAI does not have a free tier for their API. With the release of gpt-4o, pricing has been reduced and each generation should cost fractions of a cent if you choose to go this route.
@@ -115,7 +122,6 @@ To start using OpenAI, you must first [create an API key](https://platform.opena

 ```yaml
 genai:
-  enabled: True
   provider: openai
   api_key: "{FRIGATE_OPENAI_API_KEY}"
   model: gpt-4o
@@ -143,7 +149,6 @@ To start using Azure OpenAI, you must first [create a resource](https://learn.mi

 ```yaml
 genai:
-  enabled: True
   provider: azure_openai
   base_url: https://example-endpoint.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview
   api_key: "{FRIGATE_OPENAI_API_KEY}"
@@ -186,32 +191,35 @@ You are also able to define custom prompts in your configuration.

 ```yaml
 genai:
-  enabled: True
   provider: ollama
   base_url: http://localhost:11434
   model: llava
-  prompt: "Analyze the {label} in these images from the {camera} security camera. Focus on the actions, behavior, and potential intent of the {label}, rather than just describing its appearance."
-  object_prompts:
-    person: "Examine the main person in these images. What are they doing and what might their actions suggest about their intent (e.g., approaching a door, leaving an area, standing still)? Do not describe the surroundings or static details."
-    car: "Observe the primary vehicle in these images. Focus on its movement, direction, or purpose (e.g., parking, approaching, circling). If it's a delivery vehicle, mention the company."
+
+objects:
+  prompt: "Analyze the {label} in these images from the {camera} security camera. Focus on the actions, behavior, and potential intent of the {label}, rather than just describing its appearance."
+  object_prompts:
+    person: "Examine the main person in these images. What are they doing and what might their actions suggest about their intent (e.g., approaching a door, leaving an area, standing still)? Do not describe the surroundings or static details."
+    car: "Observe the primary vehicle in these images. Focus on its movement, direction, or purpose (e.g., parking, approaching, circling). If it's a delivery vehicle, mention the company."
 ```

-Prompts can also be overriden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire.
+Prompts can also be overridden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire.

 ```yaml
 cameras:
   front_door:
-    genai:
-      use_snapshot: True
-      prompt: "Analyze the {label} in these images from the {camera} security camera at the front door. Focus on the actions and potential intent of the {label}."
-      object_prompts:
-        person: "Examine the person in these images. What are they doing, and how might their actions suggest their purpose (e.g., delivering something, approaching, leaving)? If they are carrying or interacting with a package, include details about its source or destination."
-        cat: "Observe the cat in these images. Focus on its movement and intent (e.g., wandering, hunting, interacting with objects). If the cat is near the flower pots or engaging in any specific actions, mention it."
-      objects:
-        - person
-        - cat
-      required_zones:
-        - steps
+    objects:
+      genai:
+        enabled: True
+        use_snapshot: True
+        prompt: "Analyze the {label} in these images from the {camera} security camera at the front door. Focus on the actions and potential intent of the {label}."
+        object_prompts:
+          person: "Examine the person in these images. What are they doing, and how might their actions suggest their purpose (e.g., delivering something, approaching, leaving)? If they are carrying or interacting with a package, include details about its source or destination."
+          cat: "Observe the cat in these images. Focus on its movement and intent (e.g., wandering, hunting, interacting with objects). If the cat is near the flower pots or engaging in any specific actions, mention it."
+        objects:
+          - person
+          - cat
+        required_zones:
+          - steps
 ```

 ### Experiment with prompts
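The dynamic toggle added above is plain MQTT. A hedged sketch with the mosquitto client (the broker and camera name are placeholders, and the `ON`/`OFF` payloads are an assumption based on the convention Frigate uses for its other `/set` topics):

```bash
# Disable, then re-enable, object descriptions for one camera at runtime.
mosquitto_pub -h mqtt.local -t 'frigate/front_camera/object_descriptions/set' -m 'OFF'
mosquitto_pub -h mqtt.local -t 'frigate/front_camera/object_descriptions/set' -m 'ON'
```

And because the Ollama notes in these docs advise pulling models ahead of time rather than relying on Frigate's request timeout, a sketch of preparing the server used in the config examples above (the image name and port come from this page; the container name is arbitrary):

```bash
# Run the Ollama server container, capped to one parallel request as the
# docs advise, then pre-pull the model Frigate is configured to use.
docker run -d --name ollama -p 11434:11434 -e OLLAMA_NUM_PARALLEL=1 ollama/ollama
docker exec ollama ollama pull llava:7b
```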
127
docs/docs/configuration/genai/config.md
Normal file
127
docs/docs/configuration/genai/config.md
Normal file
@ -0,0 +1,127 @@
|
|||||||
|
---
|
||||||
|
id: genai_config
|
||||||
|
title: Configuring Generative AI
|
||||||
|
---
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below.
|
||||||
|
|
||||||
|
To use Generative AI, you must define a single provider at the global level of your Frigate configuration. If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`.
|
||||||
|
|
||||||
|
## Ollama

:::warning

Using Ollama on CPU is not recommended; high inference times make using Generative AI impractical.

:::

[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It provides a nice API over [llama.cpp](https://github.com/ggerganov/llama.cpp). It is highly recommended to host this server on a machine with an Nvidia graphics card, or on an Apple Silicon Mac for best performance.

Most of the 7b parameter 4-bit vision models will fit inside 8GB of VRAM. There is also a [Docker container](https://hub.docker.com/r/ollama/ollama) available.

Parallel requests also come with some caveats. You will need to set `OLLAMA_NUM_PARALLEL=1` and choose `OLLAMA_MAX_QUEUE` and `OLLAMA_MAX_LOADED_MODELS` values that are appropriate for your hardware and preferences. See the [Ollama documentation](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-does-ollama-handle-concurrent-requests).
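As a sketch, these could be set on the Ollama server's own Docker container; the queue and loaded-model values below are illustrative and should be tuned to your hardware and preferences:

```yaml
# docker-compose.yml (snippet) for the Ollama server, not for Frigate
services:
  ollama:
    image: ollama/ollama
    environment:
      - OLLAMA_NUM_PARALLEL=1 # needed for use with Frigate
      - OLLAMA_MAX_QUEUE=64 # illustrative value
      - OLLAMA_MAX_LOADED_MODELS=1 # illustrative value
```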
### Supported Models

You must use a vision capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/library). Note that Frigate will not automatically download the model you specify in your config. Ollama will try to download it, but the download may take longer than the timeout, so it is recommended to pull the model beforehand by running `ollama pull your_model` on your Ollama server/Docker container. Note that the model specified in Frigate's config must match the downloaded model tag.
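For example, to pre-pull one of the recommended models from the table below:

```
ollama pull qwen2.5vl:3b
```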
The following models are recommended:

| Model             | Size   | Notes                                                       |
| ----------------- | ------ | ----------------------------------------------------------- |
| `gemma3:4b`       | 3.3 GB | Strong frame-to-frame understanding, slower inference times |
| `qwen2.5vl:3b`    | 3.2 GB | Fast but capable model with good vision comprehension       |
| `llava-phi3:3.8b` | 2.9 GB | Lightweight and fast model with vision comprehension        |

:::note

You should have at least 8 GB of RAM available (or VRAM if running on GPU) to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models.

:::

### Configuration

```yaml
genai:
  provider: ollama
  base_url: http://localhost:11434
  model: minicpm-v:8b
  provider_options: # other Ollama client options can be defined
    keep_alive: -1
```
## Google Gemini

Google Gemini has a free tier allowing [15 queries per minute](https://ai.google.dev/pricing) to the API, which is more than sufficient for standard Frigate usage.

### Supported Models

You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://ai.google.dev/gemini-api/docs/models/gemini). At the time of writing, this includes `gemini-1.5-pro` and `gemini-1.5-flash`.

### Get API Key

To start using Gemini, you must first get an API key from [Google AI Studio](https://aistudio.google.com).

1. Accept the Terms of Service
2. Click "Get API Key" from the right hand navigation
3. Click "Create API key in new project"
4. Copy the API key for use in your config

### Configuration

```yaml
genai:
  provider: gemini
  api_key: "{FRIGATE_GEMINI_API_KEY}"
  model: gemini-1.5-flash
```
## OpenAI

OpenAI does not have a free tier for their API. With the release of gpt-4o, pricing has been reduced and each generation should cost fractions of a cent if you choose to go this route.

### Supported Models

You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://platform.openai.com/docs/models). At the time of writing, this includes `gpt-4o` and `gpt-4-turbo`.

### Get API Key

To start using OpenAI, you must first [create an API key](https://platform.openai.com/api-keys) and [configure billing](https://platform.openai.com/settings/organization/billing/overview).

### Configuration

```yaml
genai:
  provider: openai
  api_key: "{FRIGATE_OPENAI_API_KEY}"
  model: gpt-4o
```
:::note

To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` environment variable to your provider's API URL.

:::
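For example, in a Docker Compose setup (the endpoint URL below is illustrative):

```yaml
# docker-compose.yml (snippet)
services:
  frigate:
    environment:
      - OPENAI_BASE_URL=https://your-provider.example.com/v1
```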
## Azure OpenAI

Microsoft offers several vision models through Azure OpenAI. A subscription is required.

### Supported Models

You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models). At the time of writing, this includes `gpt-4o` and `gpt-4-turbo`.

### Create Resource and Get API Key

To start using Azure OpenAI, you must first [create a resource](https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource). You'll need your API key and resource URL, which must include the `api-version` parameter (see the example below). The model field is not required in your configuration as the model is part of the deployment name you chose when deploying the resource.

### Configuration

```yaml
genai:
  provider: azure_openai
  base_url: https://example-endpoint.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview
  api_key: "{FRIGATE_OPENAI_API_KEY}"
```
77
docs/docs/configuration/genai/objects.md
Normal file
@ -0,0 +1,77 @@
---
id: genai_objects
title: Object Descriptions
---

Generative AI can be used to automatically generate descriptive text based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate to provide more context about your tracked objects. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail.

Requests for a description are sent off automatically to your AI provider at the end of the tracked object's lifecycle, or can optionally be sent earlier after a number of significantly changed frames, for example for use in more real-time notifications. Descriptions can also be regenerated manually via the Frigate UI. Note that if you manually enter a description for a tracked object before its lifecycle ends, it will be overwritten by the generated response.

By default, descriptions will be generated for all tracked objects and all zones. But you can also optionally specify `objects` and `required_zones` to only generate descriptions for certain tracked objects or zones.

Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction.

Generative AI object descriptions can also be toggled dynamically for a camera via MQTT with the topic `frigate/<camera_name>/object_descriptions/set`. See the [MQTT documentation](/integrations/mqtt/#frigatecamera_nameobjectdescriptionsset).
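For example, with the `mosquitto_pub` CLI (broker host and camera name are illustrative, and this assumes the topic accepts the same `ON`/`OFF` payloads as Frigate's other toggle topics):

```
mosquitto_pub -h mqtt.local -t frigate/front_door/object_descriptions/set -m OFF
```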
## Usage and Best Practices

Frigate's thumbnail search excels at identifying specific details about tracked objects – for example, using an "image caption" approach to find a "person wearing a yellow vest," "a white dog running across the lawn," or "a red car on a residential street." To enhance this further, Frigate’s default prompts are designed to ask your AI provider about the intent behind the object's actions, rather than just describing its appearance.

While generating simple descriptions of detected objects is useful, understanding intent provides a deeper layer of insight. Instead of just recognizing "what" is in a scene, Frigate’s default prompts aim to infer "why" it might be there or "what" it could do next. Descriptions tell you what’s happening, but intent gives context. For instance, a person walking toward a door might seem like a visitor, but if they’re moving quickly after hours, you can infer a potential break-in attempt. Detecting a person loitering near a door at night can trigger an alert sooner than simply noting "a person standing by the door," helping you respond based on the situation’s context.

## Custom Prompts

Frigate sends multiple frames from the tracked object along with a prompt to your Generative AI provider asking it to generate a description. The default prompt is as follows:

```
Analyze the sequence of images containing the {label}. Focus on the likely intent or behavior of the {label} based on its actions and movement, rather than describing its appearance or the surroundings. Consider what the {label} is doing, why, and what it might do next.
```

:::tip

Prompts can use variable replacements `{label}`, `{sub_label}`, and `{camera}` to substitute information from the tracked object as part of the prompt.

:::

You are also able to define custom prompts in your configuration.
```yaml
genai:
  provider: ollama
  base_url: http://localhost:11434
  model: llava

objects:
  prompt: "Analyze the {label} in these images from the {camera} security camera. Focus on the actions, behavior, and potential intent of the {label}, rather than just describing its appearance."
  object_prompts:
    person: "Examine the main person in these images. What are they doing and what might their actions suggest about their intent (e.g., approaching a door, leaving an area, standing still)? Do not describe the surroundings or static details."
    car: "Observe the primary vehicle in these images. Focus on its movement, direction, or purpose (e.g., parking, approaching, circling). If it's a delivery vehicle, mention the company."
```

Prompts can also be overridden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire.
```yaml
cameras:
  front_door:
    objects:
      genai:
        enabled: True
        use_snapshot: True
        prompt: "Analyze the {label} in these images from the {camera} security camera at the front door. Focus on the actions and potential intent of the {label}."
        object_prompts:
          person: "Examine the person in these images. What are they doing, and how might their actions suggest their purpose (e.g., delivering something, approaching, leaving)? If they are carrying or interacting with a package, include details about its source or destination."
          cat: "Observe the cat in these images. Focus on its movement and intent (e.g., wandering, hunting, interacting with objects). If the cat is near the flower pots or engaging in any specific actions, mention it."
        objects:
          - person
          - cat
        required_zones:
          - steps
```
### Experiment with prompts

Many providers also have a public facing chat interface for their models. Download a couple of different thumbnails or snapshots from Frigate and try new things in the playground to get descriptions to your liking before updating the prompt in Frigate.

- OpenAI - [ChatGPT](https://chatgpt.com)
- Gemini - [Google AI Studio](https://aistudio.google.com)
- Ollama - [Open WebUI](https://docs.openwebui.com/)
44
docs/docs/configuration/genai/review_summaries.md
Normal file
@ -0,0 +1,44 @@
---
id: genai_review
title: Review Summaries
---

Generative AI can be used to automatically generate structured summaries of review items. These summaries will show up in Frigate's native notifications as well as in the UI. Generative AI can also be used to take a collection of summaries over a period of time and provide a report, which may be useful for a quick overview of everything that happened while you were away.

Summaries are requested automatically from your AI provider for alert review items when the activity has ended; they can optionally be enabled for detections as well.

Generative AI review summaries can also be toggled dynamically for a camera via MQTT with the topic `frigate/<camera_name>/review_descriptions/set`. See the [MQTT documentation](/integrations/mqtt/#frigatecamera_namereviewdescriptionsset).
## Review Summary Usage and Best Practices

Review summaries provide structured JSON responses that are saved for each review item:

```
- `scene` (string): A full description including setting, entities, actions, and any plausible supported inferences.
- `confidence` (float): 0-1 confidence in the analysis.
- `other_concerns` (list): List of user-defined concerns that may need additional investigation.
- `potential_threat_level` (integer): 0, 1, or 2 as defined below.

Threat-level definitions:
- 0 — Typical or expected activity for this location/time (includes residents, guests, or known animals engaged in normal activities, even if they glance around or scan surroundings).
- 1 — Unusual or suspicious activity: At least one security-relevant behavior is present **and not explainable by a normal residential activity**.
- 2 — Active or immediate threat: Breaking in, vandalism, aggression, weapon display.
```
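As an illustrative sketch (not an actual model response), a summary following this structure might look like:

```json
{
  "scene": "A delivery driver carries a package up the front steps, places it by the door, and returns to a van parked on the street.",
  "confidence": 0.85,
  "other_concerns": [],
  "potential_threat_level": 0
}
```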
This will show in the UI as a list of concerns that each review item has along with the general description.

### Additional Concerns

Along with the concern of suspicious activity or immediate threat, you may have concerns such as animals in your garden or a gate being left open. These concerns can be configured so that the review summaries will make note of them if the activity requires additional review. For example:

```yaml
review:
  genai:
    enabled: true
    additional_concerns:
      - animals in the garden
```
## Review Reports

Along with individual review item summaries, Generative AI provides the ability to request a report of a given time period. For example, while on vacation you can get a daily report of any suspicious activity or other concerns that may require review.
@ -5,11 +5,11 @@ title: Enrichments

# Enrichments

Some of Frigate's enrichments can use a discrete GPU / NPU for accelerated processing.

## Requirements

Object detection and enrichments (like Semantic Search, Face Recognition, and License Plate Recognition) are independent features. To use a GPU / NPU for object detection, see the [Object Detectors](/configuration/object_detectors.md) documentation. If you want to use your GPU for any supported enrichments, you must choose the appropriate Frigate Docker image for your GPU / NPU and configure the enrichment according to its specific documentation.

- **AMD**

@ -23,6 +23,9 @@ Object detection and enrichments (like Semantic Search, Face Recognition, and Li

  - Nvidia GPUs will automatically be detected and used for enrichments in the `-tensorrt` Frigate image.
  - Jetson devices will automatically be detected and used for enrichments in the `-tensorrt-jp6` Frigate image.

- **RockChip**

  - RockChip NPU will automatically be detected and used for semantic search v1 and face recognition in the `-rk` Frigate image.

Utilizing a GPU for enrichments does not require you to use the same GPU for object detection. For example, you can run the `tensorrt` Docker image for enrichments and still use other dedicated hardware like a Coral or Hailo for object detection. However, one combination that is not supported is TensorRT for object detection and OpenVINO for enrichments.

:::note
@ -67,9 +67,9 @@ Fine-tune the LPR feature using these optional parameters at the global level of

- **`min_area`**: Defines the minimum area (in pixels) a license plate must be before recognition runs.
  - Default: `1000` pixels. Note: this is intentionally set very low as it is an _area_ measurement (length x width). For reference, 1000 pixels represents a ~32x32 pixel square in your camera image.
  - Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant plates.
- **`device`**: Device to use to run license plate detection *and* recognition models.
  - Default: `CPU`
  - This can be `CPU` or one of [onnxruntime's provider options](https://onnxruntime.ai/docs/execution-providers/). For users without a model that detects license plates natively, using a GPU may increase performance of the models, especially the YOLOv9 license plate detector model. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.
- **`model_size`**: The size of the model used to detect text on plates.
  - Default: `small`
  - This can be `small` or `large`. The `large` model uses an enhanced text detector and is more accurate at finding text on plates but slower than the `small` model. For most users, the small model is recommended. For users in countries with multiple lines of text on plates, the large model is recommended. Note that using the large model does not improve _text recognition_, but it may improve _text detection_.
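As a sketch, tuning these parameters at the global level might look like the following (the values are illustrative, not recommendations):

```yaml
lpr:
  enabled: True
  min_area: 2000 # ignore plates smaller than roughly 45x45 pixels
  device: CPU
  model_size: small
```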
@ -19,6 +19,10 @@ Frigate supports multiple different detectors that work on different types of ha

- [ROCm](#amdrocm-gpu-detector): ROCm can run on AMD Discrete GPUs to provide efficient object detection.
- [ONNX](#onnx): ROCm will automatically be detected and used as a detector in the `-rocm` Frigate image when a supported ONNX model is configured.

**Apple Silicon**

- [Apple Silicon](#apple-silicon-detector): Apple Silicon can run on M1 and newer Apple Silicon devices.

**Intel**

- [OpenVino](#openvino-detector): OpenVino can run on Intel Arc GPUs, Intel integrated GPUs, and Intel CPUs to provide efficient object detection.

@ -264,7 +268,7 @@ detectors:

:::

### OpenVINO Supported Models

#### SSDLite MobileNet v2

@ -402,6 +406,59 @@ model:

Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.

## Apple Silicon detector

The NPU in Apple Silicon can't be accessed from within a container, so the [Apple Silicon detector client](https://github.com/frigate-nvr/apple-silicon-detector) must first be set up. It is recommended to use the Frigate Docker image with the `-standard-arm64` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-arm64-standard`.

### Setup

1. Set up the [Apple Silicon detector client](https://github.com/frigate-nvr/apple-silicon-detector) and run the client
2. Configure the detector in Frigate and start Frigate

### Configuration

Using the detector config below will connect to the client:

```yaml
detectors:
  apple-silicon:
    type: zmq
    endpoint: tcp://host.docker.internal:5555
```

### Apple Silicon Supported Models

There is no default model provided; the following formats are supported:

#### YOLO (v3, v4, v7, v9)

YOLOv3, YOLOv4, YOLOv7, and [YOLOv9](https://github.com/WongKinYiu/yolov9) models are supported, but not included by default.

:::tip

The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv9 models, but may support other YOLO model architectures as well. See [the models section](#downloading-yolo-models) for more information on downloading YOLO models for use in Frigate.

:::

After placing the downloaded onnx model in your config folder, you can use the following configuration:

```yaml
detectors:
  onnx:
    type: onnx

model:
  model_type: yolo-generic
  width: 320 # <--- should match the imgsize set during model export
  height: 320 # <--- should match the imgsize set during model export
  input_tensor: nchw
  input_dtype: float
  path: /config/model_cache/yolo.onnx
  labelmap_path: /labelmap/coco-80.txt
```

Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.

## AMD/ROCm GPU detector

### Setup

@ -483,7 +540,7 @@ We unset the `HSA_OVERRIDE_GFX_VERSION` to prevent an existing override from mes

$ docker exec -it frigate /bin/bash -c '(unset HSA_OVERRIDE_GFX_VERSION && /opt/rocm/bin/rocminfo |grep gfx)'
```

### ROCm Supported Models

See [ONNX supported models](#supported-models) for supported models; there are some caveats:

@ -526,7 +583,7 @@ detectors:

:::

### ONNX Supported Models

There is no default model provided; the following formats are supported:

@ -824,7 +881,7 @@ $ cat /sys/kernel/debug/rknpu/load

:::

### RockChip Supported Models

This `config.yml` shows all relevant options to configure the detector and explains them. All values shown are the default values (except for two). Lines that are required at least to use the detector are labeled as required, all other lines are optional.
@ -13,14 +13,15 @@ H265 recordings can be viewed in Chrome 108+, Edge and Safari only. All other br

### Most conservative: Ensure all video is saved

For users deploying Frigate in environments where it is important to have contiguous video stored even if there was no detectable motion, the following config will store all video for 3 days. After 3 days, only video containing motion will be saved for 7 days. After 7 days, only video containing motion and overlapping with alerts or detections will be retained until 30 days have passed.

```yaml
record:
  enabled: True
  continuous:
    days: 3
  motion:
    days: 7
  alerts:
    retain:
      days: 30

@ -38,9 +39,8 @@ In order to reduce storage requirements, you can adjust your config to only reta

```yaml
record:
  enabled: True
  motion:
    days: 3
  alerts:
    retain:
      days: 30

@ -58,7 +58,7 @@ If you only want to retain video that occurs during a tracked object, this confi

```yaml
record:
  enabled: True
  continuous:
    days: 0
  alerts:
    retain:

@ -80,15 +80,17 @@ Retention configs support decimals meaning they can be configured to retain `0.5

:::

### Continuous and Motion Recording

The number of days to retain continuous and motion recordings can be set via the following config where X is a number; by default, continuous recording is disabled.

```yaml
record:
  enabled: True
  continuous:
    days: 1 # <- number of days to keep continuous recordings
  motion:
    days: 2 # <- number of days to keep motion recordings
```

Continuous recording supports different retention modes [which are described below](#what-do-the-different-retain-modes-mean)

@ -112,38 +114,6 @@ This configuration will retain recording segments that overlap with alerts and d

**WARNING**: Recordings still must be enabled in the config. If a camera has recordings disabled in the config, enabling via the methods listed above will have no effect.

## What do the different retain modes mean?

Frigate saves from the stream with the `record` role in 10 second segments. These options determine which recording segments are kept for continuous recording (but can also affect tracked objects).

Let's say you have Frigate configured so that your doorbell camera would retain the last **2** days of continuous recording.

- With the `all` option all 48 hours of those two days would be kept and viewable.
- With the `motion` option the only parts of those 48 hours kept would be segments where Frigate detected motion. This is the middle ground option that won't keep all 48 hours, but will likely keep all segments of interest along with the potential for some extra segments.
- With the `active_objects` option the only segments that would be kept are those where there was a true positive object that was not considered stationary.

The same options are available with alerts and detections, except it will only save the recordings when it overlaps with a review item of that type.

A configuration example of the above retain modes where all `motion` segments are stored for 7 days and `active objects` are stored for 14 days would be as follows:

```yaml
record:
  enabled: True
  retain:
    days: 7
    mode: motion
  alerts:
    retain:
      days: 14
      mode: active_objects
  detections:
    retain:
      days: 14
      mode: active_objects
```

The above configuration example can be added globally or on a per camera basis.

## Can I have "continuous" recordings, but only at certain times?

Using Frigate UI, Home Assistant, or MQTT, cameras can be automated to only record in certain situations or at certain times.
@ -73,6 +73,12 @@ tls:

  # Optional: Enable TLS for port 8971 (default: shown below)
  enabled: True

# Optional: IPv6 configuration
networking:
  # Optional: Enable IPv6 on 5000, and 8971 if tls is configured (default: shown below)
  ipv6:
    enabled: False

# Optional: Proxy configuration
proxy:
  # Optional: Mapping for headers from upstream proxies. Only used if Frigate's auth
@ -339,6 +345,33 @@ objects:

      # Optional: mask to prevent this object type from being detected in certain areas (default: no mask)
      # Checks based on the bottom center of the bounding box of the object
      mask: 0.000,0.000,0.781,0.000,0.781,0.278,0.000,0.278
  # Optional: Configuration for AI generated tracked object descriptions
  genai:
    # Optional: Enable AI object description generation (default: shown below)
    enabled: False
    # Optional: Use the object snapshot instead of thumbnails for description generation (default: shown below)
    use_snapshot: False
    # Optional: The default prompt for generating descriptions. Can use replacement
    # variables like "label", "sub_label", "camera" to make more dynamic. (default: shown below)
    prompt: "Describe the {label} in the sequence of images with as much detail as possible. Do not describe the background."
    # Optional: Object specific prompts to customize description results
    # Format: {label}: {prompt}
    object_prompts:
      person: "My special person prompt."
    # Optional: objects to generate descriptions for (default: all objects that are tracked)
    objects:
      - person
      - cat
    # Optional: Restrict generation to objects that entered any of the listed zones (default: none, all zones qualify)
    required_zones: []
    # Optional: What triggers to use to send frames for a tracked object to generative AI (default: shown below)
    send_triggers:
      # Once the object is no longer tracked
      tracked_object_end: True
      # Optional: After X many significant updates are received (default: shown below)
      after_significant_updates: None
    # Optional: Save thumbnails sent to generative AI for review/debugging purposes (default: shown below)
    debug_save_thumbnails: False

# Optional: Review configuration
# NOTE: Can be overridden at the camera level
@ -371,6 +404,19 @@ review:

    # should be configured at the camera level.
    required_zones:
      - driveway
  # Optional: GenAI Review Summary Configuration
  genai:
    # Optional: Enable the GenAI review summary feature (default: shown below)
    enabled: False
    # Optional: Enable GenAI review summaries for alerts (default: shown below)
    alerts: True
    # Optional: Enable GenAI review summaries for detections (default: shown below)
    detections: False
    # Optional: Additional concerns that the GenAI should make note of (default: None)
    additional_concerns:
      - Animals in the garden
    # Optional: Preferred response language (default: English)
    preferred_language: English

# Optional: Motion configuration
# NOTE: Can be overridden at the camera level
@ -438,20 +484,20 @@ record:

  # Optional: Number of minutes to wait between cleanup runs (default: shown below)
  # This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o
  expire_interval: 60
  # Optional: Two-way sync recordings database with disk on startup and once a day (default: shown below).
  sync_recordings: False
  # Optional: Continuous retention settings
  continuous:
    # Optional: Number of days to retain recordings regardless of tracked objects or motion (default: shown below)
    # NOTE: This should be set to 0 and retention should be defined in alerts and detections section below
    # if you only want to retain recordings of alerts and detections.
    days: 0
  # Optional: Motion retention settings
  motion:
    # Optional: Number of days to retain recordings regardless of tracked objects (default: shown below)
    # NOTE: This should be set to 0 and retention should be defined in alerts and detections section below
    # if you only want to retain recordings of alerts and detections.
    days: 0
  # Optional: Recording Export Settings
  export:
    # Optional: Timelapse Output Args (default: shown below).
@ -546,6 +592,9 @@ semantic_search:

  # Optional: Set the model size used for embeddings. (default: shown below)
  # NOTE: small model runs on CPU and large model runs on GPU
  model_size: "small"
  # Optional: Target a specific device to run the model (default: shown below)
  # NOTE: See https://onnxruntime.ai/docs/execution-providers/ for more information
  device: None

# Optional: Configuration for face recognition capability
# NOTE: enabled, min_area can be overridden at the camera level

@ -569,6 +618,9 @@ face_recognition:

  blur_confidence_filter: True
  # Optional: Set the model size used for face recognition. (default: shown below)
  model_size: small
  # Optional: Target a specific device to run the model (default: shown below)
  # NOTE: See https://onnxruntime.ai/docs/execution-providers/ for more information
  device: None

# Optional: Configuration for license plate recognition capability
# NOTE: enabled, min_area, and enhancement can be overridden at the camera level

@ -576,6 +628,7 @@ lpr:

  # Optional: Enable license plate recognition (default: shown below)
  enabled: False
  # Optional: The device to run the models on (default: shown below)
  # NOTE: See https://onnxruntime.ai/docs/execution-providers/ for more information
  device: CPU
  # Optional: Set the model size used for text detection. (default: shown below)
  model_size: small
@ -612,13 +665,22 @@ genai:

  base_url: http://localhost:11434
  # Required if gemini or openai
  api_key: "{FRIGATE_GENAI_API_KEY}"
  # Optional additional args to pass to the GenAI Provider (default: None)
  provider_options:
    keep_alive: -1

# Optional: Configuration for audio transcription
# NOTE: only the enabled option can be overridden at the camera level
audio_transcription:
  # Optional: Enable audio transcription (default: shown below)
  enabled: False
  # Optional: The device to run the models on (default: shown below)
  device: CPU
  # Optional: Set the model size used for transcription. (default: shown below)
  model_size: small
  # Optional: Set the language used for transcription translation. (default: shown below)
  # List of language codes: https://github.com/openai/whisper/blob/main/whisper/tokenizer.py#L10
  language: en

# Optional: Restream configuration
# Uses https://github.com/AlexxIT/go2rtc (v1.9.9)
@ -827,33 +889,22 @@ cameras:

    # By default the cameras are sorted alphabetically.
    order: 0

    # Optional: Configuration for triggers to automate actions based on semantic search results.
    triggers:
      # Required: Unique identifier for the trigger (generated automatically from nickname if not specified).
      trigger_name:
        # Required: Enable or disable the trigger. (default: shown below)
        enabled: true
        # Type of trigger, either `thumbnail` for image-based matching or `description` for text-based matching. (default: none)
        type: thumbnail
        # Reference data for matching, either an event ID for `thumbnail` or a text string for `description`. (default: none)
        data: 1751565549.853251-b69j73
        # Similarity threshold for triggering. (default: none)
        threshold: 0.7
        # List of actions to perform when the trigger fires. (default: none)
        # Available options: `notification` (send a webpush notification)
        actions:
          - notification

# Optional
ui:
@ -39,7 +39,7 @@ If you are enabling Semantic Search for the first time, be advised that Frigate

The [V1 model from Jina](https://huggingface.co/jinaai/jina-clip-v1) has a vision model which is able to embed both images and text into the same vector space, which allows `image -> image` and `text -> image` similarity searches. Frigate uses this model on tracked objects to encode the thumbnail image and store it in the database. When searching for tracked objects via text in the search box, Frigate will perform a `text -> image` similarity search against this embedding. When clicking "Find Similar" in the tracked object detail pane, Frigate will perform an `image -> image` similarity search to retrieve the closest matching thumbnails.

The V1 text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Explore page when clicking on the thumbnail of a tracked object. See [the object description docs](/configuration/genai/objects.md) for more information on how to automatically generate tracked object descriptions.

Differently weighted versions of the Jina models are available and can be selected by setting the `model_size` config option as `small` or `large`:

@ -78,17 +78,21 @@ Switching between V1 and V2 requires reindexing your embeddings. The embeddings

### GPU Acceleration

The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU / NPU hardware, when available. This depends on the Docker build that is used. You can also target a specific device in a multi-GPU installation.

```yaml
semantic_search:
  enabled: True
  model_size: large
  # Optional, if using the 'large' model in a multi-GPU installation
  device: 0
```

:::info

If the correct build is used for your GPU / NPU and the `large` model is configured, then the GPU / NPU will be detected and used automatically.

Specify the `device` option to target a specific GPU in a multi-GPU system (see [onnxruntime's provider options](https://onnxruntime.ai/docs/execution-providers/)).

If you do not specify a device, the first available GPU will be used.

See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.
@ -102,3 +106,41 @@ See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_

4. Make your search language and tone closely match exactly what you're looking for. If you are using thumbnail search, **phrase your query as an image caption**. Searching for "red car" may not work as well as "red sedan driving down a residential street on a sunny day".
5. Semantic search on thumbnails tends to return better results when matching large subjects that take up most of the frame. Small things like "cat" tend to not work well.
6. Experiment! Find a tracked object you want to test and start typing keywords and phrases to see what works for you.

## Triggers

Triggers utilize semantic search to automate actions when a tracked object matches a specified image or description. Triggers can be configured so that Frigate executes a specific action when a tracked object's image or description matches a predefined image or text, based on a similarity threshold. Triggers are managed per camera and can be configured via the Frigate UI in the Settings page under the Triggers tab.

### Configuration

Triggers are defined within the `semantic_search` configuration for each camera in your Frigate configuration file or through the UI. Each trigger consists of a `type` (either `thumbnail` or `description`), a `data` field (the reference image event ID or text), a `threshold` for similarity matching, and a list of `actions` to perform when the trigger fires.
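As a sketch, a configured trigger might look like the following; the camera, trigger name, and values are illustrative, and the placement mirrors the camera-level `triggers` block shown in Frigate's full reference config:

```yaml
cameras:
  front_door:
    triggers:
      red_car_alert:
        enabled: true
        type: thumbnail
        # event ID of a reference tracked object (illustrative)
        data: 1751565549.853251-b69j73
        threshold: 0.7
        actions:
          - notification
```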
#### Managing Triggers in the UI
|
||||||
|
|
||||||
|
1. Navigate to the **Settings** page and select the **Triggers** tab.
|
||||||
|
2. Choose a camera from the dropdown menu to view or manage its triggers.
|
||||||
|
3. Click **Add Trigger** to create a new trigger or use the pencil icon to edit an existing one.
|
||||||
|
4. In the **Create Trigger** dialog:
|
||||||
|
- Enter a **Name** for the trigger (e.g., "red_car_alert").
|
||||||
|
- Select the **Type** (`Thumbnail` or `Description`).
|
||||||
|
- For `Thumbnail`, select an image to trigger this action when a similar thumbnail image is detected, based on the threshold.
|
||||||
|
- For `Description`, enter text to trigger this action when a similar tracked object description is detected.
|
||||||
|
- Set the **Threshold** for similarity matching.
|
||||||
|
- Select **Actions** to perform when the trigger fires.
|
||||||
|
5. Save the trigger to update the configuration and store the embedding in the database.
|
||||||
|
|
||||||
|
When a trigger fires, the UI highlights the trigger with a blue outline for 3 seconds for easy identification.
|
||||||
|
|
||||||
|
### Usage and Best Practices
|
||||||
|
|
||||||
|
1. **Thumbnail Triggers**: Select a representative image (event ID) from the Explore page that closely matches the object you want to detect. For best results, choose images where the object is prominent and fills most of the frame.
|
||||||
|
2. **Description Triggers**: Write concise, specific text descriptions (e.g., "Person in a red jacket") that align with the tracked object’s description. Avoid vague terms to improve matching accuracy.
|
||||||
|
3. **Threshold Tuning**: Adjust the threshold to balance sensitivity and specificity. A higher threshold (e.g., 0.8) requires closer matches, reducing false positives but potentially missing similar objects. A lower threshold (e.g., 0.6) is more inclusive but may trigger more often.
|
||||||
|
4. **Using Explore**: Use the context menu or right-click / long-press on a tracked object in the Grid View in Explore to quickly add a trigger based on the tracked object's thumbnail.
|
||||||
|
5. **Editing triggers**: For the best experience, triggers should be edited via the UI. However, Frigate will ensure triggers edited in the config will be synced with triggers created and edited in the UI.
|
||||||
|
|
||||||
|
### Notes

- Triggers rely on the same Jina AI CLIP models (V1 or V2) used for semantic search. Ensure `semantic_search` is enabled and properly configured.
- Reindexing embeddings (via the UI or `reindex: True`) does not affect trigger configurations, but may update the embeddings used for matching.
- For optimal performance, use a system with sufficient RAM (8GB minimum, 16GB recommended) and a GPU for `large` model configurations, as described in the Semantic Search requirements.
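
Triggers build on the same models as Semantic Search, so the feature must be enabled globally. A minimal sketch, assuming the standard `semantic_search` options referenced above (`reindex` and the model size values should be verified against the reference config):

```yaml
semantic_search:
  enabled: true
  reindex: False # set to True once to rebuild the index, then revert
  model_size: small # large requires a capable GPU
```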

@@ -88,7 +88,9 @@ Sometimes objects are expected to be passing through a zone, but an object loiter

:::note

When using loitering zones, a review item will behave in the following way:

- When a person is in a loitering zone, the review item will remain active until the person leaves the loitering zone, regardless of whether they are stationary.
- When any other object is in a loitering zone, the review item will remain active until the loitering time is met. Then, if the object is stationary, the review item will end.

:::
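
A loitering zone is a regular zone with a `loitering_time` set. A minimal sketch, with placeholder camera, zone name, and coordinates:

```yaml
cameras:
  back_yard:
    zones:
      patio:
        coordinates: 0.0,0.6,1.0,0.6,1.0,1.0,0.0,1.0
        loitering_time: 10 # seconds an object may remain before it is considered loitering
```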

@@ -61,19 +61,26 @@ Frigate supports multiple different detectors that work on different types of ha

**AMD**

- [ROCm](#rocm---amd-gpu): ROCm can run on AMD Discrete GPUs to provide efficient object detection
  - [Supports limited model architectures](../../configuration/object_detectors#rocm-supported-models)
  - Runs best on discrete AMD GPUs

**Apple Silicon**

- [Apple Silicon](#apple-silicon): Apple Silicon is usable on all M1 and newer Apple Silicon devices to provide efficient and fast object detection
  - [Supports primarily ssdlite and mobilenet model architectures](../../configuration/object_detectors#apple-silicon-supported-models)
  - Runs well with any size models including large
  - Runs via a ZMQ proxy, which adds some latency; only recommended for local connections

**Intel**

- [OpenVino](#openvino---intel): OpenVino can run on Intel Arc GPUs, Intel integrated GPUs, and Intel CPUs to provide efficient object detection.
  - [Supports majority of model architectures](../../configuration/object_detectors#openvino-supported-models)
  - Runs best with tiny, small, or medium models

**Nvidia**

- [TensorRT](#tensorrt---nvidia-gpu): TensorRT can run on Nvidia GPUs and Jetson devices.
  - [Supports majority of model architectures via ONNX](../../configuration/object_detectors#onnx-supported-models)
  - Runs well with any size models including large

**Rockchip**
@@ -173,14 +180,28 @@ Inference speeds will vary greatly depending on the GPU and the model used.

| RTX A4000 | | 320: ~ 15 ms | |
| Tesla P40 | | 320: ~ 105 ms | |

### Apple Silicon

With the [Apple Silicon](../configuration/object_detectors.md#apple-silicon-detector) detector Frigate can take advantage of the NPU in M1 and newer Apple Silicon.

:::warning

Apple Silicon cannot run within a container, so a ZMQ proxy is utilized to communicate with [the Apple Silicon Frigate detector](https://github.com/frigate-nvr/apple-silicon-detector), which runs on the host. This should add minimal latency when run on the same device.

:::

| Name   | YOLOv9 Inference Time  |
| ------ | ---------------------- |
| M3 Pro | t-320: 6 ms s-320: 8ms |
| M1     | s-320: 9ms             |

### ROCm - AMD GPU

With the [ROCm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many discrete AMD GPUs.

| Name      | YOLOv9 Inference Time | YOLO-NAS Inference Time   |
| --------- | --------------------- | ------------------------- |
| AMD 780M  | ~ 14 ms               | 320: ~ 25 ms 640: ~ 50 ms |
| AMD 8700G |                       | 320: ~ 20 ms 640: ~ 40 ms |

## Community Supported Detectors

74 docs/docs/frigate/planning_setup.md Normal file
@@ -0,0 +1,74 @@
---
id: planning_setup
title: Planning a New Installation
---

Choosing the right hardware for your Frigate NVR setup is important for optimal performance and a smooth experience. This guide will walk you through the key considerations, focusing on the number of cameras and the hardware required for efficient object detection.

## Key Considerations

### Number of Cameras and Simultaneous Activity

The most fundamental factor in your hardware decision is the number of cameras you plan to use. However, it's not just about the raw count; it's also about how many of those cameras are likely to see activity and require object detection simultaneously.

When motion is detected in a camera's feed, regions of that frame are sent to your chosen [object detection hardware](/configuration/object_detectors).

- **Low Simultaneous Activity (1-6 cameras with occasional motion)**: If you have a few cameras in areas with infrequent activity (e.g., a seldom-used backyard, a quiet interior), the demand on your object detection hardware will be lower. A single, entry-level AI accelerator will suffice.
- **Moderate Simultaneous Activity (6-12 cameras with some overlapping motion)**: For setups with more cameras, especially in areas like a busy street or a property with multiple access points, it's more likely that several cameras will capture activity at the same time. This increases the load on your object detection hardware, requiring more processing power.
- **High Simultaneous Activity (12+ cameras or highly active zones)**: Large installations or scenarios where many cameras frequently capture activity (e.g., busy street with overview, identification, dedicated LPR cameras, etc.) will necessitate robust object detection capabilities. You'll likely need multiple entry-level AI accelerators or a more powerful single unit such as a discrete GPU.
- **Commercial Installations (40+ cameras)**: Commercial installations or scenarios where a substantial number of cameras capture activity (e.g., a commercial property, an active public space) will necessitate robust object detection capabilities. You'll likely need a modern discrete GPU.

### Video Decoding

Modern CPUs with integrated GPUs (Intel Quick Sync, AMD VCN) or dedicated GPUs can significantly offload video decoding from the main CPU, freeing up resources. This is highly recommended, especially for multiple cameras.
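
Hardware decoding is enabled through Frigate's ffmpeg presets. A minimal sketch, assuming a VAAPI-capable integrated GPU (other presets exist for other hardware; see the hardware acceleration docs):

```yaml
ffmpeg:
  hwaccel_args: preset-vaapi # offloads video decoding to the iGPU
```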
:::tip

For commercial installations it is important to verify the number of supported concurrent streams on your GPU; many consumer GPUs max out at ~20 concurrent camera streams.

:::

## Hardware Considerations

### Object Detection

There are many different hardware options for object detection depending on priorities and available hardware. See [the recommended hardware page](./hardware.md#detectors) for more specifics on what hardware is recommended for object detection.

### Storage

Storage is an important consideration when planning a new installation. To get a more precise estimate of your storage requirements, you can use an IP camera storage calculator. Websites like [IPConfigure Storage Calculator](https://calculator.ipconfigure.com/) can help you determine the necessary disk space based on your camera settings.

#### SSDs (Solid State Drives)

SSDs are an excellent choice for Frigate, offering high speed and responsiveness. The older concern that SSDs would quickly "wear out" from constant video recording is largely no longer valid for modern consumer and enterprise-grade SSDs.

- Longevity: Modern SSDs are designed with advanced wear-leveling algorithms and significantly higher "Terabytes Written" (TBW) ratings than earlier models. For typical home NVR use, a good quality SSD will likely outlast the useful life of your NVR hardware itself.
- Performance: SSDs excel at handling the numerous small write operations that occur during continuous video recording and can significantly improve the responsiveness of the Frigate UI and clip retrieval.
- Silence and Efficiency: SSDs produce no noise and consume less power than traditional HDDs.

#### HDDs (Hard Disk Drives)

Traditional Hard Disk Drives (HDDs) remain a great and often more cost-effective option for long-term video storage, especially for larger setups where raw capacity is prioritized.

- Cost-Effectiveness: HDDs offer the best cost per gigabyte, making them ideal for storing many days, weeks, or months of continuous footage.
- Capacity: HDDs are available in much larger capacities than most consumer SSDs, which is beneficial for extensive video archives.
- NVR-Rated Drives: If choosing an HDD, consider drives specifically designed for surveillance (NVR) use, such as Western Digital Purple or Seagate SkyHawk. These drives are engineered for 24/7 operation and continuous write workloads, offering improved reliability compared to standard desktop drives.

#### Determining Your Storage Needs

The amount of storage you need will depend on several factors:

- Number of Cameras: More cameras naturally require more space.
- Resolution and Framerate: Higher resolution (e.g., 4K) and higher framerate (e.g., 30fps) streams consume significantly more storage.
- Recording Method: Continuous recording uses the most space. Motion-only recording or object-triggered recording can save space, but may miss some footage.
- Retention Period: How many days, weeks, or months of footage do you want to keep?
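
As a rough worked example (all numbers are assumptions, not measurements): a single 1080p camera recording continuously at 4 Mbps writes about 0.5 MB/s, or roughly 43 GB per day. Eight such cameras with a 7-day retention period would therefore need on the order of 8 × 43 GB × 7 ≈ 2.4 TB, before any headroom for snapshots and the database.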

#### Network Storage (NFS/SMB)

While supported, using network-attached storage (NAS) for recordings can introduce latency and network dependency considerations. For optimal performance and reliability, it is generally recommended to have local storage for your Frigate recordings. If using a NAS, ensure your network connection to it is robust and fast (Gigabit Ethernet at minimum) and that the NAS itself can handle the continuous write load.

### RAM (Memory)

- **Basic Minimum: 4GB RAM**: This is generally sufficient for a very basic Frigate setup with a few cameras and a dedicated object detection accelerator, without running any enrichments. Performance might be tight, especially with higher resolution streams or numerous detections.
- **Minimum for Enrichments: 8GB RAM**: If you plan to utilize Frigate's enrichment features (e.g., facial recognition, license plate recognition, or other AI models that run alongside standard object detection), 8GB of RAM should be considered the minimum. Enrichments require additional memory to load and process their respective models and data.
- **Recommended: 16GB RAM**: For most users, especially those with many cameras (8+) or who plan to heavily leverage enrichments, 16GB of RAM is highly recommended. This provides ample headroom for smooth operation, reduces the likelihood of swapping to disk (which can impact performance), and allows for future expansion.

@@ -5,7 +5,7 @@ title: Updating

# Updating Frigate

The current stable version of Frigate is **0.16.0**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.16.0).

Keeping Frigate up to date ensures you benefit from the latest features, performance improvements, and bug fixes. The update process varies slightly depending on your installation method (Docker, Home Assistant Addon, etc.). Below are instructions for the most common setups.

@@ -33,21 +33,21 @@ If you’re running Frigate via Docker (recommended method), follow these steps:

2. **Update and Pull the Latest Image**:

   - If using Docker Compose:
     - Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.16.0` instead of `0.15.2`). For example:
       ```yaml
       services:
         frigate:
           image: ghcr.io/blakeblackshear/frigate:0.16.0
       ```
     - Then pull the image:
       ```bash
       docker pull ghcr.io/blakeblackshear/frigate:0.16.0
       ```
   - **Note for `stable` Tag Users**: If your `docker-compose.yml` uses the `stable` tag (e.g., `ghcr.io/blakeblackshear/frigate:stable`), you don’t need to update the tag manually. The `stable` tag always points to the latest stable release after pulling.
   - If using `docker run`:
     - Pull the image with the appropriate tag (e.g., `0.16.0`, `0.16.0-tensorrt`, or `stable`):
       ```bash
       docker pull ghcr.io/blakeblackshear/frigate:0.16.0
       ```

3. **Start the Container**:

@@ -105,8 +105,8 @@ If an update causes issues:

1. Stop Frigate.
2. Restore your backed-up config file and database.
3. Revert to the previous image version:
   - For Docker: Specify an older tag (e.g., `ghcr.io/blakeblackshear/frigate:0.15.2`) in your `docker run` command.
   - For Docker Compose: Edit your `docker-compose.yml`, specify the older version tag (e.g., `ghcr.io/blakeblackshear/frigate:0.15.2`), and re-run `docker compose up -d`.
   - For Home Assistant: Reinstall the previous addon version manually via the repository if needed and restart the addon.
4. Verify the old version is running again.

@@ -139,7 +139,7 @@ Message published for updates to tracked object metadata, for example:

  "name": "John",
  "score": 0.95,
  "camera": "front_door_cam",
  "timestamp": 1607123958.748393
}
```

@@ -153,7 +153,7 @@ Message published for updates to tracked object metadata, for example:

  "plate": "123ABC",
  "score": 0.95,
  "camera": "driveway_cam",
  "timestamp": 1607123958.748393
}
```

@@ -206,6 +206,20 @@ Message published for each changed review item. The first message is published w

}
```

### `frigate/triggers`

Message published when a trigger defined in a camera's `semantic_search` configuration fires.

```json
{
  "name": "car_trigger",
  "camera": "driveway",
  "event_id": "1751565549.853251-b69j73",
  "type": "thumbnail",
  "score": 0.85
}
```
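
Because the payload is JSON, it can be consumed directly by MQTT clients. As one hedged example, a Home Assistant automation could react to this topic as sketched below (the notify service name is a placeholder, and this assumes Home Assistant's MQTT integration is connected to the same broker):

```yaml
automation:
  - alias: Notify when a Frigate trigger fires
    trigger:
      - platform: mqtt
        topic: frigate/triggers
    action:
      - service: notify.mobile_app_phone
        data:
          message: >-
            Trigger {{ trigger.payload_json.name }} fired on
            {{ trigger.payload_json.camera }} (score {{ trigger.payload_json.score }})
```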
### `frigate/stats`

Same data available at `/api/stats` published at a configurable interval.

@@ -269,6 +283,12 @@ Publishes the rms value for audio detected on this camera.

**NOTE:** Requires audio detection to be enabled

### `frigate/<camera_name>/audio/transcription`

Publishes transcribed text for audio detected on this camera.

**NOTE:** Requires audio detection and transcription to be enabled
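
A minimal sketch of the per-camera config this topic depends on (the camera name is a placeholder; `audio_transcription.enabled` is the flag checked by the API, and audio detection uses the standard `audio` section):

```yaml
cameras:
  front_door:
    audio:
      enabled: true # audio detection
    audio_transcription:
      enabled: true # transcription of detected speech
```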

### `frigate/<camera_name>/enabled/set`

Topic to turn Frigate's processing of a camera on and off. Expected values are `ON` and `OFF`.

@@ -391,6 +411,22 @@ Topic to turn review detections for a camera on or off. Expected values are `ON`

Topic with current state of review detections for a camera. Published values are `ON` and `OFF`.

### `frigate/<camera_name>/object_descriptions/set`

Topic to turn generative AI object descriptions for a camera on or off. Expected values are `ON` and `OFF`.

### `frigate/<camera_name>/object_descriptions/state`

Topic with current state of generative AI object descriptions for a camera. Published values are `ON` and `OFF`.
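
These topics toggle the per-camera generative AI object description setting; a minimal sketch (the camera name is a placeholder, and `objects.genai.enabled` matches the flag checked in the API):

```yaml
cameras:
  front_door:
    objects:
      genai:
        enabled: true
```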

### `frigate/<camera_name>/review_descriptions/set`

Topic to turn generative AI review descriptions for a camera on or off. Expected values are `ON` and `OFF`.

### `frigate/<camera_name>/review_descriptions/state`

Topic with current state of generative AI review descriptions for a camera. Published values are `ON` and `OFF`.

### `frigate/<camera_name>/birdseye/set`

Topic to turn Birdseye for a camera on and off. Expected values are `ON` and `OFF`. Birdseye mode

@@ -7,6 +7,7 @@ const sidebars: SidebarsConfig = {

  Frigate: [
    'frigate/index',
    'frigate/hardware',
    'frigate/planning_setup',
    'frigate/installation',
    'frigate/updating',
    'frigate/camera_setup',

@@ -36,10 +37,23 @@ const sidebars: SidebarsConfig = {

    ],
    Enrichments: [
      "configuration/semantic_search",
      "configuration/face_recognition",
      "configuration/license_plate_recognition",
      "configuration/bird_classification",
      {
        type: "category",
        label: "Generative AI",
        link: {
          type: "generated-index",
          title: "Generative AI",
          description: "Generative AI Features",
        },
        items: [
          "configuration/genai/genai_config",
          "configuration/genai/genai_review",
          "configuration/genai/genai_objects",
        ],
      },
    ],
    Cameras: [
      "configuration/cameras",
@@ -1,5 +1,6 @@

import argparse
import faulthandler
import multiprocessing as mp
import signal
import sys
import threading

@@ -15,12 +16,17 @@ from frigate.util.config import find_config_file

def main() -> None:
    manager = mp.Manager()
    faulthandler.enable()

    # Setup the logging thread
    setup_logging(manager)

    threading.current_thread().name = "frigate"
    stop_event = mp.Event()

    # send stop event on SIGINT
    signal.signal(signal.SIGINT, lambda sig, frame: stop_event.set())

    # Make sure we exit cleanly on SIGTERM.
    signal.signal(signal.SIGTERM, lambda sig, frame: sys.exit())

@@ -93,7 +99,14 @@ def main() -> None:

        print("*************************************************************")
        print("*** End Config Validation Errors ***")
        print("*************************************************************")

        # attempt to start Frigate in recovery mode
        try:
            config = FrigateConfig.load(install=True, safe_load=True)
            print("Starting Frigate in safe mode.")
        except ValidationError:
            print("Unable to start Frigate in safe mode.")
            sys.exit(1)

    if args.validate_config:
        print("*************************************************************")
        print("*** Your config file is valid. ***")

@@ -101,8 +114,23 @@ def main() -> None:

        sys.exit(0)

    # Run the main application.
    FrigateApp(config, manager, stop_event).start()


if __name__ == "__main__":
    mp.set_forkserver_preload(
        [
            # Standard library and core dependencies
            "sqlite3",
            # Third-party libraries commonly used in Frigate
            "numpy",
            "cv2",
            "peewee",
            "zmq",
            "ruamel.yaml",
            # Frigate core modules
            "frigate.camera.maintainer",
        ]
    )
    mp.set_start_method("forkserver", force=True)
    main()
@@ -6,6 +6,7 @@ import json

import logging
import os
import traceback
import urllib
from datetime import datetime, timedelta
from functools import reduce
from io import StringIO

@@ -20,7 +21,7 @@ from fastapi.encoders import jsonable_encoder

from fastapi.params import Depends
from fastapi.responses import JSONResponse, PlainTextResponse, StreamingResponse
from markupsafe import escape
from peewee import SQL, operator
from pydantic import ValidationError

from frigate.api.auth import require_role

@@ -28,12 +29,18 @@ from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryPa

from frigate.api.defs.request.app_body import AppConfigSetBody
from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig
from frigate.config.camera.updater import (
    CameraConfigUpdateEnum,
    CameraConfigUpdateTopic,
)
from frigate.models import Event, Timeline
from frigate.stats.prometheus import get_metrics, update_metrics
from frigate.util.builtin import (
    clean_camera_user_pass,
    flatten_config_data,
    get_tz_modifiers,
    process_config_query_string,
    update_yaml_file_bulk,
)
from frigate.util.config import find_config_file
from frigate.util.services import (
@@ -354,14 +361,37 @@ def config_set(request: Request, body: AppConfigSetBody):

    with open(config_file, "r") as f:
        old_raw_config = f.read()

    try:
        updates = {}

        # process query string parameters (takes precedence over body.config_data)
        parsed_url = urllib.parse.urlparse(str(request.url))
        query_string = urllib.parse.parse_qs(parsed_url.query, keep_blank_values=True)

        # Filter out empty keys but keep blank values for non-empty keys
        query_string = {k: v for k, v in query_string.items() if k}

        if query_string:
            updates = process_config_query_string(query_string)
        elif body.config_data:
            updates = flatten_config_data(body.config_data)

        if not updates:
            return JSONResponse(
                content=(
                    {"success": False, "message": "No configuration data provided"}
                ),
                status_code=400,
            )

        # apply all updates in a single operation
        update_yaml_file_bulk(config_file, updates)

        # validate the updated config
        with open(config_file, "r") as f:
            new_raw_config = f.read()

        try:
            config = FrigateConfig.parse(new_raw_config)
        except Exception:
@@ -385,8 +415,25 @@ def config_set(request: Request, body: AppConfigSetBody):

            status_code=500,
        )

    if body.requires_restart == 0 or body.update_topic:
        old_config: FrigateConfig = request.app.frigate_config
        request.app.frigate_config = config

        if body.update_topic and body.update_topic.startswith("config/cameras/"):
            _, _, camera, field = body.update_topic.split("/")

            if field == "add":
                settings = config.cameras[camera]
            elif field == "remove":
                settings = old_config.cameras[camera]
            else:
                settings = config.get_nested_object(body.update_topic)

            request.app.config_publisher.publish_update(
                CameraConfigUpdateTopic(CameraConfigUpdateEnum[field], camera),
                settings,
            )

    return JSONResponse(
        content=(
            {
@@ -685,7 +732,14 @@ def plusModels(request: Request, filterByCurrentModelDetector: bool = False):

@router.get("/recognized_license_plates")
def get_recognized_license_plates(split_joined: Optional[int] = None):
    try:
        # select distinct recognized plates directly in SQLite via json_extract,
        # rather than loading every event row into Python
        query = (
            Event.select(
                SQL("json_extract(data, '$.recognized_license_plate') AS plate")
            )
            .where(SQL("json_extract(data, '$.recognized_license_plate') IS NOT NULL"))
            .distinct()
        )
        recognized_license_plates = [row[0] for row in query.tuples()]
    except Exception:
        return JSONResponse(
            content=(

@@ -694,14 +748,6 @@ def get_recognized_license_plates(split_joined: Optional[int] = None):

            status_code=404,
        )

    if split_joined:
        original_recognized_license_plates = recognized_license_plates.copy()
        for recognized_license_plate in original_recognized_license_plates:
@@ -14,10 +14,14 @@ from peewee import DoesNotExist

from playhouse.shortcuts import model_to_dict

from frigate.api.auth import require_role
from frigate.api.defs.request.classification_body import (
    AudioTranscriptionBody,
    RenameFaceBody,
)
from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig
from frigate.config.camera import DetectConfig
from frigate.const import CLIPS_DIR, FACE_DIR
from frigate.embeddings import EmbeddingsContext
from frigate.models import Event
from frigate.util.path import get_event_snapshot
@@ -384,3 +388,255 @@ def reindex_embeddings(request: Request):

        },
        status_code=500,
    )


@router.put("/audio/transcribe")
def transcribe_audio(request: Request, body: AudioTranscriptionBody):
    event_id = body.event_id

    try:
        event = Event.get(Event.id == event_id)
    except DoesNotExist:
        message = f"Event {event_id} not found"
        logger.error(message)
        return JSONResponse(
            content=({"success": False, "message": message}), status_code=404
        )

    if not request.app.frigate_config.cameras[event.camera].audio_transcription.enabled:
        message = f"Audio transcription is not enabled for {event.camera}."
        logger.error(message)
        return JSONResponse(
            content=(
                {
                    "success": False,
                    "message": message,
                }
            ),
            status_code=400,
        )

    context: EmbeddingsContext = request.app.embeddings
    response = context.transcribe_audio(model_to_dict(event))

    if response == "started":
        return JSONResponse(
            content={
                "success": True,
                "message": "Audio transcription has started.",
            },
            status_code=202,  # 202 Accepted
        )
    elif response == "in_progress":
        return JSONResponse(
            content={
                "success": False,
                "message": "Audio transcription for a speech event is currently in progress. Try again later.",
            },
            status_code=409,  # 409 Conflict
        )
    else:
        return JSONResponse(
            content={
                "success": False,
                "message": "Failed to transcribe audio.",
            },
            status_code=500,
        )


# custom classification training


@router.get("/classification/{name}/dataset")
def get_classification_dataset(name: str):
    dataset_dict: dict[str, list[str]] = {}

    dataset_dir = os.path.join(CLIPS_DIR, sanitize_filename(name), "dataset")

    if not os.path.exists(dataset_dir):
        return JSONResponse(status_code=200, content={})

    # each sub-directory of the dataset is a category of training images
    # (loop variable renamed from `name` to avoid shadowing the route parameter)
    for category in os.listdir(dataset_dir):
        category_dir = os.path.join(dataset_dir, category)

        if not os.path.isdir(category_dir):
            continue

        dataset_dict[category] = []

        for file in filter(
            lambda f: (f.lower().endswith((".webp", ".png", ".jpg", ".jpeg"))),
            os.listdir(category_dir),
        ):
            dataset_dict[category].append(file)

    return JSONResponse(status_code=200, content=dataset_dict)


@router.get("/classification/{name}/train")
def get_classification_images(name: str):
    train_dir = os.path.join(CLIPS_DIR, sanitize_filename(name), "train")

    if not os.path.exists(train_dir):
        return JSONResponse(status_code=200, content=[])

    return JSONResponse(
        status_code=200,
        content=list(
            filter(
                lambda f: (f.lower().endswith((".webp", ".png", ".jpg", ".jpeg"))),
                os.listdir(train_dir),
            )
        ),
    )


@router.post("/classification/{name}/train")
async def train_configured_model(request: Request, name: str):
    config: FrigateConfig = request.app.frigate_config

    if name not in config.classification.custom:
        return JSONResponse(
            content=(
                {
                    "success": False,
                    "message": f"{name} is not a known classification model.",
                }
            ),
            status_code=404,
        )

    context: EmbeddingsContext = request.app.embeddings
    context.start_classification_training(name)
    return JSONResponse(
        content={"success": True, "message": "Started classification model training."},
        status_code=200,
    )


@router.post(
    "/classification/{name}/dataset/{category}/delete",
    dependencies=[Depends(require_role(["admin"]))],
)
def delete_classification_dataset_images(
    request: Request, name: str, category: str, body: dict = None
):
    config: FrigateConfig = request.app.frigate_config

    if name not in config.classification.custom:
        return JSONResponse(
            content=(
                {
                    "success": False,
                    "message": f"{name} is not a known classification model.",
                }
            ),
            status_code=404,
        )

    json: dict[str, Any] = body or {}
    list_of_ids = json.get("ids", "")
    folder = os.path.join(
        CLIPS_DIR, sanitize_filename(name), "dataset", sanitize_filename(category)
    )

    for id in list_of_ids:
        file_path = os.path.join(folder, sanitize_filename(id))

        if os.path.isfile(file_path):
            os.unlink(file_path)

    return JSONResponse(
        content=({"success": True, "message": "Successfully deleted images."}),
        status_code=200,
    )


@router.post(
    "/classification/{name}/dataset/categorize",
    dependencies=[Depends(require_role(["admin"]))],
)
def categorize_classification_image(request: Request, name: str, body: dict = None):
    config: FrigateConfig = request.app.frigate_config

    if name not in config.classification.custom:
        return JSONResponse(
            content=(
                {
                    "success": False,
                    "message": f"{name} is not a known classification model.",
                }
            ),
            status_code=404,
        )

    json: dict[str, Any] = body or {}
    category = sanitize_filename(json.get("category", ""))
    training_file_name = sanitize_filename(json.get("training_file", ""))
    training_file = os.path.join(
        CLIPS_DIR, sanitize_filename(name), "train", training_file_name
    )

    if training_file_name and not os.path.isfile(training_file):
        return JSONResponse(
            content=(
                {
                    "success": False,
                    "message": f"Invalid filename or no file exists: {training_file_name}",
                }
            ),
            status_code=404,
        )

    new_name = f"{category}-{datetime.datetime.now().timestamp()}.png"
    new_file_folder = os.path.join(
        CLIPS_DIR, sanitize_filename(name), "dataset", category
    )

    if not os.path.exists(new_file_folder):
        os.mkdir(new_file_folder)

    # use opencv because webp images can not be used to train
    img = cv2.imread(training_file)
    cv2.imwrite(os.path.join(new_file_folder, new_name), img)
    os.unlink(training_file)

    return JSONResponse(
        content=({"success": True, "message": "Successfully categorized image."}),
        status_code=200,
    )


@router.post(
    "/classification/{name}/train/delete",
    dependencies=[Depends(require_role(["admin"]))],
)
def delete_classification_train_images(request: Request, name: str, body: dict = None):
    config: FrigateConfig = request.app.frigate_config

    if name not in config.classification.custom:
        return JSONResponse(
            content=(
                {
                    "success": False,
                    "message": f"{name} is not a known classification model.",
                }
            ),
            status_code=404,
        )

    json: dict[str, Any] = body or {}
    list_of_ids = json.get("ids", "")
    folder = os.path.join(CLIPS_DIR, sanitize_filename(name), "train")

    for id in list_of_ids:
        file_path = os.path.join(folder, sanitize_filename(id))

        if os.path.isfile(file_path):
            os.unlink(file_path)

    return JSONResponse(
        content=({"success": True, "message": "Successfully deleted images."}),
        status_code=200,
    )
@@ -1,7 +1,8 @@

from enum import Enum
from typing import Optional, Union

from pydantic import BaseModel
from pydantic.json_schema import SkipJsonSchema


class Extension(str, Enum):

@@ -17,6 +18,7 @@ class MediaLatestFrameQueryParams(BaseModel):

    zones: Optional[int] = None
    mask: Optional[int] = None
    motion: Optional[int] = None
    paths: Optional[int] = None
    regions: Optional[int] = None
    quality: Optional[int] = 70
    height: Optional[int] = None

@@ -46,3 +48,10 @@ class MediaMjpegFeedQueryParams(BaseModel):

class MediaRecordingsSummaryQueryParams(BaseModel):
    timezone: str = "utc"
    cameras: Optional[str] = "all"


class MediaRecordingsAvailabilityQueryParams(BaseModel):
    cameras: str = "all"
    before: Union[float, SkipJsonSchema[None]] = None
    after: Union[float, SkipJsonSchema[None]] = None
    scale: int = 30
@@ -1,9 +1,13 @@

from typing import Optional

from pydantic import BaseModel, Field

from frigate.events.types import RegenerateDescriptionEnum


class RegenerateQueryParameters(BaseModel):
    source: Optional[RegenerateDescriptionEnum] = RegenerateDescriptionEnum.thumbnails
    force: Optional[bool] = Field(
        default=False,
        description="Force (re)generating the description even if GenAI is disabled for this camera.",
    )
@@ -1,10 +1,12 @@

from typing import Any, Dict, Optional

from pydantic import BaseModel


class AppConfigSetBody(BaseModel):
    requires_restart: int = 1
    update_topic: str | None = None
    config_data: Optional[Dict[str, Any]] = None


class AppPutPasswordBody(BaseModel):
@@ -3,3 +3,7 @@ from pydantic import BaseModel

class RenameFaceBody(BaseModel):
    new_name: str


class AudioTranscriptionBody(BaseModel):
    event_id: str
@@ -2,6 +2,8 @@ from typing import List, Optional, Union

from pydantic import BaseModel, Field

from frigate.config.classification import TriggerType


class EventsSubLabelBody(BaseModel):
    subLabel: str = Field(title="Sub label", max_length=100)

@@ -45,3 +47,9 @@ class EventsDeleteBody(BaseModel):

class SubmitPlusBody(BaseModel):
    include_annotation: int = Field(default=1)


class TriggerEmbeddingBody(BaseModel):
    type: TriggerType
    data: str
    threshold: float = Field(default=0.5, ge=0.0, le=1.0)
@@ -1,5 +1,6 @@

"""Event apis."""

import base64
import datetime
import logging
import os

@@ -10,9 +11,11 @@ from pathlib import Path

from urllib.parse import unquote

import cv2
import numpy as np
from fastapi import APIRouter, Request
from fastapi.params import Depends
from fastapi.responses import JSONResponse
from pathvalidate import sanitize_filename
from peewee import JOIN, DoesNotExist, fn, operator
from playhouse.shortcuts import model_to_dict

@@ -34,6 +37,7 @@ from frigate.api.defs.request.events_body import (

    EventsLPRBody,
    EventsSubLabelBody,
    SubmitPlusBody,
    TriggerEmbeddingBody,
)
from frigate.api.defs.response.event_response import (
    EventCreateResponse,

@@ -44,11 +48,12 @@ from frigate.api.defs.response.event_response import (

from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags
from frigate.comms.event_metadata_updater import EventMetadataTypeEnum
from frigate.const import CLIPS_DIR, TRIGGER_DIR
from frigate.embeddings import EmbeddingsContext
from frigate.models import Event, ReviewSegment, Timeline, Trigger
from frigate.track.object_processing import TrackedObject
from frigate.util.builtin import get_tz_modifiers
from frigate.util.path import get_event_thumbnail_bytes

logger = logging.getLogger(__name__)
@@ -724,15 +729,24 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())

    if (sort is None or sort == "relevance") and search_results:
        processed_events.sort(key=lambda x: x.get("search_distance", float("inf")))
    elif sort == "score_asc":
        processed_events.sort(key=lambda x: x["data"]["score"])
    elif sort == "score_desc":
        processed_events.sort(key=lambda x: x["data"]["score"], reverse=True)
    elif sort == "speed_asc":
        # tuple key sorts events missing a speed estimate after those with one
        processed_events.sort(
            key=lambda x: (
                x["data"].get("average_estimated_speed") is None,
                x["data"].get("average_estimated_speed"),
            )
        )
    elif sort == "speed_desc":
        processed_events.sort(
            key=lambda x: (
                x["data"].get("average_estimated_speed") is None,
                x["data"].get("average_estimated_speed", float("-inf")),
            ),
            reverse=True,
        )
    elif sort == "date_asc":
        processed_events.sort(key=lambda x: x["start_time"])
@@ -1090,7 +1104,7 @@ def set_sub_label(

    new_score = None

    request.app.event_metadata_updater.publish(
        (event_id, new_sub_label, new_score), EventMetadataTypeEnum.sub_label.value
    )

    return JSONResponse(
@@ -1144,7 +1158,8 @@ def set_plate(

    new_score = None

    request.app.event_metadata_updater.publish(
        (event_id, "recognized_license_plate", new_plate, new_score),
        EventMetadataTypeEnum.attribute.value,
    )

    return JSONResponse(
@@ -1225,9 +1240,10 @@ def regenerate_description(

    camera_config = request.app.frigate_config.cameras[event.camera]

    if camera_config.objects.genai.enabled or params.force:
        request.app.event_metadata_updater.publish(
            (event.id, params.source, params.force),
            EventMetadataTypeEnum.regenerate_description.value,
        )

    return JSONResponse(
@@ -1254,6 +1270,38 @@ def regenerate_description(

    )


@router.post(
    "/description/generate",
    response_model=GenericResponse,
    # dependencies=[Depends(require_role(["admin"]))],
)
def generate_description_embedding(
    request: Request,
    body: EventsDescriptionBody,
):
    new_description = body.description

    # If semantic search is enabled, update the index
    if request.app.frigate_config.semantic_search.enabled:
        context: EmbeddingsContext = request.app.embeddings
        if len(new_description) > 0:
            result = context.generate_description_embedding(
                new_description,
            )

    return JSONResponse(
        content=(
            {
                "success": True,
                "message": f"Embedding for description is {result}"
                if result
                else "Failed to generate embedding",
            }
        ),
        status_code=200,
    )


def delete_single_event(event_id: str, request: Request) -> dict:
    try:
        event = Event.get(Event.id == event_id)
@@ -1352,7 +1400,6 @@ def create_event(

    event_id = f"{now}-{rand_id}"

    request.app.event_metadata_updater.publish(
        (
            now,
            camera_name,

@@ -1365,6 +1412,7 @@ def create_event(

            body.source_type,
            body.draw,
        ),
        EventMetadataTypeEnum.manual_event_create.value,
    )

    return JSONResponse(
@@ -1388,7 +1436,7 @@ def end_event(request: Request, event_id: str, body: EventsEndBody):

    try:
        end_time = body.end_time or datetime.datetime.now().timestamp()
        request.app.event_metadata_updater.publish(
            (event_id, end_time), EventMetadataTypeEnum.manual_event_end.value
        )
    except Exception:
        return JSONResponse(
@@ -1402,3 +1450,423 @@ def end_event(request: Request, event_id: str, body: EventsEndBody):
         content=({"success": True, "message": "Event successfully ended."}),
         status_code=200,
     )
+
+
+@router.post(
+    "/trigger/embedding",
+    response_model=dict,
+    dependencies=[Depends(require_role(["admin"]))],
+)
+def create_trigger_embedding(
+    request: Request,
+    body: TriggerEmbeddingBody,
+    camera: str,
+    name: str,
+):
+    try:
+        if not request.app.frigate_config.semantic_search.enabled:
+            return JSONResponse(
+                content={
+                    "success": False,
+                    "message": "Semantic search is not enabled",
+                },
+                status_code=400,
+            )
+
+        # Check if trigger already exists
+        if (
+            Trigger.select()
+            .where(Trigger.camera == camera, Trigger.name == name)
+            .exists()
+        ):
+            return JSONResponse(
+                content={
+                    "success": False,
+                    "message": f"Trigger {camera}:{name} already exists",
+                },
+                status_code=400,
+            )
+
+        context: EmbeddingsContext = request.app.embeddings
+        # Generate embedding based on type
+        embedding = None
+        if body.type == "description":
+            embedding = context.generate_description_embedding(body.data)
+        elif body.type == "thumbnail":
+            try:
+                event: Event = Event.get(Event.id == body.data)
+            except DoesNotExist:
+                # TODO: check triggers directory for image
+                return JSONResponse(
+                    content={
+                        "success": False,
+                        "message": f"Failed to fetch event for {body.type} trigger",
+                    },
+                    status_code=400,
+                )
+
+            # Skip the event if not an object
+            if event.data.get("type") != "object":
+                return
+
+            if thumbnail := get_event_thumbnail_bytes(event):
+                cursor = context.db.execute_sql(
+                    """
+                    SELECT thumbnail_embedding FROM vec_thumbnails WHERE id = ?
+                    """,
+                    [body.data],
+                )
+
+                row = cursor.fetchone() if cursor else None
+
+                if row:
+                    query_embedding = row[0]
+                    embedding = np.frombuffer(query_embedding, dtype=np.float32)
+            else:
+                # Extract valid thumbnail
+                thumbnail = get_event_thumbnail_bytes(event)
+
+                if thumbnail is None:
+                    return JSONResponse(
+                        content={
+                            "success": False,
+                            "message": f"Failed to get thumbnail for {body.data} for {body.type} trigger",
+                        },
+                        status_code=400,
+                    )
+
+                embedding = context.generate_image_embedding(
+                    body.data, (base64.b64encode(thumbnail).decode("ASCII"))
+                )
+
+        if embedding is None:
+            return JSONResponse(
+                content={
+                    "success": False,
+                    "message": f"Failed to generate embedding for {body.type} trigger",
+                },
+                status_code=400,
+            )
+
+        if body.type == "thumbnail":
+            # Save image to the triggers directory
+            try:
+                os.makedirs(
+                    os.path.join(TRIGGER_DIR, sanitize_filename(camera)), exist_ok=True
+                )
+                with open(
+                    os.path.join(
+                        TRIGGER_DIR,
+                        sanitize_filename(camera),
+                        f"{sanitize_filename(body.data)}.webp",
+                    ),
+                    "wb",
+                ) as f:
+                    f.write(thumbnail)
+                logger.debug(
+                    f"Writing thumbnail for trigger with data {body.data} in {camera}."
+                )
+            except Exception as e:
+                logger.error(e.with_traceback())
+                logger.error(
+                    f"Failed to write thumbnail for trigger with data {body.data} in {camera}"
+                )
+
+        Trigger.create(
+            camera=camera,
+            name=name,
+            type=body.type,
+            data=body.data,
+            threshold=body.threshold,
+            model=request.app.frigate_config.semantic_search.model,
+            embedding=np.array(embedding, dtype=np.float32).tobytes(),
+            triggering_event_id="",
+            last_triggered=None,
+        )
+
+        return JSONResponse(
+            content={
+                "success": True,
+                "message": f"Trigger created successfully for {camera}:{name}",
+            },
+            status_code=200,
+        )
+
+    except Exception as e:
+        logger.error(e.with_traceback())
+        return JSONResponse(
+            content={
+                "success": False,
+                "message": "Error creating trigger embedding",
+            },
+            status_code=500,
+        )
+
+
+@router.put(
+    "/trigger/embedding/{camera}/{name}",
+    response_model=dict,
+    dependencies=[Depends(require_role(["admin"]))],
+)
+def update_trigger_embedding(
+    request: Request,
+    camera: str,
+    name: str,
+    body: TriggerEmbeddingBody,
+):
+    try:
+        if not request.app.frigate_config.semantic_search.enabled:
+            return JSONResponse(
+                content={
+                    "success": False,
+                    "message": "Semantic search is not enabled",
+                },
+                status_code=400,
+            )
+
+        context: EmbeddingsContext = request.app.embeddings
+        # Generate embedding based on type
+        embedding = None
+        if body.type == "description":
+            embedding = context.generate_description_embedding(body.data)
+        elif body.type == "thumbnail":
+            webp_file = sanitize_filename(body.data) + ".webp"
+            webp_path = os.path.join(TRIGGER_DIR, sanitize_filename(camera), webp_file)
+
+            try:
+                event: Event = Event.get(Event.id == body.data)
+                # Skip the event if not an object
+                if event.data.get("type") != "object":
+                    return JSONResponse(
+                        content={
+                            "success": False,
+                            "message": f"Event {body.data} is not a tracked object for {body.type} trigger",
+                        },
+                        status_code=400,
+                    )
+                # Extract valid thumbnail
+                thumbnail = get_event_thumbnail_bytes(event)
+
+                with open(webp_path, "wb") as f:
+                    f.write(thumbnail)
+            except DoesNotExist:
+                # check triggers directory for image
+                if not os.path.exists(webp_path):
+                    return JSONResponse(
+                        content={
+                            "success": False,
+                            "message": f"Failed to fetch event for {body.type} trigger",
+                        },
+                        status_code=400,
+                    )
+                else:
+                    # Load the image from the triggers directory
+                    with open(webp_path, "rb") as f:
+                        thumbnail = f.read()
+
+            embedding = context.generate_image_embedding(
+                body.data, (base64.b64encode(thumbnail).decode("ASCII"))
+            )
+
+        if embedding is None:
+            return JSONResponse(
+                content={
+                    "success": False,
+                    "message": f"Failed to generate embedding for {body.type} trigger",
+                },
+                status_code=400,
+            )
+
+        # Check if trigger exists for upsert
+        trigger = Trigger.get_or_none(Trigger.camera == camera, Trigger.name == name)
+
+        if trigger:
+            # Update existing trigger
+            if trigger.data != body.data:  # Delete old thumbnail only if data changes
+                try:
+                    os.remove(
+                        os.path.join(
+                            TRIGGER_DIR,
+                            sanitize_filename(camera),
+                            f"{trigger.data}.webp",
+                        )
+                    )
+                    logger.debug(
+                        f"Deleted thumbnail for trigger with data {trigger.data} in {camera}."
+                    )
+                except Exception as e:
+                    logger.error(e.with_traceback())
+                    logger.error(
+                        f"Failed to delete thumbnail for trigger with data {trigger.data} in {camera}"
+                    )
+
+            Trigger.update(
+                data=body.data,
+                model=request.app.frigate_config.semantic_search.model,
+                embedding=np.array(embedding, dtype=np.float32).tobytes(),
+                threshold=body.threshold,
+                triggering_event_id="",
+                last_triggered=None,
+            ).where(Trigger.camera == camera, Trigger.name == name).execute()
+        else:
+            # Create new trigger (for rename case)
+            Trigger.create(
+                camera=camera,
+                name=name,
+                type=body.type,
+                data=body.data,
+                threshold=body.threshold,
+                model=request.app.frigate_config.semantic_search.model,
+                embedding=np.array(embedding, dtype=np.float32).tobytes(),
+                triggering_event_id="",
+                last_triggered=None,
+            )
+
+        if body.type == "thumbnail":
+            # Save image to the triggers directory
+            try:
+                camera_path = os.path.join(TRIGGER_DIR, sanitize_filename(camera))
+                os.makedirs(camera_path, exist_ok=True)
+                with open(
+                    os.path.join(camera_path, f"{sanitize_filename(body.data)}.webp"),
+                    "wb",
+                ) as f:
+                    f.write(thumbnail)
+                logger.debug(
+                    f"Writing thumbnail for trigger with data {body.data} in {camera}."
+                )
+            except Exception as e:
+                logger.error(e.with_traceback())
+                logger.error(
+                    f"Failed to write thumbnail for trigger with data {body.data} in {camera}"
+                )
+
+        return JSONResponse(
+            content={
+                "success": True,
+                "message": f"Trigger updated successfully for {camera}:{name}",
+            },
+            status_code=200,
+        )
+
+    except Exception as e:
+        logger.error(e.with_traceback())
+        return JSONResponse(
+            content={
+                "success": False,
+                "message": "Error updating trigger embedding",
+            },
+            status_code=500,
+        )
+
+
+@router.delete(
+    "/trigger/embedding/{camera}/{name}",
+    response_model=dict,
+    dependencies=[Depends(require_role(["admin"]))],
+)
+def delete_trigger_embedding(
+    request: Request,
+    camera: str,
+    name: str,
+):
+    try:
+        trigger = Trigger.get_or_none(Trigger.camera == camera, Trigger.name == name)
+        if trigger is None:
+            return JSONResponse(
+                content={
+                    "success": False,
+                    "message": f"Trigger {camera}:{name} not found",
+                },
+                status_code=500,
+            )
+
+        deleted = (
+            Trigger.delete()
+            .where(Trigger.camera == camera, Trigger.name == name)
+            .execute()
+        )
+        if deleted == 0:
+            return JSONResponse(
+                content={
+                    "success": False,
+                    "message": f"Error deleting trigger {camera}:{name}",
+                },
+                status_code=401,
+            )
+
+        try:
+            os.remove(
+                os.path.join(
+                    TRIGGER_DIR, sanitize_filename(camera), f"{trigger.data}.webp"
+                )
+            )
+            logger.debug(
+                f"Deleted thumbnail for trigger with data {trigger.data} in {camera}."
+            )
+        except Exception as e:
+            logger.error(e.with_traceback())
+            logger.error(
+                f"Failed to delete thumbnail for trigger with data {trigger.data} in {camera}"
+            )
+
+        return JSONResponse(
+            content={
+                "success": True,
+                "message": f"Trigger deleted successfully for {camera}:{name}",
+            },
+            status_code=200,
+        )
+
+    except Exception as e:
+        logger.error(e.with_traceback())
+        return JSONResponse(
+            content={
+                "success": False,
+                "message": "Error deleting trigger embedding",
+            },
+            status_code=500,
+        )
+
+
+@router.get(
+    "/triggers/status/{camera_name}",
+    response_model=dict,
+    dependencies=[Depends(require_role(["admin"]))],
+)
+def get_triggers_status(
+    camera_name: str,
+):
+    try:
+        # Fetch all triggers for the specified camera
+        triggers = Trigger.select().where(Trigger.camera == camera_name)
+
+        # Prepare the response with trigger status
+        status = {
+            trigger.name: {
+                "last_triggered": trigger.last_triggered.timestamp()
+                if trigger.last_triggered
+                else None,
+                "triggering_event_id": trigger.triggering_event_id
+                if trigger.triggering_event_id
+                else None,
+            }
+            for trigger in triggers
+        }
+
+        if not status:
+            return JSONResponse(
+                content={
+                    "success": False,
+                    "message": f"No triggers found for camera {camera_name}",
+                },
+                status_code=404,
+            )
+
+        return {"success": True, "triggers": status}
+    except Exception as ex:
+        logger.exception(ex)
+        return JSONResponse(
+            content=({"success": False, "message": "Error fetching trigger status"}),
+            status_code=400,
+        )
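The hunk above adds the full trigger lifecycle: create (POST /trigger/embedding, with camera and name passed as query parameters since they are plain function arguments on a body-taking route), update and delete (PUT/DELETE /trigger/embedding/{camera}/{name}), and per-camera status (GET /triggers/status/{camera_name}), all admin-only. A hedged usage sketch with Python requests; the base URL and the unauthenticated calls are assumptions for illustration, while the routes and the type/data/threshold body fields come from the diff itself:

    import requests

    BASE = "http://127.0.0.1:5000/api"  # assumed local API base; real deployments need auth

    # create a description trigger; camera/name ride along as query parameters
    requests.post(
        f"{BASE}/trigger/embedding",
        params={"camera": "front_door", "name": "red_car"},
        json={"type": "description", "data": "a red car in the driveway", "threshold": 0.7},
    )

    # update switches the trigger to a thumbnail sourced from an event id (invented id)
    requests.put(
        f"{BASE}/trigger/embedding/front_door/red_car",
        json={"type": "thumbnail", "data": "1700000000.123456-abcd12", "threshold": 0.7},
    )

    # check last_triggered / triggering_event_id per trigger, then clean up
    print(requests.get(f"{BASE}/triggers/status/front_door").json())
    requests.delete(f"{BASE}/trigger/embedding/front_door/red_car")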
@@ -1,8 +1,10 @@
 import logging
+import re
 from typing import Optional

 from fastapi import FastAPI, Request
 from fastapi.responses import JSONResponse
+from joserfc.jwk import OctKey
 from playhouse.sqliteq import SqliteQueueDatabase
 from slowapi import _rate_limit_exceeded_handler
 from slowapi.errors import RateLimitExceeded
@@ -26,6 +28,7 @@ from frigate.comms.event_metadata_updater import (
     EventMetadataPublisher,
 )
 from frigate.config import FrigateConfig
+from frigate.config.camera.updater import CameraConfigUpdatePublisher
 from frigate.embeddings import EmbeddingsContext
 from frigate.ptz.onvif import OnvifController
 from frigate.stats.emitter import StatsEmitter
@@ -57,6 +60,7 @@ def create_fastapi_app(
     onvif: OnvifController,
     stats_emitter: StatsEmitter,
     event_metadata_updater: EventMetadataPublisher,
+    config_publisher: CameraConfigUpdatePublisher,
 ):
     logger.info("Starting FastAPI app")
     app = FastAPI(
@@ -127,6 +131,27 @@ def create_fastapi_app(
     app.onvif = onvif
     app.stats_emitter = stats_emitter
     app.event_metadata_updater = event_metadata_updater
-    app.jwt_token = get_jwt_secret() if frigate_config.auth.enabled else None
+    app.config_publisher = config_publisher
+
+    if frigate_config.auth.enabled:
+        secret = get_jwt_secret()
+        key_bytes = None
+        if isinstance(secret, str):
+            # If the secret looks like hex (e.g., generated by secrets.token_hex), use raw bytes
+            if len(secret) % 2 == 0 and re.fullmatch(r"[0-9a-fA-F]+", secret or ""):
+                try:
+                    key_bytes = bytes.fromhex(secret)
+                except ValueError:
+                    key_bytes = secret.encode("utf-8")
+            else:
+                key_bytes = secret.encode("utf-8")
+        elif isinstance(secret, (bytes, bytearray)):
+            key_bytes = bytes(secret)
+        else:
+            key_bytes = str(secret).encode("utf-8")
+
+        app.jwt_token = OctKey.import_key(key_bytes)
+    else:
+        app.jwt_token = None
+
     return app
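The new auth block above normalizes whatever get_jwt_secret() returns into raw key bytes before handing it to joserfc: hex-looking strings (for example, output of secrets.token_hex) are decoded to their raw bytes, everything else is UTF-8 encoded. A standalone sketch of the same decision tree (pure Python, no Frigate imports; the function name is illustrative):

    import re

    def normalize_jwt_secret(secret) -> bytes:
        # even length + hex alphabet => treat as hex and use the raw bytes
        if isinstance(secret, str):
            if len(secret) % 2 == 0 and re.fullmatch(r"[0-9a-fA-F]+", secret):
                try:
                    return bytes.fromhex(secret)
                except ValueError:
                    return secret.encode("utf-8")
            return secret.encode("utf-8")
        if isinstance(secret, (bytes, bytearray)):
            return bytes(secret)
        return str(secret).encode("utf-8")

    assert normalize_jwt_secret("deadbeef") == b"\xde\xad\xbe\xef"
    assert normalize_jwt_secret("not-hex!") == b"not-hex!"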
@@ -8,6 +8,7 @@ import os
 import subprocess as sp
 import time
 from datetime import datetime, timedelta, timezone
+from functools import reduce
 from pathlib import Path as FilePath
 from typing import Any
 from urllib.parse import unquote
@@ -19,7 +20,7 @@ from fastapi import APIRouter, Path, Query, Request, Response
 from fastapi.params import Depends
 from fastapi.responses import FileResponse, JSONResponse, StreamingResponse
 from pathvalidate import sanitize_filename
-from peewee import DoesNotExist, fn
+from peewee import DoesNotExist, fn, operator
 from tzlocal import get_localzone_name

 from frigate.api.defs.query.media_query_parameters import (
@@ -27,6 +28,7 @@ from frigate.api.defs.query.media_query_parameters import (
     MediaEventsSnapshotQueryParams,
     MediaLatestFrameQueryParams,
     MediaMjpegFeedQueryParams,
+    MediaRecordingsAvailabilityQueryParams,
     MediaRecordingsSummaryQueryParams,
 )
 from frigate.api.defs.tags import Tags
@@ -139,6 +141,7 @@ def latest_frame(
         "zones": params.zones,
         "mask": params.mask,
         "motion_boxes": params.motion,
+        "paths": params.paths,
         "regions": params.regions,
     }
     quality = params.quality
@@ -542,6 +545,66 @@ def recordings(
     return JSONResponse(content=list(recordings))


+@router.get("/recordings/unavailable", response_model=list[dict])
+def no_recordings(params: MediaRecordingsAvailabilityQueryParams = Depends()):
+    """Get time ranges with no recordings."""
+    cameras = params.cameras
+    before = params.before or datetime.datetime.now().timestamp()
+    after = (
+        params.after
+        or (datetime.datetime.now() - datetime.timedelta(hours=1)).timestamp()
+    )
+    scale = params.scale
+
+    clauses = [(Recordings.start_time > after) & (Recordings.end_time < before)]
+    if cameras != "all":
+        camera_list = cameras.split(",")
+        clauses.append((Recordings.camera << camera_list))
+
+    # Get recording start times
+    data: list[Recordings] = (
+        Recordings.select(Recordings.start_time, Recordings.end_time)
+        .where(reduce(operator.and_, clauses))
+        .order_by(Recordings.start_time.asc())
+        .dicts()
+        .iterator()
+    )
+
+    # Convert recordings to list of (start, end) tuples
+    recordings = [(r["start_time"], r["end_time"]) for r in data]
+
+    # Generate all time segments
+    current = after
+    no_recording_segments = []
+    current_start = None
+
+    while current < before:
+        segment_end = current + scale
+        # Check if segment overlaps with any recording
+        has_recording = any(
+            start <= segment_end and end >= current for start, end in recordings
+        )
+        if not has_recording:
+            if current_start is None:
+                current_start = current  # Start a new gap
+        else:
+            if current_start is not None:
+                # End the current gap and append it
+                no_recording_segments.append(
+                    {"start_time": int(current_start), "end_time": int(current)}
+                )
+                current_start = None
+        current = segment_end

+    # Append the last gap if it exists
+    if current_start is not None:
+        no_recording_segments.append(
+            {"start_time": int(current_start), "end_time": int(before)}
+        )
+
+    return JSONResponse(content=no_recording_segments)
+
+
 @router.get(
     "/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4",
     description="For iOS devices, use the master.m3u8 HLS link instead of clip.mp4. Safari does not reliably process progressive mp4 files.",
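The /recordings/unavailable endpoint above walks the requested window in scale-second steps and coalesces consecutive uncovered segments into gap ranges. A worked example of the same loop on toy numbers (the data is invented; the segment logic is copied from the endpoint):

    recordings = [(0, 50), (130, 180)]  # (start_time, end_time) pairs as fetched above
    after, before, scale = 0, 180, 30

    gaps, current, current_start = [], after, None
    while current < before:
        segment_end = current + scale
        has_recording = any(s <= segment_end and e >= current for s, e in recordings)
        if not has_recording:
            if current_start is None:
                current_start = current
        elif current_start is not None:
            gaps.append({"start_time": int(current_start), "end_time": int(current)})
            current_start = None
        current = segment_end
    if current_start is not None:
        gaps.append({"start_time": int(current_start), "end_time": int(before)})

    print(gaps)  # [{'start_time': 60, 'end_time': 120}]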
@@ -21,7 +21,12 @@ router = APIRouter(tags=[Tags.notifications])

 @router.get("/notifications/pubkey")
 def get_vapid_pub_key(request: Request):
-    if not request.app.frigate_config.notifications.enabled:
+    config = request.app.frigate_config
+    notifications_enabled = config.notifications.enabled
+    camera_notifications_enabled = [
+        c for c in config.cameras.values() if c.enabled and c.notifications.enabled
+    ]
+    if not (notifications_enabled or camera_notifications_enabled):
         return JSONResponse(
             content=({"success": False, "message": "Notifications are not enabled."}),
             status_code=400,

@@ -6,7 +6,7 @@ from functools import reduce
 from pathlib import Path

 import pandas as pd
-from fastapi import APIRouter
+from fastapi import APIRouter, Request
 from fastapi.params import Depends
 from fastapi.responses import JSONResponse
 from peewee import Case, DoesNotExist, IntegrityError, fn, operator
@@ -26,6 +26,8 @@ from frigate.api.defs.response.review_response import (
     ReviewSummaryResponse,
 )
 from frigate.api.defs.tags import Tags
+from frigate.config import FrigateConfig
+from frigate.embeddings import EmbeddingsContext
 from frigate.models import Recordings, ReviewSegment, UserReviewStatus
 from frigate.review.types import SeverityEnum
 from frigate.util.builtin import get_tz_modifiers
@@ -606,3 +608,35 @@ async def set_not_reviewed(
         content=({"success": True, "message": f"Set Review {review_id} as not viewed"}),
         status_code=200,
     )
+
+
+@router.post(
+    "/review/summarize/start/{start_ts}/end/{end_ts}",
+    description="Use GenAI to summarize review items over a period of time.",
+)
+def generate_review_summary(request: Request, start_ts: float, end_ts: float):
+    config: FrigateConfig = request.app.frigate_config
+
+    if not config.genai.provider:
+        return JSONResponse(
+            content=(
+                {
+                    "success": False,
+                    "message": "GenAI must be configured to use this feature.",
+                }
+            ),
+            status_code=400,
+        )
+
+    context: EmbeddingsContext = request.app.embeddings
+    summary = context.generate_review_summary(start_ts, end_ts)
+
+    if summary:
+        return JSONResponse(
+            content=({"success": True, "summary": summary}), status_code=200
+        )
+    else:
+        return JSONResponse(
+            content=({"success": False, "message": "Failed to create summary."}),
+            status_code=500,
+        )
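The summarize route above takes plain Unix timestamps in the path and returns the GenAI summary inline. A hedged usage sketch (the base URL and the unauthenticated call are assumptions; the route shape comes from the diff):

    import time
    import requests

    BASE = "http://127.0.0.1:5000/api"  # assumed local API base

    end = time.time()
    start = end - 3600  # summarize the last hour of review items

    resp = requests.post(f"{BASE}/review/summarize/start/{start}/end/{end}")
    print(resp.json())  # {"success": True, "summary": "..."} when a GenAI provider is configured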
273
frigate/app.py

@@ -5,6 +5,7 @@ import os
 import secrets
 import shutil
 from multiprocessing import Queue
+from multiprocessing.managers import DictProxy, SyncManager
 from multiprocessing.synchronize import Event as MpEvent
 from pathlib import Path
 from typing import Optional
@@ -14,19 +15,20 @@ import uvicorn
 from peewee_migrate import Router
 from playhouse.sqlite_ext import SqliteExtDatabase

-import frigate.util as util
 from frigate.api.auth import hash_password
 from frigate.api.fastapi_app import create_fastapi_app
 from frigate.camera import CameraMetrics, PTZMetrics
+from frigate.camera.maintainer import CameraMaintainer
 from frigate.comms.base_communicator import Communicator
-from frigate.comms.config_updater import ConfigPublisher
 from frigate.comms.dispatcher import Dispatcher
 from frigate.comms.event_metadata_updater import EventMetadataPublisher
 from frigate.comms.inter_process import InterProcessCommunicator
 from frigate.comms.mqtt import MqttClient
+from frigate.comms.object_detector_signaler import DetectorProxy
 from frigate.comms.webpush import WebPushClient
 from frigate.comms.ws import WebSocketClient
 from frigate.comms.zmq_proxy import ZmqProxy
+from frigate.config.camera.updater import CameraConfigUpdatePublisher
 from frigate.config.config import FrigateConfig
 from frigate.const import (
     CACHE_DIR,
@@ -36,12 +38,12 @@ from frigate.const import (
     FACE_DIR,
     MODEL_CACHE_DIR,
     RECORD_DIR,
-    SHM_FRAMES_VAR,
     THUMB_DIR,
+    TRIGGER_DIR,
 )
 from frigate.data_processing.types import DataProcessorMetrics
 from frigate.db.sqlitevecq import SqliteVecQueueDatabase
-from frigate.embeddings import EmbeddingsContext, manage_embeddings
+from frigate.embeddings import EmbeddingProcess, EmbeddingsContext
 from frigate.events.audio import AudioProcessor
 from frigate.events.cleanup import EventCleanup
 from frigate.events.maintainer import EventProcessor
@@ -55,56 +57,58 @@ from frigate.models import (
     Regions,
     ReviewSegment,
     Timeline,
+    Trigger,
     User,
 )
 from frigate.object_detection.base import ObjectDetectProcess
-from frigate.output.output import output_frames
+from frigate.output.output import OutputProcess
 from frigate.ptz.autotrack import PtzAutoTrackerThread
 from frigate.ptz.onvif import OnvifController
 from frigate.record.cleanup import RecordingCleanup
 from frigate.record.export import migrate_exports
-from frigate.record.record import manage_recordings
+from frigate.record.record import RecordProcess
-from frigate.review.review import manage_review_segments
+from frigate.review.review import ReviewProcess
 from frigate.stats.emitter import StatsEmitter
 from frigate.stats.util import stats_init
 from frigate.storage import StorageMaintainer
 from frigate.timeline import TimelineProcessor
 from frigate.track.object_processing import TrackedObjectProcessor
 from frigate.util.builtin import empty_and_close_queue
-from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory
+from frigate.util.image import UntrackedSharedMemory
-from frigate.util.object import get_camera_regions_grid
 from frigate.util.services import set_file_limit
 from frigate.version import VERSION
-from frigate.video import capture_camera, track_camera
 from frigate.watchdog import FrigateWatchdog

 logger = logging.getLogger(__name__)


 class FrigateApp:
-    def __init__(self, config: FrigateConfig) -> None:
+    def __init__(
+        self, config: FrigateConfig, manager: SyncManager, stop_event: MpEvent
+    ) -> None:
+        self.metrics_manager = manager
         self.audio_process: Optional[mp.Process] = None
-        self.stop_event: MpEvent = mp.Event()
+        self.stop_event = stop_event
         self.detection_queue: Queue = mp.Queue()
         self.detectors: dict[str, ObjectDetectProcess] = {}
-        self.detection_out_events: dict[str, MpEvent] = {}
         self.detection_shms: list[mp.shared_memory.SharedMemory] = []
         self.log_queue: Queue = mp.Queue()
-        self.camera_metrics: dict[str, CameraMetrics] = {}
+        self.camera_metrics: DictProxy = self.metrics_manager.dict()
         self.embeddings_metrics: DataProcessorMetrics | None = (
-            DataProcessorMetrics()
+            DataProcessorMetrics(
+                self.metrics_manager, list(config.classification.custom.keys())
+            )
             if (
                 config.semantic_search.enabled
                 or config.lpr.enabled
                 or config.face_recognition.enabled
+                or len(config.classification.custom) > 0
             )
             else None
         )
         self.ptz_metrics: dict[str, PTZMetrics] = {}
         self.processes: dict[str, int] = {}
         self.embeddings: Optional[EmbeddingsContext] = None
-        self.region_grids: dict[str, list[list[dict[str, int]]]] = {}
-        self.frame_manager = SharedMemoryFrameManager()
         self.config = config

     def ensure_dirs(self) -> None:
@@ -121,6 +125,9 @@ class FrigateApp:
         if self.config.face_recognition.enabled:
             dirs.append(FACE_DIR)

+        if self.config.semantic_search.enabled:
+            dirs.append(TRIGGER_DIR)
+
         for d in dirs:
             if not os.path.exists(d) and not os.path.islink(d):
                 logger.info(f"Creating directory: {d}")
@@ -131,7 +138,7 @@ class FrigateApp:
     def init_camera_metrics(self) -> None:
         # create camera_metrics
         for camera_name in self.config.cameras.keys():
-            self.camera_metrics[camera_name] = CameraMetrics()
+            self.camera_metrics[camera_name] = CameraMetrics(self.metrics_manager)
             self.ptz_metrics[camera_name] = PTZMetrics(
                 autotracker_enabled=self.config.cameras[
                     camera_name
@@ -140,8 +147,16 @@ class FrigateApp:

     def init_queues(self) -> None:
         # Queue for cameras to push tracked objects to
+        # leaving room for 2 extra cameras to be added
         self.detected_frames_queue: Queue = mp.Queue(
-            maxsize=sum(camera.enabled for camera in self.config.cameras.values()) * 2
+            maxsize=(
+                sum(
+                    camera.enabled_in_config == True
+                    for camera in self.config.cameras.values()
+                )
+                + 2
+            )
+            * 2
         )

         # Queue for timeline events
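The resizing of detected_frames_queue above reserves headroom for cameras added at runtime: the bound moves from enabled cameras x 2 to (configured cameras + 2) x 2. A quick worked example with invented numbers:

    enabled_in_config = 5                      # cameras defined in the config
    old_maxsize = enabled_in_config * 2        # 10 slots
    new_maxsize = (enabled_in_config + 2) * 2  # 14 slots: room for 2 dynamically added cameras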
@@ -217,51 +232,24 @@ class FrigateApp:
         self.processes["go2rtc"] = proc.info["pid"]

     def init_recording_manager(self) -> None:
-        recording_process = util.Process(
-            target=manage_recordings,
-            name="recording_manager",
-            args=(self.config,),
-        )
-        recording_process.daemon = True
+        recording_process = RecordProcess(self.config, self.stop_event)
         self.recording_process = recording_process
         recording_process.start()
         self.processes["recording"] = recording_process.pid or 0
         logger.info(f"Recording process started: {recording_process.pid}")

     def init_review_segment_manager(self) -> None:
-        review_segment_process = util.Process(
-            target=manage_review_segments,
-            name="review_segment_manager",
-            args=(self.config,),
-        )
-        review_segment_process.daemon = True
+        review_segment_process = ReviewProcess(self.config, self.stop_event)
         self.review_segment_process = review_segment_process
         review_segment_process.start()
         self.processes["review_segment"] = review_segment_process.pid or 0
         logger.info(f"Review process started: {review_segment_process.pid}")

     def init_embeddings_manager(self) -> None:
-        genai_cameras = [
-            c for c in self.config.cameras.values() if c.enabled and c.genai.enabled
-        ]
-
-        if (
-            not self.config.semantic_search.enabled
-            and not genai_cameras
-            and not self.config.lpr.enabled
-            and not self.config.face_recognition.enabled
-        ):
-            return
-
-        embedding_process = util.Process(
-            target=manage_embeddings,
-            name="embeddings_manager",
-            args=(
-                self.config,
-                self.embeddings_metrics,
-            ),
-        )
-        embedding_process.daemon = True
+        # always start the embeddings process
+        embedding_process = EmbeddingProcess(
+            self.config, self.embeddings_metrics, self.stop_event
+        )
         self.embedding_process = embedding_process
         embedding_process.start()
         self.processes["embeddings"] = embedding_process.pid or 0
@@ -278,7 +266,9 @@ class FrigateApp:
                 "synchronous": "NORMAL",  # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous
             },
             timeout=max(
-                60, 10 * len([c for c in self.config.cameras.values() if c.enabled])
+                60,
+                10
+                * len([c for c in self.config.cameras.values() if c.enabled_in_config]),
             ),
             load_vec_extension=self.config.semantic_search.enabled,
         )
@@ -292,6 +282,7 @@ class FrigateApp:
             ReviewSegment,
             Timeline,
             User,
+            Trigger,
         ]
         self.db.bind(models)

@@ -307,24 +298,15 @@ class FrigateApp:
         migrate_exports(self.config.ffmpeg, list(self.config.cameras.keys()))

     def init_embeddings_client(self) -> None:
-        genai_cameras = [
-            c for c in self.config.cameras.values() if c.enabled and c.genai.enabled
-        ]
-
-        if (
-            self.config.semantic_search.enabled
-            or self.config.lpr.enabled
-            or genai_cameras
-            or self.config.face_recognition.enabled
-        ):
-            # Create a client for other processes to use
-            self.embeddings = EmbeddingsContext(self.db)
+        # Create a client for other processes to use
+        self.embeddings = EmbeddingsContext(self.db)

     def init_inter_process_communicator(self) -> None:
         self.inter_process_communicator = InterProcessCommunicator()
-        self.inter_config_updater = ConfigPublisher()
+        self.inter_config_updater = CameraConfigUpdatePublisher()
         self.event_metadata_updater = EventMetadataPublisher()
         self.inter_zmq_proxy = ZmqProxy()
+        self.detection_proxy = DetectorProxy()

     def init_onvif(self) -> None:
         self.onvif_controller = OnvifController(self.config, self.ptz_metrics)
@@ -357,8 +339,6 @@ class FrigateApp:

     def start_detectors(self) -> None:
-        for name in self.config.cameras.keys():
-            self.detection_out_events[name] = mp.Event()

         try:
             largest_frame = max(
                 [
@@ -390,8 +370,10 @@ class FrigateApp:
             self.detectors[name] = ObjectDetectProcess(
                 name,
                 self.detection_queue,
-                self.detection_out_events,
+                list(self.config.cameras.keys()),
+                self.config,
                 detector_config,
+                self.stop_event,
             )

     def start_ptz_autotracker(self) -> None:
@@ -415,79 +397,22 @@ class FrigateApp:
         self.detected_frames_processor.start()

     def start_video_output_processor(self) -> None:
-        output_processor = util.Process(
-            target=output_frames,
-            name="output_processor",
-            args=(self.config,),
-        )
-        output_processor.daemon = True
+        output_processor = OutputProcess(self.config, self.stop_event)
         self.output_processor = output_processor
         output_processor.start()
         logger.info(f"Output process started: {output_processor.pid}")

-    def init_historical_regions(self) -> None:
-        # delete region grids for removed or renamed cameras
-        cameras = list(self.config.cameras.keys())
-        Regions.delete().where(~(Regions.camera << cameras)).execute()
-
-        # create or update region grids for each camera
-        for camera in self.config.cameras.values():
-            assert camera.name is not None
-            self.region_grids[camera.name] = get_camera_regions_grid(
-                camera.name,
-                camera.detect,
-                max(self.config.model.width, self.config.model.height),
-            )
-
-    def start_camera_processors(self) -> None:
-        for name, config in self.config.cameras.items():
-            if not self.config.cameras[name].enabled_in_config:
-                logger.info(f"Camera processor not started for disabled camera {name}")
-                continue
-
-            camera_process = util.Process(
-                target=track_camera,
-                name=f"camera_processor:{name}",
-                args=(
-                    name,
-                    config,
-                    self.config.model,
-                    self.config.model.merged_labelmap,
-                    self.detection_queue,
-                    self.detection_out_events[name],
-                    self.detected_frames_queue,
-                    self.camera_metrics[name],
-                    self.ptz_metrics[name],
-                    self.region_grids[name],
-                ),
-                daemon=True,
-            )
-            self.camera_metrics[name].process = camera_process
-            camera_process.start()
-            logger.info(f"Camera processor started for {name}: {camera_process.pid}")
-
-    def start_camera_capture_processes(self) -> None:
-        shm_frame_count = self.shm_frame_count()
-
-        for name, config in self.config.cameras.items():
-            if not self.config.cameras[name].enabled_in_config:
-                logger.info(f"Capture process not started for disabled camera {name}")
-                continue
-
-            # pre-create shms
-            for i in range(shm_frame_count):
-                frame_size = config.frame_shape_yuv[0] * config.frame_shape_yuv[1]
-                self.frame_manager.create(f"{config.name}_frame{i}", frame_size)
-
-            capture_process = util.Process(
-                target=capture_camera,
-                name=f"camera_capture:{name}",
-                args=(name, config, shm_frame_count, self.camera_metrics[name]),
-            )
-            capture_process.daemon = True
-            self.camera_metrics[name].capture_process = capture_process
-            capture_process.start()
-            logger.info(f"Capture process started for {name}: {capture_process.pid}")
+    def start_camera_processor(self) -> None:
+        self.camera_maintainer = CameraMaintainer(
+            self.config,
+            self.detection_queue,
+            self.detected_frames_queue,
+            self.camera_metrics,
+            self.ptz_metrics,
+            self.stop_event,
+            self.metrics_manager,
+        )
+        self.camera_maintainer.start()

     def start_audio_processor(self) -> None:
         audio_cameras = [
@@ -497,7 +422,9 @@ class FrigateApp:
         ]

         if audio_cameras:
-            self.audio_process = AudioProcessor(audio_cameras, self.camera_metrics)
+            self.audio_process = AudioProcessor(
+                self.config, audio_cameras, self.camera_metrics, self.stop_event
+            )
             self.audio_process.start()
             self.processes["audio_detector"] = self.audio_process.pid or 0

@@ -545,45 +472,6 @@ class FrigateApp:
         self.frigate_watchdog = FrigateWatchdog(self.detectors, self.stop_event)
         self.frigate_watchdog.start()

-    def shm_frame_count(self) -> int:
-        total_shm = round(shutil.disk_usage("/dev/shm").total / pow(2, 20), 1)
-
-        # required for log files + nginx cache
-        min_req_shm = 40 + 10
-
-        if self.config.birdseye.restream:
-            min_req_shm += 8
-
-        available_shm = total_shm - min_req_shm
-        cam_total_frame_size = 0.0
-
-        for camera in self.config.cameras.values():
-            if camera.enabled and camera.detect.width and camera.detect.height:
-                cam_total_frame_size += round(
-                    (camera.detect.width * camera.detect.height * 1.5 + 270480)
-                    / 1048576,
-                    1,
-                )
-
-        if cam_total_frame_size == 0.0:
-            return 0
-
-        shm_frame_count = min(
-            int(os.environ.get(SHM_FRAMES_VAR, "50")),
-            int(available_shm / (cam_total_frame_size)),
-        )
-
-        logger.debug(
-            f"Calculated total camera size {available_shm} / {cam_total_frame_size} :: {shm_frame_count} frames for each camera in SHM"
-        )
-
-        if shm_frame_count < 20:
-            logger.warning(
-                f"The current SHM size of {total_shm}MB is too small, recommend increasing it to at least {round(min_req_shm + cam_total_frame_size * 20)}MB."
-            )
-
-        return shm_frame_count
-
     def init_auth(self) -> None:
         if self.config.auth.enabled:
             if User.select().count() == 0:
@@ -644,19 +532,17 @@ class FrigateApp:
         self.init_recording_manager()
         self.init_review_segment_manager()
         self.init_go2rtc()
-        self.start_detectors()
         self.init_embeddings_manager()
         self.bind_database()
         self.check_db_data_migrations()
         self.init_inter_process_communicator()
+        self.start_detectors()
         self.init_dispatcher()
         self.init_embeddings_client()
         self.start_video_output_processor()
         self.start_ptz_autotracker()
-        self.init_historical_regions()
         self.start_detected_frames_processor()
-        self.start_camera_processors()
-        self.start_camera_capture_processes()
+        self.start_camera_processor()
         self.start_audio_processor()
         self.start_storage_maintainer()
         self.start_stats_emitter()
@@ -679,6 +565,7 @@ class FrigateApp:
                 self.onvif_controller,
                 self.stats_emitter,
                 self.event_metadata_updater,
+                self.inter_config_updater,
             ),
             host="127.0.0.1",
             port=5001,
@@ -712,24 +599,6 @@ class FrigateApp:
         if self.onvif_controller:
             self.onvif_controller.close()

-        # ensure the capture processes are done
-        for camera, metrics in self.camera_metrics.items():
-            capture_process = metrics.capture_process
-            if capture_process is not None:
-                logger.info(f"Waiting for capture process for {camera} to stop")
-                capture_process.terminate()
-                capture_process.join()
-
-        # ensure the camera processors are done
-        for camera, metrics in self.camera_metrics.items():
-            camera_process = metrics.process
-            if camera_process is not None:
-                logger.info(f"Waiting for process for {camera} to stop")
-                camera_process.terminate()
-                camera_process.join()
-                logger.info(f"Closing frame queue for {camera}")
-                empty_and_close_queue(metrics.frame_queue)
-
         # ensure the detectors are done
         for detector in self.detectors.values():
             detector.stop()
@@ -773,14 +642,12 @@ class FrigateApp:
         self.inter_config_updater.stop()
         self.event_metadata_updater.stop()
         self.inter_zmq_proxy.stop()
+        self.detection_proxy.stop()

-        self.frame_manager.cleanup()
         while len(self.detection_shms) > 0:
             shm = self.detection_shms.pop()
             shm.close()
             shm.unlink()

-        # exit the mp Manager process
         _stop_logging()
-        os._exit(os.EX_OK)
+        self.metrics_manager.shutdown()

@@ -1,7 +1,7 @@
 import multiprocessing as mp
+from multiprocessing.managers import SyncManager
 from multiprocessing.sharedctypes import Synchronized
 from multiprocessing.synchronize import Event
-from typing import Optional


 class CameraMetrics:
@@ -16,25 +16,25 @@ class CameraMetrics:

     frame_queue: mp.Queue

-    process: Optional[mp.Process]
-    capture_process: Optional[mp.Process]
+    process_pid: Synchronized
+    capture_process_pid: Synchronized
     ffmpeg_pid: Synchronized

-    def __init__(self):
-        self.camera_fps = mp.Value("d", 0)
-        self.detection_fps = mp.Value("d", 0)
-        self.detection_frame = mp.Value("d", 0)
-        self.process_fps = mp.Value("d", 0)
-        self.skipped_fps = mp.Value("d", 0)
-        self.read_start = mp.Value("d", 0)
-        self.audio_rms = mp.Value("d", 0)
-        self.audio_dBFS = mp.Value("d", 0)
-
-        self.frame_queue = mp.Queue(maxsize=2)
-
-        self.process = None
-        self.capture_process = None
-        self.ffmpeg_pid = mp.Value("i", 0)
+    def __init__(self, manager: SyncManager):
+        self.camera_fps = manager.Value("d", 0)
+        self.detection_fps = manager.Value("d", 0)
+        self.detection_frame = manager.Value("d", 0)
+        self.process_fps = manager.Value("d", 0)
+        self.skipped_fps = manager.Value("d", 0)
+        self.read_start = manager.Value("d", 0)
+        self.audio_rms = manager.Value("d", 0)
+        self.audio_dBFS = manager.Value("d", 0)
+
+        self.frame_queue = manager.Queue(maxsize=2)
+
+        self.process_pid = manager.Value("i", 0)
+        self.capture_process_pid = manager.Value("i", 0)
+        self.ffmpeg_pid = manager.Value("i", 0)


 class PTZMetrics:
|||||||
@ -3,7 +3,7 @@
|
|||||||
from collections import Counter
|
from collections import Counter
|
||||||
from typing import Any, Callable
|
from typing import Any, Callable
|
||||||
|
|
||||||
from frigate.config.config import FrigateConfig
|
from frigate.config import CameraConfig, FrigateConfig
|
||||||
|
|
||||||
|
|
||||||
class CameraActivityManager:
|
class CameraActivityManager:
|
||||||
@ -23,26 +23,33 @@ class CameraActivityManager:
|
|||||||
if not camera_config.enabled_in_config:
|
if not camera_config.enabled_in_config:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
self.last_camera_activity[camera_config.name] = {}
|
self.__init_camera(camera_config)
|
||||||
self.camera_all_object_counts[camera_config.name] = Counter()
|
|
||||||
self.camera_active_object_counts[camera_config.name] = Counter()
|
|
||||||
|
|
||||||
for zone, zone_config in camera_config.zones.items():
|
def __init_camera(self, camera_config: CameraConfig) -> None:
|
||||||
if zone not in self.all_zone_labels:
|
self.last_camera_activity[camera_config.name] = {}
|
||||||
self.zone_all_object_counts[zone] = Counter()
|
self.camera_all_object_counts[camera_config.name] = Counter()
|
||||||
self.zone_active_object_counts[zone] = Counter()
|
self.camera_active_object_counts[camera_config.name] = Counter()
|
||||||
self.all_zone_labels[zone] = set()
|
|
||||||
|
|
||||||
self.all_zone_labels[zone].update(
|
for zone, zone_config in camera_config.zones.items():
|
||||||
zone_config.objects
|
if zone not in self.all_zone_labels:
|
||||||
if zone_config.objects
|
self.zone_all_object_counts[zone] = Counter()
|
||||||
else camera_config.objects.track
|
self.zone_active_object_counts[zone] = Counter()
|
||||||
)
|
self.all_zone_labels[zone] = set()
|
||||||
|
|
||||||
|
self.all_zone_labels[zone].update(
|
||||||
|
zone_config.objects
|
||||||
|
if zone_config.objects
|
||||||
|
else camera_config.objects.track
|
||||||
|
)
|
||||||
|
|
||||||
def update_activity(self, new_activity: dict[str, dict[str, Any]]) -> None:
|
def update_activity(self, new_activity: dict[str, dict[str, Any]]) -> None:
|
||||||
all_objects: list[dict[str, Any]] = []
|
all_objects: list[dict[str, Any]] = []
|
||||||
|
|
||||||
for camera in new_activity.keys():
|
for camera in new_activity.keys():
|
||||||
|
# handle cameras that were added dynamically
|
||||||
|
if camera not in self.camera_all_object_counts:
|
||||||
|
self.__init_camera(self.config.cameras[camera])
|
||||||
|
|
||||||
new_objects = new_activity[camera].get("objects", [])
|
new_objects = new_activity[camera].get("objects", [])
|
||||||
all_objects.extend(new_objects)
|
all_objects.extend(new_objects)
|
||||||
|
|
||||||
|
250 frigate/camera/maintainer.py (new file)
@@ -0,0 +1,250 @@
+"""Create and maintain camera processes / management."""
+
+import logging
+import multiprocessing as mp
+import os
+import shutil
+import threading
+from multiprocessing import Queue
+from multiprocessing.managers import DictProxy, SyncManager
+from multiprocessing.synchronize import Event as MpEvent
+
+from frigate.camera import CameraMetrics, PTZMetrics
+from frigate.config import FrigateConfig
+from frigate.config.camera import CameraConfig
+from frigate.config.camera.updater import (
+    CameraConfigUpdateEnum,
+    CameraConfigUpdateSubscriber,
+)
+from frigate.const import SHM_FRAMES_VAR
+from frigate.models import Regions
+from frigate.util.builtin import empty_and_close_queue
+from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory
+from frigate.util.object import get_camera_regions_grid
+from frigate.video import CameraCapture, CameraTracker
+
+logger = logging.getLogger(__name__)
+
+
+class CameraMaintainer(threading.Thread):
+    def __init__(
+        self,
+        config: FrigateConfig,
+        detection_queue: Queue,
+        detected_frames_queue: Queue,
+        camera_metrics: DictProxy,
+        ptz_metrics: dict[str, PTZMetrics],
+        stop_event: MpEvent,
+        metrics_manager: SyncManager,
+    ):
+        super().__init__(name="camera_processor")
+        self.config = config
+        self.detection_queue = detection_queue
+        self.detected_frames_queue = detected_frames_queue
+        self.stop_event = stop_event
+        self.camera_metrics = camera_metrics
+        self.ptz_metrics = ptz_metrics
+        self.frame_manager = SharedMemoryFrameManager()
+        self.region_grids: dict[str, list[list[dict[str, int]]]] = {}
+        self.update_subscriber = CameraConfigUpdateSubscriber(
+            self.config,
+            {},
+            [
+                CameraConfigUpdateEnum.add,
+                CameraConfigUpdateEnum.remove,
+            ],
+        )
+        self.shm_count = self.__calculate_shm_frame_count()
+        self.camera_processes: dict[str, mp.Process] = {}
+        self.capture_processes: dict[str, mp.Process] = {}
+        self.metrics_manager = metrics_manager
+
+    def __init_historical_regions(self) -> None:
+        # delete region grids for removed or renamed cameras
+        cameras = list(self.config.cameras.keys())
+        Regions.delete().where(~(Regions.camera << cameras)).execute()
+
+        # create or update region grids for each camera
+        for camera in self.config.cameras.values():
+            assert camera.name is not None
+            self.region_grids[camera.name] = get_camera_regions_grid(
+                camera.name,
+                camera.detect,
+                max(self.config.model.width, self.config.model.height),
+            )
+
+    def __calculate_shm_frame_count(self) -> int:
+        total_shm = round(shutil.disk_usage("/dev/shm").total / pow(2, 20), 1)
+
+        # required for log files + nginx cache
+        min_req_shm = 40 + 10
+
+        if self.config.birdseye.restream:
+            min_req_shm += 8
+
+        available_shm = total_shm - min_req_shm
+        cam_total_frame_size = 0.0
+
+        for camera in self.config.cameras.values():
+            if (
+                camera.enabled_in_config
+                and camera.detect.width
+                and camera.detect.height
+            ):
+                cam_total_frame_size += round(
+                    (camera.detect.width * camera.detect.height * 1.5 + 270480)
+                    / 1048576,
+                    1,
+                )
+
+        # leave room for 2 cameras that are added dynamically, if a user wants to add more cameras they may need to increase the SHM size and restart after adding them.
+        cam_total_frame_size += 2 * round(
+            (1280 * 720 * 1.5 + 270480) / 1048576,
+            1,
+        )
+
+        if cam_total_frame_size == 0.0:
+            return 0
+
+        shm_frame_count = min(
+            int(os.environ.get(SHM_FRAMES_VAR, "50")),
+            int(available_shm / (cam_total_frame_size)),
+        )
+
+        logger.debug(
+            f"Calculated total camera size {available_shm} / {cam_total_frame_size} :: {shm_frame_count} frames for each camera in SHM"
+        )
+
+        if shm_frame_count < 20:
+            logger.warning(
+                f"The current SHM size of {total_shm}MB is too small, recommend increasing it to at least {round(min_req_shm + cam_total_frame_size * 20)}MB."
+            )
+
+        return shm_frame_count
+
+    def __start_camera_processor(
+        self, name: str, config: CameraConfig, runtime: bool = False
+    ) -> None:
+        if not config.enabled_in_config:
+            logger.info(f"Camera processor not started for disabled camera {name}")
+            return
+
+        if runtime:
+            self.camera_metrics[name] = CameraMetrics(self.metrics_manager)
+            self.ptz_metrics[name] = PTZMetrics(autotracker_enabled=False)
+            self.region_grids[name] = get_camera_regions_grid(
+                name,
+                config.detect,
+                max(self.config.model.width, self.config.model.height),
+            )
+
+            try:
+                largest_frame = max(
+                    [
+                        det.model.height * det.model.width * 3
+                        if det.model is not None
+                        else 320
+                        for det in self.config.detectors.values()
+                    ]
+                )
+                UntrackedSharedMemory(name=f"out-{name}", create=True, size=20 * 6 * 4)
+                UntrackedSharedMemory(
+                    name=name,
+                    create=True,
+                    size=largest_frame,
+                )
+            except FileExistsError:
+                pass
+
+        camera_process = CameraTracker(
+            config,
+            self.config.model,
+            self.config.model.merged_labelmap,
+            self.detection_queue,
+            self.detected_frames_queue,
+            self.camera_metrics[name],
+            self.ptz_metrics[name],
+            self.region_grids[name],
+            self.stop_event,
+        )
+        self.camera_processes[config.name] = camera_process
+        camera_process.start()
+        self.camera_metrics[config.name].process_pid.value = camera_process.pid
+        logger.info(f"Camera processor started for {config.name}: {camera_process.pid}")
+
+    def __start_camera_capture(
+        self, name: str, config: CameraConfig, runtime: bool = False
+    ) -> None:
+        if not config.enabled_in_config:
+            logger.info(f"Capture process not started for disabled camera {name}")
+            return
+
+        # pre-create shms
+        count = 10 if runtime else self.shm_count
+        for i in range(count):
+            frame_size = config.frame_shape_yuv[0] * config.frame_shape_yuv[1]
+            self.frame_manager.create(f"{config.name}_frame{i}", frame_size)
+
+        capture_process = CameraCapture(
+            config, count, self.camera_metrics[name], self.stop_event
+        )
+        capture_process.daemon = True
+        self.capture_processes[name] = capture_process
+        capture_process.start()
+        self.camera_metrics[name].capture_process_pid.value = capture_process.pid
+        logger.info(f"Capture process started for {name}: {capture_process.pid}")
+
+    def __stop_camera_capture_process(self, camera: str) -> None:
+        capture_process = self.capture_processes[camera]
+        if capture_process is not None:
+            logger.info(f"Waiting for capture process for {camera} to stop")
+            capture_process.terminate()
+            capture_process.join()
+
+    def __stop_camera_process(self, camera: str) -> None:
+        camera_process = self.camera_processes[camera]
+        if camera_process is not None:
+            logger.info(f"Waiting for process for {camera} to stop")
+            camera_process.terminate()
+            camera_process.join()
+            logger.info(f"Closing frame queue for {camera}")
+            empty_and_close_queue(self.camera_metrics[camera].frame_queue)
+
+    def run(self):
+        self.__init_historical_regions()
+
+        # start camera processes
+        for camera, config in self.config.cameras.items():
+            self.__start_camera_processor(camera, config)
+            self.__start_camera_capture(camera, config)
+
+        while not self.stop_event.wait(1):
+            updates = self.update_subscriber.check_for_updates()
+
+            for update_type, updated_cameras in updates.items():
+                if update_type == CameraConfigUpdateEnum.add.name:
+                    for camera in updated_cameras:
+                        self.__start_camera_processor(
+                            camera,
+                            self.update_subscriber.camera_configs[camera],
+                            runtime=True,
+                        )
+                        self.__start_camera_capture(
+                            camera,
+                            self.update_subscriber.camera_configs[camera],
+                            runtime=True,
+                        )
+                elif update_type == CameraConfigUpdateEnum.remove.name:
+                    self.__stop_camera_capture_process(camera)
+                    self.__stop_camera_process(camera)
+
+        # ensure the capture processes are done
+        for camera in self.camera_processes.keys():
+            self.__stop_camera_capture_process(camera)
+
+        # ensure the camera processors are done
+        for camera in self.capture_processes.keys():
+            self.__stop_camera_process(camera)
+
+        self.update_subscriber.stop()
+        self.frame_manager.cleanup()
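
To make __calculate_shm_frame_count concrete, a rough worked example; the 1.5 factor matches the 1.5 bytes per pixel of YUV420 frames and 270480 is the fixed per-frame overhead the code adds, while the 256 MB /dev/shm size below is a made-up input:

    total_shm = 256.0                    # MB in /dev/shm (assumed)
    min_req_shm = 40 + 10                # logs + nginx cache, per the code above
    available_shm = total_shm - min_req_shm

    # one hypothetical 1280x720 camera, about 1.6 MB per YUV420 frame
    frame_mb = round((1280 * 720 * 1.5 + 270480) / 1048576, 1)
    cam_total = frame_mb + 2 * frame_mb  # plus headroom for 2 dynamic cameras

    shm_frame_count = min(50, int(available_shm / cam_total))  # 50 is the default cap
    print(shm_frame_count)               # 42 frames per camera with these numbers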
@@ -54,7 +54,7 @@ class CameraState:
         self.ptz_autotracker_thread = ptz_autotracker_thread
         self.prev_enabled = self.camera_config.enabled
 
-    def get_current_frame(self, draw_options: dict[str, Any] = {}):
+    def get_current_frame(self, draw_options: dict[str, Any] = {}) -> np.ndarray:
         with self.current_frame_lock:
             frame_copy = np.copy(self._current_frame)
             frame_time = self.current_frame_time
@@ -228,12 +228,51 @@ class CameraState:
                     position=self.camera_config.timestamp_style.position,
                 )
 
+            if draw_options.get("paths"):
+                for obj in tracked_objects.values():
+                    if obj["frame_time"] == frame_time and obj["path_data"]:
+                        color = self.config.model.colormap.get(
+                            obj["label"], (255, 255, 255)
+                        )
+
+                        path_points = [
+                            (
+                                int(point[0][0] * self.camera_config.detect.width),
+                                int(point[0][1] * self.camera_config.detect.height),
+                            )
+                            for point in obj["path_data"]
+                        ]
+
+                        for point in path_points:
+                            cv2.circle(frame_copy, point, 5, color, -1)
+
+                        for i in range(1, len(path_points)):
+                            cv2.line(
+                                frame_copy,
+                                path_points[i - 1],
+                                path_points[i],
+                                color,
+                                2,
+                            )
+
+                        bottom_center = (
+                            int((obj["box"][0] + obj["box"][2]) / 2),
+                            int(obj["box"][3]),
+                        )
+                        cv2.line(
+                            frame_copy,
+                            path_points[-1],
+                            bottom_center,
+                            color,
+                            2,
+                        )
+
         return frame_copy
 
     def finished(self, obj_id):
         del self.tracked_objects[obj_id]
 
-    def on(self, event_type: str, callback: Callable[[dict], None]):
+    def on(self, event_type: str, callback: Callable):
         self.callbacks[event_type].append(callback)
 
     def update(
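
The new path drawing in get_current_frame appears to assume each path_data entry carries a normalized (x, y) position at index 0, which is why every point is scaled by the detect resolution before cv2 draws it. A tiny sketch of that conversion with made-up numbers (the second tuple element standing in for a timestamp is an assumption):

    detect_width, detect_height = 1280, 720  # hypothetical detect resolution

    # each entry is ((x_norm, y_norm), frame_time), matching the indexing above
    path_data = [((0.25, 0.50), 1000.0), ((0.30, 0.55), 1001.0)]

    path_points = [
        (int(p[0][0] * detect_width), int(p[0][1] * detect_height))
        for p in path_data
    ]
    print(path_points)  # [(320, 360), (384, 396)]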
@@ -1,8 +1,9 @@
 """Facilitates communication between processes."""
 
 import multiprocessing as mp
+from _pickle import UnpicklingError
 from multiprocessing.synchronize import Event as MpEvent
-from typing import Any, Optional
+from typing import Any
 
 import zmq
 
@@ -32,7 +33,7 @@ class ConfigPublisher:
 class ConfigSubscriber:
     """Simplifies receiving an updated config."""
 
-    def __init__(self, topic: str, exact=False) -> None:
+    def __init__(self, topic: str, exact: bool = False) -> None:
         self.topic = topic
         self.exact = exact
         self.context = zmq.Context()
@@ -40,7 +41,7 @@ class ConfigSubscriber:
         self.socket.setsockopt_string(zmq.SUBSCRIBE, topic)
         self.socket.connect(SOCKET_PUB_SUB)
 
-    def check_for_update(self) -> Optional[tuple[str, Any]]:
+    def check_for_update(self) -> tuple[str, Any] | tuple[None, None]:
         """Returns updated config or None if no update."""
         try:
             topic = self.socket.recv_string(flags=zmq.NOBLOCK)
@@ -50,7 +51,7 @@ class ConfigSubscriber:
                 return (topic, obj)
             else:
                 return (None, None)
-        except zmq.ZMQError:
+        except (zmq.ZMQError, UnicodeDecodeError, UnpicklingError):
            return (None, None)
 
     def stop(self) -> None:
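
Widening the except clause matters because a malformed message can make recv_string or unpickling blow up mid-poll, not just raise the empty-queue ZMQError. A self-contained sketch of the same tolerance using plain pickle (no sockets involved):

    import pickle
    from _pickle import UnpicklingError

    def try_decode(raw: bytes):
        # treat undecodable payloads as "no update" instead of crashing the poller
        try:
            return pickle.loads(raw)
        except (UnicodeDecodeError, UnpicklingError):
            return None

    print(try_decode(pickle.dumps({"detect": True})))  # {'detect': True}
    print(try_decode(b"not a pickle"))                 # None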
@@ -1,7 +1,7 @@
 """Facilitates communication between processes."""
 
 from enum import Enum
-from typing import Any, Optional
+from typing import Any
 
 from .zmq_proxy import Publisher, Subscriber
 
@@ -19,8 +19,7 @@ class DetectionPublisher(Publisher):
 
     topic_base = "detection/"
 
-    def __init__(self, topic: DetectionTypeEnum) -> None:
-        topic = topic.value
+    def __init__(self, topic: str) -> None:
         super().__init__(topic)
 
 
@@ -29,16 +28,15 @@ class DetectionSubscriber(Subscriber):
 
     topic_base = "detection/"
 
-    def __init__(self, topic: DetectionTypeEnum) -> None:
-        topic = topic.value
+    def __init__(self, topic: str) -> None:
         super().__init__(topic)
 
     def check_for_update(
-        self, timeout: float = None
+        self, timeout: float | None = None
-    ) -> Optional[tuple[DetectionTypeEnum, Any]]:
+    ) -> tuple[str, Any] | tuple[None, None] | None:
         return super().check_for_update(timeout)
 
     def _return_object(self, topic: str, payload: Any) -> Any:
         if payload is None:
             return (None, None)
-        return (DetectionTypeEnum[topic[len(self.topic_base) :]], payload)
+        return (topic[len(self.topic_base) :], payload)
@@ -3,24 +3,30 @@
 import datetime
 import json
 import logging
-from typing import Any, Callable, Optional
+from typing import Any, Callable, Optional, cast
 
 from frigate.camera import PTZMetrics
 from frigate.camera.activity_manager import CameraActivityManager
 from frigate.comms.base_communicator import Communicator
-from frigate.comms.config_updater import ConfigPublisher
 from frigate.comms.webpush import WebPushClient
 from frigate.config import BirdseyeModeEnum, FrigateConfig
+from frigate.config.camera.updater import (
+    CameraConfigUpdateEnum,
+    CameraConfigUpdatePublisher,
+    CameraConfigUpdateTopic,
+)
 from frigate.const import (
     CLEAR_ONGOING_REVIEW_SEGMENTS,
     INSERT_MANY_RECORDINGS,
     INSERT_PREVIEW,
     NOTIFICATION_TEST,
     REQUEST_REGION_GRID,
+    UPDATE_BIRDSEYE_LAYOUT,
     UPDATE_CAMERA_ACTIVITY,
     UPDATE_EMBEDDINGS_REINDEX_PROGRESS,
     UPDATE_EVENT_DESCRIPTION,
     UPDATE_MODEL_STATE,
+    UPDATE_REVIEW_DESCRIPTION,
     UPSERT_REVIEW_SEGMENT,
 )
 from frigate.models import Event, Previews, Recordings, ReviewSegment
@@ -38,7 +44,7 @@ class Dispatcher:
     def __init__(
         self,
         config: FrigateConfig,
-        config_updater: ConfigPublisher,
+        config_updater: CameraConfigUpdatePublisher,
         onvif: OnvifController,
         ptz_metrics: dict[str, PTZMetrics],
         communicators: list[Communicator],
@@ -49,11 +55,12 @@ class Dispatcher:
         self.ptz_metrics = ptz_metrics
         self.comms = communicators
         self.camera_activity = CameraActivityManager(config, self.publish)
-        self.model_state = {}
-        self.embeddings_reindex = {}
+        self.model_state: dict[str, ModelStatusTypesEnum] = {}
+        self.embeddings_reindex: dict[str, Any] = {}
+        self.birdseye_layout: dict[str, Any] = {}
         self._camera_settings_handlers: dict[str, Callable] = {
             "audio": self._on_audio_command,
+            "audio_transcription": self._on_audio_transcription_command,
             "detect": self._on_detect_command,
             "enabled": self._on_enabled_command,
             "improve_contrast": self._on_motion_improve_contrast_command,
@@ -68,6 +75,8 @@ class Dispatcher:
             "birdseye_mode": self._on_birdseye_mode_command,
             "review_alerts": self._on_alerts_command,
             "review_detections": self._on_detections_command,
+            "object_descriptions": self._on_object_description_command,
+            "review_descriptions": self._on_review_description_command,
         }
         self._global_settings_handlers: dict[str, Callable] = {
             "notifications": self._on_global_notification_command,
@@ -80,10 +89,12 @@ class Dispatcher:
             (comm for comm in communicators if isinstance(comm, WebPushClient)), None
         )
 
-    def _receive(self, topic: str, payload: str) -> Optional[Any]:
+    def _receive(self, topic: str, payload: Any) -> Optional[Any]:
         """Handle receiving of payload from communicators."""
 
-        def handle_camera_command(command_type, camera_name, command, payload):
+        def handle_camera_command(
+            command_type: str, camera_name: str, command: str, payload: str
+        ) -> None:
             try:
                 if command_type == "set":
                     self._camera_settings_handlers[command](camera_name, payload)
@@ -92,13 +103,13 @@ class Dispatcher:
             except KeyError:
                 logger.error(f"Invalid command type or handler: {command_type}")
 
-        def handle_restart():
+        def handle_restart() -> None:
            restart_frigate()
 
-        def handle_insert_many_recordings():
+        def handle_insert_many_recordings() -> None:
            Recordings.insert_many(payload).execute()
 
-        def handle_request_region_grid():
+        def handle_request_region_grid() -> Any:
            camera = payload
            grid = get_camera_regions_grid(
                camera,
@@ -107,26 +118,26 @@ class Dispatcher:
            )
            return grid
 
-        def handle_insert_preview():
+        def handle_insert_preview() -> None:
            Previews.insert(payload).execute()
 
-        def handle_upsert_review_segment():
+        def handle_upsert_review_segment() -> None:
            ReviewSegment.insert(payload).on_conflict(
                conflict_target=[ReviewSegment.id],
                update=payload,
            ).execute()
 
-        def handle_clear_ongoing_review_segments():
+        def handle_clear_ongoing_review_segments() -> None:
            ReviewSegment.update(end_time=datetime.datetime.now().timestamp()).where(
                ReviewSegment.end_time.is_null(True)
            ).execute()
 
-        def handle_update_camera_activity():
+        def handle_update_camera_activity() -> None:
            self.camera_activity.update_activity(payload)
 
-        def handle_update_event_description():
+        def handle_update_event_description() -> None:
            event: Event = Event.get(Event.id == payload["id"])
-           event.data["description"] = payload["description"]
+           cast(dict, event.data)["description"] = payload["description"]
            event.save()
            self.publish(
                "tracked_object_update",
@@ -140,30 +151,46 @@ class Dispatcher:
                ),
            )
 
-        def handle_update_model_state():
+        def handle_update_review_description() -> None:
+            final_data = payload["after"]
+            ReviewSegment.insert(final_data).on_conflict(
+                conflict_target=[ReviewSegment.id],
+                update=final_data,
+            ).execute()
+            self.publish("reviews", json.dumps(payload))
+
+        def handle_update_model_state() -> None:
            if payload:
                model = payload["model"]
                state = payload["state"]
                self.model_state[model] = ModelStatusTypesEnum[state]
                self.publish("model_state", json.dumps(self.model_state))
 
-        def handle_model_state():
+        def handle_model_state() -> None:
            self.publish("model_state", json.dumps(self.model_state.copy()))
 
-        def handle_update_embeddings_reindex_progress():
+        def handle_update_embeddings_reindex_progress() -> None:
            self.embeddings_reindex = payload
            self.publish(
                "embeddings_reindex_progress",
                json.dumps(payload),
            )
 
-        def handle_embeddings_reindex_progress():
+        def handle_embeddings_reindex_progress() -> None:
            self.publish(
                "embeddings_reindex_progress",
                json.dumps(self.embeddings_reindex.copy()),
            )
 
-        def handle_on_connect():
+        def handle_update_birdseye_layout() -> None:
+            if payload:
+                self.birdseye_layout = payload
+                self.publish("birdseye_layout", json.dumps(self.birdseye_layout))
+
+        def handle_birdseye_layout() -> None:
+            self.publish("birdseye_layout", json.dumps(self.birdseye_layout.copy()))
+
+        def handle_on_connect() -> None:
            camera_status = self.camera_activity.last_camera_activity.copy()
            cameras_with_status = camera_status.keys()
 
@@ -177,6 +204,9 @@ class Dispatcher:
                    "snapshots": self.config.cameras[camera].snapshots.enabled,
                    "record": self.config.cameras[camera].record.enabled,
                    "audio": self.config.cameras[camera].audio.enabled,
+                    "audio_transcription": self.config.cameras[
+                        camera
+                    ].audio_transcription.live_enabled,
                    "notifications": self.config.cameras[camera].notifications.enabled,
                    "notifications_suspended": int(
                        self.web_push_client.suspended_cameras.get(camera, 0)
@@ -189,6 +219,12 @@ class Dispatcher:
                    ].onvif.autotracking.enabled,
                    "alerts": self.config.cameras[camera].review.alerts.enabled,
                    "detections": self.config.cameras[camera].review.detections.enabled,
+                    "object_descriptions": self.config.cameras[
+                        camera
+                    ].objects.genai.enabled,
+                    "review_descriptions": self.config.cameras[
+                        camera
+                    ].review.genai.enabled,
                }
 
            self.publish("camera_activity", json.dumps(camera_status))
@@ -197,8 +233,9 @@ class Dispatcher:
                "embeddings_reindex_progress",
                json.dumps(self.embeddings_reindex.copy()),
            )
+            self.publish("birdseye_layout", json.dumps(self.birdseye_layout.copy()))
 
-        def handle_notification_test():
+        def handle_notification_test() -> None:
            self.publish("notification_test", "Test notification")
 
        # Dictionary mapping topic to handlers
@@ -210,12 +247,15 @@ class Dispatcher:
            CLEAR_ONGOING_REVIEW_SEGMENTS: handle_clear_ongoing_review_segments,
            UPDATE_CAMERA_ACTIVITY: handle_update_camera_activity,
            UPDATE_EVENT_DESCRIPTION: handle_update_event_description,
+            UPDATE_REVIEW_DESCRIPTION: handle_update_review_description,
            UPDATE_MODEL_STATE: handle_update_model_state,
            UPDATE_EMBEDDINGS_REINDEX_PROGRESS: handle_update_embeddings_reindex_progress,
+            UPDATE_BIRDSEYE_LAYOUT: handle_update_birdseye_layout,
            NOTIFICATION_TEST: handle_notification_test,
            "restart": handle_restart,
            "embeddingsReindexProgress": handle_embeddings_reindex_progress,
            "modelState": handle_model_state,
+            "birdseyeLayout": handle_birdseye_layout,
            "onConnect": handle_on_connect,
        }
 
@@ -243,11 +283,12 @@ class Dispatcher:
                logger.error(
                    f"Received invalid {topic.split('/')[-1]} command: {topic}"
                )
-                return
+                return None
        elif topic in topic_handlers:
            return topic_handlers[topic]()
        else:
            self.publish(topic, payload, retain=False)
+        return None
 
    def publish(self, topic: str, payload: Any, retain: bool = False) -> None:
        """Handle publishing to communicators."""
@@ -273,8 +314,11 @@ class Dispatcher:
                    f"Turning on motion for {camera_name} due to detection being enabled."
                )
                motion_settings.enabled = True
-                self.config_updater.publish(
-                    f"config/motion/{camera_name}", motion_settings
+                self.config_updater.publish_update(
+                    CameraConfigUpdateTopic(
+                        CameraConfigUpdateEnum.motion, camera_name
+                    ),
+                    motion_settings,
                )
                self.publish(f"{camera_name}/motion/state", payload, retain=True)
        elif payload == "OFF":
@@ -282,7 +326,10 @@ class Dispatcher:
                logger.info(f"Turning off detection for {camera_name}")
                detect_settings.enabled = False
 
-        self.config_updater.publish(f"config/detect/{camera_name}", detect_settings)
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.detect, camera_name),
+            detect_settings,
+        )
        self.publish(f"{camera_name}/detect/state", payload, retain=True)
 
    def _on_enabled_command(self, camera_name: str, payload: str) -> None:
@@ -303,7 +350,10 @@ class Dispatcher:
                logger.info(f"Turning off camera {camera_name}")
                camera_settings.enabled = False
 
-        self.config_updater.publish(f"config/enabled/{camera_name}", camera_settings)
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.enabled, camera_name),
+            camera_settings.enabled,
+        )
        self.publish(f"{camera_name}/enabled/state", payload, retain=True)
 
    def _on_motion_command(self, camera_name: str, payload: str) -> None:
@@ -326,7 +376,10 @@ class Dispatcher:
                logger.info(f"Turning off motion for {camera_name}")
                motion_settings.enabled = False
 
-        self.config_updater.publish(f"config/motion/{camera_name}", motion_settings)
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.motion, camera_name),
+            motion_settings,
+        )
        self.publish(f"{camera_name}/motion/state", payload, retain=True)
 
    def _on_motion_improve_contrast_command(
@@ -338,13 +391,16 @@ class Dispatcher:
        if payload == "ON":
            if not motion_settings.improve_contrast:
                logger.info(f"Turning on improve contrast for {camera_name}")
-                motion_settings.improve_contrast = True  # type: ignore[union-attr]
+                motion_settings.improve_contrast = True
        elif payload == "OFF":
            if motion_settings.improve_contrast:
                logger.info(f"Turning off improve contrast for {camera_name}")
-                motion_settings.improve_contrast = False  # type: ignore[union-attr]
+                motion_settings.improve_contrast = False
 
-        self.config_updater.publish(f"config/motion/{camera_name}", motion_settings)
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.motion, camera_name),
+            motion_settings,
+        )
        self.publish(f"{camera_name}/improve_contrast/state", payload, retain=True)
 
    def _on_ptz_autotracker_command(self, camera_name: str, payload: str) -> None:
@@ -383,8 +439,11 @@ class Dispatcher:
 
        motion_settings = self.config.cameras[camera_name].motion
        logger.info(f"Setting motion contour area for {camera_name}: {payload}")
-        motion_settings.contour_area = payload  # type: ignore[union-attr]
+        motion_settings.contour_area = payload
-        self.config_updater.publish(f"config/motion/{camera_name}", motion_settings)
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.motion, camera_name),
+            motion_settings,
+        )
        self.publish(f"{camera_name}/motion_contour_area/state", payload, retain=True)
 
    def _on_motion_threshold_command(self, camera_name: str, payload: int) -> None:
@@ -397,8 +456,11 @@ class Dispatcher:
 
        motion_settings = self.config.cameras[camera_name].motion
        logger.info(f"Setting motion threshold for {camera_name}: {payload}")
-        motion_settings.threshold = payload  # type: ignore[union-attr]
+        motion_settings.threshold = payload
-        self.config_updater.publish(f"config/motion/{camera_name}", motion_settings)
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.motion, camera_name),
+            motion_settings,
+        )
        self.publish(f"{camera_name}/motion_threshold/state", payload, retain=True)
 
    def _on_global_notification_command(self, payload: str) -> None:
@@ -409,9 +471,9 @@ class Dispatcher:
 
        notification_settings = self.config.notifications
        logger.info(f"Setting all notifications: {payload}")
-        notification_settings.enabled = payload == "ON"  # type: ignore[union-attr]
+        notification_settings.enabled = payload == "ON"
-        self.config_updater.publish(
+        self.config_updater.publisher.publish(
-            "config/notifications", {"_global_notifications": notification_settings}
+            "config/notifications", notification_settings
        )
        self.publish("notifications/state", payload, retain=True)
 
@@ -434,9 +496,43 @@ class Dispatcher:
                logger.info(f"Turning off audio detection for {camera_name}")
                audio_settings.enabled = False
 
-        self.config_updater.publish(f"config/audio/{camera_name}", audio_settings)
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.audio, camera_name),
+            audio_settings,
+        )
        self.publish(f"{camera_name}/audio/state", payload, retain=True)
 
+    def _on_audio_transcription_command(self, camera_name: str, payload: str) -> None:
+        """Callback for live audio transcription topic."""
+        audio_transcription_settings = self.config.cameras[
+            camera_name
+        ].audio_transcription
+
+        if payload == "ON":
+            if not self.config.cameras[
+                camera_name
+            ].audio_transcription.enabled_in_config:
+                logger.error(
+                    "Audio transcription must be enabled in the config to be turned on via MQTT."
+                )
+                return
+
+            if not audio_transcription_settings.live_enabled:
+                logger.info(f"Turning on live audio transcription for {camera_name}")
+                audio_transcription_settings.live_enabled = True
+        elif payload == "OFF":
+            if audio_transcription_settings.live_enabled:
+                logger.info(f"Turning off live audio transcription for {camera_name}")
+                audio_transcription_settings.live_enabled = False
+
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(
+                CameraConfigUpdateEnum.audio_transcription, camera_name
+            ),
+            audio_transcription_settings,
+        )
+        self.publish(f"{camera_name}/audio_transcription/state", payload, retain=True)
+
    def _on_recordings_command(self, camera_name: str, payload: str) -> None:
        """Callback for recordings topic."""
        record_settings = self.config.cameras[camera_name].record
@@ -456,7 +552,10 @@ class Dispatcher:
                logger.info(f"Turning off recordings for {camera_name}")
                record_settings.enabled = False
 
-        self.config_updater.publish(f"config/record/{camera_name}", record_settings)
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.record, camera_name),
+            record_settings,
+        )
        self.publish(f"{camera_name}/recordings/state", payload, retain=True)
 
    def _on_snapshots_command(self, camera_name: str, payload: str) -> None:
@@ -472,6 +571,10 @@ class Dispatcher:
                logger.info(f"Turning off snapshots for {camera_name}")
                snapshots_settings.enabled = False
 
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.snapshots, camera_name),
+            snapshots_settings,
+        )
        self.publish(f"{camera_name}/snapshots/state", payload, retain=True)
 
    def _on_ptz_command(self, camera_name: str, payload: str) -> None:
@@ -506,7 +609,10 @@ class Dispatcher:
                logger.info(f"Turning off birdseye for {camera_name}")
                birdseye_settings.enabled = False
 
-        self.config_updater.publish(f"config/birdseye/{camera_name}", birdseye_settings)
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.birdseye, camera_name),
+            birdseye_settings,
+        )
        self.publish(f"{camera_name}/birdseye/state", payload, retain=True)
 
    def _on_birdseye_mode_command(self, camera_name: str, payload: str) -> None:
@@ -527,7 +633,10 @@ class Dispatcher:
            f"Setting birdseye mode for {camera_name} to {birdseye_settings.mode}"
        )
 
-        self.config_updater.publish(f"config/birdseye/{camera_name}", birdseye_settings)
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.birdseye, camera_name),
+            birdseye_settings,
+        )
        self.publish(f"{camera_name}/birdseye_mode/state", payload, retain=True)
 
    def _on_camera_notification_command(self, camera_name: str, payload: str) -> None:
@@ -559,8 +668,9 @@ class Dispatcher:
        ):
            self.web_push_client.suspended_cameras[camera_name] = 0
 
-        self.config_updater.publish(
-            "config/notifications", {camera_name: notification_settings}
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.notifications, camera_name),
+            notification_settings,
        )
        self.publish(f"{camera_name}/notifications/state", payload, retain=True)
        self.publish(f"{camera_name}/notifications/suspended", "0", retain=True)
@@ -617,7 +727,10 @@ class Dispatcher:
                logger.info(f"Turning off alerts for {camera_name}")
                review_settings.alerts.enabled = False
 
-        self.config_updater.publish(f"config/review/{camera_name}", review_settings)
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.review, camera_name),
+            review_settings,
+        )
        self.publish(f"{camera_name}/review_alerts/state", payload, retain=True)
 
    def _on_detections_command(self, camera_name: str, payload: str) -> None:
@@ -639,5 +752,58 @@ class Dispatcher:
                logger.info(f"Turning off detections for {camera_name}")
                review_settings.detections.enabled = False
 
-        self.config_updater.publish(f"config/review/{camera_name}", review_settings)
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.review, camera_name),
+            review_settings,
+        )
        self.publish(f"{camera_name}/review_detections/state", payload, retain=True)
+
+    def _on_object_description_command(self, camera_name: str, payload: str) -> None:
+        """Callback for object description topic."""
+        genai_settings = self.config.cameras[camera_name].objects.genai
+
+        if payload == "ON":
+            if not self.config.cameras[camera_name].objects.genai.enabled_in_config:
+                logger.error(
+                    "GenAI must be enabled in the config to be turned on via MQTT."
+                )
+                return
+
+            if not genai_settings.enabled:
+                logger.info(f"Turning on object descriptions for {camera_name}")
+                genai_settings.enabled = True
+        elif payload == "OFF":
+            if genai_settings.enabled:
+                logger.info(f"Turning off object descriptions for {camera_name}")
+                genai_settings.enabled = False
+
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.object_genai, camera_name),
+            genai_settings,
+        )
+        self.publish(f"{camera_name}/object_descriptions/state", payload, retain=True)
+
+    def _on_review_description_command(self, camera_name: str, payload: str) -> None:
+        """Callback for review description topic."""
+        genai_settings = self.config.cameras[camera_name].review.genai
+
+        if payload == "ON":
+            if not self.config.cameras[camera_name].review.genai.enabled_in_config:
+                logger.error(
+                    "GenAI Alerts or Detections must be enabled in the config to be turned on via MQTT."
+                )
+                return
+
+            if not genai_settings.enabled:
+                logger.info(f"Turning on review descriptions for {camera_name}")
+                genai_settings.enabled = True
+        elif payload == "OFF":
+            if genai_settings.enabled:
+                logger.info(f"Turning off review descriptions for {camera_name}")
+                genai_settings.enabled = False
+
+        self.config_updater.publish_update(
+            CameraConfigUpdateTopic(CameraConfigUpdateEnum.review_genai, camera_name),
+            genai_settings,
+        )
+        self.publish(f"{camera_name}/review_descriptions/state", payload, retain=True)
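
The recurring change in this dispatcher diff swaps string topics such as f"config/motion/{camera_name}" for a structured CameraConfigUpdateTopic(enum_member, camera) handed to publish_update. A small sketch of the idea with stand-in types (the dataclass and enum below are illustrative, not Frigate's real classes):

    from dataclasses import dataclass
    from enum import Enum

    class UpdateType(Enum):
        motion = "motion"
        detect = "detect"

    @dataclass(frozen=True)
    class UpdateTopic:
        update_type: UpdateType
        camera: str

        def __str__(self) -> str:
            return f"config/{self.update_type.value}/{self.camera}"

    # a typo in the update kind now fails at attribute lookup instead of
    # silently producing a topic nobody subscribes to
    topic = UpdateTopic(UpdateType.motion, "driveway")
    print(str(topic))  # config/motion/driveway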
@@ -1,23 +1,36 @@
 """Facilitates communication between processes."""
 
+import logging
 from enum import Enum
 from typing import Any, Callable
 
 import zmq
 
+logger = logging.getLogger(__name__)
+
+
 SOCKET_REP_REQ = "ipc:///tmp/cache/embeddings"
 
 
 class EmbeddingsRequestEnum(Enum):
+    # audio
+    transcribe_audio = "transcribe_audio"
+    # custom classification
+    reload_classification_model = "reload_classification_model"
+    # face
     clear_face_classifier = "clear_face_classifier"
-    embed_description = "embed_description"
-    embed_thumbnail = "embed_thumbnail"
-    generate_search = "generate_search"
     recognize_face = "recognize_face"
     register_face = "register_face"
     reprocess_face = "reprocess_face"
-    reprocess_plate = "reprocess_plate"
+    # semantic search
+    embed_description = "embed_description"
+    embed_thumbnail = "embed_thumbnail"
+    generate_search = "generate_search"
     reindex = "reindex"
+    # LPR
+    reprocess_plate = "reprocess_plate"
+    # Review Descriptions
+    summarize_review = "summarize_review"
 
 
 class EmbeddingsResponder:
@@ -34,9 +47,16 @@ class EmbeddingsResponder:
                 break
 
             try:
-                (topic, value) = self.socket.recv_json(flags=zmq.NOBLOCK)
+                raw = self.socket.recv_json(flags=zmq.NOBLOCK)
 
-                response = process(topic, value)
+                if isinstance(raw, list):
+                    (topic, value) = raw
+                    response = process(topic, value)
+                else:
+                    logging.warning(
+                        f"Received unexpected data type in ZMQ recv_json: {type(raw)}"
+                    )
+                    response = None
 
                 if response is not None:
                     self.socket.send_json(response)
@@ -58,7 +78,7 @@ class EmbeddingsRequestor:
         self.socket = self.context.socket(zmq.REQ)
         self.socket.connect(SOCKET_REP_REQ)
 
-    def send_data(self, topic: str, data: Any) -> str:
+    def send_data(self, topic: str, data: Any) -> Any:
         """Sends data and then waits for reply."""
         try:
             self.socket.send_json((topic, data))
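
The new isinstance(raw, list) guard exists because recv_json returns whatever JSON arrived: the expected (topic, value) tuple is serialized as a JSON array and deserializes to a list, but a peer could send any other shape. A socket-free sketch of the same guard:

    import json

    def handle(message: str):
        raw = json.loads(message)
        if isinstance(raw, list):
            topic, value = raw
            return f"processed {topic}"
        # anything that is not a [topic, value] pair is rejected
        return None

    print(handle(json.dumps(("onConnect", ""))))  # processed onConnect
    print(handle(json.dumps({"bad": "shape"})))   # None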
@@ -15,7 +15,7 @@ class EventMetadataTypeEnum(str, Enum):
     manual_event_end = "manual_event_end"
     regenerate_description = "regenerate_description"
     sub_label = "sub_label"
-    recognized_license_plate = "recognized_license_plate"
+    attribute = "attribute"
     lpr_event_create = "lpr_event_create"
     save_lpr_snapshot = "save_lpr_snapshot"
 
@@ -28,8 +28,8 @@ class EventMetadataPublisher(Publisher):
     def __init__(self) -> None:
         super().__init__()
 
-    def publish(self, topic: EventMetadataTypeEnum, payload: Any) -> None:
-        super().publish(payload, topic.value)
+    def publish(self, payload: Any, sub_topic: str = "") -> None:
+        super().publish(payload, sub_topic)
 
 
 class EventMetadataSubscriber(Subscriber):
@@ -40,9 +40,10 @@ class EventMetadataSubscriber(Subscriber):
     def __init__(self, topic: EventMetadataTypeEnum) -> None:
         super().__init__(topic.value)
 
-    def _return_object(self, topic: str, payload: tuple) -> tuple:
+    def _return_object(
+        self, topic: str, payload: tuple | None
+    ) -> tuple[str, Any] | tuple[None, None]:
         if payload is None:
             return (None, None)
 
-        topic = EventMetadataTypeEnum[topic[len(self.topic_base) :]]
         return (topic, payload)
@@ -7,7 +7,9 @@ from frigate.events.types import EventStateEnum, EventTypeEnum
 from .zmq_proxy import Publisher, Subscriber
 
 
-class EventUpdatePublisher(Publisher):
+class EventUpdatePublisher(
+    Publisher[tuple[EventTypeEnum, EventStateEnum, str | None, str, dict[str, Any]]]
+):
     """Publishes events (objects, audio, manual)."""
 
     topic_base = "event/"
@@ -16,9 +18,11 @@ class EventUpdatePublisher(Publisher):
         super().__init__("update")
 
     def publish(
-        self, payload: tuple[EventTypeEnum, EventStateEnum, str, str, dict[str, Any]]
+        self,
+        payload: tuple[EventTypeEnum, EventStateEnum, str | None, str, dict[str, Any]],
+        sub_topic: str = "",
     ) -> None:
-        super().publish(payload)
+        super().publish(payload, sub_topic)
 
 
 class EventUpdateSubscriber(Subscriber):
@@ -30,7 +34,9 @@ class EventUpdateSubscriber(Subscriber):
         super().__init__("update")
 
 
-class EventEndPublisher(Publisher):
+class EventEndPublisher(
+    Publisher[tuple[EventTypeEnum, EventStateEnum, str, dict[str, Any]]]
+):
     """Publishes events that have ended."""
 
     topic_base = "event/"
@@ -39,9 +45,11 @@ class EventEndPublisher(Publisher):
         super().__init__("finalized")
 
     def publish(
-        self, payload: tuple[EventTypeEnum, EventStateEnum, str, dict[str, Any]]
+        self,
+        payload: tuple[EventTypeEnum, EventStateEnum, str, dict[str, Any]],
+        sub_topic: str = "",
    ) -> None:
-        super().publish(payload)
+        super().publish(payload, sub_topic)
 
 
 class EventEndSubscriber(Subscriber):
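
Parameterizing Publisher by the payload tuple lets a type checker verify event payloads at each call site. A minimal sketch of a generic publisher under that assumption (the real class wraps ZMQ; this one just prints):

    from typing import Generic, TypeVar

    T = TypeVar("T")

    class Publisher(Generic[T]):
        # payload type is pinned per subclass via the type parameter
        def publish(self, payload: T, sub_topic: str = "") -> None:
            print(f"publish {sub_topic!r}: {payload}")

    class EventUpdatePublisher(Publisher[tuple[str, str, dict]]):
        pass

    pub = EventUpdatePublisher()
    pub.publish(("object", "start", {"id": "abc"}))  # type-checks against the tuple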
@@ -1,5 +1,6 @@
 """Facilitates communication between processes."""
 
+import logging
 import multiprocessing as mp
 import threading
 from multiprocessing.synchronize import Event as MpEvent
@@ -9,6 +10,8 @@ import zmq
 
 from frigate.comms.base_communicator import Communicator
 
+logger = logging.getLogger(__name__)
+
 SOCKET_REP_REQ = "ipc:///tmp/cache/comms"
 
 
@@ -19,7 +22,7 @@ class InterProcessCommunicator(Communicator):
         self.socket.bind(SOCKET_REP_REQ)
         self.stop_event: MpEvent = mp.Event()
 
-    def publish(self, topic: str, payload: str, retain: bool) -> None:
+    def publish(self, topic: str, payload: Any, retain: bool = False) -> None:
         """There is no communication back to the processes."""
         pass
 
@@ -37,9 +40,16 @@ class InterProcessCommunicator(Communicator):
                 break
 
             try:
-                (topic, value) = self.socket.recv_json(flags=zmq.NOBLOCK)
+                raw = self.socket.recv_json(flags=zmq.NOBLOCK)
 
-                response = self._dispatcher(topic, value)
+                if isinstance(raw, list):
+                    (topic, value) = raw
+                    response = self._dispatcher(topic, value)
+                else:
+                    logging.warning(
+                        f"Received unexpected data type in ZMQ recv_json: {type(raw)}"
+                    )
+                    response = None
 
                 if response is not None:
                     self.socket.send_json(response)
|||||||
@@ -11,7 +11,7 @@ from frigate.config import FrigateConfig
 logger = logging.getLogger(__name__)
 
 
-class MqttClient(Communicator):  # type: ignore[misc]
+class MqttClient(Communicator):
     """Frigate wrapper for mqtt client."""
 
     def __init__(self, config: FrigateConfig) -> None:
@@ -75,7 +75,7 @@ class MqttClient(Communicator):  # type: ignore[misc]
         )
         self.publish(
             f"{camera_name}/improve_contrast/state",
-            "ON" if camera.motion.improve_contrast else "OFF",  # type: ignore[union-attr]
+            "ON" if camera.motion.improve_contrast else "OFF",
             retain=True,
         )
         self.publish(
@@ -85,12 +85,12 @@ class MqttClient(Communicator):  # type: ignore[misc]
         )
         self.publish(
             f"{camera_name}/motion_threshold/state",
-            camera.motion.threshold,  # type: ignore[union-attr]
+            camera.motion.threshold,
             retain=True,
         )
         self.publish(
             f"{camera_name}/motion_contour_area/state",
-            camera.motion.contour_area,  # type: ignore[union-attr]
+            camera.motion.contour_area,
             retain=True,
         )
         self.publish(
@@ -122,6 +122,16 @@ class MqttClient(Communicator):  # type: ignore[misc]
             "ON" if camera.review.detections.enabled_in_config else "OFF",
             retain=True,
         )
+        self.publish(
+            f"{camera_name}/object_descriptions/state",
+            "ON" if camera.objects.genai.enabled_in_config else "OFF",
+            retain=True,
+        )
+        self.publish(
+            f"{camera_name}/review_descriptions/state",
+            "ON" if camera.review.genai.enabled_in_config else "OFF",
+            retain=True,
+        )
 
         if self.config.notifications.enabled_in_config:
             self.publish(
@@ -145,7 +155,7 @@ class MqttClient(Communicator):  # type: ignore[misc]
         client: mqtt.Client,
         userdata: Any,
         flags: Any,
-        reason_code: mqtt.ReasonCode,
+        reason_code: mqtt.ReasonCode,  # type: ignore[name-defined]
         properties: Any,
     ) -> None:
         """Mqtt connection callback."""
@@ -177,7 +187,7 @@ class MqttClient(Communicator):  # type: ignore[misc]
         client: mqtt.Client,
         userdata: Any,
         flags: Any,
-        reason_code: mqtt.ReasonCode,
+        reason_code: mqtt.ReasonCode,  # type: ignore[name-defined]
         properties: Any,
     ) -> None:
         """Mqtt disconnection callback."""
@@ -215,6 +225,7 @@ class MqttClient(Communicator):  # type: ignore[misc]
             "birdseye_mode",
             "review_alerts",
             "review_detections",
+            "genai",
         ]
 
         for name in self.config.cameras.keys():
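The two added per-camera state topics follow the existing retained ON/OFF pattern. A rough way to watch them with paho-mqtt, assuming Frigate's default topic prefix of frigate (the prefix is configurable, so adjust as needed):

# --- editorial sketch, not part of the diff ---
import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    # payloads are retained "ON"/"OFF" strings, per the publishes above
    print(msg.topic, msg.payload.decode())

client = mqtt.Client()
client.on_message = on_message
client.connect("localhost", 1883)
client.subscribe("frigate/+/object_descriptions/state")
client.subscribe("frigate/+/review_descriptions/state")
client.loop_forever()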
92 frigate/comms/object_detector_signaler.py Normal file
@@ -0,0 +1,92 @@
+"""Facilitates communication between processes for object detection signals."""
+
+import threading
+
+import zmq
+
+SOCKET_PUB = "ipc:///tmp/cache/detector_pub"
+SOCKET_SUB = "ipc:///tmp/cache/detector_sub"
+
+
+class ZmqProxyRunner(threading.Thread):
+    def __init__(self, context: zmq.Context[zmq.Socket]) -> None:
+        super().__init__(name="detector_proxy")
+        self.context = context
+
+    def run(self) -> None:
+        """Run the proxy."""
+        incoming = self.context.socket(zmq.XSUB)
+        incoming.bind(SOCKET_PUB)
+        outgoing = self.context.socket(zmq.XPUB)
+        outgoing.bind(SOCKET_SUB)
+
+        # Blocking: This will unblock (via exception) when we destroy the context
+        # The incoming and outgoing sockets will be closed automatically
+        # when the context is destroyed as well.
+        try:
+            zmq.proxy(incoming, outgoing)
+        except zmq.ZMQError:
+            pass
+
+
+class DetectorProxy:
+    """Proxies object detection signals."""
+
+    def __init__(self) -> None:
+        self.context = zmq.Context()
+        self.runner = ZmqProxyRunner(self.context)
+        self.runner.start()
+
+    def stop(self) -> None:
+        # destroying the context will tell the proxy to stop
+        self.context.destroy()
+        self.runner.join()
+
+
+class ObjectDetectorPublisher:
+    """Publishes signal for object detection to different processes."""
+
+    topic_base = "object_detector/"
+
+    def __init__(self, topic: str = "") -> None:
+        self.topic = f"{self.topic_base}{topic}"
+        self.context = zmq.Context()
+        self.socket = self.context.socket(zmq.PUB)
+        self.socket.connect(SOCKET_PUB)
+
+    def publish(self, sub_topic: str = "") -> None:
+        """Publish message."""
+        self.socket.send_string(f"{self.topic}{sub_topic}/")
+
+    def stop(self) -> None:
+        self.socket.close()
+        self.context.destroy()
+
+
+class ObjectDetectorSubscriber:
+    """Simplifies receiving a signal for object detection."""
+
+    topic_base = "object_detector/"
+
+    def __init__(self, topic: str = "") -> None:
+        self.topic = f"{self.topic_base}{topic}/"
+        self.context = zmq.Context()
+        self.socket = self.context.socket(zmq.SUB)
+        self.socket.setsockopt_string(zmq.SUBSCRIBE, self.topic)
+        self.socket.connect(SOCKET_SUB)
+
+    def check_for_update(self, timeout: float = 5) -> str | None:
+        """Returns message or None if no update."""
+        try:
+            has_update, _, _ = zmq.select([self.socket], [], [], timeout)
+
+            if has_update:
+                return self.socket.recv_string(flags=zmq.NOBLOCK)
+        except zmq.ZMQError:
+            pass
+
+        return None
+
+    def stop(self) -> None:
+        self.socket.close()
+        self.context.destroy()
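A rough usage sketch for the new signaler module, using only the classes added above. In Frigate proper the proxy and the two endpoints would live in separate processes, so colocating them here is purely illustrative:

# --- editorial sketch, not part of the diff ---
from frigate.comms.object_detector_signaler import (
    DetectorProxy,
    ObjectDetectorPublisher,
    ObjectDetectorSubscriber,
)

proxy = DetectorProxy()  # XSUB/XPUB forwarder between the two IPC sockets

publisher = ObjectDetectorPublisher("camera_1")    # sends on object_detector/camera_1/
subscriber = ObjectDetectorSubscriber("camera_1")  # filters on the same prefix

publisher.publish()  # the message is just the topic string; it carries no payload

if (signal := subscriber.check_for_update(timeout=1)) is not None:
    print(f"detection signal received: {signal}")

publisher.stop()
subscriber.stop()
proxy.stop()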
@@ -13,17 +13,16 @@ class RecordingsDataTypeEnum(str, Enum):
     recordings_available_through = "recordings_available_through"
 
 
-class RecordingsDataPublisher(Publisher):
+class RecordingsDataPublisher(Publisher[tuple[str, float]]):
     """Publishes latest recording data."""
 
     topic_base = "recordings/"
 
     def __init__(self, topic: RecordingsDataTypeEnum) -> None:
-        topic = topic.value
-        super().__init__(topic)
+        super().__init__(topic.value)
 
-    def publish(self, payload: tuple[str, float]) -> None:
-        super().publish(payload)
+    def publish(self, payload: tuple[str, float], sub_topic: str = "") -> None:
+        super().publish(payload, sub_topic)
 
 
 class RecordingsDataSubscriber(Subscriber):
@@ -32,5 +31,4 @@ class RecordingsDataSubscriber(Subscriber):
     topic_base = "recordings/"
 
     def __init__(self, topic: RecordingsDataTypeEnum) -> None:
-        topic = topic.value
-        super().__init__(topic)
+        super().__init__(topic.value)
30 frigate/comms/review_updater.py Normal file
@@ -0,0 +1,30 @@
+"""Facilitates communication between processes."""
+
+import logging
+
+from .zmq_proxy import Publisher, Subscriber
+
+logger = logging.getLogger(__name__)
+
+
+class ReviewDataPublisher(
+    Publisher
+):  # update when typing improvement is added Publisher[tuple[str, float]]
+    """Publishes review item data."""
+
+    topic_base = "review/"
+
+    def __init__(self, topic: str) -> None:
+        super().__init__(topic)
+
+    def publish(self, payload: tuple[str, float], sub_topic: str = "") -> None:
+        super().publish(payload, sub_topic)
+
+
+class ReviewDataSubscriber(Subscriber):
+    """Receives review item data."""
+
+    topic_base = "review/"
+
+    def __init__(self, topic: str) -> None:
+        super().__init__(topic)
@@ -17,6 +17,10 @@ from titlecase import titlecase
 from frigate.comms.base_communicator import Communicator
 from frigate.comms.config_updater import ConfigSubscriber
 from frigate.config import FrigateConfig
+from frigate.config.camera.updater import (
+    CameraConfigUpdateEnum,
+    CameraConfigUpdateSubscriber,
+)
 from frigate.const import CONFIG_DIR
 from frigate.models import User
 
@@ -35,7 +39,7 @@ class PushNotification:
     ttl: int = 0
 
 
-class WebPushClient(Communicator):  # type: ignore[misc]
+class WebPushClient(Communicator):
     """Frigate wrapper for webpush client."""
 
     def __init__(self, config: FrigateConfig, stop_event: MpEvent) -> None:
@@ -46,10 +50,12 @@ class WebPushClient(Communicator):  # type: ignore[misc]
         self.web_pushers: dict[str, list[WebPusher]] = {}
         self.expired_subs: dict[str, list[str]] = {}
         self.suspended_cameras: dict[str, int] = {
-            c.name: 0 for c in self.config.cameras.values()
+            c.name: 0  # type: ignore[misc]
+            for c in self.config.cameras.values()
         }
         self.last_camera_notification_time: dict[str, float] = {
-            c.name: 0 for c in self.config.cameras.values()
+            c.name: 0  # type: ignore[misc]
+            for c in self.config.cameras.values()
         }
         self.last_notification_time: float = 0
         self.notification_queue: queue.Queue[PushNotification] = queue.Queue()
@@ -64,7 +70,7 @@ class WebPushClient(Communicator):  # type: ignore[misc]
         # Pull keys from PEM or generate if they do not exist
        self.vapid = Vapid01.from_file(os.path.join(CONFIG_DIR, "notifications.pem"))
 
-        users: list[User] = (
+        users: list[dict[str, Any]] = (
            User.select(User.username, User.notification_tokens).dicts().iterator()
         )
         for user in users:
@@ -73,7 +79,12 @@ class WebPushClient(Communicator):  # type: ignore[misc]
                 self.web_pushers[user["username"]].append(WebPusher(sub))
 
         # notification config updater
-        self.config_subscriber = ConfigSubscriber("config/notifications")
+        self.global_config_subscriber = ConfigSubscriber(
+            "config/notifications", exact=True
+        )
+        self.config_subscriber = CameraConfigUpdateSubscriber(
+            self.config, self.config.cameras, [CameraConfigUpdateEnum.notifications]
+        )
 
     def subscribe(self, receiver: Callable) -> None:
         """Wrapper for allowing dispatcher to subscribe."""
@@ -154,15 +165,19 @@ class WebPushClient(Communicator):  # type: ignore[misc]
     def publish(self, topic: str, payload: Any, retain: bool = False) -> None:
         """Wrapper for publishing when client is in valid state."""
         # check for updated notification config
-        _, updated_notification_config = self.config_subscriber.check_for_update()
+        _, updated_notification_config = (
+            self.global_config_subscriber.check_for_update()
+        )
 
         if updated_notification_config:
-            for key, value in updated_notification_config.items():
-                if key == "_global_notifications":
-                    self.config.notifications = value
-
-                elif key in self.config.cameras:
-                    self.config.cameras[key].notifications = value
+            self.config.notifications = updated_notification_config
+
+        updates = self.config_subscriber.check_for_updates()
+
+        if "add" in updates:
+            for camera in updates["add"]:
+                self.suspended_cameras[camera] = 0
+                self.last_camera_notification_time[camera] = 0
 
         if topic == "reviews":
             decoded = json.loads(payload)
@@ -173,6 +188,28 @@ class WebPushClient(Communicator):  # type: ignore[misc]
                 logger.debug(f"Notifications for {camera} are currently suspended.")
                 return
             self.send_alert(decoded)
+        if topic == "triggers":
+            decoded = json.loads(payload)
+
+            camera = decoded["camera"]
+            name = decoded["name"]
+
+            # ensure notifications are enabled and the specific trigger has
+            # notification action enabled
+            if (
+                not self.config.cameras[camera].notifications.enabled
+                or name not in self.config.cameras[camera].semantic_search.triggers
+                or "notification"
+                not in self.config.cameras[camera]
+                .semantic_search.triggers[name]
+                .actions
+            ):
+                return
+
+            if self.is_camera_suspended(camera):
+                logger.debug(f"Notifications for {camera} are currently suspended.")
+                return
+            self.send_trigger(decoded)
         elif topic == "notification_test":
             if not self.config.notifications.enabled and not any(
                 cam.notifications.enabled for cam in self.config.cameras.values()
@@ -254,6 +291,23 @@ class WebPushClient(Communicator):  # type: ignore[misc]
         except Exception as e:
             logger.error(f"Error processing notification: {str(e)}")
 
+    def _within_cooldown(self, camera: str) -> bool:
+        now = datetime.datetime.now().timestamp()
+        if now - self.last_notification_time < self.config.notifications.cooldown:
+            logger.debug(
+                f"Skipping notification for {camera} - in global cooldown period"
+            )
+            return True
+        if (
+            now - self.last_camera_notification_time[camera]
+            < self.config.cameras[camera].notifications.cooldown
+        ):
+            logger.debug(
+                f"Skipping notification for {camera} - in camera-specific cooldown period"
+            )
+            return True
+        return False
+
     def send_notification_test(self) -> None:
         if not self.config.notifications.email:
             return
@@ -282,24 +336,7 @@ class WebPushClient(Communicator):  # type: ignore[misc]
         camera: str = payload["after"]["camera"]
         current_time = datetime.datetime.now().timestamp()
 
-        # Check global cooldown period
-        if (
-            current_time - self.last_notification_time
-            < self.config.notifications.cooldown
-        ):
-            logger.debug(
-                f"Skipping notification for {camera} - in global cooldown period"
-            )
-            return
-
-        # Check camera-specific cooldown period
-        if (
-            current_time - self.last_camera_notification_time[camera]
-            < self.config.cameras[camera].notifications.cooldown
-        ):
-            logger.debug(
-                f"Skipping notification for {camera} - in camera-specific cooldown period"
-            )
+        if self._within_cooldown(camera):
             return
 
         self.check_registrations()
@@ -332,12 +369,22 @@ class WebPushClient(Communicator):  # type: ignore[misc]
             sorted_objects.update(payload["after"]["data"]["sub_labels"])
 
         title = f"{titlecase(', '.join(sorted_objects).replace('_', ' '))}{' was' if state == 'end' else ''} detected in {titlecase(', '.join(payload['after']['data']['zones']).replace('_', ' '))}"
-        message = f"Detected on {titlecase(camera.replace('_', ' '))}"
         image = f"{payload['after']['thumb_path'].replace('/media/frigate', '')}"
+        ended = state == "end" or state == "genai"
+
+        if state == "genai" and payload["after"]["data"]["metadata"]:
+            message = payload["after"]["data"]["metadata"]["scene"]
+        else:
+            message = f"Detected on {titlecase(camera.replace('_', ' '))}"
+
+        if ended:
+            logger.debug(
+                f"Sending a notification with state {state} and message {message}"
+            )
 
         # if event is ongoing open to live view otherwise open to recordings view
-        direct_url = f"/review?id={reviewId}" if state == "end" else f"/#{camera}"
-        ttl = 3600 if state == "end" else 0
+        direct_url = f"/review?id={reviewId}" if ended else f"/#{camera}"
+        ttl = 3600 if ended else 0
 
         logger.debug(f"Sending push notification for {camera}, review ID {reviewId}")
 
@@ -354,6 +401,48 @@ class WebPushClient(Communicator):  # type: ignore[misc]
 
         self.cleanup_registrations()
 
+    def send_trigger(self, payload: dict[str, Any]) -> None:
+        if not self.config.notifications.email:
+            return
+
+        camera: str = payload["camera"]
+        current_time = datetime.datetime.now().timestamp()
+
+        if self._within_cooldown(camera):
+            return
+
+        self.check_registrations()
+
+        self.last_camera_notification_time[camera] = current_time
+        self.last_notification_time = current_time
+
+        trigger_type = payload["type"]
+        event_id = payload["event_id"]
+        name = payload["name"]
+        score = payload["score"]
+
+        title = f"{name.replace('_', ' ')} triggered on {titlecase(camera.replace('_', ' '))}"
+        message = f"{titlecase(trigger_type)} trigger fired for {titlecase(camera.replace('_', ' '))} with score {score:.2f}"
+        image = f"clips/triggers/{camera}/{event_id}.webp"
+
+        direct_url = f"/explore?event_id={event_id}"
+        ttl = 0
+
+        logger.debug(f"Sending push notification for {camera}, trigger name {name}")
+
+        for user in self.web_pushers:
+            self.send_push_notification(
+                user=user,
+                payload=payload,
+                title=title,
+                message=message,
+                direct_url=direct_url,
+                image=image,
+                ttl=ttl,
+            )
+
+        self.cleanup_registrations()
+
     def stop(self) -> None:
         logger.info("Closing notification queue")
         self.notification_thread.join()
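The two cooldown checks that send_alert previously inlined are now centralized in _within_cooldown: a notification is suppressed if either the global window or the per-camera window is still open. Restated as a standalone predicate for clarity (names here are illustrative):

# --- editorial sketch, not part of the diff ---
def should_suppress(
    now: float,
    last_global: float,
    last_camera: float,
    global_cooldown: float,
    camera_cooldown: float,
) -> bool:
    # suppress while inside the global window OR the camera-specific window
    return (now - last_global < global_cooldown) or (
        now - last_camera < camera_cooldown
    )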
@@ -4,7 +4,7 @@ import errno
 import json
 import logging
 import threading
-from typing import Callable
+from typing import Any, Callable
 from wsgiref.simple_server import make_server
 
 from ws4py.server.wsgirefserver import (
@@ -21,8 +21,8 @@ from frigate.config import FrigateConfig
 logger = logging.getLogger(__name__)
 
 
-class WebSocket(WebSocket_):
-    def unhandled_error(self, error):
+class WebSocket(WebSocket_):  # type: ignore[misc]
+    def unhandled_error(self, error: Any) -> None:
         """
         Handles the unfriendly socket closures on the server side
         without showing a confusing error message
@@ -33,12 +33,12 @@ class WebSocket(WebSocket_):
         logging.getLogger("ws4py").exception("Failed to receive data")
 
 
-class WebSocketClient(Communicator):  # type: ignore[misc]
+class WebSocketClient(Communicator):
     """Frigate wrapper for ws client."""
 
     def __init__(self, config: FrigateConfig) -> None:
         self.config = config
-        self.websocket_server = None
+        self.websocket_server: WSGIServer | None = None
 
     def subscribe(self, receiver: Callable) -> None:
         self._dispatcher = receiver
@@ -47,10 +47,10 @@ class WebSocketClient(Communicator):  # type: ignore[misc]
     def start(self) -> None:
         """Start the websocket client."""
 
-        class _WebSocketHandler(WebSocket):  # type: ignore[misc]
+        class _WebSocketHandler(WebSocket):
             receiver = self._dispatcher
 
-            def received_message(self, message: WebSocket.received_message) -> None:
+            def received_message(self, message: WebSocket.received_message) -> None:  # type: ignore[name-defined]
                 try:
                     json_message = json.loads(message.data.decode("utf-8"))
                     json_message = {
@@ -86,7 +86,7 @@ class WebSocketClient(Communicator):  # type: ignore[misc]
         )
         self.websocket_thread.start()
 
-    def publish(self, topic: str, payload: str, _: bool) -> None:
+    def publish(self, topic: str, payload: Any, _: bool = False) -> None:
         try:
             ws_message = json.dumps(
                 {
@@ -109,9 +109,11 @@ class WebSocketClient(Communicator):  # type: ignore[misc]
             pass
 
     def stop(self) -> None:
-        self.websocket_server.manager.close_all()
-        self.websocket_server.manager.stop()
-        self.websocket_server.manager.join()
-        self.websocket_server.shutdown()
+        if self.websocket_server is not None:
+            self.websocket_server.manager.close_all()
+            self.websocket_server.manager.stop()
+            self.websocket_server.manager.join()
+            self.websocket_server.shutdown()
+
         self.websocket_thread.join()
         logger.info("Exiting websocket client...")
@@ -2,7 +2,7 @@
 
 import json
 import threading
-from typing import Any, Optional
+from typing import Generic, TypeVar
 
 import zmq
 
@@ -47,7 +47,10 @@ class ZmqProxy:
         self.runner.join()
 
 
-class Publisher:
+T = TypeVar("T")
+
+
+class Publisher(Generic[T]):
     """Publishes messages."""
 
     topic_base: str = ""
@@ -58,7 +61,7 @@ class Publisher:
         self.socket = self.context.socket(zmq.PUB)
         self.socket.connect(SOCKET_PUB)
 
-    def publish(self, payload: Any, sub_topic: str = "") -> None:
+    def publish(self, payload: T, sub_topic: str = "") -> None:
         """Publish message."""
         self.socket.send_string(f"{self.topic}{sub_topic} {json.dumps(payload)}")
 
@@ -67,7 +70,7 @@ class Publisher:
         self.context.destroy()
 
 
-class Subscriber:
+class Subscriber(Generic[T]):
     """Receives messages."""
 
     topic_base: str = ""
@@ -79,9 +82,7 @@ class Subscriber:
         self.socket.setsockopt_string(zmq.SUBSCRIBE, self.topic)
         self.socket.connect(SOCKET_SUB)
 
-    def check_for_update(
-        self, timeout: float = FAST_QUEUE_TIMEOUT
-    ) -> Optional[tuple[str, Any]]:
+    def check_for_update(self, timeout: float | None = FAST_QUEUE_TIMEOUT) -> T | None:
         """Returns message or None if no update."""
         try:
             has_update, _, _ = zmq.select([self.socket], [], [], timeout)
@@ -98,5 +99,5 @@ class Subscriber:
         self.socket.close()
         self.context.destroy()
 
-    def _return_object(self, topic: str, payload: Any) -> Any:
+    def _return_object(self, topic: str, payload: T | None) -> T | None:
         return payload
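With Publisher and Subscriber now generic in T, a subclass pins its payload type once and publish()/check_for_update() are checked against it. An illustrative pair, not taken from the diff, assuming (as the subclasses elsewhere in this commit do) that __init__ accepts a topic suffix:

# --- editorial sketch, not part of the diff ---
from frigate.comms.zmq_proxy import Publisher, Subscriber


class TemperaturePublisher(Publisher[tuple[str, float]]):
    """Hypothetical publisher of (sensor, reading) tuples."""

    topic_base = "temperature/"


class TemperatureSubscriber(Subscriber[tuple[str, float]]):
    """Hypothetical subscriber; check_for_update() returns tuple[str, float] | None."""

    topic_base = "temperature/"


pub = TemperaturePublisher("sensors")
sub = TemperatureSubscriber("sensors")
pub.publish(("garage", 21.5))                # payload type-checked against T
reading = sub.check_for_update(timeout=1.0)  # tuple[str, float] | None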
@@ -1,5 +1,29 @@
+from typing import Any
+
 from pydantic import BaseModel, ConfigDict
 
 
 class FrigateBaseModel(BaseModel):
     model_config = ConfigDict(extra="forbid", protected_namespaces=())
+
+    def get_nested_object(self, path: str) -> Any:
+        parts = path.split("/")
+        obj = self
+        for part in parts:
+            if part == "config":
+                continue
+
+            if isinstance(obj, BaseModel):
+                try:
+                    obj = getattr(obj, part)
+                except AttributeError:
+                    return None
+            elif isinstance(obj, dict):
+                try:
+                    obj = obj[part]
+                except KeyError:
+                    return None
+            else:
+                return None
+
+        return obj
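A quick sketch of what get_nested_object resolves: the leading "config" segment is skipped, then each path part is looked up by attribute on models and by key on dicts, with any miss collapsing to None. The camera and field names below are assumptions for illustration:

# --- editorial sketch, not part of the diff ---
# assuming `config` is a loaded FrigateConfig (itself a FrigateBaseModel)
notifications = config.get_nested_object("config/cameras/front_door/notifications")
missing = config.get_nested_object("config/cameras/front_door/no_such_field")  # None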
@@ -19,14 +19,15 @@ from frigate.util.builtin import (
 
 from ..base import FrigateBaseModel
 from ..classification import (
+    AudioTranscriptionConfig,
     CameraFaceRecognitionConfig,
     CameraLicensePlateRecognitionConfig,
+    CameraSemanticSearchConfig,
 )
 from .audio import AudioConfig
 from .birdseye import BirdseyeCameraConfig
 from .detect import DetectConfig
 from .ffmpeg import CameraFfmpegConfig, CameraInput
-from .genai import GenAICameraConfig
 from .live import CameraLiveConfig
 from .motion import MotionConfig
 from .mqtt import CameraMqttConfig
@@ -56,6 +57,9 @@ class CameraConfig(FrigateBaseModel):
     audio: AudioConfig = Field(
         default_factory=AudioConfig, title="Audio events configuration."
     )
+    audio_transcription: AudioTranscriptionConfig = Field(
+        default_factory=AudioTranscriptionConfig, title="Audio transcription config."
+    )
     birdseye: BirdseyeCameraConfig = Field(
         default_factory=BirdseyeCameraConfig, title="Birdseye camera configuration."
     )
@@ -66,18 +70,13 @@ class CameraConfig(FrigateBaseModel):
         default_factory=CameraFaceRecognitionConfig, title="Face recognition config."
     )
     ffmpeg: CameraFfmpegConfig = Field(title="FFmpeg configuration for the camera.")
-    genai: GenAICameraConfig = Field(
-        default_factory=GenAICameraConfig, title="Generative AI configuration."
-    )
     live: CameraLiveConfig = Field(
         default_factory=CameraLiveConfig, title="Live playback settings."
     )
     lpr: CameraLicensePlateRecognitionConfig = Field(
         default_factory=CameraLicensePlateRecognitionConfig, title="LPR config."
     )
-    motion: Optional[MotionConfig] = Field(
-        None, title="Motion detection configuration."
-    )
+    motion: MotionConfig = Field(None, title="Motion detection configuration.")
     objects: ObjectConfig = Field(
         default_factory=ObjectConfig, title="Object configuration."
     )
@@ -87,6 +86,10 @@ class CameraConfig(FrigateBaseModel):
     review: ReviewConfig = Field(
         default_factory=ReviewConfig, title="Review configuration."
     )
+    semantic_search: CameraSemanticSearchConfig = Field(
+        default_factory=CameraSemanticSearchConfig,
+        title="Semantic search configuration.",
+    )
     snapshots: SnapshotsConfig = Field(
         default_factory=SnapshotsConfig, title="Snapshot configuration."
     )
@@ -1,12 +1,12 @@
 from enum import Enum
-from typing import Optional, Union
+from typing import Any, Optional
 
-from pydantic import BaseModel, Field, field_validator
+from pydantic import Field
 
 from ..base import FrigateBaseModel
 from ..env import EnvString
 
-__all__ = ["GenAIConfig", "GenAICameraConfig", "GenAIProviderEnum"]
+__all__ = ["GenAIConfig", "GenAIProviderEnum"]
 
 
 class GenAIProviderEnum(str, Enum):
@@ -16,70 +16,13 @@ class GenAIProviderEnum(str, Enum):
     ollama = "ollama"
 
 
-class GenAISendTriggersConfig(BaseModel):
-    tracked_object_end: bool = Field(
-        default=True, title="Send once the object is no longer tracked."
-    )
-    after_significant_updates: Optional[int] = Field(
-        default=None,
-        title="Send an early request to generative AI when X frames accumulated.",
-        ge=1,
-    )
-
-
-# uses BaseModel because some global attributes are not available at the camera level
-class GenAICameraConfig(BaseModel):
-    enabled: bool = Field(default=False, title="Enable GenAI for camera.")
-    use_snapshot: bool = Field(
-        default=False, title="Use snapshots for generating descriptions."
-    )
-    prompt: str = Field(
-        default="Analyze the sequence of images containing the {label}. Focus on the likely intent or behavior of the {label} based on its actions and movement, rather than describing its appearance or the surroundings. Consider what the {label} is doing, why, and what it might do next.",
-        title="Default caption prompt.",
-    )
-    object_prompts: dict[str, str] = Field(
-        default_factory=dict, title="Object specific prompts."
-    )
-
-    objects: Union[str, list[str]] = Field(
-        default_factory=list,
-        title="List of objects to run generative AI for.",
-    )
-    required_zones: Union[str, list[str]] = Field(
-        default_factory=list,
-        title="List of required zones to be entered in order to run generative AI.",
-    )
-    debug_save_thumbnails: bool = Field(
-        default=False,
-        title="Save thumbnails sent to generative AI for debugging purposes.",
-    )
-    send_triggers: GenAISendTriggersConfig = Field(
-        default_factory=GenAISendTriggersConfig,
-        title="What triggers to use to send frames to generative AI for a tracked object.",
-    )
-
-    @field_validator("required_zones", mode="before")
-    @classmethod
-    def validate_required_zones(cls, v):
-        if isinstance(v, str) and "," not in v:
-            return [v]
-
-        return v
-
-
 class GenAIConfig(FrigateBaseModel):
-    enabled: bool = Field(default=False, title="Enable GenAI.")
-    prompt: str = Field(
-        default="Analyze the sequence of images containing the {label}. Focus on the likely intent or behavior of the {label} based on its actions and movement, rather than describing its appearance or the surroundings. Consider what the {label} is doing, why, and what it might do next.",
-        title="Default caption prompt.",
-    )
-    object_prompts: dict[str, str] = Field(
-        default_factory=dict, title="Object specific prompts."
-    )
-
+    """Primary GenAI Config to define GenAI Provider."""
 
     api_key: Optional[EnvString] = Field(default=None, title="Provider API key.")
     base_url: Optional[str] = Field(default=None, title="Provider base url.")
     model: str = Field(default="gpt-4o", title="GenAI model.")
-    provider: GenAIProviderEnum = Field(
-        default=GenAIProviderEnum.openai, title="GenAI provider."
-    )
+    provider: GenAIProviderEnum | None = Field(default=None, title="GenAI provider.")
+    provider_options: dict[str, Any] = Field(
+        default={}, title="GenAI Provider extra options."
+    )
@@ -10,7 +10,7 @@ __all__ = ["NotificationConfig"]
 class NotificationConfig(FrigateBaseModel):
     enabled: bool = Field(default=False, title="Enable notifications")
     email: Optional[str] = Field(default=None, title="Email required for push.")
-    cooldown: Optional[int] = Field(
+    cooldown: int = Field(
         default=0, ge=0, title="Cooldown period for notifications (time in seconds)."
     )
     enabled_in_config: Optional[bool] = Field(
@@ -1,10 +1,10 @@
 from typing import Any, Optional, Union
 
-from pydantic import Field, PrivateAttr, field_serializer
+from pydantic import Field, PrivateAttr, field_serializer, field_validator
 
 from ..base import FrigateBaseModel
 
-__all__ = ["ObjectConfig", "FilterConfig"]
+__all__ = ["ObjectConfig", "GenAIObjectConfig", "FilterConfig"]
 
 
 DEFAULT_TRACKED_OBJECTS = ["person"]
@@ -49,12 +49,69 @@ class FilterConfig(FrigateBaseModel):
         return None
 
 
+class GenAIObjectTriggerConfig(FrigateBaseModel):
+    tracked_object_end: bool = Field(
+        default=True, title="Send once the object is no longer tracked."
+    )
+    after_significant_updates: Optional[int] = Field(
+        default=None,
+        title="Send an early request to generative AI when X frames accumulated.",
+        ge=1,
+    )
+
+
+class GenAIObjectConfig(FrigateBaseModel):
+    enabled: bool = Field(default=False, title="Enable GenAI for camera.")
+    use_snapshot: bool = Field(
+        default=False, title="Use snapshots for generating descriptions."
+    )
+    prompt: str = Field(
+        default="Analyze the sequence of images containing the {label}. Focus on the likely intent or behavior of the {label} based on its actions and movement, rather than describing its appearance or the surroundings. Consider what the {label} is doing, why, and what it might do next.",
+        title="Default caption prompt.",
+    )
+    object_prompts: dict[str, str] = Field(
+        default_factory=dict, title="Object specific prompts."
+    )
+
+    objects: Union[str, list[str]] = Field(
+        default_factory=list,
+        title="List of objects to run generative AI for.",
+    )
+    required_zones: Union[str, list[str]] = Field(
+        default_factory=list,
+        title="List of required zones to be entered in order to run generative AI.",
+    )
+    debug_save_thumbnails: bool = Field(
+        default=False,
+        title="Save thumbnails sent to generative AI for debugging purposes.",
+    )
+    send_triggers: GenAIObjectTriggerConfig = Field(
+        default_factory=GenAIObjectTriggerConfig,
+        title="What triggers to use to send frames to generative AI for a tracked object.",
+    )
+    enabled_in_config: Optional[bool] = Field(
+        default=None, title="Keep track of original state of generative AI."
+    )
+
+    @field_validator("required_zones", mode="before")
+    @classmethod
+    def validate_required_zones(cls, v):
+        if isinstance(v, str) and "," not in v:
+            return [v]
+
+        return v
+
+
 class ObjectConfig(FrigateBaseModel):
     track: list[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
     filters: dict[str, FilterConfig] = Field(
         default_factory=dict, title="Object filters."
     )
     mask: Union[str, list[str]] = Field(default="", title="Object mask.")
+    genai: GenAIObjectConfig = Field(
+        default_factory=GenAIObjectConfig,
+        title="Config for using genai to analyze objects.",
+    )
     _all_objects: list[str] = PrivateAttr()
 
     @property
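Since the per-object GenAI options now live under objects.genai (rather than a camera-level genai block), an equivalent programmatic construction looks roughly like this; field names come from the models above, and the values are illustrative:

# --- editorial sketch, not part of the diff ---
from frigate.config.camera.objects import GenAIObjectConfig, ObjectConfig

objects = ObjectConfig(
    track=["person", "car"],
    genai=GenAIObjectConfig(
        enabled=True,
        objects=["person"],         # limit GenAI to people
        required_zones=["porch"],   # only when the object entered this zone
    ),
)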
@@ -22,27 +22,31 @@ __all__ = [
 DEFAULT_TIME_LAPSE_FFMPEG_ARGS = "-vf setpts=0.04*PTS -r 30"
 
 
+class RecordRetainConfig(FrigateBaseModel):
+    days: float = Field(default=0, ge=0, title="Default retention period.")
+
+
 class RetainModeEnum(str, Enum):
     all = "all"
     motion = "motion"
     active_objects = "active_objects"
 
 
-class RecordRetainConfig(FrigateBaseModel):
-    days: float = Field(default=0, title="Default retention period.")
-    mode: RetainModeEnum = Field(default=RetainModeEnum.all, title="Retain mode.")
-
-
 class ReviewRetainConfig(FrigateBaseModel):
-    days: float = Field(default=10, title="Default retention period.")
+    days: float = Field(default=10, ge=0, title="Default retention period.")
     mode: RetainModeEnum = Field(default=RetainModeEnum.motion, title="Retain mode.")
 
 
 class EventsConfig(FrigateBaseModel):
     pre_capture: int = Field(
-        default=5, title="Seconds to retain before event starts.", le=MAX_PRE_CAPTURE
+        default=5,
+        title="Seconds to retain before event starts.",
+        le=MAX_PRE_CAPTURE,
+        ge=0,
+    )
+    post_capture: int = Field(
+        default=5, ge=0, title="Seconds to retain after event ends."
     )
-    post_capture: int = Field(default=5, title="Seconds to retain after event ends.")
     retain: ReviewRetainConfig = Field(
         default_factory=ReviewRetainConfig, title="Event retention settings."
     )
@@ -77,8 +81,12 @@ class RecordConfig(FrigateBaseModel):
         default=60,
         title="Number of minutes to wait between cleanup runs.",
     )
-    retain: RecordRetainConfig = Field(
-        default_factory=RecordRetainConfig, title="Record retention settings."
+    continuous: RecordRetainConfig = Field(
+        default_factory=RecordRetainConfig,
+        title="Continuous recording retention settings.",
+    )
+    motion: RecordRetainConfig = Field(
+        default_factory=RecordRetainConfig, title="Motion recording retention settings."
     )
     detections: EventsConfig = Field(
         default_factory=EventsConfig, title="Detection specific retention settings."
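The single record.retain block is effectively split: continuous retention and motion retention are now separate RecordRetainConfig fields, each reduced to a days value. Roughly, in model terms (remaining RecordConfig fields assumed to default; values illustrative):

# --- editorial sketch, not part of the diff ---
from frigate.config.camera.record import RecordConfig, RecordRetainConfig

record = RecordConfig(
    continuous=RecordRetainConfig(days=7),   # keep all footage for a week
    motion=RecordRetainConfig(days=30),      # keep motion segments for a month
)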
@@ -62,6 +62,30 @@ class DetectionsConfig(FrigateBaseModel):
         return v
 
 
+class GenAIReviewConfig(FrigateBaseModel):
+    enabled: bool = Field(
+        default=False,
+        title="Enable GenAI descriptions for review items.",
+    )
+    alerts: bool = Field(default=True, title="Enable GenAI for alerts.")
+    detections: bool = Field(default=False, title="Enable GenAI for detections.")
+    additional_concerns: list[str] = Field(
+        default=[],
+        title="Additional concerns that GenAI should make note of on this camera.",
+    )
+    debug_save_thumbnails: bool = Field(
+        default=False,
+        title="Save thumbnails sent to generative AI for debugging purposes.",
+    )
+    enabled_in_config: Optional[bool] = Field(
+        default=None, title="Keep track of original state of generative AI."
+    )
+    preferred_language: str | None = Field(
+        title="Preferred language for GenAI Response",
+        default=None,
+    )
+
+
 class ReviewConfig(FrigateBaseModel):
     """Configure reviews"""
 
@@ -71,3 +95,6 @@ class ReviewConfig(FrigateBaseModel):
     detections: DetectionsConfig = Field(
         default_factory=DetectionsConfig, title="Review detections config."
     )
+    genai: GenAIReviewConfig = Field(
+        default_factory=GenAIReviewConfig, title="Review description genai config."
+    )
147 frigate/config/camera/updater.py Normal file
@@ -0,0 +1,147 @@
+"""Convenience classes for updating configurations dynamically."""
+
+from dataclasses import dataclass
+from enum import Enum
+from typing import Any
+
+from frigate.comms.config_updater import ConfigPublisher, ConfigSubscriber
+from frigate.config import CameraConfig, FrigateConfig
+
+
+class CameraConfigUpdateEnum(str, Enum):
+    """Supported camera config update types."""
+
+    add = "add"  # for adding a camera
+    audio = "audio"
+    audio_transcription = "audio_transcription"
+    birdseye = "birdseye"
+    detect = "detect"
+    enabled = "enabled"
+    motion = "motion"  # includes motion and motion masks
+    notifications = "notifications"
+    objects = "objects"
+    object_genai = "object_genai"
+    record = "record"
+    remove = "remove"  # for removing a camera
+    review = "review"
+    review_genai = "review_genai"
+    semantic_search = "semantic_search"  # for semantic search triggers
+    snapshots = "snapshots"
+    zones = "zones"
+
+
+@dataclass
+class CameraConfigUpdateTopic:
+    update_type: CameraConfigUpdateEnum
+    camera: str
+
+    @property
+    def topic(self) -> str:
+        return f"config/cameras/{self.camera}/{self.update_type.name}"
+
+
+class CameraConfigUpdatePublisher:
+    def __init__(self):
+        self.publisher = ConfigPublisher()
+
+    def publish_update(self, topic: CameraConfigUpdateTopic, config: Any) -> None:
+        self.publisher.publish(topic.topic, config)
+
+    def stop(self) -> None:
+        self.publisher.stop()
+
+
+class CameraConfigUpdateSubscriber:
+    def __init__(
+        self,
+        config: FrigateConfig | None,
+        camera_configs: dict[str, CameraConfig],
+        topics: list[CameraConfigUpdateEnum],
+    ):
+        self.config = config
+        self.camera_configs = camera_configs
+        self.topics = topics
+
+        base_topic = "config/cameras"
+
+        if len(self.camera_configs) == 1:
+            base_topic += f"/{list(self.camera_configs.keys())[0]}"
+
+        self.subscriber = ConfigSubscriber(
+            base_topic,
+            exact=False,
+        )
+
+    def __update_config(
+        self, camera: str, update_type: CameraConfigUpdateEnum, updated_config: Any
+    ) -> None:
+        if update_type == CameraConfigUpdateEnum.add:
+            self.config.cameras[camera] = updated_config
+            self.camera_configs[camera] = updated_config
+            return
+        elif update_type == CameraConfigUpdateEnum.remove:
+            self.config.cameras.pop(camera)
+            self.camera_configs.pop(camera)
+            return
+
+        config = self.camera_configs.get(camera)
+
+        if not config:
+            return
+
+        if update_type == CameraConfigUpdateEnum.audio:
+            config.audio = updated_config
+        elif update_type == CameraConfigUpdateEnum.audio_transcription:
+            config.audio_transcription = updated_config
+        elif update_type == CameraConfigUpdateEnum.birdseye:
+            config.birdseye = updated_config
+        elif update_type == CameraConfigUpdateEnum.detect:
+            config.detect = updated_config
+        elif update_type == CameraConfigUpdateEnum.enabled:
+            config.enabled = updated_config
+        elif update_type == CameraConfigUpdateEnum.object_genai:
+            config.objects.genai = updated_config
+        elif update_type == CameraConfigUpdateEnum.motion:
+            config.motion = updated_config
+        elif update_type == CameraConfigUpdateEnum.notifications:
+            config.notifications = updated_config
+        elif update_type == CameraConfigUpdateEnum.objects:
+            config.objects = updated_config
+        elif update_type == CameraConfigUpdateEnum.record:
+            config.record = updated_config
+        elif update_type == CameraConfigUpdateEnum.review:
+            config.review = updated_config
+        elif update_type == CameraConfigUpdateEnum.review_genai:
+            config.review.genai = updated_config
+        elif update_type == CameraConfigUpdateEnum.semantic_search:
+            config.semantic_search = updated_config
+        elif update_type == CameraConfigUpdateEnum.snapshots:
+            config.snapshots = updated_config
+        elif update_type == CameraConfigUpdateEnum.zones:
+            config.zones = updated_config
+
+    def check_for_updates(self) -> dict[str, list[str]]:
+        updated_topics: dict[str, list[str]] = {}
+
+        # get all updates available
+        while True:
+            update_topic, update_config = self.subscriber.check_for_update()
+
+            if update_topic is None or update_config is None:
+                break
+
+            _, _, camera, raw_type = update_topic.split("/")
+            update_type = CameraConfigUpdateEnum[raw_type]
+
+            if update_type in self.topics:
+                if update_type.name in updated_topics:
+                    updated_topics[update_type.name].append(camera)
+                else:
+                    updated_topics[update_type.name] = [camera]
+
+            self.__update_config(camera, update_type, update_config)
+
+        return updated_topics
+
+    def stop(self) -> None:
+        self.subscriber.stop()
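A minimal round trip with the new updater classes, assuming a loaded FrigateConfig named config and a camera called front_door; the topic string follows CameraConfigUpdateTopic.topic above:

# --- editorial sketch, not part of the diff ---
from frigate.config.camera.updater import (
    CameraConfigUpdateEnum,
    CameraConfigUpdatePublisher,
    CameraConfigUpdateSubscriber,
    CameraConfigUpdateTopic,
)

publisher = CameraConfigUpdatePublisher()
subscriber = CameraConfigUpdateSubscriber(
    config, config.cameras, [CameraConfigUpdateEnum.notifications]
)

# publishes on "config/cameras/front_door/notifications"
publisher.publish_update(
    CameraConfigUpdateTopic(CameraConfigUpdateEnum.notifications, "front_door"),
    config.cameras["front_door"].notifications,
)

# drains pending updates, applies them in place, and reports what changed,
# e.g. {"notifications": ["front_door"]}
updates = subscriber.check_for_updates()

publisher.stop()
subscriber.stop()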
@ -10,6 +10,7 @@ __all__ = [
|
|||||||
"CameraLicensePlateRecognitionConfig",
|
"CameraLicensePlateRecognitionConfig",
|
||||||
"FaceRecognitionConfig",
|
"FaceRecognitionConfig",
|
||||||
"SemanticSearchConfig",
|
"SemanticSearchConfig",
|
||||||
|
"CameraSemanticSearchConfig",
|
||||||
"LicensePlateRecognitionConfig",
|
"LicensePlateRecognitionConfig",
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -19,11 +20,46 @@ class SemanticSearchModelEnum(str, Enum):
     jinav2 = "jinav2"
 
 
-class LPRDeviceEnum(str, Enum):
+class EnrichmentsDeviceEnum(str, Enum):
     GPU = "GPU"
     CPU = "CPU"
 
 
+class TriggerType(str, Enum):
+    THUMBNAIL = "thumbnail"
+    DESCRIPTION = "description"
+
+
+class TriggerAction(str, Enum):
+    NOTIFICATION = "notification"
+
+
+class ObjectClassificationType(str, Enum):
+    sub_label = "sub_label"
+    attribute = "attribute"
+
+
+class AudioTranscriptionConfig(FrigateBaseModel):
+    enabled: bool = Field(default=False, title="Enable audio transcription.")
+    language: str = Field(
+        default="en",
+        title="Language abbreviation to use for audio event transcription/translation.",
+    )
+    device: Optional[EnrichmentsDeviceEnum] = Field(
+        default=EnrichmentsDeviceEnum.CPU,
+        title="The device used for audio transcription.",
+    )
+    model_size: str = Field(
+        default="small", title="The size of the embeddings model used."
+    )
+    enabled_in_config: Optional[bool] = Field(
+        default=None, title="Keep track of original state of camera."
+    )
+    live_enabled: Optional[bool] = Field(
+        default=False, title="Enable live transcriptions."
+    )
+
+
 class BirdClassificationConfig(FrigateBaseModel):
     enabled: bool = Field(default=False, title="Enable bird classification.")
     threshold: float = Field(
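Note: a quick sketch of the defaults declared above (illustrative only; mirrors the Field defaults in this hunk):

    config = AudioTranscriptionConfig()
    assert config.enabled is False
    assert config.language == "en"
    assert config.device == EnrichmentsDeviceEnum.CPU
    assert config.model_size == "small"
    assert config.live_enabled is False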
@@ -34,10 +70,52 @@ class BirdClassificationConfig(FrigateBaseModel):
     )
 
 
+class CustomClassificationStateCameraConfig(FrigateBaseModel):
+    crop: list[int, int, int, int] = Field(
+        title="Crop of image frame on this camera to run classification on."
+    )
+
+
+class CustomClassificationStateConfig(FrigateBaseModel):
+    cameras: Dict[str, CustomClassificationStateCameraConfig] = Field(
+        title="Cameras to run classification on."
+    )
+    motion: bool = Field(
+        default=False,
+        title="If classification should be run when motion is detected in the crop.",
+    )
+    interval: int | None = Field(
+        default=None,
+        title="Interval to run classification on in seconds.",
+        gt=0,
+    )
+
+
+class CustomClassificationObjectConfig(FrigateBaseModel):
+    objects: list[str] = Field(title="Object types to classify.")
+    classification_type: ObjectClassificationType = Field(
+        default=ObjectClassificationType.sub_label,
+        title="Type of classification that is applied.",
+    )
+
+
+class CustomClassificationConfig(FrigateBaseModel):
+    enabled: bool = Field(default=True, title="Enable running the model.")
+    name: str | None = Field(default=None, title="Name of classification model.")
+    threshold: float = Field(
+        default=0.8, title="Classification score threshold to change the state."
+    )
+    object_config: CustomClassificationObjectConfig | None = Field(default=None)
+    state_config: CustomClassificationStateConfig | None = Field(default=None)
+
+
 class ClassificationConfig(FrigateBaseModel):
     bird: BirdClassificationConfig = Field(
         default_factory=BirdClassificationConfig, title="Bird classification config."
     )
+    custom: Dict[str, CustomClassificationConfig] = Field(
+        default={}, title="Custom Classification Model Configs."
+    )
 
 
 class SemanticSearchConfig(FrigateBaseModel):
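Note: a hypothetical sketch of a custom state-classification entry built from the new models above (the camera name and values are invented for illustration):

    custom_model = CustomClassificationConfig(
        threshold=0.8,
        state_config=CustomClassificationStateConfig(
            cameras={
                "front_door": CustomClassificationStateCameraConfig(
                    crop=[0, 0, 320, 320]  # region of the frame to classify
                )
            },
            motion=True,  # re-run whenever motion is detected inside the crop
            interval=30,  # and at most every 30 seconds otherwise
        ),
    )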
@@ -52,6 +130,37 @@ class SemanticSearchConfig(FrigateBaseModel):
     model_size: str = Field(
         default="small", title="The size of the embeddings model used."
     )
+    device: Optional[str] = Field(
+        default=None,
+        title="The device key to use for semantic search.",
+        description="This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information",
+    )
+
+
+class TriggerConfig(FrigateBaseModel):
+    enabled: bool = Field(default=True, title="Enable this trigger")
+    type: TriggerType = Field(default=TriggerType.DESCRIPTION, title="Type of trigger")
+    data: str = Field(title="Trigger content (text phrase or image ID)")
+    threshold: float = Field(
+        title="Confidence score required to run the trigger",
+        default=0.8,
+        gt=0.0,
+        le=1.0,
+    )
+    actions: List[TriggerAction] = Field(
+        default=[], title="Actions to perform when trigger is matched"
+    )
+
+    model_config = ConfigDict(extra="forbid", protected_namespaces=())
+
+
+class CameraSemanticSearchConfig(FrigateBaseModel):
+    triggers: Dict[str, TriggerConfig] = Field(
+        default={},
+        title="Trigger actions on tracked objects that match existing thumbnails or descriptions",
+    )
+
+    model_config = ConfigDict(extra="forbid", protected_namespaces=())
 
 
 class FaceRecognitionConfig(FrigateBaseModel):
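Note: a sketch of a description-based trigger using the new models (the trigger name and phrase are invented for illustration):

    semantic_search = CameraSemanticSearchConfig(
        triggers={
            "delivery": TriggerConfig(
                type=TriggerType.DESCRIPTION,
                data="a courier leaving a package at the door",
                threshold=0.8,
                actions=[TriggerAction.NOTIFICATION],
            )
        }
    )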
@@ -92,6 +201,11 @@ class FaceRecognitionConfig(FrigateBaseModel):
     blur_confidence_filter: bool = Field(
         default=True, title="Apply blur quality filter to face confidence."
     )
+    device: Optional[str] = Field(
+        default=None,
+        title="The device key to use for face recognition.",
+        description="This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information",
+    )
 
 
 class CameraFaceRecognitionConfig(FrigateBaseModel):
@@ -105,10 +219,6 @@ class CameraFaceRecognitionConfig(FrigateBaseModel):
 
 class LicensePlateRecognitionConfig(FrigateBaseModel):
     enabled: bool = Field(default=False, title="Enable license plate recognition.")
-    device: Optional[LPRDeviceEnum] = Field(
-        default=LPRDeviceEnum.CPU,
-        title="The device used for license plate recognition.",
-    )
     model_size: str = Field(
         default="small", title="The size of the embeddings model used."
     )
@@ -154,6 +264,11 @@ class LicensePlateRecognitionConfig(FrigateBaseModel):
         default=False,
         title="Save plates captured for LPR for debugging purposes.",
     )
+    device: Optional[str] = Field(
+        default=None,
+        title="The device key to use for LPR.",
+        description="This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information",
+    )
 
 
 class CameraLicensePlateRecognitionConfig(FrigateBaseModel):
@@ -48,12 +48,13 @@ from .camera.genai import GenAIConfig
 from .camera.motion import MotionConfig
 from .camera.notification import NotificationConfig
 from .camera.objects import FilterConfig, ObjectConfig
-from .camera.record import RecordConfig, RetainModeEnum
+from .camera.record import RecordConfig
 from .camera.review import ReviewConfig
 from .camera.snapshots import SnapshotsConfig
 from .camera.timestamp import TimestampStyleConfig
 from .camera_group import CameraGroupConfig
 from .classification import (
+    AudioTranscriptionConfig,
     ClassificationConfig,
     FaceRecognitionConfig,
     LicensePlateRecognitionConfig,
@@ -63,6 +64,7 @@ from .database import DatabaseConfig
 from .env import EnvVars
 from .logger import LoggerConfig
 from .mqtt import MqttConfig
+from .network import NetworkingConfig
 from .proxy import ProxyConfig
 from .telemetry import TelemetryConfig
 from .tls import TlsConfig
@@ -203,33 +205,6 @@ def verify_valid_live_stream_names(
     )
 
 
-def verify_recording_retention(camera_config: CameraConfig) -> None:
-    """Verify that recording retention modes are ranked correctly."""
-    rank_map = {
-        RetainModeEnum.all: 0,
-        RetainModeEnum.motion: 1,
-        RetainModeEnum.active_objects: 2,
-    }
-
-    if (
-        camera_config.record.retain.days != 0
-        and rank_map[camera_config.record.retain.mode]
-        > rank_map[camera_config.record.alerts.retain.mode]
-    ):
-        logger.warning(
-            f"{camera_config.name}: Recording retention is configured for {camera_config.record.retain.mode} and alert retention is configured for {camera_config.record.alerts.retain.mode}. The more restrictive retention policy will be applied."
-        )
-
-    if (
-        camera_config.record.retain.days != 0
-        and rank_map[camera_config.record.retain.mode]
-        > rank_map[camera_config.record.detections.retain.mode]
-    ):
-        logger.warning(
-            f"{camera_config.name}: Recording retention is configured for {camera_config.record.retain.mode} and detection retention is configured for {camera_config.record.detections.retain.mode}. The more restrictive retention policy will be applied."
-        )
-
-
 def verify_recording_segments_setup_with_reasonable_time(
     camera_config: CameraConfig,
 ) -> None:
@@ -334,6 +309,9 @@ def verify_lpr_and_face(
 
 class FrigateConfig(FrigateBaseModel):
     version: Optional[str] = Field(default=None, title="Current config version.")
+    safe_mode: bool = Field(
+        default=False, title="If Frigate should be started in safe mode."
+    )
 
     # Fields that install global state should be defined first, so that their validators run first.
     environment_vars: EnvVars = Field(
@@ -357,6 +335,9 @@ class FrigateConfig(FrigateBaseModel):
     notifications: NotificationConfig = Field(
         default_factory=NotificationConfig, title="Global notification configuration."
     )
+    networking: NetworkingConfig = Field(
+        default_factory=NetworkingConfig, title="Networking configuration"
+    )
     proxy: ProxyConfig = Field(
         default_factory=ProxyConfig, title="Proxy configuration."
     )
@@ -375,6 +356,11 @@ class FrigateConfig(FrigateBaseModel):
         default_factory=ModelConfig, title="Detection model configuration."
     )
 
+    # GenAI config
+    genai: GenAIConfig = Field(
+        default_factory=GenAIConfig, title="Generative AI configuration."
+    )
+
     # Camera config
     cameras: Dict[str, CameraConfig] = Field(title="Camera configuration.")
     audio: AudioConfig = Field(
@@ -389,9 +375,6 @@ class FrigateConfig(FrigateBaseModel):
     ffmpeg: FfmpegConfig = Field(
         default_factory=FfmpegConfig, title="Global FFmpeg configuration."
     )
-    genai: GenAIConfig = Field(
-        default_factory=GenAIConfig, title="Generative AI configuration."
-    )
     live: CameraLiveConfig = Field(
         default_factory=CameraLiveConfig, title="Live playback settings."
     )
@@ -416,6 +399,9 @@ class FrigateConfig(FrigateBaseModel):
     )
 
     # Classification Config
+    audio_transcription: AudioTranscriptionConfig = Field(
+        default_factory=AudioTranscriptionConfig, title="Audio transcription config."
+    )
     classification: ClassificationConfig = Field(
         default_factory=ClassificationConfig, title="Object classification config."
     )
@@ -469,6 +455,7 @@ class FrigateConfig(FrigateBaseModel):
         global_config = self.model_dump(
             include={
                 "audio": ...,
+                "audio_transcription": ...,
                 "birdseye": ...,
                 "face_recognition": ...,
                 "lpr": ...,
@@ -477,7 +464,6 @@ class FrigateConfig(FrigateBaseModel):
                 "live": ...,
                 "objects": ...,
                 "review": ...,
-                "genai": ...,
                 "motion": ...,
                 "notifications": ...,
                 "detect": ...,
@@ -506,7 +492,9 @@ class FrigateConfig(FrigateBaseModel):
                 model_config["path"] = detector_config.model_path
 
             if "path" not in model_config:
-                if detector_config.type == "cpu":
+                if detector_config.type == "cpu" or detector_config.type.endswith(
+                    "_tfl"
+                ):
                     model_config["path"] = "/cpu_model.tflite"
                 elif detector_config.type == "edgetpu":
                     model_config["path"] = "/edgetpu_model.tflite"
@@ -525,6 +513,7 @@ class FrigateConfig(FrigateBaseModel):
         allowed_fields_map = {
             "face_recognition": ["enabled", "min_area"],
             "lpr": ["enabled", "expire_time", "min_area", "enhancement"],
+            "audio_transcription": ["enabled", "live_enabled"],
         }
 
         for section in allowed_fields_map:
@@ -606,6 +595,9 @@ class FrigateConfig(FrigateBaseModel):
             # set config pre-value
             camera_config.enabled_in_config = camera_config.enabled
             camera_config.audio.enabled_in_config = camera_config.audio.enabled
+            camera_config.audio_transcription.enabled_in_config = (
+                camera_config.audio_transcription.enabled
+            )
             camera_config.record.enabled_in_config = camera_config.record.enabled
             camera_config.notifications.enabled_in_config = (
                 camera_config.notifications.enabled
@@ -619,6 +611,12 @@ class FrigateConfig(FrigateBaseModel):
             camera_config.review.detections.enabled_in_config = (
                 camera_config.review.detections.enabled
             )
+            camera_config.objects.genai.enabled_in_config = (
+                camera_config.objects.genai.enabled
+            )
+            camera_config.review.genai.enabled_in_config = (
+                camera_config.review.genai.enabled
+            )
 
             # Add default filters
             object_keys = camera_config.objects.track
@@ -685,7 +683,6 @@ class FrigateConfig(FrigateBaseModel):
 
             verify_config_roles(camera_config)
             verify_valid_live_stream_names(self, camera_config)
-            verify_recording_retention(camera_config)
             verify_recording_segments_setup_with_reasonable_time(camera_config)
             verify_zone_objects_are_tracked(camera_config)
             verify_required_zones_exist(camera_config)
@@ -694,10 +691,29 @@ class FrigateConfig(FrigateBaseModel):
             verify_objects_track(camera_config, labelmap_objects)
             verify_lpr_and_face(self, camera_config)
 
+        # set names on classification configs
+        for name, config in self.classification.custom.items():
+            config.name = name
+
         self.objects.parse_all_objects(self.cameras)
         self.model.create_colormap(sorted(self.objects.all_objects))
         self.model.check_and_load_plus_model(self.plus_api)
 
+        # Check audio transcription and audio detection requirements
+        if self.audio_transcription.enabled:
+            # If audio transcription is enabled globally, at least one camera must have audio detection enabled
+            if not any(camera.audio.enabled for camera in self.cameras.values()):
+                raise ValueError(
+                    "Audio transcription is enabled globally, but no cameras have audio detection enabled. At least one camera must have audio detection enabled."
+                )
+        else:
+            # If audio transcription is disabled globally, check each camera with audio_transcription enabled
+            for camera in self.cameras.values():
+                if camera.audio_transcription.enabled and not camera.audio.enabled:
+                    raise ValueError(
+                        f"Camera {camera.name} has audio transcription enabled, but audio detection is not enabled for this camera. Audio detection must be enabled for cameras with audio transcription when it is disabled globally."
+                    )
+
         if self.plus_api and not self.snapshots.clean_copy:
             logger.warning(
                 "Frigate+ is configured but clean snapshots are not enabled, submissions to Frigate+ will not be possible./"
@@ -716,6 +732,7 @@ class FrigateConfig(FrigateBaseModel):
 
     @classmethod
     def load(cls, **kwargs):
+        """Loads the Frigate config file, runs migrations, and creates the config object."""
         config_path = find_config_file()
 
         # No configuration file found, create one.

@@ -743,7 +760,7 @@ class FrigateConfig(FrigateBaseModel):
         return FrigateConfig.parse(f, **kwargs)
 
     @classmethod
-    def parse(cls, config, *, is_json=None, **context):
+    def parse(cls, config, *, is_json=None, safe_load=False, **context):
         # If config is a file, read its contents.
         if hasattr(config, "read"):
             fname = getattr(config, "name", None)

@@ -767,6 +784,15 @@ class FrigateConfig(FrigateBaseModel):
             else:
                 config = yaml.load(config)
 
+        # load minimal Frigate config after the full config did not validate
+        if safe_load:
+            safe_config = {"safe_mode": True, "cameras": {}, "mqtt": {"enabled": False}}
+
+            # copy over auth and proxy config in case auth needs to be enforced
+            safe_config["auth"] = config.get("auth", {})
+            safe_config["proxy"] = config.get("proxy", {})
+            return cls.parse_object(safe_config, **context)
+
         # Validate and return the config dict.
         return cls.parse_object(config, **context)
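Note: a sketch of how a caller might use the new safe_load flag after full validation fails (the retry wrapper itself is hypothetical; only FrigateConfig.parse and its safe_load parameter come from this diff):

    def load_config_with_fallback(raw_yaml: str) -> FrigateConfig:
        try:
            return FrigateConfig.parse(raw_yaml)
        except Exception:
            # fall back to the minimal safe-mode config, which keeps the auth
            # and proxy sections but disables cameras and MQTT
            return FrigateConfig.parse(raw_yaml, safe_load=True)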
@@ -1,20 +1,11 @@
-import logging
-from enum import Enum
-
 from pydantic import Field, ValidationInfo, model_validator
 from typing_extensions import Self
 
+from frigate.log import LogLevel, apply_log_levels
+
 from .base import FrigateBaseModel
 
-__all__ = ["LoggerConfig", "LogLevel"]
+__all__ = ["LoggerConfig"]
 
 
-class LogLevel(str, Enum):
-    debug = "debug"
-    info = "info"
-    warning = "warning"
-    error = "error"
-    critical = "critical"
-
-
 class LoggerConfig(FrigateBaseModel):

@@ -26,16 +17,6 @@ class LoggerConfig(FrigateBaseModel):
     @model_validator(mode="after")
     def post_validation(self, info: ValidationInfo) -> Self:
         if isinstance(info.context, dict) and info.context.get("install", False):
-            logging.getLogger().setLevel(self.default.value.upper())
-
-            log_levels = {
-                "httpx": LogLevel.error,
-                "werkzeug": LogLevel.error,
-                "ws4py": LogLevel.error,
-                **self.logs,
-            }
-
-            for log, level in log_levels.items():
-                logging.getLogger(log).setLevel(level.value.upper())
+            apply_log_levels(self.default.value.upper(), self.logs)
 
         return self
@@ -30,7 +30,7 @@ class MqttConfig(FrigateBaseModel):
     )
     tls_client_key: Optional[str] = Field(default=None, title="MQTT TLS Client Key")
     tls_insecure: Optional[bool] = Field(default=None, title="MQTT TLS Insecure")
-    qos: Optional[int] = Field(default=0, title="MQTT QoS")
+    qos: int = Field(default=0, title="MQTT QoS")
 
     @model_validator(mode="after")
     def user_requires_pass(self, info: ValidationInfo) -> Self:
13 frigate/config/network.py Normal file
@@ -0,0 +1,13 @@
+from pydantic import Field
+
+from .base import FrigateBaseModel
+
+__all__ = ["IPv6Config", "NetworkingConfig"]
+
+
+class IPv6Config(FrigateBaseModel):
+    enabled: bool = Field(default=False, title="Enable IPv6 for port 5000 and/or 8971")
+
+
+class NetworkingConfig(FrigateBaseModel):
+    ipv6: IPv6Config = Field(default_factory=IPv6Config, title="Network configuration")
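Note: usage sketch mirroring the defaults above:

    net = NetworkingConfig()
    assert net.ipv6.enabled is False  # IPv6 stays off unless explicitly enabled

    net = NetworkingConfig(ipv6=IPv6Config(enabled=True))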
@@ -11,6 +11,7 @@ EXPORT_DIR = f"{BASE_DIR}/exports"
 FACE_DIR = f"{CLIPS_DIR}/faces"
 THUMB_DIR = f"{CLIPS_DIR}/thumbs"
 RECORD_DIR = f"{BASE_DIR}/recordings"
+TRIGGER_DIR = f"{CLIPS_DIR}/triggers"
 BIRDSEYE_PIPE = "/tmp/cache/birdseye"
 CACHE_DIR = "/tmp/cache"
 FRIGATE_LOCALHOST = "http://127.0.0.1:5000"

@@ -110,10 +111,18 @@ UPSERT_REVIEW_SEGMENT = "upsert_review_segment"
 CLEAR_ONGOING_REVIEW_SEGMENTS = "clear_ongoing_review_segments"
 UPDATE_CAMERA_ACTIVITY = "update_camera_activity"
 UPDATE_EVENT_DESCRIPTION = "update_event_description"
+UPDATE_REVIEW_DESCRIPTION = "update_review_description"
 UPDATE_MODEL_STATE = "update_model_state"
 UPDATE_EMBEDDINGS_REINDEX_PROGRESS = "handle_embeddings_reindex_progress"
+UPDATE_BIRDSEYE_LAYOUT = "update_birdseye_layout"
 NOTIFICATION_TEST = "notification_test"
 
+# IO Nice Values
+
+PROCESS_PRIORITY_HIGH = 0
+PROCESS_PRIORITY_MED = 10
+PROCESS_PRIORITY_LOW = 19
+
 # Stats Values
 
 FREQUENCY_STATS_POINTS = 15
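Note: the new priority constants are plain nice values; a sketch of how a worker might apply one (the helper is hypothetical, only the constants come from this diff):

    import os

    PROCESS_PRIORITY_LOW = 19

    def apply_low_priority() -> None:
        # os.nice() takes an increment relative to the current niceness;
        # an increment of 0 simply reads the current value
        current = os.nice(0)
        if current < PROCESS_PRIORITY_LOW:
            os.nice(PROCESS_PRIORITY_LOW - current)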
81 frigate/data_processing/common/audio_transcription/model.py Normal file
@@ -0,0 +1,81 @@
+"""Set up audio transcription models based on model size."""
+
+import logging
+import os
+
+import sherpa_onnx
+from faster_whisper.utils import download_model
+
+from frigate.comms.inter_process import InterProcessRequestor
+from frigate.const import MODEL_CACHE_DIR
+from frigate.data_processing.types import AudioTranscriptionModel
+from frigate.util.downloader import ModelDownloader
+
+logger = logging.getLogger(__name__)
+
+
+class AudioTranscriptionModelRunner:
+    def __init__(
+        self,
+        device: str = "CPU",
+        model_size: str = "small",
+    ):
+        self.model: AudioTranscriptionModel = None
+        self.requestor = InterProcessRequestor()
+
+        if model_size == "large":
+            # use the Whisper download function instead of our own
+            logger.debug("Downloading Whisper audio transcription model")
+            download_model(
+                size_or_id="small" if device == "cuda" else "tiny",
+                local_files_only=False,
+                cache_dir=os.path.join(MODEL_CACHE_DIR, "whisper"),
+            )
+            logger.debug("Whisper audio transcription model downloaded")
+
+        else:
+            # small model as default
+            download_path = os.path.join(MODEL_CACHE_DIR, "sherpa-onnx")
+            HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
+            self.model_files = {
+                "encoder.onnx": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/encoder-epoch-99-avg-1-chunk-16-left-128.onnx",
+                "decoder.onnx": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/decoder-epoch-99-avg-1-chunk-16-left-128.onnx",
+                "joiner.onnx": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/joiner-epoch-99-avg-1-chunk-16-left-128.onnx",
+                "tokens.txt": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/tokens.txt",
+            }
+
+            if not all(
+                os.path.exists(os.path.join(download_path, n))
+                for n in self.model_files.keys()
+            ):
+                self.downloader = ModelDownloader(
+                    model_name="sherpa-onnx",
+                    download_path=download_path,
+                    file_names=self.model_files.keys(),
+                    download_func=self.__download_models,
+                )
+                self.downloader.ensure_model_files()
+                self.downloader.wait_for_download()
+
+            self.model = sherpa_onnx.OnlineRecognizer.from_transducer(
+                tokens=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/tokens.txt"),
+                encoder=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/encoder.onnx"),
+                decoder=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/decoder.onnx"),
+                joiner=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/joiner.onnx"),
+                num_threads=2,
+                sample_rate=16000,
+                feature_dim=80,
+                enable_endpoint_detection=True,
+                rule1_min_trailing_silence=2.4,
+                rule2_min_trailing_silence=1.2,
+                rule3_min_utterance_length=300,
+                decoding_method="greedy_search",
+                provider="cpu",
+            )
+
+    def __download_models(self, path: str) -> None:
+        try:
+            file_name = os.path.basename(path)
+            ModelDownloader.download_from_url(self.model_files[file_name], path)
+        except Exception as e:
+            logger.error(f"Failed to download {path}: {e}")
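Note: a sketch of feeding the streaming recognizer built above, following sherpa-onnx's documented OnlineRecognizer stream API (the 16 kHz float32 samples are assumed to come from the caller):

    runner = AudioTranscriptionModelRunner(device="CPU", model_size="small")

    stream = runner.model.create_stream()
    stream.accept_waveform(16000, samples)  # samples: 1-D float32 PCM at 16 kHz
    while runner.model.is_ready(stream):
        runner.model.decode_stream(stream)
    print(runner.model.get_result(stream))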
@@ -11,6 +11,7 @@ from scipy import stats
 from frigate.config import FrigateConfig
 from frigate.const import MODEL_CACHE_DIR
 from frigate.embeddings.onnx.face_embedding import ArcfaceEmbedding, FaceNetEmbedding
+from frigate.log import redirect_output_to_logger
 
 logger = logging.getLogger(__name__)
 

@@ -37,6 +38,7 @@ class FaceRecognizer(ABC):
     def classify(self, face_image: np.ndarray) -> tuple[str, float] | None:
         pass
 
+    @redirect_output_to_logger(logger, logging.DEBUG)
    def init_landmark_detector(self) -> None:
        landmark_model = os.path.join(MODEL_CACHE_DIR, "facedet/landmarkdet.yaml")
 

@@ -267,7 +269,7 @@ class ArcFaceRecognizer(FaceRecognizer):
     def __init__(self, config: FrigateConfig):
         super().__init__(config)
         self.mean_embs: dict[int, np.ndarray] = {}
-        self.face_embedder: ArcfaceEmbedding = ArcfaceEmbedding()
+        self.face_embedder: ArcfaceEmbedding = ArcfaceEmbedding(config.face_recognition)
         self.model_builder_queue: queue.Queue | None = None
 
     def clear(self) -> None:
@@ -22,7 +22,7 @@ from frigate.comms.event_metadata_updater import (
     EventMetadataPublisher,
     EventMetadataTypeEnum,
 )
-from frigate.const import CLIPS_DIR
+from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR
 from frigate.embeddings.onnx.lpr_embedding import LPR_EMBEDDING_SIZE
 from frigate.types import TrackedObjectUpdateTypesEnum
 from frigate.util.builtin import EventsPerSecond, InferenceSpeed

@@ -43,7 +43,11 @@ class LicensePlateProcessingMixin:
         self.plates_det_second = EventsPerSecond()
         self.plates_det_second.start()
         self.event_metadata_publisher = EventMetadataPublisher()
-        self.ctc_decoder = CTCDecoder()
+        self.ctc_decoder = CTCDecoder(
+            character_dict_path=os.path.join(
+                MODEL_CACHE_DIR, "paddleocr-onnx", "ppocr_keys_v1.txt"
+            )
+        )
         self.batch_size = 6
 
         # Detection specific parameters

@@ -1168,7 +1172,6 @@ class LicensePlateProcessingMixin:
         event_id = f"{now}-{rand_id}"
 
         self.event_metadata_publisher.publish(
-            EventMetadataTypeEnum.lpr_event_create,
             (
                 now,
                 camera,

@@ -1179,6 +1182,7 @@ class LicensePlateProcessingMixin:
                 None,
                 plate,
             ),
+            EventMetadataTypeEnum.lpr_event_create.value,
         )
         return event_id
 

@@ -1522,7 +1526,7 @@ class LicensePlateProcessingMixin:
         # If it's a known plate, publish to sub_label
         if sub_label is not None:
             self.sub_label_publisher.publish(
-                EventMetadataTypeEnum.sub_label, (id, sub_label, avg_confidence)
+                (id, sub_label, avg_confidence), EventMetadataTypeEnum.sub_label.value
             )
 
         # always publish to recognized_license_plate field

@@ -1541,8 +1545,8 @@ class LicensePlateProcessingMixin:
             ),
         )
         self.sub_label_publisher.publish(
-            EventMetadataTypeEnum.recognized_license_plate,
-            (id, top_plate, avg_confidence),
+            (id, "recognized_license_plate", top_plate, avg_confidence),
+            EventMetadataTypeEnum.attribute.value,
         )
 
         # save the best snapshot for dedicated lpr cams not using frigate+

@@ -1556,8 +1560,8 @@ class LicensePlateProcessingMixin:
             frame_bgr = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
             _, encoded_img = cv2.imencode(".jpg", frame_bgr)
             self.sub_label_publisher.publish(
-                EventMetadataTypeEnum.save_lpr_snapshot,
                 (base64.b64encode(encoded_img).decode("ASCII"), id, camera),
+                EventMetadataTypeEnum.save_lpr_snapshot.value,
             )
 
             if id not in self.detected_license_plates:
@@ -1595,113 +1599,121 @@ class CTCDecoder:
     for each decoded character sequence.
     """
 
-    def __init__(self):
+    def __init__(self, character_dict_path=None):
         """
-        Initialize the CTCDecoder with a list of characters and a character map.
-
-        The character set includes digits, letters, special characters, and a "blank" token
-        (used by the CTC model for decoding purposes). A character map is created to map
-        indices to characters.
-        """
-        self.characters = [
-            "blank",
-            "0",
-            "1",
-            "2",
-            "3",
-            "4",
-            "5",
-            "6",
-            "7",
-            "8",
-            "9",
-            ":",
-            ";",
-            "<",
-            "=",
-            ">",
-            "?",
-            "@",
-            "A",
-            "B",
-            "C",
-            "D",
-            "E",
-            "F",
-            "G",
-            "H",
-            "I",
-            "J",
-            "K",
-            "L",
-            "M",
-            "N",
-            "O",
-            "P",
-            "Q",
-            "R",
-            "S",
-            "T",
-            "U",
-            "V",
-            "W",
-            "X",
-            "Y",
-            "Z",
-            "[",
-            "\\",
-            "]",
-            "^",
-            "_",
-            "`",
-            "a",
-            "b",
-            "c",
-            "d",
-            "e",
-            "f",
-            "g",
-            "h",
-            "i",
-            "j",
-            "k",
-            "l",
-            "m",
-            "n",
-            "o",
-            "p",
-            "q",
-            "r",
-            "s",
-            "t",
-            "u",
-            "v",
-            "w",
-            "x",
-            "y",
-            "z",
-            "{",
-            "|",
-            "}",
-            "~",
-            "!",
-            '"',
-            "#",
-            "$",
-            "%",
-            "&",
-            "'",
-            "(",
-            ")",
-            "*",
-            "+",
-            ",",
-            "-",
-            ".",
-            "/",
-            " ",
-            " ",
-        ]
+        Initializes the CTCDecoder.
+        :param character_dict_path: Path to the character dictionary file.
+                                    If None, a default (English-focused) list is used.
+                                    For Chinese models, this should point to the correct
+                                    character dictionary file provided with the model.
+        """
+        self.characters = []
+        if character_dict_path and os.path.exists(character_dict_path):
+            with open(character_dict_path, "r", encoding="utf-8") as f:
+                self.characters = (
+                    ["blank"] + [line.strip() for line in f if line.strip()] + [" "]
+                )
+        else:
+            self.characters = [
+                "blank",
+                "0",
+                "1",
+                "2",
+                "3",
+                "4",
+                "5",
+                "6",
+                "7",
+                "8",
+                "9",
+                ":",
+                ";",
+                "<",
+                "=",
+                ">",
+                "?",
+                "@",
+                "A",
+                "B",
+                "C",
+                "D",
+                "E",
+                "F",
+                "G",
+                "H",
+                "I",
+                "J",
+                "K",
+                "L",
+                "M",
+                "N",
+                "O",
+                "P",
+                "Q",
+                "R",
+                "S",
+                "T",
+                "U",
+                "V",
+                "W",
+                "X",
+                "Y",
+                "Z",
+                "[",
+                "\\",
+                "]",
+                "^",
+                "_",
+                "`",
+                "a",
+                "b",
+                "c",
+                "d",
+                "e",
+                "f",
+                "g",
+                "h",
+                "i",
+                "j",
+                "k",
+                "l",
+                "m",
+                "n",
+                "o",
+                "p",
+                "q",
+                "r",
+                "s",
+                "t",
+                "u",
+                "v",
+                "w",
+                "x",
+                "y",
+                "z",
+                "{",
+                "|",
+                "}",
+                "~",
+                "!",
+                '"',
+                "#",
+                "$",
+                "%",
+                "&",
+                "'",
+                "(",
+                ")",
+                "*",
+                "+",
+                ",",
+                "-",
+                ".",
+                "/",
+                " ",
+                " ",
+            ]
         self.char_map = {i: char for i, char in enumerate(self.characters)}
 
     def __call__(

@@ -1735,7 +1747,7 @@ class CTCDecoder:
                     merged_path.append(char_index)
                    merged_probs.append(seq_log_probs[t, char_index])
 
            result = "".join(self.char_map[idx] for idx in merged_path)
-            result = "".join(self.char_map[idx] for idx in merged_path)
+            result = "".join(self.char_map.get(idx, "") for idx in merged_path)
            results.append(result)
 
            confidence = np.exp(merged_probs).tolist()
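Note: the greedy CTC collapse that __call__ performs (best class per timestep, merge consecutive repeats, drop blanks) can be sketched independently; index 0 is the "blank" token per the character list above:

    import numpy as np

    def greedy_ctc_collapse(seq_log_probs: np.ndarray, char_map: dict[int, str]) -> str:
        best_path = seq_log_probs.argmax(axis=1)  # best class per timestep
        merged = [
            int(idx)
            for i, idx in enumerate(best_path)
            if idx != 0 and (i == 0 or idx != best_path[i - 1])
        ]
        return "".join(char_map.get(idx, "") for idx in merged)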
@@ -39,7 +39,9 @@ class PostProcessorApi(ABC):
         pass
 
     @abstractmethod
-    def handle_request(self, request_data: dict[str, Any]) -> dict[str, Any] | None:
+    def handle_request(
+        self, topic: str, request_data: dict[str, Any]
+    ) -> dict[str, Any] | None:
         """Handle metadata requests.
         Args:
             request_data (dict): containing data about requested change to process.
212 frigate/data_processing/post/audio_transcription.py Normal file
@@ -0,0 +1,212 @@
+"""Handle post-processing for audio transcription."""
+
+import logging
+import os
+import threading
+import time
+from typing import Optional
+
+from faster_whisper import WhisperModel
+from peewee import DoesNotExist
+
+from frigate.comms.embeddings_updater import EmbeddingsRequestEnum
+from frigate.comms.inter_process import InterProcessRequestor
+from frigate.config import FrigateConfig
+from frigate.const import (
+    CACHE_DIR,
+    MODEL_CACHE_DIR,
+    UPDATE_EVENT_DESCRIPTION,
+)
+from frigate.data_processing.types import PostProcessDataEnum
+from frigate.types import TrackedObjectUpdateTypesEnum
+from frigate.util.audio import get_audio_from_recording
+
+from ..types import DataProcessorMetrics
+from .api import PostProcessorApi
+
+logger = logging.getLogger(__name__)
+
+
+class AudioTranscriptionPostProcessor(PostProcessorApi):
+    def __init__(
+        self,
+        config: FrigateConfig,
+        requestor: InterProcessRequestor,
+        metrics: DataProcessorMetrics,
+    ):
+        super().__init__(config, metrics, None)
+        self.config = config
+        self.requestor = requestor
+        self.recognizer = None
+        self.transcription_lock = threading.Lock()
+        self.transcription_thread = None
+        self.transcription_running = False
+
+        # faster-whisper handles model downloading automatically
+        self.model_path = os.path.join(MODEL_CACHE_DIR, "whisper")
+        os.makedirs(self.model_path, exist_ok=True)
+
+        self.__build_recognizer()
+
+    def __build_recognizer(self) -> None:
+        try:
+            self.recognizer = WhisperModel(
+                model_size_or_path="small",
+                device="cuda"
+                if self.config.audio_transcription.device == "GPU"
+                else "cpu",
+                download_root=self.model_path,
+                local_files_only=False,  # Allow downloading if not cached
+                compute_type="int8",
+            )
+            logger.debug("Audio transcription (recordings) initialized")
+        except Exception as e:
+            logger.error(f"Failed to initialize recordings audio transcription: {e}")
+            self.recognizer = None
+
+    def process_data(
+        self, data: dict[str, any], data_type: PostProcessDataEnum
+    ) -> None:
+        """Transcribe audio from a recording.
+
+        Args:
+            data (dict): Contains data about the input (event_id, camera, etc.).
+            data_type (enum): Describes the data being processed (recording or tracked_object).
+
+        Returns:
+            None
+        """
+        event_id = data["event_id"]
+        camera_name = data["camera"]
+
+        if data_type == PostProcessDataEnum.recording:
+            start_ts = data["frame_time"]
+            recordings_available_through = data["recordings_available"]
+            end_ts = min(recordings_available_through, start_ts + 60)  # Default 60s
+
+        elif data_type == PostProcessDataEnum.tracked_object:
+            obj_data = data["event"]["data"]
+            obj_data["id"] = data["event"]["id"]
+            obj_data["camera"] = data["event"]["camera"]
+            start_ts = data["event"]["start_time"]
+            end_ts = data["event"].get(
+                "end_time", start_ts + 60
+            )  # Use end_time if available
+
+        else:
+            logger.error("No data type passed to audio transcription post-processing")
+            return
+
+        try:
+            audio_data = get_audio_from_recording(
+                self.config.cameras[camera_name].ffmpeg,
+                camera_name,
+                start_ts,
+                end_ts,
+                sample_rate=16000,
+            )
+
+            if not audio_data:
+                logger.debug(f"No audio data extracted for {event_id}")
+                return
+
+            transcription = self.__transcribe_audio(audio_data)
+            if not transcription:
+                logger.debug("No transcription generated from audio")
+                return
+
+            logger.debug(f"Transcribed audio for {event_id}: '{transcription}'")
+
+            self.requestor.send_data(
+                UPDATE_EVENT_DESCRIPTION,
+                {
+                    "type": TrackedObjectUpdateTypesEnum.description,
+                    "id": event_id,
+                    "description": transcription,
+                    "camera": camera_name,
+                },
+            )
+
+            # Embed the description
+            self.requestor.send_data(
+                EmbeddingsRequestEnum.embed_description.value,
+                {"id": event_id, "description": transcription},
+            )
+
+        except DoesNotExist:
+            logger.debug("No recording found for audio transcription post-processing")
+            return
+        except Exception as e:
+            logger.error(f"Error in audio transcription post-processing: {e}")
+
+    def __transcribe_audio(self, audio_data: bytes) -> Optional[tuple[str, float]]:
+        """Transcribe WAV audio data using faster-whisper."""
+        if not self.recognizer:
+            logger.debug("Recognizer not initialized")
+            return None
+
+        try:
+            # Save audio data to a temporary wav (faster-whisper expects a file)
+            temp_wav = os.path.join(CACHE_DIR, f"temp_audio_{int(time.time())}.wav")
+            with open(temp_wav, "wb") as f:
+                f.write(audio_data)
+
+            segments, info = self.recognizer.transcribe(
+                temp_wav,
+                language=self.config.audio_transcription.language,
+                beam_size=5,
+            )
+
+            os.remove(temp_wav)
+
+            # Combine all segment texts
+            text = " ".join(segment.text.strip() for segment in segments)
+            if not text:
+                return None
+
+            logger.debug(
+                "Detected language '%s' with probability %f"
+                % (info.language, info.language_probability)
+            )
+
+            return text
+        except Exception as e:
+            logger.error(f"Error transcribing audio: {e}")
+            return None
+
+    def _transcription_wrapper(self, event: dict[str, any]) -> None:
+        """Wrapper to run transcription and reset running flag when done."""
+        try:
+            self.process_data(
+                {
+                    "event_id": event["id"],
+                    "camera": event["camera"],
+                    "event": event,
+                },
+                PostProcessDataEnum.tracked_object,
+            )
+        finally:
+            with self.transcription_lock:
+                self.transcription_running = False
+                self.transcription_thread = None
+
+    def handle_request(self, topic: str, request_data: dict[str, any]) -> str | None:
+        if topic == "transcribe_audio":
+            event = request_data["event"]
+
+            with self.transcription_lock:
+                if self.transcription_running:
+                    logger.warning(
+                        "Audio transcription for a speech event is already running."
+                    )
+                    return "in_progress"
+
+                # Mark as running and start the thread
+                self.transcription_running = True
+                self.transcription_thread = threading.Thread(
+                    target=self._transcription_wrapper, args=(event,), daemon=True
+                )
+                self.transcription_thread.start()
+                return "started"
+
+        return None
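Note: a sketch of exercising the request path above (the wiring of config, requestor, and metrics is assumed; the "transcribe_audio" topic and event payload shape come from handle_request):

    processor = AudioTranscriptionPostProcessor(config, requestor, metrics)
    status = processor.handle_request(
        "transcribe_audio",
        {"event": {"id": "1718000000.0-abc123", "camera": "doorbell",
                   "start_time": 1718000000.0, "data": {}}},  # hypothetical event
    )
    # "started" means a background thread was kicked off;
    # "in_progress" means a transcription is already running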
266
frigate/data_processing/post/review_descriptions.py
Normal file
266
frigate/data_processing/post/review_descriptions.py
Normal file
@ -0,0 +1,266 @@
|
|||||||
|
"""Post processor for review items to get descriptions."""
|
||||||
|
|
||||||
|
import copy
|
||||||
|
import datetime
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import shutil
|
||||||
|
import threading
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
import cv2
|
||||||
|
|
||||||
|
from frigate.comms.embeddings_updater import EmbeddingsRequestEnum
|
||||||
|
from frigate.comms.inter_process import InterProcessRequestor
|
||||||
|
from frigate.config import FrigateConfig
|
||||||
|
from frigate.config.camera.review import GenAIReviewConfig
|
||||||
|
from frigate.const import CACHE_DIR, CLIPS_DIR, UPDATE_REVIEW_DESCRIPTION
|
||||||
|
from frigate.data_processing.types import PostProcessDataEnum
|
||||||
|
from frigate.genai import GenAIClient
|
||||||
|
from frigate.models import ReviewSegment
|
||||||
|
from frigate.util.builtin import EventsPerSecond, InferenceSpeed
|
||||||
|
|
||||||
|
from ..post.api import PostProcessorApi
|
||||||
|
from ..types import DataProcessorMetrics
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class ReviewDescriptionProcessor(PostProcessorApi):
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
config: FrigateConfig,
|
||||||
|
requestor: InterProcessRequestor,
|
||||||
|
metrics: DataProcessorMetrics,
|
||||||
|
client: GenAIClient,
|
||||||
|
):
|
||||||
|
super().__init__(config, metrics, None)
|
||||||
|
self.requestor = requestor
|
||||||
|
self.metrics = metrics
|
||||||
|
self.genai_client = client
|
||||||
|
self.review_desc_speed = InferenceSpeed(self.metrics.review_desc_speed)
|
||||||
|
self.review_descs_dps = EventsPerSecond()
|
||||||
|
self.review_descs_dps.start()
|
||||||
|
|
||||||
|
def process_data(self, data, data_type):
|
||||||
|
self.metrics.review_desc_dps.value = self.review_descs_dps.eps()
|
||||||
|
|
||||||
|
if data_type != PostProcessDataEnum.review:
|
||||||
|
return
|
||||||
|
|
||||||
|
camera = data["after"]["camera"]
|
||||||
|
camera_config = self.config.cameras[camera]
|
||||||
|
|
||||||
|
if not camera_config.review.genai.enabled:
|
||||||
|
return
|
||||||
|
|
||||||
|
id = data["after"]["id"]
|
||||||
|
|
||||||
|
if data["type"] == "new" or data["type"] == "update":
|
||||||
|
return
|
||||||
|
else:
|
||||||
|
final_data = data["after"]
|
||||||
|
|
||||||
|
if (
|
||||||
|
final_data["severity"] == "alert"
|
||||||
|
and not camera_config.review.genai.alerts
|
||||||
|
):
|
||||||
|
return
|
||||||
|
elif (
|
||||||
|
final_data["severity"] == "detection"
|
||||||
|
and not camera_config.review.genai.detections
|
||||||
|
):
|
||||||
|
                return

            frames = self.get_cache_frames(
                camera, final_data["start_time"], final_data["end_time"]
            )

            if not frames:
                frames = [final_data["thumb_path"]]

            thumbs = []

            for idx, thumb_path in enumerate(frames):
                thumb_data = cv2.imread(thumb_path)
                ret, jpg = cv2.imencode(
                    ".jpg", thumb_data, [int(cv2.IMWRITE_JPEG_QUALITY), 100]
                )

                if ret:
                    thumbs.append(jpg.tobytes())

                if camera_config.review.genai.debug_save_thumbnails:
                    id = data["after"]["id"]
                    Path(os.path.join(CLIPS_DIR, f"genai-requests/{id}")).mkdir(
                        parents=True, exist_ok=True
                    )
                    shutil.copy(
                        thumb_path,
                        os.path.join(
                            CLIPS_DIR,
                            f"genai-requests/{id}/{idx}.webp",
                        ),
                    )

            # kickoff analysis
            self.review_descs_dps.update()
            threading.Thread(
                target=run_analysis,
                args=(
                    self.requestor,
                    self.genai_client,
                    self.review_desc_speed,
                    camera,
                    final_data,
                    thumbs,
                    camera_config.review.genai,
                    list(self.config.model.merged_labelmap.values()),
                ),
            ).start()

    def handle_request(self, topic, request_data):
        if topic == EmbeddingsRequestEnum.summarize_review.value:
            start_ts = request_data["start_ts"]
            end_ts = request_data["end_ts"]
            items: list[dict[str, Any]] = [
                r["data"]["metadata"]
                for r in (
                    ReviewSegment.select(ReviewSegment.data)
                    .where(
                        (ReviewSegment.data["metadata"].is_null(False))
                        & (ReviewSegment.start_time < end_ts)
                        & (ReviewSegment.end_time > start_ts)
                    )
                    .order_by(ReviewSegment.start_time.asc())
                    .dicts()
                    .iterator()
                )
            ]

            if len(items) == 0:
                logger.debug("No review items with metadata found during time period")
                return None

            important_items = list(
                filter(
                    lambda item: item.get("potential_threat_level", 0) > 0
                    or item.get("other_concerns"),
                    items,
                )
            )

            if not important_items:
                return "No concerns were found during this time period."

            return self.genai_client.generate_review_summary(
                start_ts, end_ts, important_items
            )
        else:
            return None

    def get_cache_frames(
        self,
        camera: str,
        start_time: float,
        end_time: float,
        desired_frame_count: int = 12,
    ) -> list[str]:
        preview_dir = os.path.join(CACHE_DIR, "preview_frames")
        file_start = f"preview_{camera}"
        start_file = f"{file_start}-{start_time}.webp"
        end_file = f"{file_start}-{end_time}.webp"
        all_frames = []

        for file in sorted(os.listdir(preview_dir)):
            if not file.startswith(file_start):
                continue

            if file < start_file:
                if len(all_frames):
                    all_frames[0] = os.path.join(preview_dir, file)
                else:
                    all_frames.append(os.path.join(preview_dir, file))

                continue

            if file > end_file:
                all_frames.append(os.path.join(preview_dir, file))
                break

            all_frames.append(os.path.join(preview_dir, file))

        frame_count = len(all_frames)
        if frame_count <= desired_frame_count:
            return all_frames

        selected_frames = []
        step_size = (frame_count - 1) / (desired_frame_count - 1)

        for i in range(desired_frame_count):
            index = round(i * step_size)
            selected_frames.append(all_frames[index])

        return selected_frames


@staticmethod
def run_analysis(
    requestor: InterProcessRequestor,
    genai_client: GenAIClient,
    review_inference_speed: InferenceSpeed,
    camera: str,
    final_data: dict[str, str],
    thumbs: list[bytes],
    genai_config: GenAIReviewConfig,
    labelmap_objects: list[str],
) -> None:
    start = datetime.datetime.now().timestamp()
    analytics_data = {
        "id": final_data["id"],
        "camera": camera,
        "zones": final_data["data"]["zones"],
        "start": datetime.datetime.fromtimestamp(final_data["start_time"]).strftime(
            "%A, %I:%M %p"
        ),
        "duration": final_data["end_time"] - final_data["start_time"],
    }

    objects = []
    verified_objects = []

    for label in set(final_data["data"]["objects"] + final_data["data"]["sub_labels"]):
        if "-verified" in label:
            continue

        if label in labelmap_objects:
            objects.append(label.replace("_", " ").title())
        else:
            verified_objects.append(label.replace("_", " ").title())

    analytics_data["objects"] = objects
    analytics_data["recognized_objects"] = verified_objects

    metadata = genai_client.generate_review_description(
        analytics_data,
        thumbs,
        genai_config.additional_concerns,
        genai_config.preferred_language,
        genai_config.debug_save_thumbnails,
    )
    review_inference_speed.update(datetime.datetime.now().timestamp() - start)

    if not metadata:
        return None

    prev_data = copy.deepcopy(final_data)
    final_data["data"]["metadata"] = metadata.model_dump()
    requestor.send_data(
        UPDATE_REVIEW_DESCRIPTION,
        {
            "type": "genai",
            "before": {k: v for k, v in prev_data.items()},
            "after": {k: v for k, v in final_data.items()},
        },
    )
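The frame selection above reduces to simple even sampling: when more cached preview frames exist than requested, indices are spaced (N - 1) / (k - 1) apart so the first and last frames are always kept. A minimal standalone sketch of that math (the frame names below are invented):

def sample_evenly(frames: list[str], desired_frame_count: int = 12) -> list[str]:
    # mirrors get_cache_frames: short lists pass through unchanged
    if len(frames) <= desired_frame_count:
        return frames

    # (N - 1) / (k - 1) spacing keeps index 0 and index N - 1 in the result
    step_size = (len(frames) - 1) / (desired_frame_count - 1)
    return [frames[round(i * step_size)] for i in range(desired_frame_count)]

frames = [f"preview_cam-{i}.webp" for i in range(40)]
print(sample_evenly(frames, 5))  # first frame, last frame, three evenly spaced between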
233
frigate/data_processing/post/semantic_trigger.py
Normal file
@ -0,0 +1,233 @@
"""Post time processor to trigger actions based on similar embeddings."""
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
import cv2
|
||||||
|
import numpy as np
|
||||||
|
from peewee import DoesNotExist
|
||||||
|
|
||||||
|
from frigate.comms.inter_process import InterProcessRequestor
|
||||||
|
from frigate.config import FrigateConfig
|
||||||
|
from frigate.const import CONFIG_DIR
|
||||||
|
from frigate.data_processing.types import PostProcessDataEnum
|
||||||
|
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
|
||||||
|
from frigate.embeddings.util import ZScoreNormalization
|
||||||
|
from frigate.models import Event, Trigger
|
||||||
|
from frigate.util.builtin import cosine_distance
|
||||||
|
from frigate.util.path import get_event_thumbnail_bytes
|
||||||
|
|
||||||
|
from ..post.api import PostProcessorApi
|
||||||
|
from ..types import DataProcessorMetrics
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
WRITE_DEBUG_IMAGES = False
|
||||||
|
|
||||||
|
|
||||||
|
class SemanticTriggerProcessor(PostProcessorApi):
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
db: SqliteVecQueueDatabase,
|
||||||
|
config: FrigateConfig,
|
||||||
|
requestor: InterProcessRequestor,
|
||||||
|
metrics: DataProcessorMetrics,
|
||||||
|
embeddings,
|
||||||
|
):
|
||||||
|
super().__init__(config, metrics, None)
|
||||||
|
self.db = db
|
||||||
|
self.embeddings = embeddings
|
||||||
|
self.requestor = requestor
|
||||||
|
self.trigger_embeddings: list[np.ndarray] = []
|
||||||
|
|
||||||
|
self.thumb_stats = ZScoreNormalization()
|
||||||
|
self.desc_stats = ZScoreNormalization()
|
||||||
|
|
||||||
|
# load stats from disk
|
||||||
|
try:
|
||||||
|
with open(os.path.join(CONFIG_DIR, ".search_stats.json"), "r") as f:
|
||||||
|
data = json.loads(f.read())
|
||||||
|
self.thumb_stats.from_dict(data["thumb_stats"])
|
||||||
|
self.desc_stats.from_dict(data["desc_stats"])
|
||||||
|
except FileNotFoundError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def process_data(
|
||||||
|
self, data: dict[str, Any], data_type: PostProcessDataEnum
|
||||||
|
) -> None:
|
||||||
|
event_id = data["event_id"]
|
||||||
|
camera = data["camera"]
|
||||||
|
process_type = data["type"]
|
||||||
|
|
||||||
|
if self.config.cameras[camera].semantic_search.triggers is None:
|
||||||
|
return
|
||||||
|
|
||||||
|
triggers = (
|
||||||
|
Trigger.select(
|
||||||
|
Trigger.camera,
|
||||||
|
Trigger.name,
|
||||||
|
Trigger.data,
|
||||||
|
Trigger.type,
|
||||||
|
Trigger.embedding,
|
||||||
|
Trigger.threshold,
|
||||||
|
)
|
||||||
|
.where(Trigger.camera == camera)
|
||||||
|
.dicts()
|
||||||
|
.iterator()
|
||||||
|
)
|
||||||
|
|
||||||
|
for trigger in triggers:
|
||||||
|
if (
|
||||||
|
trigger["name"]
|
||||||
|
not in self.config.cameras[camera].semantic_search.triggers
|
||||||
|
or not self.config.cameras[camera]
|
||||||
|
.semantic_search.triggers[trigger["name"]]
|
||||||
|
.enabled
|
||||||
|
):
|
||||||
|
logger.debug(
|
||||||
|
f"Trigger {trigger['name']} is disabled for camera {camera}"
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
logger.debug(
|
||||||
|
f"Processing {trigger['type']} trigger for {event_id} on {trigger['camera']}: {trigger['name']}"
|
||||||
|
)
|
||||||
|
|
||||||
|
trigger_embedding = np.frombuffer(trigger["embedding"], dtype=np.float32)
|
||||||
|
|
||||||
|
# Get embeddings based on type
|
||||||
|
thumbnail_embedding = None
|
||||||
|
description_embedding = None
|
||||||
|
|
||||||
|
if process_type == "image":
|
||||||
|
cursor = self.db.execute_sql(
|
||||||
|
"""
|
||||||
|
SELECT thumbnail_embedding FROM vec_thumbnails WHERE id = ?
|
||||||
|
""",
|
||||||
|
[event_id],
|
||||||
|
)
|
||||||
|
row = cursor.fetchone() if cursor else None
|
||||||
|
if row:
|
||||||
|
thumbnail_embedding = np.frombuffer(row[0], dtype=np.float32)
|
||||||
|
|
||||||
|
if process_type == "text":
|
||||||
|
cursor = self.db.execute_sql(
|
||||||
|
"""
|
||||||
|
SELECT description_embedding FROM vec_descriptions WHERE id = ?
|
||||||
|
""",
|
||||||
|
[event_id],
|
||||||
|
)
|
||||||
|
row = cursor.fetchone() if cursor else None
|
||||||
|
if row:
|
||||||
|
description_embedding = np.frombuffer(row[0], dtype=np.float32)
|
||||||
|
|
||||||
|
# Skip processing if we don't have any embeddings
|
||||||
|
if thumbnail_embedding is None and description_embedding is None:
|
||||||
|
logger.debug(f"No embeddings found for {event_id}")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Determine which embedding to compare based on trigger type
|
||||||
|
if (
|
||||||
|
trigger["type"] in ["text", "thumbnail"]
|
||||||
|
and thumbnail_embedding is not None
|
||||||
|
):
|
||||||
|
data_embedding = thumbnail_embedding
|
||||||
|
normalized_distance = self.thumb_stats.normalize(
|
||||||
|
[cosine_distance(data_embedding, trigger_embedding)],
|
||||||
|
save_stats=False,
|
||||||
|
)[0]
|
||||||
|
elif trigger["type"] == "description" and description_embedding is not None:
|
||||||
|
data_embedding = description_embedding
|
||||||
|
normalized_distance = self.desc_stats.normalize(
|
||||||
|
[cosine_distance(data_embedding, trigger_embedding)],
|
||||||
|
save_stats=False,
|
||||||
|
)[0]
|
||||||
|
|
||||||
|
else:
|
||||||
|
continue
|
||||||
|
|
||||||
|
similarity = 1 - normalized_distance
|
||||||
|
|
||||||
|
logger.debug(
|
||||||
|
f"Trigger {trigger['name']} ({trigger['data'] if trigger['type'] == 'text' or trigger['type'] == 'description' else 'image'}): "
|
||||||
|
f"normalized distance: {normalized_distance:.4f}, "
|
||||||
|
f"similarity: {similarity:.4f}, threshold: {trigger['threshold']}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Check if similarity meets threshold
|
||||||
|
if similarity >= trigger["threshold"]:
|
||||||
|
logger.info(
|
||||||
|
f"Trigger {trigger['name']} activated with similarity {similarity:.4f}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Update the trigger's last_triggered and triggering_event_id
|
||||||
|
Trigger.update(
|
||||||
|
last_triggered=datetime.datetime.now(), triggering_event_id=event_id
|
||||||
|
).where(
|
||||||
|
Trigger.camera == camera, Trigger.name == trigger["name"]
|
||||||
|
).execute()
|
||||||
|
|
||||||
|
# Always publish MQTT message
|
||||||
|
self.requestor.send_data(
|
||||||
|
"triggers",
|
||||||
|
json.dumps(
|
||||||
|
{
|
||||||
|
"name": trigger["name"],
|
||||||
|
"camera": camera,
|
||||||
|
"event_id": event_id,
|
||||||
|
"type": trigger["type"],
|
||||||
|
"score": similarity,
|
||||||
|
}
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
if (
|
||||||
|
self.config.cameras[camera]
|
||||||
|
.semantic_search.triggers[trigger["name"]]
|
||||||
|
.actions
|
||||||
|
):
|
||||||
|
# TODO: handle actions for the trigger
|
||||||
|
# notifications already handled by webpush
|
||||||
|
pass
|
||||||
|
|
||||||
|
if WRITE_DEBUG_IMAGES:
|
||||||
|
try:
|
||||||
|
event: Event = Event.get(Event.id == event_id)
|
||||||
|
except DoesNotExist:
|
||||||
|
return
|
||||||
|
|
||||||
|
# Skip the event if not an object
|
||||||
|
if event.data.get("type") != "object":
|
||||||
|
return
|
||||||
|
|
||||||
|
thumbnail_bytes = get_event_thumbnail_bytes(event)
|
||||||
|
|
||||||
|
nparr = np.frombuffer(thumbnail_bytes, np.uint8)
|
||||||
|
thumbnail = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
|
||||||
|
|
||||||
|
font_scale = 0.5
|
||||||
|
font = cv2.FONT_HERSHEY_SIMPLEX
|
||||||
|
cv2.putText(
|
||||||
|
thumbnail,
|
||||||
|
f"{similarity:.4f}",
|
||||||
|
(10, 30),
|
||||||
|
font,
|
||||||
|
fontScale=font_scale,
|
||||||
|
color=(0, 255, 0),
|
||||||
|
thickness=2,
|
||||||
|
)
|
||||||
|
|
||||||
|
current_time = int(datetime.datetime.now().timestamp())
|
||||||
|
cv2.imwrite(
|
||||||
|
f"debug/frames/trigger-{event_id}_{current_time}.jpg",
|
||||||
|
thumbnail,
|
||||||
|
)
|
||||||
|
|
||||||
|
def handle_request(self, topic, request_data):
|
||||||
|
return None
|
||||||
|
|
||||||
|
def expire_object(self, object_id, camera):
|
||||||
|
pass
|
||||||
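The comparison at the heart of SemanticTriggerProcessor is: cosine distance between the event embedding and the stored trigger embedding, a z-score style normalization, then similarity = 1 - normalized distance checked against the trigger threshold. A rough sketch under stated assumptions; cosine_distance is reimplemented here and the normalization stats are hard-coded stand-ins, since ZScoreNormalization's internals are not part of this diff:

import numpy as np

def cosine_distance(a: np.ndarray, b: np.ndarray) -> float:
    # 1 - cosine similarity of the two embedding vectors
    return 1.0 - float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

def normalize(distance: float, mean: float = 0.5, stddev: float = 0.25) -> float:
    # stand-in for ZScoreNormalization; the real class tracks running stats
    return (distance - mean) / stddev

event_embedding = np.random.rand(768).astype(np.float32)
trigger_embedding = np.random.rand(768).astype(np.float32)
threshold = 0.8  # plays the role of trigger["threshold"]

similarity = 1 - normalize(cosine_distance(event_embedding, trigger_embedding))
if similarity >= threshold:
    print(f"trigger activated with similarity {similarity:.4f}")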
21
frigate/data_processing/post/types.py
Normal file
@ -0,0 +1,21 @@
from pydantic import BaseModel, ConfigDict, Field


class ReviewMetadata(BaseModel):
    model_config = ConfigDict(extra="ignore", protected_namespaces=())

    scene: str = Field(
        description="A comprehensive description of the setting and entities, including relevant context and plausible inferences if supported by visual evidence."
    )
    confidence: float = Field(
        description="A float between 0 and 1 representing your overall confidence in this analysis."
    )
    potential_threat_level: int = Field(
        ge=0,
        le=3,
        description="An integer representing the potential threat level (1-3). 1: Minor anomaly. 2: Moderate concern. 3: High threat. Only include this field if a clear security concern is observable; otherwise, omit it.",
    )
    other_concerns: list[str] | None = Field(
        default=None,
        description="Other concerns highlighted by the user that are observed.",
    )
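Since ReviewMetadata is a pydantic model configured with extra="ignore", a GenAI response can be validated directly and unknown keys are dropped. A small usage sketch; the JSON payload below is invented for illustration:

raw = {
    "scene": "A person walks up the driveway carrying a package.",
    "confidence": 0.87,
    "potential_threat_level": 0,
    "other_concerns": None,
    "unexpected_key": "silently dropped",  # extra="ignore" discards unknown keys
}

metadata = ReviewMetadata.model_validate(raw)
print(metadata.model_dump())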
281
frigate/data_processing/real_time/audio_transcription.py
Normal file
@ -0,0 +1,281 @@
"""Handle processing audio for speech transcription using sherpa-onnx with FFmpeg pipe."""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import queue
|
||||||
|
import threading
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from frigate.comms.inter_process import InterProcessRequestor
|
||||||
|
from frigate.config import CameraConfig, FrigateConfig
|
||||||
|
from frigate.const import MODEL_CACHE_DIR
|
||||||
|
from frigate.data_processing.common.audio_transcription.model import (
|
||||||
|
AudioTranscriptionModelRunner,
|
||||||
|
)
|
||||||
|
from frigate.data_processing.real_time.whisper_online import (
|
||||||
|
FasterWhisperASR,
|
||||||
|
OnlineASRProcessor,
|
||||||
|
)
|
||||||
|
|
||||||
|
from ..types import DataProcessorMetrics
|
||||||
|
from .api import RealTimeProcessorApi
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi):
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
config: FrigateConfig,
|
||||||
|
camera_config: CameraConfig,
|
||||||
|
requestor: InterProcessRequestor,
|
||||||
|
model_runner: AudioTranscriptionModelRunner,
|
||||||
|
metrics: DataProcessorMetrics,
|
||||||
|
stop_event: threading.Event,
|
||||||
|
):
|
||||||
|
super().__init__(config, metrics)
|
||||||
|
self.config = config
|
||||||
|
self.camera_config = camera_config
|
||||||
|
self.requestor = requestor
|
||||||
|
self.stream = None
|
||||||
|
self.whisper_model = None
|
||||||
|
self.model_runner = model_runner
|
||||||
|
self.transcription_segments = []
|
||||||
|
self.audio_queue = queue.Queue()
|
||||||
|
self.stop_event = stop_event
|
||||||
|
|
||||||
|
def __build_recognizer(self) -> None:
|
||||||
|
try:
|
||||||
|
if self.config.audio_transcription.model_size == "large":
|
||||||
|
# Whisper models need to be per-process and can only run one stream at a time
|
||||||
|
# TODO: try parallel: https://github.com/SYSTRAN/faster-whisper/issues/100
|
||||||
|
logger.debug(f"Loading Whisper model for {self.camera_config.name}")
|
||||||
|
self.whisper_model = FasterWhisperASR(
|
||||||
|
modelsize="tiny",
|
||||||
|
device="cuda"
|
||||||
|
if self.config.audio_transcription.device == "GPU"
|
||||||
|
else "cpu",
|
||||||
|
lan=self.config.audio_transcription.language,
|
||||||
|
model_dir=os.path.join(MODEL_CACHE_DIR, "whisper"),
|
||||||
|
)
|
||||||
|
self.whisper_model.use_vad()
|
||||||
|
self.stream = OnlineASRProcessor(
|
||||||
|
asr=self.whisper_model,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logger.debug(f"Loading sherpa stream for {self.camera_config.name}")
|
||||||
|
self.stream = self.model_runner.model.create_stream()
|
||||||
|
logger.debug(
|
||||||
|
f"Audio transcription (live) initialized for {self.camera_config.name}"
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(
|
||||||
|
f"Failed to initialize live streaming audio transcription: {e}"
|
||||||
|
)
|
||||||
|
|
||||||
|
def __process_audio_stream(
|
||||||
|
self, audio_data: np.ndarray
|
||||||
|
) -> Optional[tuple[str, bool]]:
|
||||||
|
if (
|
||||||
|
self.model_runner.model is None
|
||||||
|
and self.config.audio_transcription.model_size == "small"
|
||||||
|
):
|
||||||
|
logger.debug("Audio transcription (live) model not initialized")
|
||||||
|
return None
|
||||||
|
|
||||||
|
if not self.stream:
|
||||||
|
self.__build_recognizer()
|
||||||
|
|
||||||
|
try:
|
||||||
|
if audio_data.dtype != np.float32:
|
||||||
|
audio_data = audio_data.astype(np.float32)
|
||||||
|
|
||||||
|
if audio_data.max() > 1.0 or audio_data.min() < -1.0:
|
||||||
|
audio_data = audio_data / 32768.0 # Normalize from int16
|
||||||
|
|
||||||
|
rms = float(np.sqrt(np.mean(np.absolute(np.square(audio_data)))))
|
||||||
|
logger.debug(f"Audio chunk size: {audio_data.size}, RMS: {rms:.4f}")
|
||||||
|
|
||||||
|
if self.config.audio_transcription.model_size == "large":
|
||||||
|
# large model
|
||||||
|
self.stream.insert_audio_chunk(audio_data)
|
||||||
|
output = self.stream.process_iter()
|
||||||
|
text = output[2].strip()
|
||||||
|
is_endpoint = (
|
||||||
|
text.endswith((".", "!", "?"))
|
||||||
|
and sum(len(str(lines)) for lines in self.transcription_segments)
|
||||||
|
> 300
|
||||||
|
)
|
||||||
|
|
||||||
|
if text:
|
||||||
|
self.transcription_segments.append(text)
|
||||||
|
concatenated_text = " ".join(self.transcription_segments)
|
||||||
|
logger.debug(f"Concatenated transcription: '{concatenated_text}'")
|
||||||
|
text = concatenated_text
|
||||||
|
|
||||||
|
else:
|
||||||
|
# small model
|
||||||
|
self.stream.accept_waveform(16000, audio_data)
|
||||||
|
|
||||||
|
while self.model_runner.model.is_ready(self.stream):
|
||||||
|
self.model_runner.model.decode_stream(self.stream)
|
||||||
|
|
||||||
|
text = self.model_runner.model.get_result(self.stream).strip()
|
||||||
|
is_endpoint = self.model_runner.model.is_endpoint(self.stream)
|
||||||
|
|
||||||
|
logger.debug(f"Transcription result: '{text}'")
|
||||||
|
|
||||||
|
if not text:
|
||||||
|
logger.debug("No transcription, returning")
|
||||||
|
return None
|
||||||
|
|
||||||
|
logger.debug(f"Endpoint detected: {is_endpoint}")
|
||||||
|
|
||||||
|
if is_endpoint and self.config.audio_transcription.model_size == "small":
|
||||||
|
# reset sherpa if we've reached an endpoint
|
||||||
|
self.model_runner.model.reset(self.stream)
|
||||||
|
|
||||||
|
return text, is_endpoint
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error processing audio stream: {e}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
def process_frame(self, obj_data: dict[str, any], frame: np.ndarray) -> None:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def process_audio(self, obj_data: dict[str, any], audio: np.ndarray) -> bool | None:
|
||||||
|
if audio is None or audio.size == 0:
|
||||||
|
logger.debug("No audio data provided for transcription")
|
||||||
|
return None
|
||||||
|
|
||||||
|
# enqueue audio data for processing in the thread
|
||||||
|
self.audio_queue.put((obj_data, audio))
|
||||||
|
return None
|
||||||
|
|
||||||
|
def run(self) -> None:
|
||||||
|
"""Run method for the transcription thread to process queued audio data."""
|
||||||
|
logger.debug(
|
||||||
|
f"Starting audio transcription thread for {self.camera_config.name}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# start with an empty transcription
|
||||||
|
self.requestor.send_data(
|
||||||
|
f"{self.camera_config.name}/audio/transcription",
|
||||||
|
"",
|
||||||
|
)
|
||||||
|
|
||||||
|
while not self.stop_event.is_set():
|
||||||
|
try:
|
||||||
|
# Get audio data from queue with a timeout to check stop_event
|
||||||
|
_, audio = self.audio_queue.get(timeout=0.1)
|
||||||
|
result = self.__process_audio_stream(audio)
|
||||||
|
|
||||||
|
if not result:
|
||||||
|
continue
|
||||||
|
|
||||||
|
text, is_endpoint = result
|
||||||
|
logger.debug(f"Transcribed audio: '{text}', Endpoint: {is_endpoint}")
|
||||||
|
|
||||||
|
self.requestor.send_data(
|
||||||
|
f"{self.camera_config.name}/audio/transcription", text
|
||||||
|
)
|
||||||
|
|
||||||
|
self.audio_queue.task_done()
|
||||||
|
|
||||||
|
if is_endpoint:
|
||||||
|
self.reset()
|
||||||
|
|
||||||
|
except queue.Empty:
|
||||||
|
continue
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error processing audio in thread: {e}")
|
||||||
|
self.audio_queue.task_done()
|
||||||
|
|
||||||
|
logger.debug(
|
||||||
|
f"Stopping audio transcription thread for {self.camera_config.name}"
|
||||||
|
)
|
||||||
|
|
||||||
|
def clear_audio_queue(self) -> None:
|
||||||
|
# Clear the audio queue
|
||||||
|
while not self.audio_queue.empty():
|
||||||
|
try:
|
||||||
|
self.audio_queue.get_nowait()
|
||||||
|
self.audio_queue.task_done()
|
||||||
|
except queue.Empty:
|
||||||
|
break
|
||||||
|
|
||||||
|
def reset(self) -> None:
|
||||||
|
if self.config.audio_transcription.model_size == "large":
|
||||||
|
# get final output from whisper
|
||||||
|
output = self.stream.finish()
|
||||||
|
self.transcription_segments = []
|
||||||
|
|
||||||
|
self.requestor.send_data(
|
||||||
|
f"{self.camera_config.name}/audio/transcription",
|
||||||
|
(output[2].strip() + " "),
|
||||||
|
)
|
||||||
|
|
||||||
|
# reset whisper
|
||||||
|
self.stream.init()
|
||||||
|
self.transcription_segments = []
|
||||||
|
else:
|
||||||
|
# reset sherpa
|
||||||
|
self.model_runner.model.reset(self.stream)
|
||||||
|
|
||||||
|
logger.debug("Stream reset")
|
||||||
|
|
||||||
|
def check_unload_model(self) -> None:
|
||||||
|
# regularly called in the loop in audio maintainer
|
||||||
|
if (
|
||||||
|
self.config.audio_transcription.model_size == "large"
|
||||||
|
and self.whisper_model is not None
|
||||||
|
):
|
||||||
|
logger.debug(f"Unloading Whisper model for {self.camera_config.name}")
|
||||||
|
self.clear_audio_queue()
|
||||||
|
self.transcription_segments = []
|
||||||
|
self.stream = None
|
||||||
|
self.whisper_model = None
|
||||||
|
|
||||||
|
self.requestor.send_data(
|
||||||
|
f"{self.camera_config.name}/audio/transcription",
|
||||||
|
"",
|
||||||
|
)
|
||||||
|
if (
|
||||||
|
self.config.audio_transcription.model_size == "small"
|
||||||
|
and self.stream is not None
|
||||||
|
):
|
||||||
|
logger.debug(f"Clearing sherpa stream for {self.camera_config.name}")
|
||||||
|
self.stream = None
|
||||||
|
|
||||||
|
self.requestor.send_data(
|
||||||
|
f"{self.camera_config.name}/audio/transcription",
|
||||||
|
"",
|
||||||
|
)
|
||||||
|
|
||||||
|
def stop(self) -> None:
|
||||||
|
"""Stop the transcription thread and clean up."""
|
||||||
|
self.stop_event.set()
|
||||||
|
# Clear the queue to prevent processing stale data
|
||||||
|
while not self.audio_queue.empty():
|
||||||
|
try:
|
||||||
|
self.audio_queue.get_nowait()
|
||||||
|
self.audio_queue.task_done()
|
||||||
|
except queue.Empty:
|
||||||
|
break
|
||||||
|
logger.debug(
|
||||||
|
f"Transcription thread stop signaled for {self.camera_config.name}"
|
||||||
|
)
|
||||||
|
|
||||||
|
def handle_request(
|
||||||
|
self, topic: str, request_data: dict[str, any]
|
||||||
|
) -> dict[str, any] | None:
|
||||||
|
if topic == "clear_audio_recognizer":
|
||||||
|
self.stream = None
|
||||||
|
self.__build_recognizer()
|
||||||
|
return {"message": "Audio recognizer cleared and rebuilt", "success": True}
|
||||||
|
return None
|
||||||
|
|
||||||
|
def expire_object(self, object_id: str) -> None:
|
||||||
|
pass
|
||||||
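The pre-processing in __process_audio_stream is worth calling out: incoming PCM is coerced to float32, scaled out of int16 range into [-1.0, 1.0], and an RMS level is computed for debug logging. A self-contained sketch; the sample buffer below is synthetic, while the real input comes from the FFmpeg pipe:

import numpy as np

# synthetic 1 s chunk of int16 samples at 16 kHz
audio_data = (np.sin(np.linspace(0, 100, 16000)) * 20000).astype(np.int16)

audio_data = audio_data.astype(np.float32)
if audio_data.max() > 1.0 or audio_data.min() < -1.0:
    audio_data = audio_data / 32768.0  # full-scale int16 -> [-1.0, 1.0)

rms = float(np.sqrt(np.mean(np.square(audio_data))))
print(f"Audio chunk size: {audio_data.size}, RMS: {rms:.4f}")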
@ -13,6 +13,7 @@ from frigate.comms.event_metadata_updater import (
 )
 from frigate.config import FrigateConfig
 from frigate.const import MODEL_CACHE_DIR
+from frigate.log import redirect_output_to_logger
 from frigate.util.object import calculate_region

 from ..types import DataProcessorMetrics
@ -76,6 +77,7 @@ class BirdRealTimeProcessor(RealTimeProcessorApi):
             except Exception as e:
                 logger.error(f"Failed to download {path}: {e}")

+    @redirect_output_to_logger(logger, logging.DEBUG)
     def __build_detector(self) -> None:
         self.interpreter = Interpreter(
             model_path=os.path.join(MODEL_CACHE_DIR, "bird/bird.tflite"),
@ -154,8 +156,8 @@ class BirdRealTimeProcessor(RealTimeProcessorApi):
             return

         self.sub_label_publisher.publish(
-            EventMetadataTypeEnum.sub_label,
             (obj_data["id"], self.labelmap[best_id], score),
+            EventMetadataTypeEnum.sub_label.value,
         )
         self.detected_birds[obj_data["id"]] = score
352
frigate/data_processing/real_time/custom_classification.py
Normal file
@ -0,0 +1,352 @@
"""Real time processor that works with classification tflite models."""
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
import cv2
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from frigate.comms.embeddings_updater import EmbeddingsRequestEnum
|
||||||
|
from frigate.comms.event_metadata_updater import (
|
||||||
|
EventMetadataPublisher,
|
||||||
|
EventMetadataTypeEnum,
|
||||||
|
)
|
||||||
|
from frigate.comms.inter_process import InterProcessRequestor
|
||||||
|
from frigate.config import FrigateConfig
|
||||||
|
from frigate.config.classification import (
|
||||||
|
CustomClassificationConfig,
|
||||||
|
ObjectClassificationType,
|
||||||
|
)
|
||||||
|
from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR
|
||||||
|
from frigate.log import redirect_output_to_logger
|
||||||
|
from frigate.util.builtin import EventsPerSecond, InferenceSpeed, load_labels
|
||||||
|
from frigate.util.object import box_overlaps, calculate_region
|
||||||
|
|
||||||
|
from ..types import DataProcessorMetrics
|
||||||
|
from .api import RealTimeProcessorApi
|
||||||
|
|
||||||
|
try:
|
||||||
|
from tflite_runtime.interpreter import Interpreter
|
||||||
|
except ModuleNotFoundError:
|
||||||
|
from tensorflow.lite.python.interpreter import Interpreter
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class CustomStateClassificationProcessor(RealTimeProcessorApi):
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
config: FrigateConfig,
|
||||||
|
model_config: CustomClassificationConfig,
|
||||||
|
requestor: InterProcessRequestor,
|
||||||
|
metrics: DataProcessorMetrics,
|
||||||
|
):
|
||||||
|
super().__init__(config, metrics)
|
||||||
|
self.model_config = model_config
|
||||||
|
self.requestor = requestor
|
||||||
|
self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name)
|
||||||
|
self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train")
|
||||||
|
self.interpreter: Interpreter = None
|
||||||
|
self.tensor_input_details: dict[str, Any] = None
|
||||||
|
self.tensor_output_details: dict[str, Any] = None
|
||||||
|
self.labelmap: dict[int, str] = {}
|
||||||
|
self.classifications_per_second = EventsPerSecond()
|
||||||
|
self.inference_speed = InferenceSpeed(
|
||||||
|
self.metrics.classification_speeds[self.model_config.name]
|
||||||
|
)
|
||||||
|
self.last_run = datetime.datetime.now().timestamp()
|
||||||
|
self.__build_detector()
|
||||||
|
|
||||||
|
@redirect_output_to_logger(logger, logging.DEBUG)
|
||||||
|
def __build_detector(self) -> None:
|
||||||
|
self.interpreter = Interpreter(
|
||||||
|
model_path=os.path.join(self.model_dir, "model.tflite"),
|
||||||
|
num_threads=2,
|
||||||
|
)
|
||||||
|
self.interpreter.allocate_tensors()
|
||||||
|
self.tensor_input_details = self.interpreter.get_input_details()
|
||||||
|
self.tensor_output_details = self.interpreter.get_output_details()
|
||||||
|
self.labelmap = load_labels(
|
||||||
|
os.path.join(self.model_dir, "labelmap.txt"),
|
||||||
|
prefill=0,
|
||||||
|
)
|
||||||
|
self.classifications_per_second.start()
|
||||||
|
|
||||||
|
def __update_metrics(self, duration: float) -> None:
|
||||||
|
self.classifications_per_second.update()
|
||||||
|
self.inference_speed.update(duration)
|
||||||
|
|
||||||
|
def process_frame(self, frame_data: dict[str, Any], frame: np.ndarray):
|
||||||
|
self.metrics.classification_cps[
|
||||||
|
self.model_config.name
|
||||||
|
].value = self.classifications_per_second.eps()
|
||||||
|
camera = frame_data.get("camera")
|
||||||
|
|
||||||
|
if camera not in self.model_config.state_config.cameras:
|
||||||
|
return
|
||||||
|
|
||||||
|
camera_config = self.model_config.state_config.cameras[camera]
|
||||||
|
crop = [
|
||||||
|
camera_config.crop[0],
|
||||||
|
camera_config.crop[1],
|
||||||
|
camera_config.crop[2],
|
||||||
|
camera_config.crop[3],
|
||||||
|
]
|
||||||
|
should_run = False
|
||||||
|
|
||||||
|
now = datetime.datetime.now().timestamp()
|
||||||
|
if (
|
||||||
|
self.model_config.state_config.interval
|
||||||
|
and now > self.last_run + self.model_config.state_config.interval
|
||||||
|
):
|
||||||
|
self.last_run = now
|
||||||
|
should_run = True
|
||||||
|
|
||||||
|
if (
|
||||||
|
not should_run
|
||||||
|
and self.model_config.state_config.motion
|
||||||
|
and any([box_overlaps(crop, mb) for mb in frame_data.get("motion", [])])
|
||||||
|
):
|
||||||
|
# classification should run at most once per second
|
||||||
|
if now > self.last_run + 1:
|
||||||
|
self.last_run = now
|
||||||
|
should_run = True
|
||||||
|
|
||||||
|
if not should_run:
|
||||||
|
return
|
||||||
|
|
||||||
|
x, y, x2, y2 = calculate_region(
|
||||||
|
frame.shape,
|
||||||
|
crop[0],
|
||||||
|
crop[1],
|
||||||
|
crop[2],
|
||||||
|
crop[3],
|
||||||
|
224,
|
||||||
|
1.0,
|
||||||
|
)
|
||||||
|
|
||||||
|
rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
|
||||||
|
frame = rgb[
|
||||||
|
y:y2,
|
||||||
|
x:x2,
|
||||||
|
]
|
||||||
|
|
||||||
|
if frame.shape != (224, 224):
|
||||||
|
frame = cv2.resize(frame, (224, 224))
|
||||||
|
|
||||||
|
input = np.expand_dims(frame, axis=0)
|
||||||
|
self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input)
|
||||||
|
self.interpreter.invoke()
|
||||||
|
res: np.ndarray = self.interpreter.get_tensor(
|
||||||
|
self.tensor_output_details[0]["index"]
|
||||||
|
)[0]
|
||||||
|
probs = res / res.sum(axis=0)
|
||||||
|
best_id = np.argmax(probs)
|
||||||
|
score = round(probs[best_id], 2)
|
||||||
|
self.__update_metrics(datetime.datetime.now().timestamp() - now)
|
||||||
|
|
||||||
|
write_classification_attempt(
|
||||||
|
self.train_dir,
|
||||||
|
cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
|
||||||
|
now,
|
||||||
|
self.labelmap[best_id],
|
||||||
|
score,
|
||||||
|
)
|
||||||
|
|
||||||
|
if score >= self.model_config.threshold:
|
||||||
|
self.requestor.send_data(
|
||||||
|
f"{camera}/classification/{self.model_config.name}",
|
||||||
|
self.labelmap[best_id],
|
||||||
|
)
|
||||||
|
|
||||||
|
def handle_request(self, topic, request_data):
|
||||||
|
if topic == EmbeddingsRequestEnum.reload_classification_model.value:
|
||||||
|
if request_data.get("model_name") == self.model_config.name:
|
||||||
|
self.__build_detector()
|
||||||
|
logger.info(
|
||||||
|
f"Successfully loaded updated model for {self.model_config.name}"
|
||||||
|
)
|
||||||
|
return {
|
||||||
|
"success": True,
|
||||||
|
"message": f"Loaded {self.model_config.name} model.",
|
||||||
|
}
|
||||||
|
else:
|
||||||
|
return None
|
||||||
|
else:
|
||||||
|
return None
|
||||||
|
|
||||||
|
def expire_object(self, object_id, camera):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class CustomObjectClassificationProcessor(RealTimeProcessorApi):
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
config: FrigateConfig,
|
||||||
|
model_config: CustomClassificationConfig,
|
||||||
|
sub_label_publisher: EventMetadataPublisher,
|
||||||
|
metrics: DataProcessorMetrics,
|
||||||
|
):
|
||||||
|
super().__init__(config, metrics)
|
||||||
|
self.model_config = model_config
|
||||||
|
self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name)
|
||||||
|
self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train")
|
||||||
|
self.interpreter: Interpreter = None
|
||||||
|
self.sub_label_publisher = sub_label_publisher
|
||||||
|
self.tensor_input_details: dict[str, Any] = None
|
||||||
|
self.tensor_output_details: dict[str, Any] = None
|
||||||
|
self.detected_objects: dict[str, float] = {}
|
||||||
|
self.labelmap: dict[int, str] = {}
|
||||||
|
self.classifications_per_second = EventsPerSecond()
|
||||||
|
self.inference_speed = InferenceSpeed(
|
||||||
|
self.metrics.classification_speeds[self.model_config.name]
|
||||||
|
)
|
||||||
|
self.__build_detector()
|
||||||
|
|
||||||
|
@redirect_output_to_logger(logger, logging.DEBUG)
|
||||||
|
def __build_detector(self) -> None:
|
||||||
|
self.interpreter = Interpreter(
|
||||||
|
model_path=os.path.join(self.model_dir, "model.tflite"),
|
||||||
|
num_threads=2,
|
||||||
|
)
|
||||||
|
self.interpreter.allocate_tensors()
|
||||||
|
self.tensor_input_details = self.interpreter.get_input_details()
|
||||||
|
self.tensor_output_details = self.interpreter.get_output_details()
|
||||||
|
self.labelmap = load_labels(
|
||||||
|
os.path.join(self.model_dir, "labelmap.txt"),
|
||||||
|
prefill=0,
|
||||||
|
)
|
||||||
|
|
||||||
|
def __update_metrics(self, duration: float) -> None:
|
||||||
|
self.classifications_per_second.update()
|
||||||
|
self.inference_speed.update(duration)
|
||||||
|
|
||||||
|
def process_frame(self, obj_data, frame):
|
||||||
|
self.metrics.classification_cps[
|
||||||
|
self.model_config.name
|
||||||
|
].value = self.classifications_per_second.eps()
|
||||||
|
|
||||||
|
if obj_data["false_positive"]:
|
||||||
|
return
|
||||||
|
|
||||||
|
if obj_data["label"] not in self.model_config.object_config.objects:
|
||||||
|
return
|
||||||
|
|
||||||
|
now = datetime.datetime.now().timestamp()
|
||||||
|
x, y, x2, y2 = calculate_region(
|
||||||
|
frame.shape,
|
||||||
|
obj_data["box"][0],
|
||||||
|
obj_data["box"][1],
|
||||||
|
obj_data["box"][2],
|
||||||
|
obj_data["box"][3],
|
||||||
|
max(
|
||||||
|
obj_data["box"][1] - obj_data["box"][0],
|
||||||
|
obj_data["box"][3] - obj_data["box"][2],
|
||||||
|
),
|
||||||
|
1.0,
|
||||||
|
)
|
||||||
|
|
||||||
|
rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
|
||||||
|
crop = rgb[
|
||||||
|
y:y2,
|
||||||
|
x:x2,
|
||||||
|
]
|
||||||
|
|
||||||
|
if crop.shape != (224, 224):
|
||||||
|
crop = cv2.resize(crop, (224, 224))
|
||||||
|
|
||||||
|
input = np.expand_dims(crop, axis=0)
|
||||||
|
self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input)
|
||||||
|
self.interpreter.invoke()
|
||||||
|
res: np.ndarray = self.interpreter.get_tensor(
|
||||||
|
self.tensor_output_details[0]["index"]
|
||||||
|
)[0]
|
||||||
|
probs = res / res.sum(axis=0)
|
||||||
|
best_id = np.argmax(probs)
|
||||||
|
score = round(probs[best_id], 2)
|
||||||
|
previous_score = self.detected_objects.get(obj_data["id"], 0.0)
|
||||||
|
self.__update_metrics(datetime.datetime.now().timestamp() - now)
|
||||||
|
|
||||||
|
write_classification_attempt(
|
||||||
|
self.train_dir,
|
||||||
|
cv2.cvtColor(crop, cv2.COLOR_RGB2BGR),
|
||||||
|
now,
|
||||||
|
self.labelmap[best_id],
|
||||||
|
score,
|
||||||
|
)
|
||||||
|
|
||||||
|
if score < self.model_config.threshold:
|
||||||
|
logger.debug(f"Score {score} is less than threshold.")
|
||||||
|
return
|
||||||
|
|
||||||
|
if score <= previous_score:
|
||||||
|
logger.debug(f"Score {score} is worse than previous score {previous_score}")
|
||||||
|
return
|
||||||
|
|
||||||
|
sub_label = self.labelmap[best_id]
|
||||||
|
self.detected_objects[obj_data["id"]] = score
|
||||||
|
|
||||||
|
if (
|
||||||
|
self.model_config.object_config.classification_type
|
||||||
|
== ObjectClassificationType.sub_label
|
||||||
|
):
|
||||||
|
if sub_label != "none":
|
||||||
|
self.sub_label_publisher.publish(
|
||||||
|
(obj_data["id"], sub_label, score),
|
||||||
|
EventMetadataTypeEnum.sub_label,
|
||||||
|
)
|
||||||
|
elif (
|
||||||
|
self.model_config.object_config.classification_type
|
||||||
|
== ObjectClassificationType.attribute
|
||||||
|
):
|
||||||
|
self.sub_label_publisher.publish(
|
||||||
|
(obj_data["id"], self.model_config.name, sub_label, score),
|
||||||
|
EventMetadataTypeEnum.attribute.value,
|
||||||
|
)
|
||||||
|
|
||||||
|
def handle_request(self, topic, request_data):
|
||||||
|
if topic == EmbeddingsRequestEnum.reload_classification_model.value:
|
||||||
|
if request_data.get("model_name") == self.model_config.name:
|
||||||
|
logger.info(
|
||||||
|
f"Successfully loaded updated model for {self.model_config.name}"
|
||||||
|
)
|
||||||
|
return {
|
||||||
|
"success": True,
|
||||||
|
"message": f"Loaded {self.model_config.name} model.",
|
||||||
|
}
|
||||||
|
else:
|
||||||
|
return None
|
||||||
|
else:
|
||||||
|
return None
|
||||||
|
|
||||||
|
def expire_object(self, object_id, camera):
|
||||||
|
if object_id in self.detected_objects:
|
||||||
|
self.detected_objects.pop(object_id)
|
||||||
|
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def write_classification_attempt(
|
||||||
|
folder: str,
|
||||||
|
frame: np.ndarray,
|
||||||
|
timestamp: float,
|
||||||
|
label: str,
|
||||||
|
score: float,
|
||||||
|
) -> None:
|
||||||
|
if "-" in label:
|
||||||
|
label = label.replace("-", "_")
|
||||||
|
|
||||||
|
file = os.path.join(folder, f"{timestamp}-{label}-{score}.webp")
|
||||||
|
os.makedirs(folder, exist_ok=True)
|
||||||
|
cv2.imwrite(file, frame)
|
||||||
|
|
||||||
|
files = sorted(
|
||||||
|
filter(lambda f: (f.endswith(".webp")), os.listdir(folder)),
|
||||||
|
key=lambda f: os.path.getctime(os.path.join(folder, f)),
|
||||||
|
reverse=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
# delete oldest face image if maximum is reached
|
||||||
|
if len(files) > 100:
|
||||||
|
os.unlink(os.path.join(folder, files[-1]))
|
||||||
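Both classifiers share the same post-processing: the interpreter's raw output vector is normalized by its sum into pseudo-probabilities (a plain sum normalization rather than a softmax, so it assumes non-negative outputs) and the argmax is looked up in the labelmap. A tiny sketch with made-up outputs and labels:

import numpy as np

res = np.array([2.0, 5.0, 1.0], dtype=np.float32)  # hypothetical model output
labelmap = {0: "none", 1: "delivery_truck", 2: "school_bus"}

probs = res / res.sum(axis=0)  # [0.25, 0.625, 0.125]
best_id = int(np.argmax(probs))
score = round(float(probs[best_id]), 2)
print(labelmap[best_id], score)  # delivery_truck 0.62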
@ -171,7 +171,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):

         # don't run for non person objects
         if obj_data.get("label") != "person":
-            logger.debug("Not a processing face for non person object.")
+            logger.debug("Not processing face for a non person object.")
             return

         # don't overwrite sub label for objects that have a sub label
@ -319,8 +319,8 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):

         if weighted_score >= self.face_config.recognition_threshold:
             self.sub_label_publisher.publish(
-                EventMetadataTypeEnum.sub_label,
                 (id, weighted_sub_label, weighted_score),
+                EventMetadataTypeEnum.sub_label.value,
             )

         self.__update_metrics(datetime.datetime.now().timestamp() - start)
1158
frigate/data_processing/real_time/whisper_online.py
Normal file
File diff suppressed because it is too large
@ -1,9 +1,13 @@
 """Embeddings types."""

-import multiprocessing as mp
 from enum import Enum
+from multiprocessing.managers import SyncManager
 from multiprocessing.sharedctypes import Synchronized

+import sherpa_onnx
+
+from frigate.data_processing.real_time.whisper_online import FasterWhisperASR
+

 class DataProcessorMetrics:
     image_embeddings_speed: Synchronized
@ -16,18 +20,31 @@ class DataProcessorMetrics:
     alpr_pps: Synchronized
     yolov9_lpr_speed: Synchronized
     yolov9_lpr_pps: Synchronized
+    review_desc_speed: Synchronized
+    review_desc_dps: Synchronized
+    classification_speeds: dict[str, Synchronized]
+    classification_cps: dict[str, Synchronized]

-    def __init__(self):
-        self.image_embeddings_speed = mp.Value("d", 0.0)
-        self.image_embeddings_eps = mp.Value("d", 0.0)
-        self.text_embeddings_speed = mp.Value("d", 0.0)
-        self.text_embeddings_eps = mp.Value("d", 0.0)
-        self.face_rec_speed = mp.Value("d", 0.0)
-        self.face_rec_fps = mp.Value("d", 0.0)
-        self.alpr_speed = mp.Value("d", 0.0)
-        self.alpr_pps = mp.Value("d", 0.0)
-        self.yolov9_lpr_speed = mp.Value("d", 0.0)
-        self.yolov9_lpr_pps = mp.Value("d", 0.0)
+    def __init__(self, manager: SyncManager, custom_classification_models: list[str]):
+        self.image_embeddings_speed = manager.Value("d", 0.0)
+        self.image_embeddings_eps = manager.Value("d", 0.0)
+        self.text_embeddings_speed = manager.Value("d", 0.0)
+        self.text_embeddings_eps = manager.Value("d", 0.0)
+        self.face_rec_speed = manager.Value("d", 0.0)
+        self.face_rec_fps = manager.Value("d", 0.0)
+        self.alpr_speed = manager.Value("d", 0.0)
+        self.alpr_pps = manager.Value("d", 0.0)
+        self.yolov9_lpr_speed = manager.Value("d", 0.0)
+        self.yolov9_lpr_pps = manager.Value("d", 0.0)
+        self.review_desc_speed = manager.Value("d", 0.0)
+        self.review_desc_dps = manager.Value("d", 0.0)
+        self.classification_speeds = manager.dict()
+        self.classification_cps = manager.dict()
+
+        if custom_classification_models:
+            for key in custom_classification_models:
+                self.classification_speeds[key] = manager.Value("d", 0.0)
+                self.classification_cps[key] = manager.Value("d", 0.0)


 class DataProcessorModelRunner:
@ -41,3 +58,6 @@ class PostProcessDataEnum(str, Enum):
     recording = "recording"
     review = "review"
     tracked_object = "tracked_object"
+
+
+AudioTranscriptionModel = FasterWhisperASR | sherpa_onnx.OnlineRecognizer | None
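The metrics change above swaps mp.Value for SyncManager-backed values: manager proxies can be shared with processes created later and support manager.dict() for the per-model classification metrics keyed by name. A minimal sketch of that pattern, with hypothetical model names:

import multiprocessing as mp

if __name__ == "__main__":
    manager = mp.Manager()
    review_desc_speed = manager.Value("d", 0.0)
    classification_speeds = manager.dict()

    for key in ["door_state", "package_present"]:  # hypothetical custom models
        classification_speeds[key] = manager.Value("d", 0.0)

    # updates through the proxy are visible to any process holding it
    classification_speeds["door_state"].value = 0.042
    print(review_desc_speed.value, classification_speeds["door_state"].value)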
Some files were not shown because too many files have changed in this diff