Merge branch 'blakeblackshear:dev' into endpoint_last_clip

This commit is contained in:
vanseforge 2024-03-01 20:11:58 +01:00 committed by GitHub
commit 26475174e6
405 changed files with 10170 additions and 34940 deletions


@ -1,8 +1,5 @@
name: Camera Support Request
description: Support for setting up cameras in Frigate
title: "[Camera Support]: "
labels: ["support", "triage"]
-assignees: []
body:
  - type: textarea
    id: description
@ -14,7 +11,7 @@ body:
    id: version
    attributes:
      label: Version
-      description: Visible on the Debug page in the Web UI
+      description: Visible on the System page in the Web UI
    validations:
      required: true
  - type: textarea


@ -1,8 +1,5 @@
name: Config Support Request
description: Support for Frigate configuration
title: "[Config Support]: "
labels: ["support", "triage"]
-assignees: []
body:
  - type: textarea
    id: description
@ -14,7 +11,7 @@ body:
    id: version
    attributes:
      label: Version
-      description: Visible on the Debug page in the Web UI
+      description: Visible on the System page in the Web UI
    validations:
      required: true
  - type: textarea


@ -1,8 +1,5 @@
name: Detector Support Request
description: Support for setting up object detector in Frigate (Coral, OpenVINO, TensorRT, etc.)
title: "[Detector Support]: "
labels: ["support", "triage"]
-assignees: []
body:
  - type: textarea
    id: description
@ -14,7 +11,7 @@ body:
    id: version
    attributes:
      label: Version
-      description: Visible on the Debug page in the Web UI
+      description: Visible on the System page in the Web UI
    validations:
      required: true
  - type: textarea


@ -1,8 +1,5 @@
name: General Support Request
description: General support request for Frigate
title: "[Support]: "
labels: ["support", "triage"]
-assignees: []
body:
  - type: textarea
    id: description
@ -14,7 +11,7 @@ body:
    id: version
    attributes:
      label: Version
-      description: Visible on the Debug page in the Web UI
+      description: Visible on the System page in the Web UI
    validations:
      required: true
  - type: textarea


@ -1,8 +1,5 @@
name: Hardware Acceleration Support Request
description: Support for setting up GPU hardware acceleration in Frigate
title: "[HW Accel Support]: "
labels: ["support", "triage"]
-assignees: []
body:
  - type: textarea
    id: description
@ -14,7 +11,7 @@ body:
    id: version
    attributes:
      label: Version
-      description: Visible on the Debug page in the Web UI
+      description: Visible on the System page in the Web UI
    validations:
      required: true
  - type: textarea


@ -0,0 +1,9 @@
title: "[Question]: "
labels: ["question"]
body:
- type: textarea
id: description
attributes:
label: "What is your question:"
validations:
required: true


@ -1 +1,5 @@
blank_issues_enabled: false
contact_links:
  - name: Frigate Support
    url: https://github.com/blakeblackshear/frigate/discussions/new/choose
    about: Get support for setting up or troubleshooting Frigate.


@ -11,11 +11,22 @@ outputs:
runs:
  using: "composite"
  steps:
-    - name: Remove unnecessary files
-      run: |
-        sudo rm -rf /usr/share/dotnet
-        sudo rm -rf /usr/local/lib/android
-        sudo rm -rf /opt/ghc
+    # Stop docker so we can mount more space at /var/lib/docker
+    - name: Stop docker
+      run: sudo systemctl stop docker
+      shell: bash
+    # This creates a virtual volume at /var/lib/docker to maximize the size
+    # As of 2/14/2024, this results in 97G for docker images
+    - name: Maximize build space
+      uses: easimon/maximize-build-space@master
+      with:
+        remove-dotnet: 'true'
+        remove-android: 'true'
+        remove-haskell: 'true'
+        remove-codeql: 'true'
+        build-mount-path: '/var/lib/docker'
+    - name: Start docker
+      run: sudo systemctl start docker
      shell: bash
    - id: lowercaseRepo
      uses: ASzc/change-string-case-action@v5


@ -47,6 +47,57 @@ jobs:
tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64,mode=max
- name: AMD/ROCm general build
env:
AMDGPU: gfx
HSA_OVERRIDE: 0
uses: docker/bake-action@v3
with:
push: true
targets: rocm
files: docker/rocm/rocm.hcl
set: |
rocm.tags=${{ steps.setup.outputs.image-name }}-rocm
*.cache-from=type=gha
- name: AMD/ROCm gfx900
env:
AMDGPU: gfx900
HSA_OVERRIDE: 1
HSA_OVERRIDE_GFX_VERSION: 9.0.0
uses: docker/bake-action@v3
with:
push: true
targets: rocm
files: docker/rocm/rocm.hcl
set: |
rocm.tags=${{ steps.setup.outputs.image-name }}-rocm-gfx900
*.cache-from=type=gha
- name: AMD/ROCm gfx1030
env:
AMDGPU: gfx1030
HSA_OVERRIDE: 1
HSA_OVERRIDE_GFX_VERSION: 10.3.0
uses: docker/bake-action@v3
with:
push: true
targets: rocm
files: docker/rocm/rocm.hcl
set: |
rocm.tags=${{ steps.setup.outputs.image-name }}-rocm-gfx1030
*.cache-from=type=gha
- name: AMD/ROCm gfx1100
env:
AMDGPU: gfx1100
HSA_OVERRIDE: 1
HSA_OVERRIDE_GFX_VERSION: 11.0.0
uses: docker/bake-action@v3
with:
push: true
targets: rocm
files: docker/rocm/rocm.hcl
set: |
rocm.tags=${{ steps.setup.outputs.image-name }}-rocm-gfx1100
*.cache-from=type=gha
arm64_build:
runs-on: ubuntu-latest
name: ARM Build


@ -2,5 +2,5 @@
/docker/tensorrt/ @madsciencetist @NateMeyer
/docker/tensorrt/*arm64* @madsciencetist
/docker/tensorrt/*jetson* @madsciencetist
/docker/rockchip/ @MarcA711
/docker/rocm/ @harakas


@ -196,6 +196,8 @@ EXPOSE 8555/tcp 8555/udp
# Configure logging to prepend timestamps, log to stdout, keep 0 archives and rotate on 10MB
ENV S6_LOGGING_SCRIPT="T 1 n0 s10000000 T"
# Do not fail on long-running download scripts
ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
ENTRYPOINT ["/init"]
CMD []


@ -1,20 +1,22 @@
click == 8.1.*
Flask == 2.3.*
imutils == 0.5.*
+markupsafe == 2.1.*
matplotlib == 3.7.*
mypy == 1.6.1
numpy == 1.23.*
onvif_zeep == 0.2.12
opencv-python-headless == 4.7.0.*
-paho-mqtt == 1.6.*
+paho-mqtt == 2.0.*
pandas == 2.1.4
peewee == 3.17.*
peewee_migrate == 1.12.*
psutil == 5.9.*
-pydantic == 1.10.*
+pydantic == 2.6.*
git+https://github.com/fbcotter/py3nvml#egg=py3nvml
PyYAML == 6.0.*
pytz == 2023.3.post1
+pyzmq == 25.1.*
ruamel.yaml == 0.18.*
tzlocal == 5.2
types-PyYAML == 6.0.*
@ -25,6 +27,7 @@ norfair == 2.2.*
setproctitle == 1.3.*
ws4py == 0.5.*
unidecode == 1.3.*
+onnxruntime == 1.16.*
# Openvino Library - Custom built with MYRIAD support
openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-manylinux_2_31_x86_64.whl; platform_machine == 'x86_64'
openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-linux_aarch64.whl; platform_machine == 'aarch64'


@ -0,0 +1,35 @@
#!/command/with-contenv bash
# shellcheck shell=bash
# Download yolov8 models when DOWNLOAD_YOLOV8=1 environment variable is set
set -o errexit -o nounset -o pipefail
MODEL_CACHE_DIR=${MODEL_CACHE_DIR:-"/config/model_cache"}
DOWNLOAD_YOLOV8=${DOWNLOAD_YOLOV8:-"0"}
YOLOV8_DIR="$MODEL_CACHE_DIR/yolov8"
YOLOV8_URL=https://github.com/harakas/models/releases/download/yolov8.1-1.1/yolov8.small.models.tar.gz
YOLOV8_DIGEST=304186b299560fbacc28eac9b9ea02cc2289fe30eb2c0df30109a2529423695c
if [ "$DOWNLOAD_YOLOV8" = "1" ]; then
echo "download-models: DOWNLOAD_YOLOV8=${DOWNLOAD_YOLOV8}, running download"
if ! test -f "${YOLOV8_DIR}/model.fetched"; then
mkdir -p $YOLOV8_DIR
TMP_FILE="${YOLOV8_DIR}/download.tar.gz"
curl --no-progress-meter -L --max-filesize 500M --insecure --output $TMP_FILE "${YOLOV8_URL}"
digest=$(sha256sum $TMP_FILE | awk '{print $1}')
if [ "$digest" = "$YOLOV8_DIGEST" ]; then
echo "download-models: Extracting downloaded file"
cd $YOLOV8_DIR
tar zxf $TMP_FILE
rm $TMP_FILE
touch model.fetched
echo "download-models: Yolov8 download done, files placed into ${YOLOV8_DIR}"
else
echo "download-models: Downloaded file digest does not match: got $digest, expected $YOLOV8_DIGEST"
rm $TMP_FILE
fi
else
echo "download-models: ${YOLOV8_DIR}/model.fetched already present"
fi
fi


@ -0,0 +1 @@
oneshot


@ -0,0 +1 @@
/etc/s6-overlay/s6-rc.d/download-models/run


@ -109,9 +109,9 @@ if int(os.environ["LIBAVFORMAT_VERSION_MAJOR"]) < 59:
        "rtsp": "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
    }
elif go2rtc_config["ffmpeg"].get("rtsp") is None:
-    go2rtc_config["ffmpeg"][
-        "rtsp"
-    ] = "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
+    go2rtc_config["ffmpeg"]["rtsp"] = (
+        "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
+    )

# add hardware acceleration presets for rockchip devices
# may be removed if frigate uses a go2rtc version that includes these presets


@ -10,6 +10,8 @@ events {
}

http {
    map_hash_bucket_size 256;

    include mime.types;
    default_type application/octet-stream;


@ -9,7 +9,7 @@ COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt
RUN sed -i "/https:\/\//d" /requirements-wheels.txt
RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt
-FROM deps AS rk-deps
+FROM deps AS rk-frigate
ARG TARGETARCH
RUN --mount=type=bind,from=rk-wheels,source=/rk-wheels,target=/deps/rk-wheels \
@ -28,5 +28,5 @@ ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3588/yolo
RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg
RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe
-ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.0-1/ffmpeg /usr/lib/btbn-ffmpeg/bin/
+ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-1/ffmpeg /usr/lib/btbn-ffmpeg/bin/
-ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.0-1/ffprobe /usr/lib/btbn-ffmpeg/bin/
+ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-1/ffprobe /usr/lib/btbn-ffmpeg/bin/


@ -1,9 +1,3 @@
-target wget {
-  dockerfile = "docker/main/Dockerfile"
-  platforms = ["linux/arm64"]
-  target = "wget"
-}
target wheels {
  dockerfile = "docker/main/Dockerfile"
  platforms = ["linux/arm64"]
@ -25,7 +19,6 @@ target rootfs {
target rk {
  dockerfile = "docker/rockchip/Dockerfile"
  contexts = {
-    wget = "target:wget",
    wheels = "target:wheels",
    deps = "target:deps",
    rootfs = "target:rootfs"

docker/rocm/Dockerfile (new file)

@ -0,0 +1,106 @@
# syntax=docker/dockerfile:1.4
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
ARG ROCM=5.7.3
ARG AMDGPU=gfx900
ARG HSA_OVERRIDE_GFX_VERSION
ARG HSA_OVERRIDE
#######################################################################
FROM ubuntu:focal as rocm
ARG ROCM
RUN apt-get update && apt-get -y upgrade
RUN apt-get -y install gnupg wget
RUN mkdir --parents --mode=0755 /etc/apt/keyrings
RUN wget https://repo.radeon.com/rocm/rocm.gpg.key -O - | gpg --dearmor | tee /etc/apt/keyrings/rocm.gpg > /dev/null
COPY docker/rocm/rocm.list /etc/apt/sources.list.d/
COPY docker/rocm/rocm-pin-600 /etc/apt/preferences.d/
RUN apt-get update
RUN apt-get -y install --no-install-recommends migraphx
RUN apt-get -y install --no-install-recommends migraphx-dev
RUN mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib
RUN cd /opt/rocm-$ROCM/lib && cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/
RUN cd /opt/rocm-dist/opt/ && ln -s rocm-$ROCM rocm
RUN mkdir -p /opt/rocm-dist/etc/ld.so.conf.d/
RUN echo /opt/rocm/lib|tee /opt/rocm-dist/etc/ld.so.conf.d/rocm.conf
#######################################################################
FROM --platform=linux/amd64 debian:11 as debian-base
RUN apt-get update && apt-get -y upgrade
RUN apt-get -y install --no-install-recommends libelf1 libdrm2 libdrm-amdgpu1 libnuma1 kmod
RUN apt-get -y install python3
#######################################################################
# ROCm does not come with migraphx wrappers for python 3.9, so we build it here
FROM debian-base as debian-build
ARG ROCM
COPY --from=rocm /opt/rocm-$ROCM /opt/rocm-$ROCM
RUN ln -s /opt/rocm-$ROCM /opt/rocm
RUN apt-get -y install g++ cmake
RUN apt-get -y install python3-pybind11 python3.9-distutils python3-dev
WORKDIR /opt/build
COPY docker/rocm/migraphx .
RUN mkdir build && cd build && cmake .. && make install
#######################################################################
FROM deps AS deps-prelim
# need this to install libnuma1
RUN apt-get update
# no upgrade?!?!
RUN apt-get -y install libnuma1
WORKDIR /opt/frigate/
COPY --from=rootfs / /
COPY docker/rocm/rootfs/ /
#######################################################################
FROM scratch AS rocm-dist
ARG ROCM
ARG AMDGPU
COPY --from=rocm /opt/rocm-$ROCM/bin/rocminfo /opt/rocm-$ROCM/bin/migraphx-driver /opt/rocm-$ROCM/bin/
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*$AMDGPU* /opt/rocm-$ROCM/share/miopen/db/
COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*$AMDGPU* /opt/rocm-$ROCM/lib/rocblas/library/
COPY --from=rocm /opt/rocm-dist/ /
COPY --from=debian-build /opt/rocm/lib/migraphx.cpython-39-x86_64-linux-gnu.so /opt/rocm-$ROCM/lib/
#######################################################################
FROM deps-prelim AS rocm-prelim-hsa-override0
ENV HSA_ENABLE_SDMA=0
COPY --from=rocm-dist / /
RUN ldconfig
#######################################################################
FROM rocm-prelim-hsa-override0 as rocm-prelim-hsa-override1
ARG HSA_OVERRIDE_GFX_VERSION
ENV HSA_OVERRIDE_GFX_VERSION=$HSA_OVERRIDE_GFX_VERSION
#######################################################################
FROM rocm-prelim-hsa-override$HSA_OVERRIDE as rocm-deps
# Request yolov8 download at startup
ENV DOWNLOAD_YOLOV8=1


@ -0,0 +1,26 @@
cmake_minimum_required(VERSION 3.1)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release)
endif()
SET(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
project(migraphx_py)
include_directories(/opt/rocm/include)
find_package(pybind11 REQUIRED)
pybind11_add_module(migraphx migraphx_py.cpp)
target_link_libraries(migraphx PRIVATE /opt/rocm/lib/libmigraphx.so /opt/rocm/lib/libmigraphx_tf.so /opt/rocm/lib/libmigraphx_onnx.so)
install(TARGETS migraphx
COMPONENT python
LIBRARY DESTINATION /opt/rocm/lib
)


@ -0,0 +1,582 @@
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/numpy.h>
#include <migraphx/program.hpp>
#include <migraphx/instruction_ref.hpp>
#include <migraphx/operation.hpp>
#include <migraphx/quantization.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/tf.hpp>
#include <migraphx/onnx.hpp>
#include <migraphx/load_save.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/json.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/common.hpp>
#ifdef HAVE_GPU
#include <migraphx/gpu/hip.hpp>
#endif
using half = half_float::half;
namespace py = pybind11;
#ifdef __clang__
#define MIGRAPHX_PUSH_UNUSED_WARNING \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wused-but-marked-unused\"")
#define MIGRAPHX_POP_WARNING _Pragma("clang diagnostic pop")
#else
#define MIGRAPHX_PUSH_UNUSED_WARNING
#define MIGRAPHX_POP_WARNING
#endif
#define MIGRAPHX_PYBIND11_MODULE(...) \
MIGRAPHX_PUSH_UNUSED_WARNING \
PYBIND11_MODULE(__VA_ARGS__) \
MIGRAPHX_POP_WARNING
#define MIGRAPHX_PYTHON_GENERATE_SHAPE_ENUM(x, t) .value(#x, migraphx::shape::type_t::x)
namespace migraphx {
migraphx::value to_value(py::kwargs kwargs);
migraphx::value to_value(py::list lst);
template <class T, class F>
void visit_py(T x, F f)
{
if(py::isinstance<py::kwargs>(x))
{
f(to_value(x.template cast<py::kwargs>()));
}
else if(py::isinstance<py::list>(x))
{
f(to_value(x.template cast<py::list>()));
}
else if(py::isinstance<py::bool_>(x))
{
f(x.template cast<bool>());
}
else if(py::isinstance<py::int_>(x) or py::hasattr(x, "__index__"))
{
f(x.template cast<int>());
}
else if(py::isinstance<py::float_>(x))
{
f(x.template cast<float>());
}
else if(py::isinstance<py::str>(x))
{
f(x.template cast<std::string>());
}
else if(py::isinstance<migraphx::shape::dynamic_dimension>(x))
{
f(migraphx::to_value(x.template cast<migraphx::shape::dynamic_dimension>()));
}
else
{
MIGRAPHX_THROW("VISIT_PY: Unsupported data type!");
}
}
migraphx::value to_value(py::list lst)
{
migraphx::value v = migraphx::value::array{};
for(auto val : lst)
{
visit_py(val, [&](auto py_val) { v.push_back(py_val); });
}
return v;
}
migraphx::value to_value(py::kwargs kwargs)
{
migraphx::value v = migraphx::value::object{};
for(auto arg : kwargs)
{
auto&& key = py::str(arg.first);
auto&& val = arg.second;
visit_py(val, [&](auto py_val) { v[key] = py_val; });
}
return v;
}
} // namespace migraphx
namespace pybind11 {
namespace detail {
template <>
struct npy_format_descriptor<half>
{
static std::string format()
{
// following: https://docs.python.org/3/library/struct.html#format-characters
return "e";
}
static constexpr auto name() { return _("half"); }
};
} // namespace detail
} // namespace pybind11
template <class F>
void visit_type(const migraphx::shape& s, F f)
{
s.visit_type(f);
}
template <class T, class F>
void visit(const migraphx::raw_data<T>& x, F f)
{
x.visit(f);
}
template <class F>
void visit_types(F f)
{
migraphx::shape::visit_types(f);
}
template <class T>
py::buffer_info to_buffer_info(T& x)
{
migraphx::shape s = x.get_shape();
assert(s.type() != migraphx::shape::tuple_type);
if(s.dynamic())
MIGRAPHX_THROW("MIGRAPHX PYTHON: dynamic shape argument passed to to_buffer_info");
auto strides = s.strides();
std::transform(
strides.begin(), strides.end(), strides.begin(), [&](auto i) { return i * s.type_size(); });
py::buffer_info b;
visit_type(s, [&](auto as) {
// migraphx use int8_t data to store bool type, we need to
// explicitly specify the data type as bool for python
if(s.type() == migraphx::shape::bool_type)
{
b = py::buffer_info(x.data(),
as.size(),
py::format_descriptor<bool>::format(),
s.ndim(),
s.lens(),
strides);
}
else
{
b = py::buffer_info(x.data(),
as.size(),
py::format_descriptor<decltype(as())>::format(),
s.ndim(),
s.lens(),
strides);
}
});
return b;
}
migraphx::shape to_shape(const py::buffer_info& info)
{
migraphx::shape::type_t t;
std::size_t n = 0;
visit_types([&](auto as) {
if(info.format == py::format_descriptor<decltype(as())>::format() or
(info.format == "l" and py::format_descriptor<decltype(as())>::format() == "q") or
(info.format == "L" and py::format_descriptor<decltype(as())>::format() == "Q"))
{
t = as.type_enum();
n = sizeof(as());
}
else if(info.format == "?" and py::format_descriptor<decltype(as())>::format() == "b")
{
t = migraphx::shape::bool_type;
n = sizeof(bool);
}
});
if(n == 0)
{
MIGRAPHX_THROW("MIGRAPHX PYTHON: Unsupported data type " + info.format);
}
auto strides = info.strides;
std::transform(strides.begin(), strides.end(), strides.begin(), [&](auto i) -> std::size_t {
return n > 0 ? i / n : 0;
});
// scalar support
if(info.shape.empty())
{
return migraphx::shape{t};
}
else
{
return migraphx::shape{t, info.shape, strides};
}
}
MIGRAPHX_PYBIND11_MODULE(migraphx, m)
{
py::class_<migraphx::shape> shape_cls(m, "shape");
shape_cls
.def(py::init([](py::kwargs kwargs) {
auto v = migraphx::to_value(kwargs);
auto t = migraphx::shape::parse_type(v.get("type", "float"));
if(v.contains("dyn_dims"))
{
auto dyn_dims =
migraphx::from_value<std::vector<migraphx::shape::dynamic_dimension>>(
v.at("dyn_dims"));
return migraphx::shape(t, dyn_dims);
}
auto lens = v.get<std::size_t>("lens", {1});
if(v.contains("strides"))
return migraphx::shape(t, lens, v.at("strides").to_vector<std::size_t>());
else
return migraphx::shape(t, lens);
}))
.def("type", &migraphx::shape::type)
.def("lens", &migraphx::shape::lens)
.def("strides", &migraphx::shape::strides)
.def("ndim", &migraphx::shape::ndim)
.def("elements", &migraphx::shape::elements)
.def("bytes", &migraphx::shape::bytes)
.def("type_string", &migraphx::shape::type_string)
.def("type_size", &migraphx::shape::type_size)
.def("dyn_dims", &migraphx::shape::dyn_dims)
.def("packed", &migraphx::shape::packed)
.def("transposed", &migraphx::shape::transposed)
.def("broadcasted", &migraphx::shape::broadcasted)
.def("standard", &migraphx::shape::standard)
.def("scalar", &migraphx::shape::scalar)
.def("dynamic", &migraphx::shape::dynamic)
.def("__eq__", std::equal_to<migraphx::shape>{})
.def("__ne__", std::not_equal_to<migraphx::shape>{})
.def("__repr__", [](const migraphx::shape& s) { return migraphx::to_string(s); });
py::enum_<migraphx::shape::type_t>(shape_cls, "type_t")
MIGRAPHX_SHAPE_VISIT_TYPES(MIGRAPHX_PYTHON_GENERATE_SHAPE_ENUM);
py::class_<migraphx::shape::dynamic_dimension>(shape_cls, "dynamic_dimension")
.def(py::init<>())
.def(py::init<std::size_t, std::size_t>())
.def(py::init<std::size_t, std::size_t, std::set<std::size_t>>())
.def_readwrite("min", &migraphx::shape::dynamic_dimension::min)
.def_readwrite("max", &migraphx::shape::dynamic_dimension::max)
.def_readwrite("optimals", &migraphx::shape::dynamic_dimension::optimals)
.def("is_fixed", &migraphx::shape::dynamic_dimension::is_fixed);
py::class_<migraphx::argument>(m, "argument", py::buffer_protocol())
.def_buffer([](migraphx::argument& x) -> py::buffer_info { return to_buffer_info(x); })
.def(py::init([](py::buffer b) {
py::buffer_info info = b.request();
return migraphx::argument(to_shape(info), info.ptr);
}))
.def("get_shape", &migraphx::argument::get_shape)
.def("data_ptr",
[](migraphx::argument& x) { return reinterpret_cast<std::uintptr_t>(x.data()); })
.def("tolist",
[](migraphx::argument& x) {
py::list l{x.get_shape().elements()};
visit(x, [&](auto data) { l = py::cast(data.to_vector()); });
return l;
})
.def("__eq__", std::equal_to<migraphx::argument>{})
.def("__ne__", std::not_equal_to<migraphx::argument>{})
.def("__repr__", [](const migraphx::argument& x) { return migraphx::to_string(x); });
py::class_<migraphx::target>(m, "target");
py::class_<migraphx::instruction_ref>(m, "instruction_ref")
.def("shape", [](migraphx::instruction_ref i) { return i->get_shape(); })
.def("op", [](migraphx::instruction_ref i) { return i->get_operator(); });
py::class_<migraphx::module, std::unique_ptr<migraphx::module, py::nodelete>>(m, "module")
.def("print", [](const migraphx::module& mm) { std::cout << mm << std::endl; })
.def(
"add_instruction",
[](migraphx::module& mm,
const migraphx::operation& op,
std::vector<migraphx::instruction_ref>& args,
std::vector<migraphx::module*>& mod_args) {
return mm.add_instruction(op, args, mod_args);
},
py::arg("op"),
py::arg("args"),
py::arg("mod_args") = std::vector<migraphx::module*>{})
.def(
"add_literal",
[](migraphx::module& mm, py::buffer data) {
py::buffer_info info = data.request();
auto literal_shape = to_shape(info);
return mm.add_literal(literal_shape, reinterpret_cast<char*>(info.ptr));
},
py::arg("data"))
.def(
"add_parameter",
[](migraphx::module& mm, const std::string& name, const migraphx::shape shape) {
return mm.add_parameter(name, shape);
},
py::arg("name"),
py::arg("shape"))
.def(
"add_return",
[](migraphx::module& mm, std::vector<migraphx::instruction_ref>& args) {
return mm.add_return(args);
},
py::arg("args"))
.def("__repr__", [](const migraphx::module& mm) { return migraphx::to_string(mm); });
py::class_<migraphx::program>(m, "program")
.def(py::init([]() { return migraphx::program(); }))
.def("get_parameter_names", &migraphx::program::get_parameter_names)
.def("get_parameter_shapes", &migraphx::program::get_parameter_shapes)
.def("get_output_shapes", &migraphx::program::get_output_shapes)
.def("is_compiled", &migraphx::program::is_compiled)
.def(
"compile",
[](migraphx::program& p,
const migraphx::target& t,
bool offload_copy,
bool fast_math,
bool exhaustive_tune) {
migraphx::compile_options options;
options.offload_copy = offload_copy;
options.fast_math = fast_math;
options.exhaustive_tune = exhaustive_tune;
p.compile(t, options);
},
py::arg("t"),
py::arg("offload_copy") = true,
py::arg("fast_math") = true,
py::arg("exhaustive_tune") = false)
.def("get_main_module", [](const migraphx::program& p) { return p.get_main_module(); })
.def(
"create_module",
[](migraphx::program& p, const std::string& name) { return p.create_module(name); },
py::arg("name"))
.def("run",
[](migraphx::program& p, py::dict params) {
migraphx::parameter_map pm;
for(auto x : params)
{
std::string key = x.first.cast<std::string>();
py::buffer b = x.second.cast<py::buffer>();
py::buffer_info info = b.request();
pm[key] = migraphx::argument(to_shape(info), info.ptr);
}
return p.eval(pm);
})
.def("run_async",
[](migraphx::program& p,
py::dict params,
std::uintptr_t stream,
std::string stream_name) {
migraphx::parameter_map pm;
for(auto x : params)
{
std::string key = x.first.cast<std::string>();
py::buffer b = x.second.cast<py::buffer>();
py::buffer_info info = b.request();
pm[key] = migraphx::argument(to_shape(info), info.ptr);
}
migraphx::execution_environment exec_env{
migraphx::any_ptr(reinterpret_cast<void*>(stream), stream_name), true};
return p.eval(pm, exec_env);
})
.def("sort", &migraphx::program::sort)
.def("print", [](const migraphx::program& p) { std::cout << p << std::endl; })
.def("__eq__", std::equal_to<migraphx::program>{})
.def("__ne__", std::not_equal_to<migraphx::program>{})
.def("__repr__", [](const migraphx::program& p) { return migraphx::to_string(p); });
py::class_<migraphx::operation> op(m, "op");
op.def(py::init([](const std::string& name, py::kwargs kwargs) {
migraphx::value v = migraphx::value::object{};
if(kwargs)
{
v = migraphx::to_value(kwargs);
}
return migraphx::make_op(name, v);
}))
.def("name", &migraphx::operation::name);
py::enum_<migraphx::op::pooling_mode>(op, "pooling_mode")
.value("average", migraphx::op::pooling_mode::average)
.value("max", migraphx::op::pooling_mode::max)
.value("lpnorm", migraphx::op::pooling_mode::lpnorm);
py::enum_<migraphx::op::rnn_direction>(op, "rnn_direction")
.value("forward", migraphx::op::rnn_direction::forward)
.value("reverse", migraphx::op::rnn_direction::reverse)
.value("bidirectional", migraphx::op::rnn_direction::bidirectional);
m.def(
"argument_from_pointer",
[](const migraphx::shape shape, const int64_t address) {
return migraphx::argument(shape, reinterpret_cast<void*>(address));
},
py::arg("shape"),
py::arg("address"));
m.def(
"parse_tf",
[](const std::string& filename,
bool is_nhwc,
unsigned int batch_size,
std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims,
std::vector<std::string> output_names) {
return migraphx::parse_tf(
filename, migraphx::tf_options{is_nhwc, batch_size, map_input_dims, output_names});
},
"Parse tf protobuf (default format is nhwc)",
py::arg("filename"),
py::arg("is_nhwc") = true,
py::arg("batch_size") = 1,
py::arg("map_input_dims") = std::unordered_map<std::string, std::vector<std::size_t>>(),
py::arg("output_names") = std::vector<std::string>());
m.def(
"parse_onnx",
[](const std::string& filename,
unsigned int default_dim_value,
migraphx::shape::dynamic_dimension default_dyn_dim_value,
std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims,
std::unordered_map<std::string, std::vector<migraphx::shape::dynamic_dimension>>
map_dyn_input_dims,
bool skip_unknown_operators,
bool print_program_on_error,
int64_t max_loop_iterations) {
migraphx::onnx_options options;
options.default_dim_value = default_dim_value;
options.default_dyn_dim_value = default_dyn_dim_value;
options.map_input_dims = map_input_dims;
options.map_dyn_input_dims = map_dyn_input_dims;
options.skip_unknown_operators = skip_unknown_operators;
options.print_program_on_error = print_program_on_error;
options.max_loop_iterations = max_loop_iterations;
return migraphx::parse_onnx(filename, options);
},
"Parse onnx file",
py::arg("filename"),
py::arg("default_dim_value") = 0,
py::arg("default_dyn_dim_value") = migraphx::shape::dynamic_dimension{1, 1},
py::arg("map_input_dims") = std::unordered_map<std::string, std::vector<std::size_t>>(),
py::arg("map_dyn_input_dims") =
std::unordered_map<std::string, std::vector<migraphx::shape::dynamic_dimension>>(),
py::arg("skip_unknown_operators") = false,
py::arg("print_program_on_error") = false,
py::arg("max_loop_iterations") = 10);
m.def(
"parse_onnx_buffer",
[](const std::string& onnx_buffer,
unsigned int default_dim_value,
migraphx::shape::dynamic_dimension default_dyn_dim_value,
std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims,
std::unordered_map<std::string, std::vector<migraphx::shape::dynamic_dimension>>
map_dyn_input_dims,
bool skip_unknown_operators,
bool print_program_on_error) {
migraphx::onnx_options options;
options.default_dim_value = default_dim_value;
options.default_dyn_dim_value = default_dyn_dim_value;
options.map_input_dims = map_input_dims;
options.map_dyn_input_dims = map_dyn_input_dims;
options.skip_unknown_operators = skip_unknown_operators;
options.print_program_on_error = print_program_on_error;
return migraphx::parse_onnx_buffer(onnx_buffer, options);
},
"Parse onnx file",
py::arg("filename"),
py::arg("default_dim_value") = 0,
py::arg("default_dyn_dim_value") = migraphx::shape::dynamic_dimension{1, 1},
py::arg("map_input_dims") = std::unordered_map<std::string, std::vector<std::size_t>>(),
py::arg("map_dyn_input_dims") =
std::unordered_map<std::string, std::vector<migraphx::shape::dynamic_dimension>>(),
py::arg("skip_unknown_operators") = false,
py::arg("print_program_on_error") = false);
m.def(
"load",
[](const std::string& name, const std::string& format) {
migraphx::file_options options;
options.format = format;
return migraphx::load(name, options);
},
"Load MIGraphX program",
py::arg("filename"),
py::arg("format") = "msgpack");
m.def(
"save",
[](const migraphx::program& p, const std::string& name, const std::string& format) {
migraphx::file_options options;
options.format = format;
return migraphx::save(p, name, options);
},
"Save MIGraphX program",
py::arg("p"),
py::arg("filename"),
py::arg("format") = "msgpack");
m.def("get_target", &migraphx::make_target);
m.def("create_argument", [](const migraphx::shape& s, const std::vector<double>& values) {
if(values.size() != s.elements())
MIGRAPHX_THROW("Values and shape elements do not match");
migraphx::argument a{s};
a.fill(values.begin(), values.end());
return a;
});
m.def("generate_argument", &migraphx::generate_argument, py::arg("s"), py::arg("seed") = 0);
m.def("fill_argument", &migraphx::fill_argument, py::arg("s"), py::arg("value"));
m.def("quantize_fp16",
&migraphx::quantize_fp16,
py::arg("prog"),
py::arg("ins_names") = std::vector<std::string>{"all"});
m.def("quantize_int8",
&migraphx::quantize_int8,
py::arg("prog"),
py::arg("t"),
py::arg("calibration") = std::vector<migraphx::parameter_map>{},
py::arg("ins_names") = std::vector<std::string>{"dot", "convolution"});
#ifdef HAVE_GPU
m.def("allocate_gpu", &migraphx::gpu::allocate_gpu, py::arg("s"), py::arg("host") = false);
m.def("to_gpu", &migraphx::gpu::to_gpu, py::arg("arg"), py::arg("host") = false);
m.def("from_gpu", &migraphx::gpu::from_gpu);
m.def("gpu_sync", [] { migraphx::gpu::gpu_sync(); });
#endif
#ifdef VERSION_INFO
m.attr("__version__") = VERSION_INFO;
#else
m.attr("__version__") = "dev";
#endif
}

docker/rocm/rocm-pin-600 (new file)

@ -0,0 +1,3 @@
Package: *
Pin: release o=repo.radeon.com
Pin-Priority: 600

docker/rocm/rocm.hcl (new file)

@ -0,0 +1,38 @@
variable "AMDGPU" {
default = "gfx900"
}
variable "ROCM" {
default = "5.7.3"
}
variable "HSA_OVERRIDE_GFX_VERSION" {
default = ""
}
variable "HSA_OVERRIDE" {
default = "1"
}
target deps {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/amd64"]
target = "deps"
}
target rootfs {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/amd64"]
target = "rootfs"
}
target rocm {
dockerfile = "docker/rocm/Dockerfile"
contexts = {
deps = "target:deps",
rootfs = "target:rootfs"
}
platforms = ["linux/amd64"]
args = {
AMDGPU = AMDGPU,
ROCM = ROCM,
HSA_OVERRIDE_GFX_VERSION = HSA_OVERRIDE_GFX_VERSION,
HSA_OVERRIDE = HSA_OVERRIDE
}
}

docker/rocm/rocm.list (new file)

@ -0,0 +1 @@
deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/5.7.3 focal main

docker/rocm/rocm.mk (new file)

@ -0,0 +1,17 @@
BOARDS += rocm
# AMD/ROCm is chunky so we build couple of smaller images for specific chipsets
ROCM_CHIPSETS:=gfx900:9.0.0 gfx1030:10.3.0 gfx1100:11.0.0
local-rocm: version
$(foreach chipset,$(ROCM_CHIPSETS),AMDGPU=$(word 1,$(subst :, ,$(chipset))) HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) HSA_OVERRIDE=1 docker buildx bake --load --file=docker/rocm/rocm.hcl --set rocm.tags=frigate:latest-rocm-$(word 1,$(subst :, ,$(chipset))) rocm;)
unset HSA_OVERRIDE_GFX_VERSION && HSA_OVERRIDE=0 AMDGPU=gfx docker buildx bake --load --file=docker/rocm/rocm.hcl --set rocm.tags=frigate:latest-rocm rocm
build-rocm: version
$(foreach chipset,$(ROCM_CHIPSETS),AMDGPU=$(word 1,$(subst :, ,$(chipset))) HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) HSA_OVERRIDE=1 docker buildx bake --file=docker/rocm/rocm.hcl --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) rocm;)
unset HSA_OVERRIDE_GFX_VERSION && HSA_OVERRIDE=0 AMDGPU=gfx docker buildx bake --file=docker/rocm/rocm.hcl --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm rocm
push-rocm: build-rocm
$(foreach chipset,$(ROCM_CHIPSETS),AMDGPU=$(word 1,$(subst :, ,$(chipset))) HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) HSA_OVERRIDE=1 docker buildx bake --push --file=docker/rocm/rocm.hcl --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) rocm;)
unset HSA_OVERRIDE_GFX_VERSION && HSA_OVERRIDE=0 AMDGPU=gfx docker buildx bake --push --file=docker/rocm/rocm.hcl --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm rocm


@ -0,0 +1,20 @@
#!/command/with-contenv bash
# shellcheck shell=bash
# Compile YoloV8 ONNX files into ROCm MIGraphX files
OVERRIDE=$(cd /opt/frigate && python3 -c 'import frigate.detectors.plugins.rocm as rocm; print(rocm.auto_override_gfx_version())')

if ! test -z "$OVERRIDE"; then
    echo "Using HSA_OVERRIDE_GFX_VERSION=${OVERRIDE}"
    export HSA_OVERRIDE_GFX_VERSION=$OVERRIDE
fi

for onnx in /config/model_cache/yolov8/*.onnx
do
    mxr="${onnx%.onnx}.mxr"
    if ! test -f $mxr; then
        echo "processing $onnx into $mxr"
        /opt/rocm/bin/migraphx-driver compile $onnx --optimize --gpu --enable-offload-copy --binary -o $mxr
    fi
done


@ -0,0 +1 @@
oneshot


@ -0,0 +1 @@
/etc/s6-overlay/s6-rc.d/compile-rocm-models/run


@ -1,6 +1,8 @@
# Birdseye

Birdseye allows a heads-up view of your cameras to see what is going on around your property / space without having to watch all cameras that may have nothing happening. Birdseye allows specific modes that intelligently show and disappear based on what you care about.

## Birdseye Behavior

### Birdseye Modes
@ -34,6 +36,29 @@ cameras:
    enabled: False
```
### Birdseye Inactivity
By default birdseye shows all cameras that have had the configured activity in the last 30 seconds; this can be configured:

```yaml
birdseye:
  enabled: True
  inactivity_threshold: 15
```
## Birdseye Layout
### Birdseye Dimensions
The resolution and aspect ratio of birdseye can be configured. Increasing the resolution improves the quality but does not affect the layout. Changing the aspect ratio of birdseye does affect how cameras are laid out.

```yaml
birdseye:
  enabled: True
  width: 1280
  height: 720
```
### Sorting cameras in the Birdseye view

It is possible to override the order of cameras that are being shown in the Birdseye view.
@ -55,3 +80,27 @@ cameras:
```

*Note*: Cameras are sorted by default using their name to ensure a constant view inside Birdseye.
### Birdseye Cameras
It is possible to limit the number of cameras shown on birdseye at one time. When this is enabled, birdseye will show the cameras with the most recent activity. There is a cooldown to ensure that cameras do not switch too frequently.

For example, this can be configured to only show the most recently active camera.

```yaml
birdseye:
  enabled: True
  layout:
    max_cameras: 1
```
### Birdseye Scaling
By default birdseye tries to fit 2 cameras in each row and then double in size until a suitable layout is found. The scaling can be configured with a value between 1.0 and 5.0 depending on use case.

```yaml
birdseye:
  enabled: True
  layout:
    scaling_factor: 3.0
```


@ -101,7 +101,7 @@ If available, recommended settings are:
According to [this discussion](https://github.com/blakeblackshear/frigate/issues/3235#issuecomment-1135876973), the http video streams seem to be the most reliable for Reolink.

Cameras connected via a Reolink NVR can be connected with the http stream; use `channel[0..15]` in the stream url for the additional channels.

The setup of the main stream can also be done via RTSP, but isn't always reliable on all hardware versions. The example configuration is working with the oldest HW version RLN16-410 device with multiple types of cameras.
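As an illustration only, a go2rtc restream entry for the second NVR channel might look like the sketch below; the stream name, `nvr_ip`, and the credentials are placeholders, and the exact URL parameters should be verified against your firmware:

```yaml
go2rtc:
  streams:
    # hypothetical name for channel 1 of the NVR (channel numbering starts at 0)
    reolink_nvr_channel1:
      - "ffmpeg:http://nvr_ip/flv?port=1935&app=bcs&stream=channel1_main.bcs&user=username&password=password#video=copy#audio=copy#audio=opus"
```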
:::caution


@ -13,8 +13,8 @@ Depending on your system, these parameters may not be compatible. More informati

## Raspberry Pi 3/4

-Ensure you increase the allocated RAM for your GPU to at least 128 (raspi-config > Performance Options > GPU Memory).
+Ensure you increase the allocated RAM for your GPU to at least 128 (`raspi-config` > Performance Options > GPU Memory).

-**NOTICE**: If you are using the addon, you may need to turn off `Protection mode` for hardware acceleration.
+If you are using the HA addon, you may need to use the full access variant and turn off `Protection mode` for hardware acceleration.

```yaml
# if you want to decode a h264 stream
@ -28,16 +28,39 @@ ffmpeg:

:::note

-If running Frigate in docker, you either need to run in priviliged mode or be sure to map the /dev/video1x devices to Frigate
+If running Frigate in Docker, you either need to run in privileged mode or
+map the `/dev/video*` devices to Frigate. With Docker compose add:

```yaml
-docker run -d \
-  --name frigate \
-  ...
-  --device /dev/video10 \
-  ghcr.io/blakeblackshear/frigate:stable
+services:
+  frigate:
+    ...
+    devices:
+      - /dev/video11:/dev/video11
```
Or with `docker run`:
```bash
docker run -d \
--name frigate \
...
--device /dev/video11 \
ghcr.io/blakeblackshear/frigate:stable
```
`/dev/video11` is the correct device (on Raspberry Pi 4B). You can check
by running the following and looking for `H264`:
```bash
for d in /dev/video*; do
echo -e "---\n$d"
v4l2-ctl --list-formats-ext -d $d
done
```
Or map in all the `/dev/video*` devices.
:::

## Intel-based CPUs


@ -25,7 +25,7 @@ cameras:
## VSCode Configuration Schema

-VSCode (and VSCode addon) supports the JSON schemas which will automatically validate the config. This can be added by adding `# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json` to the top of the config file. `frigate_host` being the IP address of Frigate or `ccab4aaf-frigate` if running in the addon.
+VSCode supports JSON schemas for automatically validating configuration files. You can enable this feature by adding `# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json` to the beginning of the configuration file. Replace `frigate_host` with the IP address or hostname of your Frigate server. If you're using both VSCode and Frigate as an add-on, you should use `ccab4aaf-frigate` instead. Make sure to expose port `5000` for the Web Interface when accessing the config from VSCode on another machine.
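For example, the schema comment goes on the very first line of the config file; a minimal sketch, where the host and camera names are placeholders:

```yaml
# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json
mqtt:
  enabled: False
cameras:
  front_door:
    ...
```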
## Environment Variable Substitution


@ -17,7 +17,7 @@ Before tuning motion it is important to understand the goal. In an optimal confi
## Create Motion Masks

First, mask areas with regular motion not caused by the objects you want to detect. The best way to find candidates for motion masks is by watching the debug stream with motion boxes enabled. Good use cases for motion masks are timestamps or tree limbs and large bushes that regularly move due to wind. When possible, avoid creating motion masks that would block motion detection for objects you want to track **even if they are in locations where you don't want events**. Motion masks should not be used to avoid detecting objects in specific areas. More details can be found [in the masks docs.](/configuration/masks.md).
## Prepare For Testing ## Prepare For Testing
@ -37,7 +37,7 @@ Remember that motion detection is just used to determine when object detection s
### Threshold

The threshold value dictates how much of a change in a pixel's luminance is required to be considered motion.

```yaml
# default threshold value
@ -69,7 +69,7 @@ motion:
Once the threshold calculation is run, the pixels that have changed are grouped together. The contour area value is used to decide which groups of changed pixels qualify as motion. Smaller values are more sensitive meaning people that are far away, small animals, etc. are more likely to be detected as motion, but it also means that small changes in shadows, leaves, etc. are detected as motion. Higher values are less sensitive meaning these things won't be detected as motion but with the risk that desired motion won't be detected until closer to the camera.

Watching the motion boxes in the debug view, adjust the contour area until there are no motion boxes smaller than the smallest you'd expect frigate to detect something moving.
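As an illustration only (the value below is not the default), raising the contour area looks like this in the config:

```yaml
motion:
  # larger values ignore smaller groups of changed pixels
  contour_area: 15
```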
### Improve Contrast
@ -77,7 +77,7 @@ At this point if motion is working as desired there is no reason to continue wit
## Tuning Motion Detection During The Night

Once daytime motion detection is tuned, there is a chance that the settings will work well for motion detection during the night as well. If this is the case then the preferred settings can be written to the config file and left alone.

However, if the preferred day settings do not work well at night it is recommended to use HomeAssistant or some other solution to automate changing the settings. That way completely separate sets of motion settings can be used for optimal day and night motion detection.


@ -11,6 +11,12 @@ Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `openvi
The CPU detector type runs a TensorFlow Lite model utilizing the CPU without hardware acceleration. It is recommended to use a hardware accelerated detector type instead for better performance. To configure a CPU based detector, set the `"type"` attribute to `"cpu"`.
:::tip
If you do not have GPU or Edge TPU hardware, using the [OpenVINO Detector](#openvino-detector) is often more efficient than using the CPU detector.
:::
The number of threads used by the interpreter can be specified using the `"num_threads"` attribute, and defaults to `3`.

A TensorFlow Lite model is provided in the container at `/cpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`.
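A minimal sketch of such a detector entry (the detector name `cpu1` is arbitrary):

```yaml
detectors:
  cpu1:
    type: cpu
    num_threads: 3
# optionally point at your own model instead of the bundled one
# model:
#   path: /path/to/your_model.tflite
```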
@ -29,17 +35,17 @@ detectors:
When using CPU detectors, you can add one CPU detector per camera. Adding more detectors than the number of cameras should not improve performance.

-## Edge-TPU Detector
+## Edge TPU Detector

-The EdgeTPU detector type runs a TensorFlow Lite model utilizing the Google Coral delegate for hardware acceleration. To configure an EdgeTPU detector, set the `"type"` attribute to `"edgetpu"`.
+The Edge TPU detector type runs a TensorFlow Lite model utilizing the Google Coral delegate for hardware acceleration. To configure an Edge TPU detector, set the `"type"` attribute to `"edgetpu"`.

-The EdgeTPU device can be specified using the `"device"` attribute according to the [Documentation for the TensorFlow Lite Python API](https://coral.ai/docs/edgetpu/multiple-edgetpu/#using-the-tensorflow-lite-python-api). If not set, the delegate will use the first device it finds.
+The Edge TPU device can be specified using the `"device"` attribute according to the [Documentation for the TensorFlow Lite Python API](https://coral.ai/docs/edgetpu/multiple-edgetpu/#using-the-tensorflow-lite-python-api). If not set, the delegate will use the first device it finds.

A TensorFlow Lite model is provided in the container at `/edgetpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`.

:::tip

-See [common Edge-TPU troubleshooting steps](/troubleshooting/edgetpu) if the EdgeTPu is not detected.
+See [common Edge TPU troubleshooting steps](/troubleshooting/edgetpu) if the Edge TPU is not detected.

:::
@ -99,13 +105,65 @@ detectors:
    device: pci
```
### Yolov8 On Coral
It is possible to use the [ultralytics yolov8](https://github.com/ultralytics/ultralytics) pretrained models with the Google Coral processors.
#### Setup
You need to download yolov8 model files suitable for the EdgeTPU. Frigate can do this automatically with the `DOWNLOAD_YOLOV8={0 | 1}` environment variable either from the command line
```bash
$ docker run ... -e DOWNLOAD_YOLOV8=1 \
...
```
or when using docker compose:
```yaml
services:
frigate:
...
environment:
DOWNLOAD_YOLOV8: "1"
```
When this variable is set, Frigate will fetch [yolov8.small.models.tar.gz](https://github.com/harakas/models/releases/download/yolov8.1-1.1/yolov8.small.models.tar.gz) at startup and extract it into the `/config/model_cache/yolov8/` directory.
The following files suitable for the EdgeTPU detector will be available under `/config/model_cache/yolov8/`:
- `yolov8[ns]_320x320_edgetpu.tflite` -- nano (n) and small (s) sized models that have been trained using the coco dataset (90 classes)
- `yolov8[ns]-oiv7_320x320_edgetpu.tflite` -- model files that have been trained using the google open images v7 dataset (601 classes)
- `labels.txt` and `labels-frigate.txt` -- full and aggregated labels for the coco dataset models
- `labels-oiv7.txt` and `labels-oiv7-frigate.txt` -- labels for the oiv7 dataset models
The aggregated label files contain renamed labels leaving only `person`, `vehicle`, `animal` and `bird` classes. The oiv7 trained models contain 601 classes and so are difficult to configure manually -- using aggregate labels is recommended.
Larger models (of `m` and `l` size and also at `640x640` resolution) can be found at https://github.com/harakas/models/releases/tag/yolov8.1-1.1/ but have to be installed manually.
The oiv7 models have been trained using a larger google open images v7 dataset. They also contain a lot more detection classes (over 600) so using aggregate label files is recommended. The large number of classes leads to a lower baseline for detection probability values and to higher resource consumption (they are slower to evaluate).
#### Configuration
```yaml
model:
  labelmap_path: /config/model_cache/yolov8/labels.txt
  model_type: yolov8
detectors:
  coral:
    type: edgetpu
    device: usb
    model:
      path: /config/model_cache/yolov8/yolov8n_320x320_edgetpu.tflite
```
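A variant pointing at the oiv7 small model together with its aggregated label file might look like this (a sketch based on the file names listed above):

```yaml
model:
  labelmap_path: /config/model_cache/yolov8/labels-oiv7-frigate.txt
  model_type: yolov8
detectors:
  coral:
    type: edgetpu
    device: usb
    model:
      path: /config/model_cache/yolov8/yolov8s-oiv7_320x320_edgetpu.tflite
```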
## OpenVINO Detector
The OpenVINO detector type runs an OpenVINO IR model on AMD and Intel CPUs, Intel GPUs and Intel VPU hardware. To configure an OpenVINO detector, set the `"type"` attribute to `"openvino"`.
The OpenVINO device to be used is specified using the `"device"` attribute according to the naming conventions in the [Device Documentation](https://docs.openvino.ai/latest/openvino_docs_OV_UG_Working_with_devices.html). Other supported devices could be `AUTO`, `CPU`, `GPU`, `MYRIAD`, etc. If not specified, the default OpenVINO device will be selected by the `AUTO` plugin.
OpenVINO is supported on 6th Gen Intel platforms (Skylake) and newer. It will also run on AMD CPUs, although this is not officially supported. A supported Intel platform is required to use the `GPU` device with OpenVINO. The `MYRIAD` device may be run on any platform, including Arm devices. For detailed system requirements, see [OpenVINO System Requirements](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/system-requirements.html).
An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model. Use the model configuration shown below when using the OpenVINO detector with the default model.
@ -125,7 +183,7 @@ model:
labelmap_path: /openvino-model/coco_91cl_bkgr.txt
```
This detector also supports some YOLO variants: YOLOX, YOLOv5, and YOLOv8 specifically. Other YOLO variants are not officially supported/tested. Frigate does not come with any yolo models preloaded, so you will need to supply your own models. This detector has been verified to work with the [yolox_tiny](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny) model from Intel's Open Model Zoo. You can follow [these instructions](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny#download-a-model-and-convert-it-into-openvino-ir-format) to retrieve the OpenVINO-compatible `yolox_tiny` model. Make sure that the model input dimensions match the `width` and `height` parameters, and `model_type` is set accordingly. See [Full Configuration Reference](/configuration/reference.md) for a list of possible `model_type` options. Below is an example of how `yolox_tiny` can be used in Frigate:
```yaml
detectors:
@ -146,7 +204,7 @@ model:
### Intel NCS2 VPU and Myriad X Setup
Intel produces a neural net inference acceleration chip called Myriad X. This chip was sold in their Neural Compute Stick 2 (NCS2) which has been discontinued. If intending to use the MYRIAD device for acceleration, additional setup is required to pass through the USB device. The host needs a udev rule installed to handle the NCS2 device.
```bash
sudo usermod -a -G users "$(whoami)"
@ -176,7 +234,7 @@ volumes:
## NVidia TensorRT Detector
Nvidia GPUs may be used for object detection using the TensorRT libraries. Due to the size of the additional libraries, this detector is only provided in images with the `-tensorrt` tag suffix, e.g. `ghcr.io/blakeblackshear/frigate:stable-tensorrt`. This detector is designed to work with Yolo models for object detection.
### Minimum Hardware Support
@ -345,7 +403,7 @@ model: # required
Explanation of rknn-specific options:
- **core mask** controls which cores of your NPU should be used. This option applies only to SoCs with a multicore NPU (at the time of writing this is only the RK3588/S). The easiest way is to pass the value as a binary number. To do so, use the prefix `0b` and write a `0` to disable a core and a `1` to enable a core, whereas the last digit corresponds to core0, the second last to core1, etc. You also have to use the cores in ascending order (so you can't use core0 and core2; but you can use core0 and core1). Enabling more cores can reduce the inference speed, especially when using bigger models (see section below). Examples:
- `core_mask: 0b000` or just `core_mask: 0` let the NPU decide which cores should be used. Default and recommended value.
- `core_mask: 0b001` use only core0.
- `core_mask: 0b011` use core0 and core1.
@ -397,3 +455,157 @@ detectors:
```
:::
## AMD/ROCm GPU detector
### Setup
The `rocm` detector supports running [ultralytics](https://github.com/ultralytics/ultralytics) yolov8 models on AMD GPUs and iGPUs. Use a Frigate Docker image with the `-rocm` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-rocm`.
As the ROCm software stack is quite large, there are also smaller versions for specific GPU chipsets:
- `ghcr.io/blakeblackshear/frigate:stable-rocm-gfx900`
- `ghcr.io/blakeblackshear/frigate:stable-rocm-gfx1030`
- `ghcr.io/blakeblackshear/frigate:stable-rocm-gfx1100`
### Docker settings for GPU access
ROCm needs access to the `/dev/kfd` and `/dev/dri` devices. When Docker or Frigate is not run as root, the `video` (and possibly `render` and `ssl/_ssl`) groups should also be added (see the sketch after the compose example below).
When running Docker directly, the following flags should be added for device access:
```bash
$ docker run --device=/dev/kfd --device=/dev/dri \
...
```
When using docker compose:
```yaml
services:
frigate:
...
devices:
- /dev/dri
- /dev/kfd
...
```
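If the container does not run as root, the groups mentioned above can be added as well. A minimal sketch, assuming the host group names are `video` and `render`:
```bash
$ docker run --device=/dev/kfd --device=/dev/dri \
    --group-add video --group-add render \
    ...
```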
For reference on recommended settings see [running ROCm/pytorch in Docker](https://rocm.docs.amd.com/projects/install-on-linux/en/develop/how-to/3rd-party/pytorch-install.html#using-docker-with-pytorch-pre-installed).
### Docker settings for overriding the GPU chipset
Your GPU or iGPU might work just fine without any special configuration, but in many cases manual settings are needed. The AMD/ROCm software stack comes with a limited set of GPU drivers, and for newer or missing models you will have to override the chipset version to an older/generic version to get things working.
AMD/ROCm also does not officially support integrated GPUs. It still works with most of them just fine, but requires special settings: the `HSA_OVERRIDE_GFX_VERSION` environment variable has to be configured. See the [ROCm bug report](https://github.com/ROCm/ROCm/issues/1743) for context and examples.
For the chipset-specific Frigate ROCm builds this variable is already set automatically.
For the general ROCm Frigate build there is some automatic detection:
- gfx90c -> 9.0.0
- gfx1031 -> 10.3.0
- gfx1103 -> 11.0.0
If you have a different chipset, you may need to override `HSA_OVERRIDE_GFX_VERSION` at Docker launch. For example, if the version you need is `9.0.0`, configure it from the command line as:
```bash
$ docker run -e HSA_OVERRIDE_GFX_VERSION=9.0.0 \
...
```
When using docker compose:
```yaml
services:
frigate:
...
environment:
HSA_OVERRIDE_GFX_VERSION: "9.0.0"
```
Figuring out which version you need can be complicated, as you can't tell the chipset name and driver from the AMD brand name alone.
- First make sure that the ROCm environment is running properly by running `/opt/rocm/bin/rocminfo` in the Frigate container -- it should list both the CPU and the GPU with their properties.
- Find the chipset version you have (gfxNNN) in the output of `rocminfo` (see below).
- Use a search engine to find the `HSA_OVERRIDE_GFX_VERSION` needed for the given gfx name ("gfxNNN ROCm HSA_OVERRIDE_GFX_VERSION").
- Override `HSA_OVERRIDE_GFX_VERSION` with the relevant value.
- If things are not working, check the Frigate Docker logs.
#### Figuring out if AMD/ROCm is working and found your GPU
```bash
$ docker exec -it frigate /opt/rocm/bin/rocminfo
```
#### Figuring out your AMD GPU chipset version:
We unset the `HSA_OVERRIDE_GFX_VERSION` to prevent an existing override from messing up the result:
```bash
$ docker exec -it frigate /bin/bash -c '(unset HSA_OVERRIDE_GFX_VERSION && /opt/rocm/bin/rocminfo |grep gfx)'
```
### Yolov8 model download and available files
The ROCm-specific Frigate Docker containers automatically download yolov8 files from https://github.com/harakas/models/releases/tag/yolov8.1-1.1/ at startup:
they fetch [yolov8.small.models.tar.gz](https://github.com/harakas/models/releases/download/yolov8.1-1.1/yolov8.small.models.tar.gz)
and extract it into the `/config/model_cache/yolov8/` directory. After that, the model files are compiled for your GPU chipset.
Both the download and compilation can take a couple of minutes, during which Frigate will not be responsive. Check the Docker logs to see how it is progressing.
Automatic model download can be configured with the `DOWNLOAD_YOLOV8=1/0` environment variable, either from the command line
```bash
$ docker run ... -e DOWNLOAD_YOLOV8=1 \
...
```
or when using docker compose:
```yaml
services:
frigate:
...
environment:
DOWNLOAD_YOLOV8: "1"
```
The download can also be triggered in regular Frigate builds using that environment variable. The following files will be available under `/config/model_cache/yolov8/`:
- `yolov8[ns]_320x320.onnx` -- nano (n) and small (s) sized floating point model files usable by the `rocm` and `onnx` detectors that have been trained using the coco dataset (90 classes)
- `yolov8[ns]-oiv7_320x320.onnx` -- floating point model files usable by the `rocm` and `onnx` detectors that have been trained using the google open images v7 dataset (601 classes)
- `labels.txt` and `labels-frigate.txt` -- full and aggregated labels for the coco dataset models
- `labels-oiv7.txt` and `labels-oiv7-frigate.txt` -- labels for the oiv7 dataset models
The aggregated label files contain renamed labels, leaving only the `person`, `vehicle`, `animal` and `bird` classes. The oiv7-trained models contain 601 classes and are difficult to configure manually -- using the aggregated labels is recommended.
Larger models (of `m` and `l` size and also at `640x640` resolution) can be found at https://github.com/harakas/models/releases/tag/yolov8.1-1.1/ but have to be installed manually.
The oiv7 models have been trained on the larger Google Open Images V7 dataset. They contain far more detection classes (over 600), so using the aggregated label files is recommended. The large number of classes leads to a lower baseline for detection probability values and to higher resource consumption (they are slower to evaluate).
The `rocm` builds precompile the `onnx` files for your chipset into `mxr` files. If you change your hardware or GPU, or have compiled the wrong versions, you need to delete the cached `.mxr` files under `/config/model_cache/yolov8/`.
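For example, the stale cache could be cleared from inside the running container with something like the following (a sketch assuming the default cache location above):
```bash
$ docker exec -it frigate /bin/bash -c 'rm -f /config/model_cache/yolov8/*.mxr'
```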
### Frigate configuration
You also need to modify the Frigate configuration to specify the detector, labels and model file. Here is an example configuration running `yolov8s`:
```yaml
model:
labelmap_path: /config/model_cache/yolov8/labels.txt
model_type: yolov8
detectors:
rocm:
type: rocm
model:
path: /config/model_cache/yolov8/yolov8s_320x320.onnx
```
Other settings available for the `rocm` detector (see the sketch after this list):
- `conserve_cpu: True` -- run ROCm/HIP synchronization in blocking mode, saving CPU (at a small cost in latency and maximum throughput)
- `auto_override_gfx: True` -- enable or disable automatic gfx driver detection
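A hedged sketch combining these options with the configuration above (whether these values suit your hardware is an assumption to verify):
```yaml
detectors:
  rocm:
    type: rocm
    conserve_cpu: True
    auto_override_gfx: True
    model:
      path: /config/model_cache/yolov8/yolov8s_320x320.onnx
```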
### Expected performance
On an AMD Ryzen 3 5400U with an integrated GPU (gfx90c), yolov8n runs in around 9 ms per image (about 110 detections per second) and yolov8s in around 18 ms (about 55 detections per second), at 320x320 detector resolution.

View File

@ -10,7 +10,7 @@ Frigate includes the object models listed below from the Google Coral test data.
Please note:
- `car` is listed twice because `truck` has been renamed to `car` by default. These object types are frequently confused.
- `person` is the only tracked object by default. See the [full configuration reference](reference.md) for an example of expanding the list of tracked objects.
<ul>
{labels.split("\n").map((label) => (

View File

@ -36,7 +36,7 @@ record:
enabled: True
retain:
days: 3
mode: motion
events:
retain:
default: 30
@ -161,6 +161,25 @@ Using Frigate UI, HomeAssistant, or MQTT, cameras can be automated to only recor
The export page in the Frigate WebUI allows for exporting real-time clips with a designated start and stop time, as well as exporting a time-lapse for a designated start and stop time. These exports can take a while, so it is important to leave the file alone until it is no longer in progress.
### Time-lapse export
When exporting a time-lapse, the default speed-up is 25x at 30 FPS. This means that every 25 seconds of (real-time) recording is condensed into 1 second of time-lapse video (always without audio) with a smoothness of 30 FPS.
To configure the speed-up factor, the frame rate and further custom settings, the configuration parameter `timelapse_args` can be used. The below configuration example would change the time-lapse speed to 60x (for fitting 1 hour of recording into 1 minute of time-lapse) with 25 FPS:
```yaml
record:
enabled: True
export:
timelapse_args: "-vf setpts=PTS/60 -r 25"
```
:::tip
When `hwaccel_args` are used globally, hardware encoding is used for time-lapse generation. The encoder determines its own behavior, so the resulting file size may be undesirably large.
To reduce the output file size, the ffmpeg parameter `-qp n` can be used (where `n` is the value of the quantisation parameter). The value can be adjusted to get an acceptable tradeoff between quality and file size for the given scenario; see the example after this tip.
:::
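For example, a 60x speed-up at 25 FPS with a quantisation parameter of 28 (the `-qp` value here is only an illustrative assumption; tune it for your own quality/size tradeoff):
```yaml
record:
  enabled: True
  export:
    timelapse_args: "-vf setpts=PTS/60 -r 25 -qp 28"
```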
## Syncing Recordings With Disk
In some cases the recordings files may be deleted but Frigate will not know this has happened. Recordings sync can be enabled which will tell Frigate to check the file system and delete any db entries for files which don't exist.

View File

@ -145,6 +145,14 @@ birdseye:
# motion - cameras are included if motion was detected in the last 30 seconds
# continuous - all cameras are included always
mode: objects
# Optional: Threshold for camera activity to stop showing camera (default: shown below)
inactivity_threshold: 30
# Optional: Configure the birdseye layout
layout:
# Optional: Scaling factor for the layout calculator (default: shown below)
scaling_factor: 2.0
# Optional: Maximum number of cameras to show at one time, showing the most recent (default: show all cameras)
max_cameras: 1
# Optional: ffmpeg configuration
# More information about presets at https://docs.frigate.video/configuration/ffmpeg_presets

View File

@ -155,6 +155,12 @@ cd web && npm install
cd web && npm run dev
```
##### 3a. Run the development server against a non-local instance
To run the development server against a non-local instance, you will need to
replace the `localhost` values in `vite.config.ts` with the IP address of the
non-local backend server.
#### 4. Making changes
The Web UI is built using [Vite](https://vitejs.dev/), [Preact](https://preactjs.com), and [Tailwind CSS](https://tailwindcss.com).

View File

@ -40,14 +40,15 @@ The USB version is compatible with the widest variety of hardware and does not r
The PCIe and M.2 versions require installation of a driver on the host. Follow the instructions for your version from https://coral.ai
A single Coral can handle many cameras using the default model and will be sufficient for the majority of users. You can calculate the maximum performance of your Coral based on the inference speed reported by Frigate. With an inference speed of 10, your Coral will top out at `1000/10=100`, or 100 frames per second. If your detection fps is regularly getting close to that, you should first consider tuning motion masks. If those are already properly configured, a second Coral may be needed.
### OpenVINO
The OpenVINO detector type is able to run on:
- 6th Gen Intel Platforms and newer that have an iGPU
- x86 & Arm64 hosts with VPU Hardware (ex: Intel NCS2)
- Most modern AMD CPUs (though this is officially not supported by Intel)
More information is available [in the detector docs](/configuration/object_detectors#openvino-detector)
@ -105,6 +106,12 @@ Frigate supports SBCs with the following Rockchip SoCs:
Using the yolov8n model on an Orange Pi 5 Plus with an RK3588 SoC, inference speeds vary between 20 - 25 ms.
#### AMD GPUs and iGPUs
With the [rocm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many AMD GPUs and iGPUs.
An AMD Ryzen mini PC with AMD Ryzen 3 5400U iGPU takes about 9 ms to evaluate yolov8n.
## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version)
This is taken from a [user question on reddit](https://www.reddit.com/r/homeassistant/comments/q8mgau/comment/hgqbxh5/?utm_source=share&utm_medium=web2x&context=3). Modified slightly for clarity.

View File

@ -49,7 +49,7 @@ services:
:::caution
Users of the Snapcraft build of Docker cannot use storage locations outside your $HOME folder.
:::
@ -98,9 +98,10 @@ services:
image: ghcr.io/blakeblackshear/frigate:stable
shm_size: "64mb" # update for your cameras based on calculation above
devices:
- /dev/bus/usb:/dev/bus/usb # Passes the USB Coral, needs to be modified for other versions
- /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux
- /dev/video11:/dev/video11 # For Raspberry Pi 4B
- /dev/dri/renderD128:/dev/dri/renderD128 # For intel hwaccel, needs to be updated for your hardware
volumes:
- /etc/localtime:/etc/localtime:ro
- /path/to/your/config:/config
@ -150,6 +151,10 @@ The community supported docker image tags for the current stable version are:
- `stable-tensorrt-jp5` - Frigate build optimized for nvidia Jetson devices running Jetpack 5
- `stable-tensorrt-jp4` - Frigate build optimized for nvidia Jetson devices running Jetpack 4.6
- `stable-rk` - Frigate build for SBCs with Rockchip SoC
- `stable-rocm` - Frigate build for [AMD GPUs and iGPUs](../configuration/object_detectors.md#amdrocm-gpu-detector), all drivers
- `stable-rocm-gfx900` - AMD gfx900 driver only
- `stable-rocm-gfx1030` - AMD gfx1030 driver only
- `stable-rocm-gfx1100` - AMD gfx1100 driver only
## Home Assistant Addon

View File

@ -237,7 +237,7 @@ cameras:
More details on available detectors can be found [here](../configuration/object_detectors.md).
Restart Frigate and you should start seeing detections for `person`. If you want to track other objects, they will need to be added according to the [configuration file reference](../configuration/reference.md).
### Step 5: Setup motion masks
@ -305,7 +305,7 @@ cameras:
If you don't have separate streams for detect and record, you would just add the record role to the list on the first input.
By default, Frigate will retain video of all events for 10 days. The full set of options for recording can be found [here](../configuration/reference.md).
#### Snapshots
@ -325,7 +325,7 @@ cameras:
motion: ...
```
By default, Frigate will retain snapshots of all events for 10 days. The full set of options for snapshots can be found [here](../configuration/reference.md).
### Step 7: Complete config

View File

@ -3,7 +3,7 @@ id: ha_network_storage
title: Home Assistant network storage
---
As of Home Assistant Core 2023.6, Network Mounted Storage is supported for addons.
## Setting Up Remote Storage For Frigate

View File

@ -87,7 +87,7 @@ There are many ways to authenticate a website but a straightforward approach is
## Nginx Reverse Proxy
This method shows a working example for subdomain type reverse proxy with SSL enabled.
### Setup server and port to reverse proxy
@ -123,7 +123,7 @@ This section points to your SSL files, the example below shows locations to a de
```
### Setup reverse proxy settings
The settings below enable connection upgrade, set up logging (optional), and proxy everything from the `/` context to the Docker host and port specified earlier in the configuration.

View File

@ -43,7 +43,7 @@ Accepts the following query string parameters:
Example parameters:
- `h=300`: resizes the image to 300 pixels tall
### `GET /api/stats`

View File

@ -7,10 +7,6 @@ title: FAQ
Frigate+ models are built by fine tuning a base model with the images you have annotated and verified. The base model is trained from scratch from a sampling of images across all Frigate+ user submissions and takes weeks of expensive GPU resources to train. If the models were built using your image uploads alone, you would need to provide tens of thousands of examples and it would take more than a week (and considerable cost) to train. Diversity helps the model generalize.
### What is a training credit and how do I use them?
Essentially, `1 training credit = 1 trained model`. When you have uploaded, annotated, and verified additional images and you are ready to train your model, you will submit a model request which will use one credit. The model that is trained will utilize all of the verified images in your account. When new base models are available, it will require the use of a training credit to generate a new user model on the new base model.
### Are my video feeds sent to the cloud for analysis when using Frigate+ models?
No. Frigate+ models are a drop in replacement for the default model. All processing is performed locally as always. The only images sent to Frigate+ are the ones you specifically submit via the `Send to Frigate+` button or upload directly.
@ -25,4 +21,4 @@ Yes. Models and metadata are stored in the `model_cache` directory within the co
### Can I keep using my Frigate+ models even if I do not renew my subscription?
Yes. Subscriptions to Frigate+ provide access to the infrastructure used to train the models. Models trained with your subscription are yours to keep and use forever. However, do note that the terms and conditions prohibit you from sharing, reselling, or creating derivative products from the models.

View File

@ -13,7 +13,7 @@ For more detailed recommendations, you can refer to the docs on [improving your
## Step 2: Submit a model request
Once you have an initial set of verified images, you can request a model on the Models page. Each model request requires 1 of the 12 trainings that you receive with your annual subscription. This model will support all [label types available](./index.md#available-label-types) even if you do not submit any examples for those labels. Model creation can take up to 36 hours.
![Plus Models Page](/img/plus/plus-models.jpg)
## Step 3: Set your model id in the config

View File

@ -11,7 +11,7 @@ The baseline model isn't directly available after subscribing. This may change i
:::
With a subscription, 12 model trainings per year are included. If you cancel your subscription, you will retain access to any trained models. An active subscription is required to submit model requests or purchase additional trainings.
Information on how to integrate Frigate+ with Frigate can be found in the [integration docs](../integrations/plus.md).

View File

@ -25,7 +25,7 @@ The USB coral can draw up to 900mA and this can be too much for some on-device U
The USB coral has different IDs when it is uninitialized and initialized.
- When running Frigate in a VM, Proxmox lxc, etc. you must ensure both device IDs are mapped.
- When running HA OS you may need to run the Full Access version of the Frigate addon with the `Protected Mode` switch disabled so that the coral can be accessed.
## USB Coral Detection Appears to be Stuck

View File

@ -56,4 +56,4 @@ SQLite does not work well on a network share, if the `/media` folder is mapped t
If MQTT isn't working in docker try using the IP of the device hosting the MQTT server instead of `localhost`, `127.0.0.1`, or `mosquitto.ix-mosquitto.svc.cluster.local`.
This is because, by default, Frigate does not run in host mode so localhost points to the Frigate container and not the host device's network.

View File

@ -5,7 +5,7 @@ title: Troubleshooting Recordings
### WARNING : Unable to keep up with recording segments in cache for camera. Keeping the 5 most recent segments out of 6 and discarding the rest...
This error can be caused by a number of different issues. The first step in troubleshooting is to enable debug logging for recording. This will enable logging showing how long it takes for recordings to be moved from RAM cache to the disk.
```yaml
logger:
@ -25,6 +25,41 @@ It is important to let this run until the errors begin to happen, to confirm tha
If the storage is too slow to keep up with the recordings then the maintainer will fall behind and purge the oldest recordings to ensure the cache does not fill up causing a crash. In this case it is important to diagnose why the copy times are slow.
##### Check RAM, swap, cache utilization, and disk utilization
If CPU, RAM, disk throughput, or bus I/O is insufficient, nothing inside Frigate will help. It is important to review each aspect of available system resources.
On Linux, some helpful tools/commands for diagnosing this are:
- docker stats
- htop
- iotop -o
- iostat -sxy --human 1 1
- vmstat 1
On modern Linux kernels, the system will utilize some swap if enabled. Setting `vm.swappiness=1` no longer means that the kernel will only swap in order to avoid OOM. To prevent any swapping inside a container, set the memory and memory+swap allocations to the same value and disable swapping by setting the following docker/podman run parameters:
**Compose example**
```yaml
version: "3.9"
services:
frigate:
...
mem_swappiness: 0
memswap_limit: <MAXSWAP>
deploy:
resources:
limits:
memory: <MAXRAM>
```
**Run command example**
```
--memory=<MAXRAM> --memory-swap=<MAXSWAP> --memory-swappiness=0
```
NOTE: These are hard limits for the container; be sure there is enough headroom above what is shown by `docker stats` for your container, as it will be halted immediately if it hits `<MAXRAM>`. In general, running all cache and tmp filespace in RAM is preferable to disk I/O where possible.
##### Check Storage Type
Mounting a network share is a popular option for storing recordings, but this can lead to reduced copy speeds and cause problems. Some users have found that using `NFS` instead of `SMB` considerably decreased the copy times and fixed the issue. It is also important to ensure that the network connection between the device running Frigate and the network share is stable and fast.

View File

@ -17,11 +17,13 @@ from peewee_migrate import Router
from playhouse.sqlite_ext import SqliteExtDatabase from playhouse.sqlite_ext import SqliteExtDatabase
from playhouse.sqliteq import SqliteQueueDatabase from playhouse.sqliteq import SqliteQueueDatabase
from frigate.comms.config_updater import ConfigPublisher
from frigate.comms.detections_updater import DetectionProxy
from frigate.comms.dispatcher import Communicator, Dispatcher from frigate.comms.dispatcher import Communicator, Dispatcher
from frigate.comms.inter_process import InterProcessCommunicator from frigate.comms.inter_process import InterProcessCommunicator
from frigate.comms.mqtt import MqttClient from frigate.comms.mqtt import MqttClient
from frigate.comms.ws import WebSocketClient from frigate.comms.ws import WebSocketClient
from frigate.config import BirdseyeModeEnum, FrigateConfig from frigate.config import FrigateConfig
from frigate.const import ( from frigate.const import (
CACHE_DIR, CACHE_DIR,
CLIPS_DIR, CLIPS_DIR,
@ -43,6 +45,7 @@ from frigate.models import (
Recordings, Recordings,
RecordingsToDelete, RecordingsToDelete,
Regions, Regions,
ReviewSegment,
Timeline, Timeline,
) )
from frigate.object_detection import ObjectDetectProcess from frigate.object_detection import ObjectDetectProcess
@ -53,10 +56,12 @@ from frigate.ptz.autotrack import PtzAutoTrackerThread
from frigate.ptz.onvif import OnvifController from frigate.ptz.onvif import OnvifController
from frigate.record.cleanup import RecordingCleanup from frigate.record.cleanup import RecordingCleanup
from frigate.record.record import manage_recordings from frigate.record.record import manage_recordings
from frigate.stats import StatsEmitter, stats_init from frigate.review.review import manage_review_segments
from frigate.stats.emitter import StatsEmitter
from frigate.stats.util import stats_init
from frigate.storage import StorageMaintainer from frigate.storage import StorageMaintainer
from frigate.timeline import TimelineProcessor from frigate.timeline import TimelineProcessor
from frigate.types import CameraMetricsTypes, FeatureMetricsTypes, PTZMetricsTypes from frigate.types import CameraMetricsTypes, PTZMetricsTypes
from frigate.util.object import get_camera_regions_grid from frigate.util.object import get_camera_regions_grid
from frigate.version import VERSION from frigate.version import VERSION
from frigate.video import capture_camera, track_camera from frigate.video import capture_camera, track_camera
@ -75,7 +80,6 @@ class FrigateApp:
self.log_queue: Queue = mp.Queue() self.log_queue: Queue = mp.Queue()
self.plus_api = PlusApi() self.plus_api = PlusApi()
self.camera_metrics: dict[str, CameraMetricsTypes] = {} self.camera_metrics: dict[str, CameraMetricsTypes] = {}
self.feature_metrics: dict[str, FeatureMetricsTypes] = {}
self.ptz_metrics: dict[str, PTZMetricsTypes] = {} self.ptz_metrics: dict[str, PTZMetricsTypes] = {}
self.processes: dict[str, int] = {} self.processes: dict[str, int] = {}
self.region_grids: dict[str, list[list[dict[str, int]]]] = {} self.region_grids: dict[str, list[list[dict[str, int]]]] = {}
@ -129,35 +133,6 @@ class FrigateApp:
# issue https://github.com/python/typeshed/issues/8799 # issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards # from mypy 0.981 onwards
"process_fps": mp.Value("d", 0.0), # type: ignore[typeddict-item] "process_fps": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"detection_enabled": mp.Value( # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"i",
self.config.cameras[camera_name].detect.enabled,
),
"motion_enabled": mp.Value("i", True), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"improve_contrast_enabled": mp.Value( # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"i",
self.config.cameras[camera_name].motion.improve_contrast,
),
"motion_threshold": mp.Value( # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"i",
self.config.cameras[camera_name].motion.threshold,
),
"motion_contour_area": mp.Value( # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"i",
self.config.cameras[camera_name].motion.contour_area,
),
"detection_fps": mp.Value("d", 0.0), # type: ignore[typeddict-item] "detection_fps": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799 # issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards # from mypy 0.981 onwards
@ -171,25 +146,10 @@ class FrigateApp:
# issue https://github.com/python/typeshed/issues/8799 # issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards # from mypy 0.981 onwards
"frame_queue": mp.Queue(maxsize=2), "frame_queue": mp.Queue(maxsize=2),
"region_grid_queue": mp.Queue(maxsize=1),
"capture_process": None, "capture_process": None,
"process": None, "process": None,
"audio_rms": mp.Value("d", 0.0), # type: ignore[typeddict-item] "audio_rms": mp.Value("d", 0.0), # type: ignore[typeddict-item]
"audio_dBFS": mp.Value("d", 0.0), # type: ignore[typeddict-item] "audio_dBFS": mp.Value("d", 0.0), # type: ignore[typeddict-item]
"birdseye_enabled": mp.Value( # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"i",
self.config.cameras[camera_name].birdseye.enabled,
),
"birdseye_mode": mp.Value( # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"i",
BirdseyeModeEnum.get_index(
self.config.cameras[camera_name].birdseye.mode.value
),
),
} }
self.ptz_metrics[camera_name] = { self.ptz_metrics[camera_name] = {
"ptz_autotracker_enabled": mp.Value( # type: ignore[typeddict-item] "ptz_autotracker_enabled": mp.Value( # type: ignore[typeddict-item]
@ -221,20 +181,6 @@ class FrigateApp:
# from mypy 0.981 onwards # from mypy 0.981 onwards
} }
self.ptz_metrics[camera_name]["ptz_motor_stopped"].set() self.ptz_metrics[camera_name]["ptz_motor_stopped"].set()
self.feature_metrics[camera_name] = {
"audio_enabled": mp.Value( # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"i",
self.config.cameras[camera_name].audio.enabled,
),
"record_enabled": mp.Value( # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"i",
self.config.cameras[camera_name].record.enabled,
),
}
def set_log_levels(self) -> None: def set_log_levels(self) -> None:
logging.getLogger().setLevel(self.config.logger.default.value.upper()) logging.getLogger().setLevel(self.config.logger.default.value.upper())
@ -251,31 +197,15 @@ class FrigateApp:
# Queues for clip processing # Queues for clip processing
self.event_queue: Queue = mp.Queue() self.event_queue: Queue = mp.Queue()
self.event_processed_queue: Queue = mp.Queue() self.event_processed_queue: Queue = mp.Queue()
self.video_output_queue: Queue = mp.Queue(
maxsize=sum(camera.enabled for camera in self.config.cameras.values()) * 2
)
# Queue for cameras to push tracked objects to # Queue for cameras to push tracked objects to
self.detected_frames_queue: Queue = mp.Queue( self.detected_frames_queue: Queue = mp.Queue(
maxsize=sum(camera.enabled for camera in self.config.cameras.values()) * 2 maxsize=sum(camera.enabled for camera in self.config.cameras.values()) * 2
) )
# Queue for object recordings info
self.object_recordings_info_queue: Queue = mp.Queue()
# Queue for audio recordings info if enabled
self.audio_recordings_info_queue: Optional[Queue] = (
mp.Queue()
if len([c for c in self.config.cameras.values() if c.audio.enabled]) > 0
else None
)
# Queue for timeline events # Queue for timeline events
self.timeline_queue: Queue = mp.Queue() self.timeline_queue: Queue = mp.Queue()
# Queue for inter process communication
self.inter_process_queue: Queue = mp.Queue()
def init_database(self) -> None: def init_database(self) -> None:
def vacuum_db(db: SqliteExtDatabase) -> None: def vacuum_db(db: SqliteExtDatabase) -> None:
logger.info("Running database vacuum") logger.info("Running database vacuum")
@ -348,13 +278,7 @@ class FrigateApp:
recording_process = mp.Process( recording_process = mp.Process(
target=manage_recordings, target=manage_recordings,
name="recording_manager", name="recording_manager",
args=( args=(self.config,),
self.config,
self.inter_process_queue,
self.object_recordings_info_queue,
self.audio_recordings_info_queue,
self.feature_metrics,
),
) )
recording_process.daemon = True recording_process.daemon = True
self.recording_process = recording_process self.recording_process = recording_process
@ -362,6 +286,18 @@ class FrigateApp:
self.processes["recording"] = recording_process.pid or 0 self.processes["recording"] = recording_process.pid or 0
logger.info(f"Recording process started: {recording_process.pid}") logger.info(f"Recording process started: {recording_process.pid}")
def init_review_segment_manager(self) -> None:
review_segment_process = mp.Process(
target=manage_review_segments,
name="review_segment_manager",
args=(self.config,),
)
review_segment_process.daemon = True
self.review_segment_process = review_segment_process
review_segment_process.start()
self.processes["review_segment"] = review_segment_process.pid or 0
logger.info(f"Recording process started: {review_segment_process.pid}")
def bind_database(self) -> None: def bind_database(self) -> None:
"""Bind db to the main process.""" """Bind db to the main process."""
# NOTE: all db accessing processes need to be created before the db can be bound to the main process # NOTE: all db accessing processes need to be created before the db can be bound to the main process
@ -376,34 +312,37 @@ class FrigateApp:
60, 10 * len([c for c in self.config.cameras.values() if c.enabled]) 60, 10 * len([c for c in self.config.cameras.values() if c.enabled])
), ),
) )
models = [Event, Recordings, RecordingsToDelete, Previews, Regions, Timeline] models = [
Event,
Previews,
Recordings,
RecordingsToDelete,
Regions,
ReviewSegment,
Timeline,
]
self.db.bind(models) self.db.bind(models)
def init_stats(self) -> None:
self.stats_tracking = stats_init(
self.config, self.camera_metrics, self.detectors, self.processes
)
def init_external_event_processor(self) -> None: def init_external_event_processor(self) -> None:
self.external_event_processor = ExternalEventProcessor( self.external_event_processor = ExternalEventProcessor(
self.config, self.event_queue self.config, self.event_queue
) )
def init_inter_process_communicator(self) -> None: def init_inter_process_communicator(self) -> None:
self.inter_process_communicator = InterProcessCommunicator( self.inter_process_communicator = InterProcessCommunicator()
self.inter_process_queue self.inter_config_updater = ConfigPublisher()
) self.inter_detection_proxy = DetectionProxy()
def init_web_server(self) -> None: def init_web_server(self) -> None:
self.flask_app = create_app( self.flask_app = create_app(
self.config, self.config,
self.db, self.db,
self.stats_tracking,
self.detected_frames_processor, self.detected_frames_processor,
self.storage_maintainer, self.storage_maintainer,
self.onvif_controller, self.onvif_controller,
self.external_event_processor, self.external_event_processor,
self.plus_api, self.plus_api,
self.stats_emitter,
) )
def init_onvif(self) -> None: def init_onvif(self) -> None:
@ -420,9 +359,8 @@ class FrigateApp:
self.dispatcher = Dispatcher( self.dispatcher = Dispatcher(
self.config, self.config,
self.inter_config_updater,
self.onvif_controller, self.onvif_controller,
self.camera_metrics,
self.feature_metrics,
self.ptz_metrics, self.ptz_metrics,
comms, comms,
) )
@ -481,8 +419,6 @@ class FrigateApp:
self.detected_frames_queue, self.detected_frames_queue,
self.event_queue, self.event_queue,
self.event_processed_queue, self.event_processed_queue,
self.video_output_queue,
self.object_recordings_info_queue,
self.ptz_autotracker_thread, self.ptz_autotracker_thread,
self.stop_event, self.stop_event,
) )
@ -492,12 +428,7 @@ class FrigateApp:
output_processor = mp.Process( output_processor = mp.Process(
target=output_frames, target=output_frames,
name="output_processor", name="output_processor",
args=( args=(self.config,),
self.config,
self.video_output_queue,
self.inter_process_queue,
self.camera_metrics,
),
) )
output_processor.daemon = True output_processor.daemon = True
self.output_processor = output_processor self.output_processor = output_processor
@ -534,7 +465,6 @@ class FrigateApp:
self.detection_queue, self.detection_queue,
self.detection_out_events[name], self.detection_out_events[name],
self.detected_frames_queue, self.detected_frames_queue,
self.inter_process_queue,
self.camera_metrics[name], self.camera_metrics[name],
self.ptz_metrics[name], self.ptz_metrics[name],
self.region_grids[name], self.region_grids[name],
@ -568,10 +498,7 @@ class FrigateApp:
name="audio_capture", name="audio_capture",
args=( args=(
self.config, self.config,
self.audio_recordings_info_queue,
self.camera_metrics, self.camera_metrics,
self.feature_metrics,
self.inter_process_communicator,
), ),
) )
audio_process.daemon = True audio_process.daemon = True
@ -611,8 +538,9 @@ class FrigateApp:
def start_stats_emitter(self) -> None: def start_stats_emitter(self) -> None:
self.stats_emitter = StatsEmitter( self.stats_emitter = StatsEmitter(
self.config, self.config,
self.stats_tracking, stats_init(
self.dispatcher, self.config, self.camera_metrics, self.detectors, self.processes
),
self.stop_event, self.stop_event,
) )
self.stats_emitter.start() self.stats_emitter.start()
@ -647,6 +575,25 @@ class FrigateApp:
self.init_logger() self.init_logger()
logger.info(f"Starting Frigate ({VERSION})") logger.info(f"Starting Frigate ({VERSION})")
if not os.environ.get("I_PROMISE_I_WONT_MAKE_AN_ISSUE_ON_GITHUB"):
print(
"**********************************************************************************"
)
print(
"**********************************************************************************"
)
print("Frigate 0.14 UNSTABLE")
print("This build is not for public use. Please use Frigate stable.")
print("Unstable/experimental builds are not enabled, Frigate is exiting.")
print(
"**********************************************************************************"
)
print(
"**********************************************************************************"
)
sys.exit(1)
try: try:
self.ensure_dirs() self.ensure_dirs()
try: try:
@ -680,6 +627,7 @@ class FrigateApp:
self.init_database() self.init_database()
self.init_onvif() self.init_onvif()
self.init_recording_manager() self.init_recording_manager()
self.init_review_segment_manager()
self.init_go2rtc() self.init_go2rtc()
self.bind_database() self.bind_database()
self.init_inter_process_communicator() self.init_inter_process_communicator()
@ -697,14 +645,13 @@ class FrigateApp:
self.start_camera_capture_processes() self.start_camera_capture_processes()
self.start_audio_processors() self.start_audio_processors()
self.start_storage_maintainer() self.start_storage_maintainer()
self.init_stats()
self.init_external_event_processor() self.init_external_event_processor()
self.start_stats_emitter()
self.init_web_server() self.init_web_server()
self.start_timeline_processor() self.start_timeline_processor()
self.start_event_processor() self.start_event_processor()
self.start_event_cleanup() self.start_event_cleanup()
self.start_record_cleanup() self.start_record_cleanup()
self.start_stats_emitter()
self.start_watchdog() self.start_watchdog()
self.check_shm() self.check_shm()
@ -753,15 +700,16 @@ class FrigateApp:
for queue in [ for queue in [
self.event_queue, self.event_queue,
self.event_processed_queue, self.event_processed_queue,
self.video_output_queue,
self.detected_frames_queue, self.detected_frames_queue,
self.object_recordings_info_queue,
self.audio_recordings_info_queue,
self.log_queue, self.log_queue,
self.inter_process_queue,
]: ]:
if queue is not None: if queue is not None:
while not queue.empty(): while not queue.empty():
queue.get_nowait() queue.get_nowait()
queue.close() queue.close()
queue.join_thread() queue.join_thread()
# Stop Communicators
self.inter_process_communicator.stop()
self.inter_config_updater.stop()
self.inter_detection_proxy.stop()

View File

@ -0,0 +1,51 @@
"""Facilitates communication between processes."""
import multiprocessing as mp
from multiprocessing.synchronize import Event as MpEvent
from typing import Optional
import zmq
SOCKET_PUB_SUB = "ipc:///tmp/cache/config"
class ConfigPublisher:
"""Publishes config changes to different processes."""
def __init__(self) -> None:
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PUB)
self.socket.bind(SOCKET_PUB_SUB)
self.stop_event: MpEvent = mp.Event()
def publish(self, topic: str, payload: any) -> None:
"""There is no communication back to the processes."""
self.socket.send_string(topic, flags=zmq.SNDMORE)
self.socket.send_pyobj(payload)
def stop(self) -> None:
self.stop_event.set()
self.socket.close()
self.context.destroy()
class ConfigSubscriber:
"""Simplifies receiving an updated config."""
def __init__(self, topic: str) -> None:
self.context = zmq.Context()
self.socket = self.context.socket(zmq.SUB)
self.socket.setsockopt_string(zmq.SUBSCRIBE, topic)
self.socket.connect(SOCKET_PUB_SUB)
def check_for_update(self) -> Optional[tuple[str, any]]:
"""Returns updated config or None if no update."""
try:
topic = self.socket.recv_string(flags=zmq.NOBLOCK)
return (topic, self.socket.recv_pyobj())
except zmq.ZMQError:
return (None, None)
def stop(self) -> None:
self.socket.close()
self.context.destroy()
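A minimal usage sketch of the classes above; the topic string and payload are illustrative assumptions rather than values Frigate itself publishes, and `/tmp/cache` is assumed to exist as it does in the Frigate container:
```python
from frigate.comms.config_updater import ConfigPublisher, ConfigSubscriber

# One process owns the publisher...
publisher = ConfigPublisher()

# ...other processes subscribe to a topic prefix they care about.
subscriber = ConfigSubscriber("config/cameras/")

# Publish an updated value under a topic (payload is any picklable object).
publisher.publish("config/cameras/front_door/detect", {"enabled": False})

# Poll for updates without blocking; returns (None, None) if nothing has arrived yet.
topic, payload = subscriber.check_for_update()
if topic is not None:
    print(topic, payload)

subscriber.stop()
publisher.stop()
```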

View File

@ -0,0 +1,102 @@
"""Facilitates communication between processes."""
import threading
from enum import Enum
from typing import Optional
import zmq
SOCKET_CONTROL = "inproc://control.detections_updater"
SOCKET_PUB = "ipc:///tmp/cache/detect_pub"
SOCKET_SUB = "ipc:///tmp/cache/detect_sun"
class DetectionTypeEnum(str, Enum):
all = ""
video = "video"
audio = "audio"
class DetectionProxyRunner(threading.Thread):
def __init__(self, context: zmq.Context[zmq.Socket]) -> None:
threading.Thread.__init__(self)
self.name = "detection_proxy"
self.context = context
def run(self) -> None:
"""Run the proxy."""
control = self.context.socket(zmq.SUB)
control.connect(SOCKET_CONTROL)
control.setsockopt_string(zmq.SUBSCRIBE, "")
incoming = self.context.socket(zmq.XSUB)
incoming.bind(SOCKET_PUB)
outgoing = self.context.socket(zmq.XPUB)
outgoing.bind(SOCKET_SUB)
zmq.proxy_steerable(
incoming, outgoing, None, control
) # blocking, will unblock when the terminate message is received
incoming.close()
outgoing.close()
class DetectionProxy:
"""Proxies video and audio detections."""
def __init__(self) -> None:
self.context = zmq.Context()
self.control = self.context.socket(zmq.PUB)
self.control.bind(SOCKET_CONTROL)
self.runner = DetectionProxyRunner(self.context)
self.runner.start()
def stop(self) -> None:
self.control.send_string("TERMINATE") # tell the proxy to stop
self.runner.join()
self.context.destroy()
class DetectionPublisher:
"""Simplifies receiving video and audio detections."""
def __init__(self, topic: DetectionTypeEnum) -> None:
self.topic = topic
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PUB)
self.socket.connect(SOCKET_PUB)
def send_data(self, payload: any) -> None:
"""Publish detection."""
self.socket.send_string(self.topic.value, flags=zmq.SNDMORE)
self.socket.send_pyobj(payload)
def stop(self) -> None:
self.socket.close()
self.context.destroy()
class DetectionSubscriber:
"""Simplifies receiving video and audio detections."""
def __init__(self, topic: DetectionTypeEnum) -> None:
self.context = zmq.Context()
self.socket = self.context.socket(zmq.SUB)
self.socket.setsockopt_string(zmq.SUBSCRIBE, topic.value)
self.socket.connect(SOCKET_SUB)
def get_data(self, timeout: float = None) -> Optional[tuple[str, any]]:
"""Returns detections or None if no update."""
try:
has_update, _, _ = zmq.select([self.socket], [], [], timeout)
if has_update:
topic = DetectionTypeEnum[self.socket.recv_string(flags=zmq.NOBLOCK)]
return (topic, self.socket.recv_pyobj())
except zmq.ZMQError:
pass
return (None, None)
def stop(self) -> None:
self.socket.close()
self.context.destroy()
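
A sketch of how these pieces compose, assuming the main process starts the DetectionProxy and separate processes publish and consume; the camera name, detections payload, and handle() call are placeholders, not Frigate functions.

proxy = DetectionProxy()  # starts the steerable XSUB/XPUB forwarder thread

# producer side, e.g. an audio detection process
publisher = DetectionPublisher(DetectionTypeEnum.audio)
publisher.send_data(("front_door", detections))  # payload shape is up to the caller

# consumer side, subscribed to both video and audio topics
subscriber = DetectionSubscriber(DetectionTypeEnum.all)
topic, data = subscriber.get_data(timeout=1)
if topic is not None:
    handle(topic, data)

subscriber.stop()
publisher.stop()
proxy.stop()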

View File

@ -2,13 +2,19 @@
import logging import logging
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from typing import Any, Callable from typing import Any, Callable, Optional
from frigate.comms.config_updater import ConfigPublisher
from frigate.config import BirdseyeModeEnum, FrigateConfig from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.const import INSERT_MANY_RECORDINGS, INSERT_PREVIEW, REQUEST_REGION_GRID from frigate.const import (
from frigate.models import Previews, Recordings INSERT_MANY_RECORDINGS,
INSERT_PREVIEW,
REQUEST_REGION_GRID,
UPSERT_REVIEW_SEGMENT,
)
from frigate.models import Previews, Recordings, ReviewSegment
from frigate.ptz.onvif import OnvifCommandEnum, OnvifController from frigate.ptz.onvif import OnvifCommandEnum, OnvifController
from frigate.types import CameraMetricsTypes, FeatureMetricsTypes, PTZMetricsTypes from frigate.types import PTZMetricsTypes
from frigate.util.object import get_camera_regions_grid from frigate.util.object import get_camera_regions_grid
from frigate.util.services import restart_frigate from frigate.util.services import restart_frigate
@ -40,16 +46,14 @@ class Dispatcher:
def __init__( def __init__(
self, self,
config: FrigateConfig, config: FrigateConfig,
config_updater: ConfigPublisher,
onvif: OnvifController, onvif: OnvifController,
camera_metrics: dict[str, CameraMetricsTypes],
feature_metrics: dict[str, FeatureMetricsTypes],
ptz_metrics: dict[str, PTZMetricsTypes], ptz_metrics: dict[str, PTZMetricsTypes],
communicators: list[Communicator], communicators: list[Communicator],
) -> None: ) -> None:
self.config = config self.config = config
self.config_updater = config_updater
self.onvif = onvif self.onvif = onvif
self.camera_metrics = camera_metrics
self.feature_metrics = feature_metrics
self.ptz_metrics = ptz_metrics self.ptz_metrics = ptz_metrics
self.comms = communicators self.comms = communicators
@ -70,7 +74,7 @@ class Dispatcher:
for comm in self.comms: for comm in self.comms:
comm.subscribe(self._receive) comm.subscribe(self._receive)
def _receive(self, topic: str, payload: str) -> None: def _receive(self, topic: str, payload: str) -> Optional[Any]:
"""Handle receiving of payload from communicators.""" """Handle receiving of payload from communicators."""
if topic.endswith("set"): if topic.endswith("set"):
try: try:
@ -95,15 +99,23 @@ class Dispatcher:
Recordings.insert_many(payload).execute() Recordings.insert_many(payload).execute()
elif topic == REQUEST_REGION_GRID: elif topic == REQUEST_REGION_GRID:
camera = payload camera = payload
self.camera_metrics[camera]["region_grid_queue"].put( grid = get_camera_regions_grid(
get_camera_regions_grid( camera,
camera, self.config.cameras[camera].detect,
self.config.cameras[camera].detect, max(self.config.model.width, self.config.model.height),
max(self.config.model.width, self.config.model.height),
)
) )
return grid
elif topic == INSERT_PREVIEW: elif topic == INSERT_PREVIEW:
Previews.insert(payload).execute() Previews.insert(payload).execute()
elif topic == UPSERT_REVIEW_SEGMENT:
(
ReviewSegment.insert(payload)
.on_conflict(
conflict_target=[ReviewSegment.id],
update=payload,
)
.execute()
)
else: else:
self.publish(topic, payload, retain=False) self.publish(topic, payload, retain=False)
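
The UPSERT_REVIEW_SEGMENT branch relies on peewee's insert(...).on_conflict(...) upsert form; a minimal standalone sketch with a hypothetical model (not the real ReviewSegment schema):

from peewee import CharField, Model, SqliteDatabase

db = SqliteDatabase(":memory:")

class Item(Model):  # hypothetical stand-in for a segment row
    id = CharField(primary_key=True)
    state = CharField()

    class Meta:
        database = db

db.create_tables([Item])
payload = {"id": "abc123", "state": "active"}

# insert the row, or update the existing row when the primary key already exists
Item.insert(payload).on_conflict(
    conflict_target=[Item.id],
    update=payload,
).execute()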
@ -119,44 +131,51 @@ class Dispatcher:
def _on_detect_command(self, camera_name: str, payload: str) -> None: def _on_detect_command(self, camera_name: str, payload: str) -> None:
"""Callback for detect topic.""" """Callback for detect topic."""
detect_settings = self.config.cameras[camera_name].detect detect_settings = self.config.cameras[camera_name].detect
motion_settings = self.config.cameras[camera_name].motion
if payload == "ON": if payload == "ON":
if not self.camera_metrics[camera_name]["detection_enabled"].value: if not detect_settings.enabled:
logger.info(f"Turning on detection for {camera_name}") logger.info(f"Turning on detection for {camera_name}")
self.camera_metrics[camera_name]["detection_enabled"].value = True
detect_settings.enabled = True detect_settings.enabled = True
if not self.camera_metrics[camera_name]["motion_enabled"].value: if not motion_settings.enabled:
logger.info( logger.info(
f"Turning on motion for {camera_name} due to detection being enabled." f"Turning on motion for {camera_name} due to detection being enabled."
) )
self.camera_metrics[camera_name]["motion_enabled"].value = True motion_settings.enabled = True
self.config_updater.publish(
f"config/motion/{camera_name}", motion_settings
)
self.publish(f"{camera_name}/motion/state", payload, retain=True) self.publish(f"{camera_name}/motion/state", payload, retain=True)
elif payload == "OFF": elif payload == "OFF":
if self.camera_metrics[camera_name]["detection_enabled"].value: if detect_settings.enabled:
logger.info(f"Turning off detection for {camera_name}") logger.info(f"Turning off detection for {camera_name}")
self.camera_metrics[camera_name]["detection_enabled"].value = False
detect_settings.enabled = False detect_settings.enabled = False
self.config_updater.publish(f"config/detect/{camera_name}", detect_settings)
self.publish(f"{camera_name}/detect/state", payload, retain=True) self.publish(f"{camera_name}/detect/state", payload, retain=True)
def _on_motion_command(self, camera_name: str, payload: str) -> None: def _on_motion_command(self, camera_name: str, payload: str) -> None:
"""Callback for motion topic.""" """Callback for motion topic."""
detect_settings = self.config.cameras[camera_name].detect
motion_settings = self.config.cameras[camera_name].motion
if payload == "ON": if payload == "ON":
if not self.camera_metrics[camera_name]["motion_enabled"].value: if not motion_settings.enabled:
logger.info(f"Turning on motion for {camera_name}") logger.info(f"Turning on motion for {camera_name}")
self.camera_metrics[camera_name]["motion_enabled"].value = True motion_settings.enabled = True
elif payload == "OFF": elif payload == "OFF":
if self.camera_metrics[camera_name]["detection_enabled"].value: if detect_settings.enabled:
logger.error( logger.error(
"Turning off motion is not allowed when detection is enabled." "Turning off motion is not allowed when detection is enabled."
) )
return return
if self.camera_metrics[camera_name]["motion_enabled"].value: if motion_settings.enabled:
logger.info(f"Turning off motion for {camera_name}") logger.info(f"Turning off motion for {camera_name}")
self.camera_metrics[camera_name]["motion_enabled"].value = False motion_settings.enabled = False
self.config_updater.publish(f"config/motion/{camera_name}", motion_settings)
self.publish(f"{camera_name}/motion/state", payload, retain=True) self.publish(f"{camera_name}/motion/state", payload, retain=True)
def _on_motion_improve_contrast_command( def _on_motion_improve_contrast_command(
@ -166,20 +185,15 @@ class Dispatcher:
motion_settings = self.config.cameras[camera_name].motion motion_settings = self.config.cameras[camera_name].motion
if payload == "ON": if payload == "ON":
if not self.camera_metrics[camera_name]["improve_contrast_enabled"].value: if not motion_settings.improve_contrast:
logger.info(f"Turning on improve contrast for {camera_name}") logger.info(f"Turning on improve contrast for {camera_name}")
self.camera_metrics[camera_name][
"improve_contrast_enabled"
].value = True
motion_settings.improve_contrast = True # type: ignore[union-attr] motion_settings.improve_contrast = True # type: ignore[union-attr]
elif payload == "OFF": elif payload == "OFF":
if self.camera_metrics[camera_name]["improve_contrast_enabled"].value: if motion_settings.improve_contrast:
logger.info(f"Turning off improve contrast for {camera_name}") logger.info(f"Turning off improve contrast for {camera_name}")
self.camera_metrics[camera_name][
"improve_contrast_enabled"
].value = False
motion_settings.improve_contrast = False # type: ignore[union-attr] motion_settings.improve_contrast = False # type: ignore[union-attr]
self.config_updater.publish(f"config/motion/{camera_name}", motion_settings)
self.publish(f"{camera_name}/improve_contrast/state", payload, retain=True) self.publish(f"{camera_name}/improve_contrast/state", payload, retain=True)
def _on_ptz_autotracker_command(self, camera_name: str, payload: str) -> None: def _on_ptz_autotracker_command(self, camera_name: str, payload: str) -> None:
@ -218,8 +232,8 @@ class Dispatcher:
motion_settings = self.config.cameras[camera_name].motion motion_settings = self.config.cameras[camera_name].motion
logger.info(f"Setting motion contour area for {camera_name}: {payload}") logger.info(f"Setting motion contour area for {camera_name}: {payload}")
self.camera_metrics[camera_name]["motion_contour_area"].value = payload
motion_settings.contour_area = payload # type: ignore[union-attr] motion_settings.contour_area = payload # type: ignore[union-attr]
self.config_updater.publish(f"config/motion/{camera_name}", motion_settings)
self.publish(f"{camera_name}/motion_contour_area/state", payload, retain=True) self.publish(f"{camera_name}/motion_contour_area/state", payload, retain=True)
def _on_motion_threshold_command(self, camera_name: str, payload: int) -> None: def _on_motion_threshold_command(self, camera_name: str, payload: int) -> None:
@ -232,8 +246,8 @@ class Dispatcher:
motion_settings = self.config.cameras[camera_name].motion motion_settings = self.config.cameras[camera_name].motion
logger.info(f"Setting motion threshold for {camera_name}: {payload}") logger.info(f"Setting motion threshold for {camera_name}: {payload}")
self.camera_metrics[camera_name]["motion_threshold"].value = payload
motion_settings.threshold = payload # type: ignore[union-attr] motion_settings.threshold = payload # type: ignore[union-attr]
self.config_updater.publish(f"config/motion/{camera_name}", motion_settings)
self.publish(f"{camera_name}/motion_threshold/state", payload, retain=True) self.publish(f"{camera_name}/motion_threshold/state", payload, retain=True)
def _on_audio_command(self, camera_name: str, payload: str) -> None: def _on_audio_command(self, camera_name: str, payload: str) -> None:
@ -250,13 +264,12 @@ class Dispatcher:
if not audio_settings.enabled: if not audio_settings.enabled:
logger.info(f"Turning on audio detection for {camera_name}") logger.info(f"Turning on audio detection for {camera_name}")
audio_settings.enabled = True audio_settings.enabled = True
self.feature_metrics[camera_name]["audio_enabled"].value = True
elif payload == "OFF": elif payload == "OFF":
if self.feature_metrics[camera_name]["audio_enabled"].value: if audio_settings.enabled:
logger.info(f"Turning off audio detection for {camera_name}") logger.info(f"Turning off audio detection for {camera_name}")
audio_settings.enabled = False audio_settings.enabled = False
self.feature_metrics[camera_name]["audio_enabled"].value = False
self.config_updater.publish(f"config/audio/{camera_name}", audio_settings)
self.publish(f"{camera_name}/audio/state", payload, retain=True) self.publish(f"{camera_name}/audio/state", payload, retain=True)
def _on_recordings_command(self, camera_name: str, payload: str) -> None: def _on_recordings_command(self, camera_name: str, payload: str) -> None:
@ -273,13 +286,12 @@ class Dispatcher:
if not record_settings.enabled: if not record_settings.enabled:
logger.info(f"Turning on recordings for {camera_name}") logger.info(f"Turning on recordings for {camera_name}")
record_settings.enabled = True record_settings.enabled = True
self.feature_metrics[camera_name]["record_enabled"].value = True
elif payload == "OFF": elif payload == "OFF":
if self.feature_metrics[camera_name]["record_enabled"].value: if record_settings.enabled:
logger.info(f"Turning off recordings for {camera_name}") logger.info(f"Turning off recordings for {camera_name}")
record_settings.enabled = False record_settings.enabled = False
self.feature_metrics[camera_name]["record_enabled"].value = False
self.config_updater.publish(f"config/record/{camera_name}", record_settings)
self.publish(f"{camera_name}/recordings/state", payload, retain=True) self.publish(f"{camera_name}/recordings/state", payload, retain=True)
def _on_snapshots_command(self, camera_name: str, payload: str) -> None: def _on_snapshots_command(self, camera_name: str, payload: str) -> None:
@ -317,17 +329,16 @@ class Dispatcher:
birdseye_settings = self.config.cameras[camera_name].birdseye birdseye_settings = self.config.cameras[camera_name].birdseye
if payload == "ON": if payload == "ON":
if not self.camera_metrics[camera_name]["birdseye_enabled"].value: if not birdseye_settings.enabled:
logger.info(f"Turning on birdseye for {camera_name}") logger.info(f"Turning on birdseye for {camera_name}")
self.camera_metrics[camera_name]["birdseye_enabled"].value = True
birdseye_settings.enabled = True birdseye_settings.enabled = True
elif payload == "OFF": elif payload == "OFF":
if self.camera_metrics[camera_name]["birdseye_enabled"].value: if birdseye_settings.enabled:
logger.info(f"Turning off birdseye for {camera_name}") logger.info(f"Turning off birdseye for {camera_name}")
self.camera_metrics[camera_name]["birdseye_enabled"].value = False
birdseye_settings.enabled = False birdseye_settings.enabled = False
self.config_updater.publish(f"config/birdseye/{camera_name}", birdseye_settings)
self.publish(f"{camera_name}/birdseye/state", payload, retain=True) self.publish(f"{camera_name}/birdseye/state", payload, retain=True)
def _on_birdseye_mode_command(self, camera_name: str, payload: str) -> None: def _on_birdseye_mode_command(self, camera_name: str, payload: str) -> None:
@ -337,17 +348,16 @@ class Dispatcher:
logger.info(f"Invalid birdseye_mode command: {payload}") logger.info(f"Invalid birdseye_mode command: {payload}")
return return
-        birdseye_config = self.config.cameras[camera_name].birdseye
-        if not birdseye_config.enabled:
+        birdseye_settings = self.config.cameras[camera_name].birdseye
+
+        if not birdseye_settings.enabled:
             logger.info(f"Birdseye mode not enabled for {camera_name}")
             return
-        new_birdseye_mode = BirdseyeModeEnum(payload.lower())
-        logger.info(f"Setting birdseye mode for {camera_name} to {new_birdseye_mode}")
-
-        # update the metric (need the mode converted to an int)
-        self.camera_metrics[camera_name][
-            "birdseye_mode"
-        ].value = BirdseyeModeEnum.get_index(new_birdseye_mode)
+        birdseye_settings.mode = BirdseyeModeEnum(payload.lower())
+        logger.info(
+            f"Setting birdseye mode for {camera_name} to {birdseye_settings.mode}"
+        )
+        self.config_updater.publish(f"config/birdseye/{camera_name}", birdseye_settings)
         self.publish(f"{camera_name}/birdseye_mode/state", payload, retain=True)

View File

@ -1,16 +1,22 @@
"""Facilitates communication between processes."""
import multiprocessing as mp import multiprocessing as mp
import queue
import threading import threading
from multiprocessing import Queue
from multiprocessing.synchronize import Event as MpEvent from multiprocessing.synchronize import Event as MpEvent
from typing import Callable from typing import Callable
import zmq
from frigate.comms.dispatcher import Communicator from frigate.comms.dispatcher import Communicator
SOCKET_REP_REQ = "ipc:///tmp/cache/comms"
class InterProcessCommunicator(Communicator): class InterProcessCommunicator(Communicator):
def __init__(self, queue: Queue) -> None: def __init__(self) -> None:
self.queue = queue self.context = zmq.Context()
self.socket = self.context.socket(zmq.REP)
self.socket.bind(SOCKET_REP_REQ)
self.stop_event: MpEvent = mp.Event() self.stop_event: MpEvent = mp.Event()
def publish(self, topic: str, payload: str, retain: bool) -> None: def publish(self, topic: str, payload: str, retain: bool) -> None:
@ -24,16 +30,44 @@ class InterProcessCommunicator(Communicator):
def read(self) -> None: def read(self) -> None:
while not self.stop_event.is_set(): while not self.stop_event.is_set():
-            try:
-                (
-                    topic,
-                    value,
-                ) = self.queue.get(True, 1)
-            except queue.Empty:
-                continue
-
-            self._dispatcher(topic, value)
+            while True:  # load all messages that are queued
+                has_message, _, _ = zmq.select([self.socket], [], [], 1)
+
+                if not has_message:
+                    break
+
+                try:
+                    (topic, value) = self.socket.recv_pyobj(flags=zmq.NOBLOCK)
+
+                    response = self._dispatcher(topic, value)
+
+                    if response is not None:
+                        self.socket.send_pyobj(response)
+                    else:
+                        self.socket.send_pyobj([])
+                except zmq.ZMQError:
+                    break
def stop(self) -> None: def stop(self) -> None:
self.stop_event.set() self.stop_event.set()
self.reader_thread.join() self.reader_thread.join()
self.socket.close()
self.context.destroy()
class InterProcessRequestor:
"""Simplifies sending data to InterProcessCommunicator and getting a reply."""
def __init__(self) -> None:
self.context = zmq.Context()
self.socket = self.context.socket(zmq.REQ)
self.socket.connect(SOCKET_REP_REQ)
def send_data(self, topic: str, data: any) -> any:
"""Sends data and then waits for reply."""
self.socket.send_pyobj((topic, data))
return self.socket.recv_pyobj()
def stop(self) -> None:
self.socket.close()
self.context.destroy()
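
A sketch of the request/reply side, assuming the InterProcessCommunicator above is polling read() in the main process while a worker holds an InterProcessRequestor; recordings_batch is a placeholder value.

from frigate.const import INSERT_MANY_RECORDINGS, REQUEST_REGION_GRID

requestor = InterProcessRequestor()

# ask the main process to compute a region grid and wait for the result
grid = requestor.send_data(REQUEST_REGION_GRID, "front_door")

# topics without a meaningful reply come back as an empty list
requestor.send_data(INSERT_MANY_RECORDINGS, recordings_batch)

requestor.stop()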

View File

@ -3,6 +3,7 @@ import threading
from typing import Any, Callable from typing import Any, Callable
import paho.mqtt.client as mqtt import paho.mqtt.client as mqtt
from paho.mqtt.enums import CallbackAPIVersion
from frigate.comms.dispatcher import Communicator from frigate.comms.dispatcher import Communicator
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
@ -96,9 +97,11 @@ class MqttClient(Communicator): # type: ignore[misc]
) )
self.publish( self.publish(
f"{camera_name}/birdseye_mode/state", f"{camera_name}/birdseye_mode/state",
camera.birdseye.mode.value.upper() (
if camera.birdseye.enabled camera.birdseye.mode.value.upper()
else "OFF", if camera.birdseye.enabled
else "OFF"
),
retain=True, retain=True,
) )
@ -117,25 +120,26 @@ class MqttClient(Communicator): # type: ignore[misc]
client: mqtt.Client, client: mqtt.Client,
userdata: Any, userdata: Any,
flags: Any, flags: Any,
rc: mqtt.ReasonCodes, reason_code: mqtt.ReasonCode,
properties: Any,
) -> None: ) -> None:
"""Mqtt connection callback.""" """Mqtt connection callback."""
threading.current_thread().name = "mqtt" threading.current_thread().name = "mqtt"
if rc != 0: if reason_code != 0:
if rc == 3: if reason_code == "Server Unavailable":
logger.error( logger.error(
"Unable to connect to MQTT server: MQTT Server unavailable" "Unable to connect to MQTT server: MQTT Server unavailable"
) )
elif rc == 4: elif reason_code == "Bad user name or password":
logger.error( logger.error(
"Unable to connect to MQTT server: MQTT Bad username or password" "Unable to connect to MQTT server: MQTT Bad username or password"
) )
elif rc == 5: elif reason_code == "Not authorized":
logger.error("Unable to connect to MQTT server: MQTT Not authorized") logger.error("Unable to connect to MQTT server: MQTT Not authorized")
else: else:
logger.error( logger.error(
"Unable to connect to MQTT server: Connection refused. Error code: " "Unable to connect to MQTT server: Connection refused. Error code: "
+ str(rc) + reason_code.getName()
) )
self.connected = True self.connected = True
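
The reason-code handling above targets paho-mqtt 2.x's VERSION2 callback API, where the connect callback receives a ReasonCode object and a properties argument instead of the old integer rc. A minimal sketch outside of Frigate (the broker address is a placeholder):

import paho.mqtt.client as mqtt
from paho.mqtt.enums import CallbackAPIVersion

def on_connect(client, userdata, flags, reason_code, properties):
    # reason_code is a ReasonCode object; compare against names or check is_failure
    if reason_code.is_failure:
        print(f"connect failed: {reason_code}")

client = mqtt.Client(
    callback_api_version=CallbackAPIVersion.VERSION2,
    client_id="example",
)
client.on_connect = on_connect
client.connect("broker.example.com", 1883)
client.loop_start()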
@ -144,7 +148,12 @@ class MqttClient(Communicator): # type: ignore[misc]
self._set_initial_topics() self._set_initial_topics()
def _on_disconnect( def _on_disconnect(
self, client: mqtt.Client, userdata: Any, flags: Any, rc: mqtt self,
client: mqtt.Client,
userdata: Any,
flags: Any,
reason_code: mqtt.ReasonCode,
properties: Any,
) -> None: ) -> None:
"""Mqtt disconnection callback.""" """Mqtt disconnection callback."""
self.connected = False self.connected = False
@ -152,7 +161,10 @@ class MqttClient(Communicator): # type: ignore[misc]
def _start(self) -> None: def _start(self) -> None:
"""Start mqtt client.""" """Start mqtt client."""
self.client = mqtt.Client(client_id=self.mqtt_config.client_id) self.client = mqtt.Client(
callback_api_version=CallbackAPIVersion.VERSION2,
client_id=self.mqtt_config.client_id,
)
self.client.on_connect = self._on_connect self.client.on_connect = self._on_connect
self.client.will_set( self.client.will_set(
self.mqtt_config.topic_prefix + "/available", self.mqtt_config.topic_prefix + "/available",

View File

@ -38,6 +38,7 @@ class WebSocketClient(Communicator): # type: ignore[misc]
def __init__(self, config: FrigateConfig) -> None: def __init__(self, config: FrigateConfig) -> None:
self.config = config self.config = config
self.websocket_server = None
def subscribe(self, receiver: Callable) -> None: def subscribe(self, receiver: Callable) -> None:
self._dispatcher = receiver self._dispatcher = receiver
@ -98,6 +99,10 @@ class WebSocketClient(Communicator): # type: ignore[misc]
logger.debug(f"payload for {topic} wasn't text. Skipping...") logger.debug(f"payload for {topic} wasn't text. Skipping...")
return return
if self.websocket_server is None:
logger.debug("Skipping message, websocket not connected yet")
return
try: try:
self.websocket_server.manager.broadcast(ws_message) self.websocket_server.manager.broadcast(ws_message)
except ConnectionResetError: except ConnectionResetError:

View File

@ -6,11 +6,19 @@ import logging
import os import os
from enum import Enum from enum import Enum
from pathlib import Path from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union from typing import Any, Dict, List, Optional, Tuple, Union
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
import numpy as np import numpy as np
from pydantic import BaseModel, Extra, Field, parse_obj_as, validator from pydantic import (
BaseModel,
ConfigDict,
Field,
TypeAdapter,
ValidationInfo,
field_serializer,
field_validator,
)
from pydantic.fields import PrivateAttr from pydantic.fields import PrivateAttr
from frigate.const import ( from frigate.const import (
@ -50,7 +58,7 @@ DEFAULT_TIME_FORMAT = "%m/%d/%Y %H:%M:%S"
FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")} FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}
# read docker secret files as env vars too # read docker secret files as env vars too
if os.path.isdir("/run/secrets"): if os.path.isdir("/run/secrets") and os.access("/run/secrets", os.R_OK):
for secret_file in os.listdir("/run/secrets"): for secret_file in os.listdir("/run/secrets"):
if secret_file.startswith("FRIGATE_"): if secret_file.startswith("FRIGATE_"):
FRIGATE_ENV_VARS[secret_file] = Path( FRIGATE_ENV_VARS[secret_file] = Path(
@ -58,6 +66,7 @@ if os.path.isdir("/run/secrets"):
).read_text() ).read_text()
DEFAULT_TRACKED_OBJECTS = ["person"] DEFAULT_TRACKED_OBJECTS = ["person"]
DEFAULT_ALERT_OBJECTS = ["person", "car"]
DEFAULT_LISTEN_AUDIO = ["bark", "fire_alarm", "scream", "speech", "yell"] DEFAULT_LISTEN_AUDIO = ["bark", "fire_alarm", "scream", "speech", "yell"]
DEFAULT_DETECTORS = {"cpu": {"type": "cpu"}} DEFAULT_DETECTORS = {"cpu": {"type": "cpu"}}
DEFAULT_DETECT_DIMENSIONS = {"width": 1280, "height": 720} DEFAULT_DETECT_DIMENSIONS = {"width": 1280, "height": 720}
@ -65,8 +74,7 @@ DEFAULT_TIME_LAPSE_FFMPEG_ARGS = "-vf setpts=0.04*PTS -r 30"
class FrigateBaseModel(BaseModel): class FrigateBaseModel(BaseModel):
class Config: model_config = ConfigDict(extra="forbid", protected_namespaces=())
extra = Extra.forbid
class LiveModeEnum(str, Enum): class LiveModeEnum(str, Enum):
@ -92,7 +100,7 @@ class UIConfig(FrigateBaseModel):
live_mode: LiveModeEnum = Field( live_mode: LiveModeEnum = Field(
default=LiveModeEnum.mse, title="Default Live Mode." default=LiveModeEnum.mse, title="Default Live Mode."
) )
timezone: Optional[str] = Field(title="Override UI timezone.") timezone: Optional[str] = Field(default=None, title="Override UI timezone.")
use_experimental: bool = Field(default=False, title="Experimental UI") use_experimental: bool = Field(default=False, title="Experimental UI")
time_format: TimeFormatEnum = Field( time_format: TimeFormatEnum = Field(
default=TimeFormatEnum.browser, title="Override UI time format." default=TimeFormatEnum.browser, title="Override UI time format."
@ -134,16 +142,17 @@ class MqttConfig(FrigateBaseModel):
topic_prefix: str = Field(default="frigate", title="MQTT Topic Prefix") topic_prefix: str = Field(default="frigate", title="MQTT Topic Prefix")
client_id: str = Field(default="frigate", title="MQTT Client ID") client_id: str = Field(default="frigate", title="MQTT Client ID")
stats_interval: int = Field(default=60, title="MQTT Camera Stats Interval") stats_interval: int = Field(default=60, title="MQTT Camera Stats Interval")
user: Optional[str] = Field(title="MQTT Username") user: Optional[str] = Field(None, title="MQTT Username")
password: Optional[str] = Field(title="MQTT Password") password: Optional[str] = Field(None, title="MQTT Password", validate_default=True)
tls_ca_certs: Optional[str] = Field(title="MQTT TLS CA Certificates") tls_ca_certs: Optional[str] = Field(None, title="MQTT TLS CA Certificates")
tls_client_cert: Optional[str] = Field(title="MQTT TLS Client Certificate") tls_client_cert: Optional[str] = Field(None, title="MQTT TLS Client Certificate")
tls_client_key: Optional[str] = Field(title="MQTT TLS Client Key") tls_client_key: Optional[str] = Field(None, title="MQTT TLS Client Key")
tls_insecure: Optional[bool] = Field(title="MQTT TLS Insecure") tls_insecure: Optional[bool] = Field(None, title="MQTT TLS Insecure")
@validator("password", pre=True, always=True) @field_validator("password")
def validate_password(cls, v, values): def user_requires_pass(cls, v, info: ValidationInfo):
if (v is None) != (values["user"] is None): print(f"doing a check where {v} is None and {info.data['user']} is None")
if (v is None) != (info.data["user"] is None):
raise ValueError("Password must be provided with username.") raise ValueError("Password must be provided with username.")
return v return v
@ -185,18 +194,19 @@ class PtzAutotrackConfig(FrigateBaseModel):
title="Internal value used for PTZ movements based on the speed of your camera's motor.", title="Internal value used for PTZ movements based on the speed of your camera's motor.",
) )
enabled_in_config: Optional[bool] = Field( enabled_in_config: Optional[bool] = Field(
title="Keep track of original state of autotracking." None, title="Keep track of original state of autotracking."
) )
@validator("movement_weights", pre=True) @field_validator("movement_weights", mode="before")
@classmethod
def validate_weights(cls, v): def validate_weights(cls, v):
if v is None: if v is None:
return None return None
if isinstance(v, str): if isinstance(v, str):
weights = list(map(float, v.split(","))) weights = list(map(str, map(float, v.split(","))))
elif isinstance(v, list): elif isinstance(v, list):
weights = [float(val) for val in v] weights = [str(float(val)) for val in v]
else: else:
raise ValueError("Invalid type for movement_weights") raise ValueError("Invalid type for movement_weights")
@ -209,8 +219,8 @@ class PtzAutotrackConfig(FrigateBaseModel):
class OnvifConfig(FrigateBaseModel): class OnvifConfig(FrigateBaseModel):
host: str = Field(default="", title="Onvif Host") host: str = Field(default="", title="Onvif Host")
port: int = Field(default=8000, title="Onvif Port") port: int = Field(default=8000, title="Onvif Port")
user: Optional[str] = Field(title="Onvif Username") user: Optional[str] = Field(None, title="Onvif Username")
password: Optional[str] = Field(title="Onvif Password") password: Optional[str] = Field(None, title="Onvif Password")
autotracking: PtzAutotrackConfig = Field( autotracking: PtzAutotrackConfig = Field(
default_factory=PtzAutotrackConfig, default_factory=PtzAutotrackConfig,
title="PTZ auto tracking config.", title="PTZ auto tracking config.",
@ -241,6 +251,7 @@ class EventsConfig(FrigateBaseModel):
title="List of required zones to be entered in order to save the event.", title="List of required zones to be entered in order to save the event.",
) )
objects: Optional[List[str]] = Field( objects: Optional[List[str]] = Field(
None,
title="List of objects to be detected in order to save the event.", title="List of objects to be detected in order to save the event.",
) )
retain: RetainConfig = Field( retain: RetainConfig = Field(
@ -295,11 +306,12 @@ class RecordConfig(FrigateBaseModel):
default_factory=RecordPreviewConfig, title="Recording Preview Config" default_factory=RecordPreviewConfig, title="Recording Preview Config"
) )
enabled_in_config: Optional[bool] = Field( enabled_in_config: Optional[bool] = Field(
title="Keep track of original state of recording." None, title="Keep track of original state of recording."
) )
class MotionConfig(FrigateBaseModel): class MotionConfig(FrigateBaseModel):
enabled: bool = Field(default=True, title="Enable motion on all cameras.")
threshold: int = Field( threshold: int = Field(
default=30, default=30,
title="Motion detection threshold (1-255).", title="Motion detection threshold (1-255).",
@ -321,6 +333,18 @@ class MotionConfig(FrigateBaseModel):
default=30, default=30,
title="Delay for updating MQTT with no motion detected.", title="Delay for updating MQTT with no motion detected.",
) )
enabled_in_config: Optional[bool] = Field(
None, title="Keep track of original state of motion detection."
)
raw_mask: Union[str, List[str]] = ""
@field_serializer("mask", when_used="json")
def serialize_mask(self, value: Any, info):
return self.raw_mask
@field_serializer("raw_mask", when_used="json")
def serialize_raw_mask(self, value: Any, info):
return None
class RuntimeMotionConfig(MotionConfig): class RuntimeMotionConfig(MotionConfig):
@ -343,19 +367,25 @@ class RuntimeMotionConfig(MotionConfig):
super().__init__(**config) super().__init__(**config)
def dict(self, **kwargs): def dict(self, **kwargs):
ret = super().dict(**kwargs) ret = super().model_dump(**kwargs)
if "mask" in ret: if "mask" in ret:
ret["mask"] = ret["raw_mask"] ret["mask"] = ret["raw_mask"]
ret.pop("raw_mask") ret.pop("raw_mask")
return ret return ret
class Config: @field_serializer("mask", when_used="json")
arbitrary_types_allowed = True def serialize_mask(self, value: Any, info):
extra = Extra.ignore return self.raw_mask
@field_serializer("raw_mask", when_used="json")
def serialize_raw_mask(self, value: Any, info):
return None
model_config = ConfigDict(arbitrary_types_allowed=True, extra="ignore")
class StationaryMaxFramesConfig(FrigateBaseModel): class StationaryMaxFramesConfig(FrigateBaseModel):
default: Optional[int] = Field(title="Default max frames.", ge=1) default: Optional[int] = Field(None, title="Default max frames.", ge=1)
objects: Dict[str, int] = Field( objects: Dict[str, int] = Field(
default_factory=dict, title="Object specific max frames." default_factory=dict, title="Object specific max frames."
) )
@ -363,10 +393,12 @@ class StationaryMaxFramesConfig(FrigateBaseModel):
class StationaryConfig(FrigateBaseModel): class StationaryConfig(FrigateBaseModel):
interval: Optional[int] = Field( interval: Optional[int] = Field(
None,
title="Frame interval for checking stationary objects.", title="Frame interval for checking stationary objects.",
gt=0, gt=0,
) )
threshold: Optional[int] = Field( threshold: Optional[int] = Field(
None,
title="Number of frames without a position change for an object to be considered stationary", title="Number of frames without a position change for an object to be considered stationary",
ge=1, ge=1,
) )
@ -377,17 +409,21 @@ class StationaryConfig(FrigateBaseModel):
class DetectConfig(FrigateBaseModel): class DetectConfig(FrigateBaseModel):
height: Optional[int] = Field(title="Height of the stream for the detect role.") height: Optional[int] = Field(
width: Optional[int] = Field(title="Width of the stream for the detect role.") None, title="Height of the stream for the detect role."
)
width: Optional[int] = Field(None, title="Width of the stream for the detect role.")
fps: int = Field( fps: int = Field(
default=5, title="Number of frames per second to process through detection." default=5, title="Number of frames per second to process through detection."
) )
enabled: bool = Field(default=True, title="Detection Enabled.") enabled: bool = Field(default=True, title="Detection Enabled.")
min_initialized: Optional[int] = Field( min_initialized: Optional[int] = Field(
title="Minimum number of consecutive hits for an object to be initialized by the tracker." None,
title="Minimum number of consecutive hits for an object to be initialized by the tracker.",
) )
max_disappeared: Optional[int] = Field( max_disappeared: Optional[int] = Field(
title="Maximum number of frames the object can dissapear before detection ends." None,
title="Maximum number of frames the object can dissapear before detection ends.",
) )
stationary: StationaryConfig = Field( stationary: StationaryConfig = Field(
default_factory=StationaryConfig, default_factory=StationaryConfig,
@ -421,8 +457,18 @@ class FilterConfig(FrigateBaseModel):
default=0.5, title="Minimum detection confidence for object to be counted." default=0.5, title="Minimum detection confidence for object to be counted."
) )
mask: Optional[Union[str, List[str]]] = Field( mask: Optional[Union[str, List[str]]] = Field(
None,
title="Detection area polygon mask for this filter configuration.", title="Detection area polygon mask for this filter configuration.",
) )
raw_mask: Union[str, List[str]] = ""
@field_serializer("mask", when_used="json")
def serialize_mask(self, value: Any, info):
return self.raw_mask
@field_serializer("raw_mask", when_used="json")
def serialize_raw_mask(self, value: Any, info):
return None
class AudioFilterConfig(FrigateBaseModel): class AudioFilterConfig(FrigateBaseModel):
@ -435,8 +481,8 @@ class AudioFilterConfig(FrigateBaseModel):
class RuntimeFilterConfig(FilterConfig): class RuntimeFilterConfig(FilterConfig):
mask: Optional[np.ndarray] mask: Optional[np.ndarray] = None
raw_mask: Optional[Union[str, List[str]]] raw_mask: Optional[Union[str, List[str]]] = None
def __init__(self, **config): def __init__(self, **config):
mask = config.get("mask") mask = config.get("mask")
@ -448,15 +494,13 @@ class RuntimeFilterConfig(FilterConfig):
super().__init__(**config) super().__init__(**config)
def dict(self, **kwargs): def dict(self, **kwargs):
ret = super().dict(**kwargs) ret = super().model_dump(**kwargs)
if "mask" in ret: if "mask" in ret:
ret["mask"] = ret["raw_mask"] ret["mask"] = ret["raw_mask"]
ret.pop("raw_mask") ret.pop("raw_mask")
return ret return ret
class Config: model_config = ConfigDict(arbitrary_types_allowed=True, extra="ignore")
arbitrary_types_allowed = True
extra = Extra.ignore
# this uses the base model because the color is an extra attribute # this uses the base model because the color is an extra attribute
@ -508,6 +552,9 @@ class ZoneConfig(BaseModel):
class ObjectConfig(FrigateBaseModel): class ObjectConfig(FrigateBaseModel):
track: List[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.") track: List[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
alert: List[str] = Field(
default=DEFAULT_ALERT_OBJECTS, title="Objects to create alerts for."
)
filters: Dict[str, FilterConfig] = Field(default={}, title="Object filters.") filters: Dict[str, FilterConfig] = Field(default={}, title="Object filters.")
mask: Union[str, List[str]] = Field(default="", title="Object mask.") mask: Union[str, List[str]] = Field(default="", title="Object mask.")
@ -523,9 +570,11 @@ class AudioConfig(FrigateBaseModel):
listen: List[str] = Field( listen: List[str] = Field(
default=DEFAULT_LISTEN_AUDIO, title="Audio to listen for." default=DEFAULT_LISTEN_AUDIO, title="Audio to listen for."
) )
filters: Optional[Dict[str, AudioFilterConfig]] = Field(title="Audio filters.") filters: Optional[Dict[str, AudioFilterConfig]] = Field(
None, title="Audio filters."
)
enabled_in_config: Optional[bool] = Field( enabled_in_config: Optional[bool] = Field(
title="Keep track of original state of audio detection." None, title="Keep track of original state of audio detection."
) )
num_threads: int = Field(default=2, title="Number of detection threads", ge=1) num_threads: int = Field(default=2, title="Number of detection threads", ge=1)
@ -544,6 +593,13 @@ class BirdseyeModeEnum(str, Enum):
return list(cls)[index] return list(cls)[index]
class BirdseyeLayoutConfig(FrigateBaseModel):
scaling_factor: float = Field(
default=2.0, title="Birdseye Scaling Factor", ge=1.0, le=5.0
)
max_cameras: Optional[int] = Field(default=None, title="Max cameras")
class BirdseyeConfig(FrigateBaseModel): class BirdseyeConfig(FrigateBaseModel):
enabled: bool = Field(default=True, title="Enable birdseye view.") enabled: bool = Field(default=True, title="Enable birdseye view.")
restream: bool = Field(default=False, title="Restream birdseye via RTSP.") restream: bool = Field(default=False, title="Restream birdseye via RTSP.")
@ -555,9 +611,15 @@ class BirdseyeConfig(FrigateBaseModel):
ge=1, ge=1,
le=31, le=31,
) )
inactivity_threshold: int = Field(
default=30, title="Birdseye Inactivity Threshold", gt=0
)
mode: BirdseyeModeEnum = Field( mode: BirdseyeModeEnum = Field(
default=BirdseyeModeEnum.objects, title="Tracking mode." default=BirdseyeModeEnum.objects, title="Tracking mode."
) )
layout: BirdseyeLayoutConfig = Field(
default_factory=BirdseyeLayoutConfig, title="Birdseye Layout Config"
)
# uses BaseModel because some global attributes are not available at the camera level # uses BaseModel because some global attributes are not available at the camera level
@ -593,6 +655,7 @@ class FfmpegOutputArgsConfig(FrigateBaseModel):
default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT, default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT,
title="Record role FFmpeg output arguments.", title="Record role FFmpeg output arguments.",
) )
_force_record_hvc1: bool = PrivateAttr(default=False)
class FfmpegConfig(FrigateBaseModel): class FfmpegConfig(FrigateBaseModel):
@ -638,7 +701,8 @@ class CameraInput(FrigateBaseModel):
class CameraFfmpegConfig(FfmpegConfig): class CameraFfmpegConfig(FfmpegConfig):
inputs: List[CameraInput] = Field(title="Camera inputs.") inputs: List[CameraInput] = Field(title="Camera inputs.")
@validator("inputs") @field_validator("inputs")
@classmethod
def validate_roles(cls, v): def validate_roles(cls, v):
roles = [role for i in v for role in i.roles] roles = [role for i in v for role in i.roles]
roles_set = set(roles) roles_set = set(roles)
@ -668,7 +732,7 @@ class SnapshotsConfig(FrigateBaseModel):
default_factory=list, default_factory=list,
title="List of required zones to be entered in order to save a snapshot.", title="List of required zones to be entered in order to save a snapshot.",
) )
height: Optional[int] = Field(title="Snapshot image height.") height: Optional[int] = Field(None, title="Snapshot image height.")
retain: RetainConfig = Field( retain: RetainConfig = Field(
default_factory=RetainConfig, title="Snapshot retention." default_factory=RetainConfig, title="Snapshot retention."
) )
@ -705,7 +769,7 @@ class TimestampStyleConfig(FrigateBaseModel):
format: str = Field(default=DEFAULT_TIME_FORMAT, title="Timestamp format.") format: str = Field(default=DEFAULT_TIME_FORMAT, title="Timestamp format.")
color: ColorConfig = Field(default_factory=ColorConfig, title="Timestamp color.") color: ColorConfig = Field(default_factory=ColorConfig, title="Timestamp color.")
thickness: int = Field(default=2, title="Timestamp thickness.") thickness: int = Field(default=2, title="Timestamp thickness.")
effect: Optional[TimestampEffectEnum] = Field(title="Timestamp effect.") effect: Optional[TimestampEffectEnum] = Field(None, title="Timestamp effect.")
class CameraMqttConfig(FrigateBaseModel): class CameraMqttConfig(FrigateBaseModel):
@ -733,8 +797,7 @@ class CameraLiveConfig(FrigateBaseModel):
class RestreamConfig(BaseModel): class RestreamConfig(BaseModel):
class Config: model_config = ConfigDict(extra="allow")
extra = Extra.allow
class CameraUiConfig(FrigateBaseModel): class CameraUiConfig(FrigateBaseModel):
@ -745,7 +808,7 @@ class CameraUiConfig(FrigateBaseModel):
class CameraConfig(FrigateBaseModel): class CameraConfig(FrigateBaseModel):
name: Optional[str] = Field(title="Camera name.", regex=REGEX_CAMERA_NAME) name: Optional[str] = Field(None, title="Camera name.", pattern=REGEX_CAMERA_NAME)
enabled: bool = Field(default=True, title="Enable camera.") enabled: bool = Field(default=True, title="Enable camera.")
ffmpeg: CameraFfmpegConfig = Field(title="FFmpeg configuration for the camera.") ffmpeg: CameraFfmpegConfig = Field(title="FFmpeg configuration for the camera.")
best_image_timeout: int = Field( best_image_timeout: int = Field(
@ -753,6 +816,7 @@ class CameraConfig(FrigateBaseModel):
title="How long to wait for the image with the highest confidence score.", title="How long to wait for the image with the highest confidence score.",
) )
webui_url: Optional[str] = Field( webui_url: Optional[str] = Field(
None,
title="URL to visit the camera directly from system page", title="URL to visit the camera directly from system page",
) )
zones: Dict[str, ZoneConfig] = Field( zones: Dict[str, ZoneConfig] = Field(
@ -776,7 +840,9 @@ class CameraConfig(FrigateBaseModel):
audio: AudioConfig = Field( audio: AudioConfig = Field(
default_factory=AudioConfig, title="Audio events configuration." default_factory=AudioConfig, title="Audio events configuration."
) )
motion: Optional[MotionConfig] = Field(title="Motion detection configuration.") motion: Optional[MotionConfig] = Field(
None, title="Motion detection configuration."
)
detect: DetectConfig = Field( detect: DetectConfig = Field(
default_factory=DetectConfig, title="Object detection configuration." default_factory=DetectConfig, title="Object detection configuration."
) )
@ -857,7 +923,10 @@ class CameraConfig(FrigateBaseModel):
if "record" in ffmpeg_input.roles and self.record.enabled: if "record" in ffmpeg_input.roles and self.record.enabled:
record_args = get_ffmpeg_arg_list( record_args = get_ffmpeg_arg_list(
parse_preset_output_record(self.ffmpeg.output_args.record) parse_preset_output_record(
self.ffmpeg.output_args.record,
self.ffmpeg.output_args._force_record_hvc1,
)
or self.ffmpeg.output_args.record or self.ffmpeg.output_args.record
) )
@ -874,6 +943,10 @@ class CameraConfig(FrigateBaseModel):
global_args = get_ffmpeg_arg_list( global_args = get_ffmpeg_arg_list(
ffmpeg_input.global_args or self.ffmpeg.global_args ffmpeg_input.global_args or self.ffmpeg.global_args
) )
camera_arg = (
self.ffmpeg.hwaccel_args if self.ffmpeg.hwaccel_args != "auto" else None
)
hwaccel_args = get_ffmpeg_arg_list( hwaccel_args = get_ffmpeg_arg_list(
parse_preset_hardware_acceleration_decode( parse_preset_hardware_acceleration_decode(
ffmpeg_input.hwaccel_args, ffmpeg_input.hwaccel_args,
@ -883,12 +956,13 @@ class CameraConfig(FrigateBaseModel):
) )
or ffmpeg_input.hwaccel_args or ffmpeg_input.hwaccel_args
or parse_preset_hardware_acceleration_decode( or parse_preset_hardware_acceleration_decode(
self.ffmpeg.hwaccel_args, camera_arg,
self.detect.fps, self.detect.fps,
self.detect.width, self.detect.width,
self.detect.height, self.detect.height,
) )
or self.ffmpeg.hwaccel_args or camera_arg
or []
) )
input_args = get_ffmpeg_arg_list( input_args = get_ffmpeg_arg_list(
parse_preset_input(ffmpeg_input.input_args, self.detect.fps) parse_preset_input(ffmpeg_input.input_args, self.detect.fps)
@ -953,7 +1027,7 @@ def verify_valid_live_stream_name(
"""Verify that a restream exists to use for live view.""" """Verify that a restream exists to use for live view."""
if ( if (
camera_config.live.stream_name camera_config.live.stream_name
not in frigate_config.go2rtc.dict().get("streams", {}).keys() not in frigate_config.go2rtc.model_dump().get("streams", {}).keys()
): ):
return ValueError( return ValueError(
f"No restream with name {camera_config.live.stream_name} exists for camera {camera_config.name}." f"No restream with name {camera_config.live.stream_name} exists for camera {camera_config.name}."
@ -1023,6 +1097,14 @@ def verify_autotrack_zones(camera_config: CameraConfig) -> ValueError | None:
) )
def verify_motion_and_detect(camera_config: CameraConfig) -> ValueError | None:
"""Verify that required_zones are specified when autotracking is enabled."""
if camera_config.detect.enabled and not camera_config.motion.enabled:
raise ValueError(
f"Camera {camera_config.name} has motion detection disabled and object detection enabled but object detection requires motion detection."
)
class FrigateConfig(FrigateBaseModel): class FrigateConfig(FrigateBaseModel):
mqtt: MqttConfig = Field(title="MQTT Configuration.") mqtt: MqttConfig = Field(title="MQTT Configuration.")
database: DatabaseConfig = Field( database: DatabaseConfig = Field(
@ -1070,7 +1152,7 @@ class FrigateConfig(FrigateBaseModel):
default_factory=AudioConfig, title="Global Audio events configuration." default_factory=AudioConfig, title="Global Audio events configuration."
) )
motion: Optional[MotionConfig] = Field( motion: Optional[MotionConfig] = Field(
title="Global motion detection configuration." None, title="Global motion detection configuration."
) )
detect: DetectConfig = Field( detect: DetectConfig = Field(
default_factory=DetectConfig, title="Global object tracking configuration." default_factory=DetectConfig, title="Global object tracking configuration."
@ -1083,7 +1165,7 @@ class FrigateConfig(FrigateBaseModel):
def runtime_config(self, plus_api: PlusApi = None) -> FrigateConfig: def runtime_config(self, plus_api: PlusApi = None) -> FrigateConfig:
"""Merge camera config with globals.""" """Merge camera config with globals."""
config = self.copy(deep=True) config = self.model_copy(deep=True)
# MQTT user/password substitutions # MQTT user/password substitutions
if config.mqtt.user or config.mqtt.password: if config.mqtt.user or config.mqtt.password:
@ -1102,7 +1184,7 @@ class FrigateConfig(FrigateBaseModel):
config.ffmpeg.hwaccel_args = auto_detect_hwaccel() config.ffmpeg.hwaccel_args = auto_detect_hwaccel()
# Global config to propagate down to camera level # Global config to propagate down to camera level
global_config = config.dict( global_config = config.model_dump(
include={ include={
"audio": ..., "audio": ...,
"birdseye": ..., "birdseye": ...,
@ -1119,42 +1201,55 @@ class FrigateConfig(FrigateBaseModel):
) )
for name, camera in config.cameras.items(): for name, camera in config.cameras.items():
merged_config = deep_merge(camera.dict(exclude_unset=True), global_config) merged_config = deep_merge(
camera_config: CameraConfig = CameraConfig.parse_obj( camera.model_dump(exclude_unset=True), global_config
)
camera_config: CameraConfig = CameraConfig.model_validate(
{"name": name, **merged_config} {"name": name, **merged_config}
) )
if camera_config.ffmpeg.hwaccel_args == "auto": if camera_config.ffmpeg.hwaccel_args == "auto":
camera_config.ffmpeg.hwaccel_args = config.ffmpeg.hwaccel_args camera_config.ffmpeg.hwaccel_args = config.ffmpeg.hwaccel_args
-            if (
-                camera_config.detect.height is None
-                or camera_config.detect.width is None
-            ):
-                for input in camera_config.ffmpeg.inputs:
-                    if "detect" in input.roles:
-                        stream_info = {"width": 0, "height": 0}
-                        try:
-                            stream_info = asyncio.run(get_video_properties(input.path))
-                        except Exception:
-                            logger.warn(
-                                f"Error detecting stream resolution automatically for {input.path} Applying default values."
-                            )
-                            stream_info = {"width": 0, "height": 0}
-
-                        camera_config.detect.width = (
-                            stream_info["width"]
-                            if stream_info.get("width")
-                            else DEFAULT_DETECT_DIMENSIONS["width"]
-                        )
-                        camera_config.detect.height = (
-                            stream_info["height"]
-                            if stream_info.get("height")
-                            else DEFAULT_DETECT_DIMENSIONS["height"]
-                        )
+            for input in camera_config.ffmpeg.inputs:
+                need_record_fourcc = "record" in input.roles
+                need_detect_dimensions = "detect" in input.roles and (
+                    camera_config.detect.height is None
+                    or camera_config.detect.width is None
+                )
+
+                if need_detect_dimensions or need_record_fourcc:
+                    stream_info = {"width": 0, "height": 0, "fourcc": None}
+                    try:
+                        stream_info = asyncio.run(get_video_properties(input.path))
+                    except Exception:
+                        logger.warn(
+                            f"Error detecting stream parameters automatically for {input.path} Applying default values."
+                        )
+                        stream_info = {"width": 0, "height": 0, "fourcc": None}
+
+                if need_detect_dimensions:
+                    camera_config.detect.width = (
+                        stream_info["width"]
+                        if stream_info.get("width")
+                        else DEFAULT_DETECT_DIMENSIONS["width"]
+                    )
+                    camera_config.detect.height = (
+                        stream_info["height"]
+                        if stream_info.get("height")
+                        else DEFAULT_DETECT_DIMENSIONS["height"]
+                    )
if need_record_fourcc:
# Apple only supports HEVC if it is hvc1 (vs. hev1)
camera_config.ffmpeg.output_args._force_record_hvc1 = (
stream_info["fourcc"] == "hevc"
if stream_info.get("hevc")
else False
)
# Default min_initialized configuration # Default min_initialized configuration
min_initialized = camera_config.detect.fps / 2 min_initialized = int(camera_config.detect.fps / 2)
if camera_config.detect.min_initialized is None: if camera_config.detect.min_initialized is None:
camera_config.detect.min_initialized = min_initialized camera_config.detect.min_initialized = min_initialized
@ -1184,8 +1279,8 @@ class FrigateConfig(FrigateBaseModel):
**FRIGATE_ENV_VARS **FRIGATE_ENV_VARS
) )
# set config pre-value # set config pre-value
camera_config.record.enabled_in_config = camera_config.record.enabled
camera_config.audio.enabled_in_config = camera_config.audio.enabled camera_config.audio.enabled_in_config = camera_config.audio.enabled
camera_config.record.enabled_in_config = camera_config.record.enabled
camera_config.onvif.autotracking.enabled_in_config = ( camera_config.onvif.autotracking.enabled_in_config = (
camera_config.onvif.autotracking.enabled camera_config.onvif.autotracking.enabled
) )
@ -1218,7 +1313,7 @@ class FrigateConfig(FrigateBaseModel):
# Set runtime filter to create masks # Set runtime filter to create masks
camera_config.objects.filters[object] = RuntimeFilterConfig( camera_config.objects.filters[object] = RuntimeFilterConfig(
frame_shape=camera_config.frame_shape, frame_shape=camera_config.frame_shape,
**filter.dict(exclude_unset=True), **filter.model_dump(exclude_unset=True),
) )
# Convert motion configuration # Convert motion configuration
@ -1230,8 +1325,9 @@ class FrigateConfig(FrigateBaseModel):
camera_config.motion = RuntimeMotionConfig( camera_config.motion = RuntimeMotionConfig(
frame_shape=camera_config.frame_shape, frame_shape=camera_config.frame_shape,
raw_mask=camera_config.motion.mask, raw_mask=camera_config.motion.mask,
**camera_config.motion.dict(exclude_unset=True), **camera_config.motion.model_dump(exclude_unset=True),
) )
camera_config.motion.enabled_in_config = camera_config.motion.enabled
# Set live view stream if none is set # Set live view stream if none is set
if not camera_config.live.stream_name: if not camera_config.live.stream_name:
@ -1243,6 +1339,7 @@ class FrigateConfig(FrigateBaseModel):
verify_recording_segments_setup_with_reasonable_time(camera_config) verify_recording_segments_setup_with_reasonable_time(camera_config)
verify_zone_objects_are_tracked(camera_config) verify_zone_objects_are_tracked(camera_config)
verify_autotrack_zones(camera_config) verify_autotrack_zones(camera_config)
verify_motion_and_detect(camera_config)
# generate the ffmpeg commands # generate the ffmpeg commands
camera_config.create_ffmpeg_cmds() camera_config.create_ffmpeg_cmds()
@ -1258,17 +1355,21 @@ class FrigateConfig(FrigateBaseModel):
config.model.check_and_load_plus_model(plus_api) config.model.check_and_load_plus_model(plus_api)
for key, detector in config.detectors.items(): for key, detector in config.detectors.items():
detector_config: DetectorConfig = parse_obj_as(DetectorConfig, detector) adapter = TypeAdapter(DetectorConfig)
model_dict = (
detector if isinstance(detector, dict) else detector.model_dump()
)
detector_config: DetectorConfig = adapter.validate_python(model_dict)
if detector_config.model is None: if detector_config.model is None:
detector_config.model = config.model detector_config.model = config.model
else: else:
model = detector_config.model model = detector_config.model
schema = ModelConfig.schema()["properties"] schema = ModelConfig.model_json_schema()["properties"]
if ( if (
model.width != schema["width"]["default"] model.width != schema["width"]["default"]
or model.height != schema["height"]["default"] or model.height != schema["height"]["default"]
or model.labelmap_path is not None or model.labelmap_path is not None
or model.labelmap is not {} or model.labelmap
or model.input_tensor != schema["input_tensor"]["default"] or model.input_tensor != schema["input_tensor"]["default"]
or model.input_pixel_format or model.input_pixel_format
!= schema["input_pixel_format"]["default"] != schema["input_pixel_format"]["default"]
@ -1277,8 +1378,8 @@ class FrigateConfig(FrigateBaseModel):
"Customizing more than a detector model path is unsupported." "Customizing more than a detector model path is unsupported."
) )
merged_model = deep_merge( merged_model = deep_merge(
detector_config.model.dict(exclude_unset=True), detector_config.model.model_dump(exclude_unset=True),
config.model.dict(exclude_unset=True), config.model.model_dump(exclude_unset=True),
) )
if "path" not in merged_model: if "path" not in merged_model:
@ -1287,7 +1388,7 @@ class FrigateConfig(FrigateBaseModel):
elif detector_config.type == "edgetpu": elif detector_config.type == "edgetpu":
merged_model["path"] = "/edgetpu_model.tflite" merged_model["path"] = "/edgetpu_model.tflite"
detector_config.model = ModelConfig.parse_obj(merged_model) detector_config.model = ModelConfig.model_validate(merged_model)
detector_config.model.check_and_load_plus_model( detector_config.model.check_and_load_plus_model(
plus_api, detector_config.type plus_api, detector_config.type
) )
@ -1296,7 +1397,8 @@ class FrigateConfig(FrigateBaseModel):
return config return config
@validator("cameras") @field_validator("cameras")
@classmethod
def ensure_zones_and_cameras_have_different_names(cls, v: Dict[str, CameraConfig]): def ensure_zones_and_cameras_have_different_names(cls, v: Dict[str, CameraConfig]):
zones = [zone for camera in v.values() for zone in camera.zones.keys()] zones = [zone for camera in v.values() for zone in camera.zones.keys()]
for zone in zones: for zone in zones:
@ -1314,9 +1416,9 @@ class FrigateConfig(FrigateBaseModel):
elif config_file.endswith(".json"): elif config_file.endswith(".json"):
config = json.loads(raw_config) config = json.loads(raw_config)
return cls.parse_obj(config) return cls.model_validate(config)
@classmethod @classmethod
def parse_raw(cls, raw_config): def parse_raw(cls, raw_config):
config = load_config_with_no_duplicates(raw_config) config = load_config_with_no_duplicates(raw_config)
return cls.parse_obj(config) return cls.model_validate(config)
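
Most edits in this file follow the pydantic 1.x to 2.x migration; the recurring substitutions, condensed onto a toy model rather than a Frigate class, look roughly like this:

from typing import Optional
from pydantic import BaseModel, ConfigDict, ValidationInfo, field_validator

class Example(BaseModel):
    # v1: class Config: extra = Extra.forbid
    model_config = ConfigDict(extra="forbid")

    # v1 allowed Optional fields without defaults; v2 needs an explicit None
    user: Optional[str] = None
    password: Optional[str] = None

    # v1: @validator("password") with a values dict; v2 uses info.data
    @field_validator("password")
    @classmethod
    def password_requires_user(cls, v, info: ValidationInfo):
        if (v is None) != (info.data.get("user") is None):
            raise ValueError("password must be provided with user")
        return v

cfg = Example.model_validate({"user": "a", "password": "b"})  # was parse_obj()
data = cfg.model_dump(exclude_unset=True)                     # was dict()
copy = cfg.model_copy(deep=True)                              # was copy()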

View File

@ -26,6 +26,10 @@ LABEL_CONSOLIDATION_MAP = {
"face": 0.5, "face": 0.5,
} }
LABEL_CONSOLIDATION_DEFAULT = 0.9 LABEL_CONSOLIDATION_DEFAULT = 0.9
LABEL_NMS_MAP = {
"car": 0.6,
}
LABEL_NMS_DEFAULT = 0.4
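
Presumably these constants are consumed the same way the consolidation map above is, i.e. as a per-label lookup with a fallback; a small sketch of that pattern:

def nms_threshold_for(label: str) -> float:
    # use the label-specific NMS threshold when defined, otherwise the default
    return LABEL_NMS_MAP.get(label, LABEL_NMS_DEFAULT)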
# Audio Consts # Audio Consts
@ -66,6 +70,7 @@ MAX_PLAYLIST_SECONDS = 7200 # support 2 hour segments for a single playlist to
INSERT_MANY_RECORDINGS = "insert_many_recordings" INSERT_MANY_RECORDINGS = "insert_many_recordings"
INSERT_PREVIEW = "insert_preview" INSERT_PREVIEW = "insert_preview"
REQUEST_REGION_GRID = "request_region_grid" REQUEST_REGION_GRID = "request_region_grid"
UPSERT_REVIEW_SEGMENT = "upsert_review_segment"
# Autotracking # Autotracking

View File

@ -7,7 +7,7 @@ from typing import Dict, Optional, Tuple
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
import requests import requests
from pydantic import BaseModel, Extra, Field from pydantic import BaseModel, ConfigDict, Field
from pydantic.fields import PrivateAttr from pydantic.fields import PrivateAttr
from frigate.plus import PlusApi from frigate.plus import PlusApi
@ -35,8 +35,10 @@ class ModelTypeEnum(str, Enum):
class ModelConfig(BaseModel): class ModelConfig(BaseModel):
path: Optional[str] = Field(title="Custom Object detection model path.") path: Optional[str] = Field(None, title="Custom Object detection model path.")
labelmap_path: Optional[str] = Field(title="Label map for custom object detector.") labelmap_path: Optional[str] = Field(
None, title="Label map for custom object detector."
)
width: int = Field(default=320, title="Object detection model input width.") width: int = Field(default=320, title="Object detection model input width.")
height: int = Field(default=320, title="Object detection model input height.") height: int = Field(default=320, title="Object detection model input height.")
labelmap: Dict[int, str] = Field( labelmap: Dict[int, str] = Field(
@ -132,17 +134,15 @@ class ModelConfig(BaseModel):
for key, val in enumerate(enabled_labels): for key, val in enumerate(enabled_labels):
self._colormap[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3]) self._colormap[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])
class Config: model_config = ConfigDict(extra="forbid", protected_namespaces=())
extra = Extra.forbid
class BaseDetectorConfig(BaseModel): class BaseDetectorConfig(BaseModel):
# the type field must be defined in all subclasses # the type field must be defined in all subclasses
type: str = Field(default="cpu", title="Detector Type") type: str = Field(default="cpu", title="Detector Type")
model: ModelConfig = Field( model: Optional[ModelConfig] = Field(
default=None, title="Detector specific model configuration." default=None, title="Detector specific model configuration."
) )
model_config = ConfigDict(
class Config: extra="allow", arbitrary_types_allowed=True, protected_namespaces=()
extra = Extra.allow )
arbitrary_types_allowed = True

View File

@ -6,6 +6,7 @@ from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig from frigate.detectors.detector_config import BaseDetectorConfig
from frigate.detectors.util import yolov8_postprocess
try: try:
from tflite_runtime.interpreter import Interpreter, load_delegate from tflite_runtime.interpreter import Interpreter, load_delegate
@ -54,11 +55,29 @@ class EdgeTpuTfl(DetectionApi):
self.tensor_input_details = self.interpreter.get_input_details() self.tensor_input_details = self.interpreter.get_input_details()
self.tensor_output_details = self.interpreter.get_output_details() self.tensor_output_details = self.interpreter.get_output_details()
self.model_type = detector_config.model.model_type
def detect_raw(self, tensor_input): def detect_raw(self, tensor_input):
if self.model_type == "yolov8":
scale, zero_point = self.tensor_input_details[0]["quantization"]
tensor_input = (
(tensor_input - scale * zero_point * 255) * (1.0 / (scale * 255))
).astype(self.tensor_input_details[0]["dtype"])
self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input) self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
self.interpreter.invoke() self.interpreter.invoke()
if self.model_type == "yolov8":
scale, zero_point = self.tensor_output_details[0]["quantization"]
tensor_output = self.interpreter.get_tensor(
self.tensor_output_details[0]["index"]
)
tensor_output = (tensor_output.astype(np.float32) - zero_point) * scale
model_input_shape = self.tensor_input_details[0]["shape"]
tensor_output[:, [0, 2]] *= model_input_shape[2]
tensor_output[:, [1, 3]] *= model_input_shape[1]
return yolov8_postprocess(model_input_shape, tensor_output)
boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0] boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0] class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0] scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]
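For reference, a minimal numeric sketch of the output dequantization used in the yolov8 branch above; the scale and zero_point values are made up, the real ones come from the interpreter's tensor_output_details.
import numpy as np

scale, zero_point = 0.0039, 2                                        # hypothetical quantization params
quantized = np.array([[3, 130, 255]], dtype=np.uint8)                # raw quantized model output values
dequantized = (quantized.astype(np.float32) - zero_point) * scale    # same formula as the yolov8 branch above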

View File

@ -0,0 +1,65 @@
import glob
import logging
import numpy as np
from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from frigate.detectors.util import preprocess, yolov8_postprocess
logger = logging.getLogger(__name__)
DETECTOR_KEY = "onnx"
class ONNXDetectorConfig(BaseDetectorConfig):
type: Literal[DETECTOR_KEY]
class ONNXDetector(DetectionApi):
type_key = DETECTOR_KEY
def __init__(self, detector_config: ONNXDetectorConfig):
try:
import onnxruntime
logger.info("ONNX: loaded onnxruntime module")
except ModuleNotFoundError:
logger.error(
"ONNX: module loading failed, need 'pip install onnxruntime'?!?"
)
raise
assert (
detector_config.model.model_type == "yolov8"
), "ONNX: detector_config.model.model_type: only yolov8 supported"
assert (
detector_config.model.input_tensor == "nhwc"
), "ONNX: detector_config.model.input_tensor: only nhwc supported"
if detector_config.model.input_pixel_format != "rgb":
logger.warning(
f"ONNX: detector_config.model.input_pixel_format: should be 'rgb' for yolov8, but '{detector_config.model.input_pixel_format}' specified!"
)
assert detector_config.model.path is not None, (
"ONNX: No model.path configured, please configure model.path and model.labelmap_path; some suggestions: "
+ ", ".join(glob.glob("/config/model_cache/yolov8/*.onnx"))
+ " and "
+ ", ".join(glob.glob("/config/model_cache/yolov8/*_labels.txt"))
)
path = detector_config.model.path
logger.info(f"ONNX: loading {detector_config.model.path}")
self.model = onnxruntime.InferenceSession(path)
logger.info(f"ONNX: {path} loaded")
def detect_raw(self, tensor_input):
model_input_name = self.model.get_inputs()[0].name
model_input_shape = self.model.get_inputs()[0].shape
tensor_input = preprocess(tensor_input, model_input_shape, np.float32)
tensor_output = self.model.run(None, {model_input_name: tensor_input})[0]
return yolov8_postprocess(model_input_shape, tensor_output)

View File

@ -42,11 +42,6 @@ class Rknn(DetectionApi):
type_key = DETECTOR_KEY type_key = DETECTOR_KEY
def __init__(self, config: RknnDetectorConfig): def __init__(self, config: RknnDetectorConfig):
# create symlink for Home Assistant add on
if not os.path.isfile("/proc/device-tree/compatible"):
if os.path.isfile("/device-tree/compatible"):
os.symlink("/device-tree/compatible", "/proc/device-tree/compatible")
# find out SoC # find out SoC
try: try:
with open("/proc/device-tree/compatible") as file: with open("/proc/device-tree/compatible") as file:
@ -105,10 +100,10 @@ class Rknn(DetectionApi):
if (config.model.width != 320) or (config.model.height != 320): if (config.model.width != 320) or (config.model.height != 320):
logger.error( logger.error(
"Make sure to set the model width and heigth to 320 in your config.yml." "Make sure to set the model width and height to 320 in your config.yml."
) )
raise Exception( raise Exception(
"Make sure to set the model width and heigth to 320 in your config.yml." "Make sure to set the model width and height to 320 in your config.yml."
) )
if config.model.input_pixel_format != "bgr": if config.model.input_pixel_format != "bgr":

View File

@ -0,0 +1,143 @@
import ctypes
import glob
import logging
import os
import subprocess
import sys
import numpy as np
from pydantic import Field
from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from frigate.detectors.util import preprocess, yolov8_postprocess
logger = logging.getLogger(__name__)
DETECTOR_KEY = "rocm"
def detect_gfx_version():
return subprocess.getoutput(
"unset HSA_OVERRIDE_GFX_VERSION && /opt/rocm/bin/rocminfo | grep gfx |head -1|awk '{print $2}'"
)
def auto_override_gfx_version():
# If environment variable already in place, do not override
gfx_version = detect_gfx_version()
old_override = os.getenv("HSA_OVERRIDE_GFX_VERSION")
if old_override not in (None, ""):
logger.warning(
f"AMD/ROCm: detected {gfx_version} but HSA_OVERRIDE_GFX_VERSION already present ({old_override}), not overriding!"
)
return old_override
mapping = {
"gfx90c": "9.0.0",
"gfx1031": "10.3.0",
"gfx1103": "11.0.0",
}
override = mapping.get(gfx_version)
if override is not None:
logger.warning(
f"AMD/ROCm: detected {gfx_version}, overriding HSA_OVERRIDE_GFX_VERSION={override}"
)
os.putenv("HSA_OVERRIDE_GFX_VERSION", override)
return override
return ""
class ROCmDetectorConfig(BaseDetectorConfig):
type: Literal[DETECTOR_KEY]
conserve_cpu: bool = Field(
default=True,
title="Conserve CPU at the expense of latency (and reduced max throughput)",
)
auto_override_gfx: bool = Field(
default=True, title="Automatically detect and override gfx version"
)
class ROCmDetector(DetectionApi):
type_key = DETECTOR_KEY
def __init__(self, detector_config: ROCmDetectorConfig):
if detector_config.auto_override_gfx:
auto_override_gfx_version()
try:
sys.path.append("/opt/rocm/lib")
import migraphx
logger.info("AMD/ROCm: loaded migraphx module")
except ModuleNotFoundError:
logger.error("AMD/ROCm: module loading failed, missing ROCm environment?")
raise
if detector_config.conserve_cpu:
logger.info("AMD/ROCm: switching HIP to blocking mode to conserve CPU")
ctypes.CDLL("/opt/rocm/lib/libamdhip64.so").hipSetDeviceFlags(4)
assert (
detector_config.model.model_type == "yolov8"
), "AMD/ROCm: detector_config.model.model_type: only yolov8 supported"
assert (
detector_config.model.input_tensor == "nhwc"
), "AMD/ROCm: detector_config.model.input_tensor: only nhwc supported"
if detector_config.model.input_pixel_format != "rgb":
logger.warning(
f"AMD/ROCm: detector_config.model.input_pixel_format: should be 'rgb' for yolov8, but '{detector_config.model.input_pixel_format}' specified!"
)
assert detector_config.model.path is not None, (
"No model.path configured, please configure model.path and model.labelmap_path; some suggestions: "
+ ", ".join(glob.glob("/config/model_cache/yolov8/*.onnx"))
+ " and "
+ ", ".join(glob.glob("/config/model_cache/yolov8/*_labels.txt"))
)
path = detector_config.model.path
mxr_path = os.path.splitext(path)[0] + ".mxr"
if path.endswith(".mxr"):
logger.info(f"AMD/ROCm: loading parsed model from {mxr_path}")
self.model = migraphx.load(mxr_path)
elif os.path.exists(mxr_path):
logger.info(f"AMD/ROCm: loading parsed model from {mxr_path}")
self.model = migraphx.load(mxr_path)
else:
logger.info(f"AMD/ROCm: loading model from {path}")
if path.endswith(".onnx"):
self.model = migraphx.parse_onnx(path)
elif (
path.endswith(".tf")
or path.endswith(".tf2")
or path.endswith(".tflite")
):
# untested
self.model = migraphx.parse_tf(path)
else:
raise Exception(f"AMD/ROCm: unknown model format {path}")
logger.info("AMD/ROCm: compiling the model")
self.model.compile(
migraphx.get_target("gpu"), offload_copy=True, fast_math=True
)
logger.info(f"AMD/ROCm: saving parsed model into {mxr_path}")
os.makedirs("/config/model_cache/rocm", exist_ok=True)
migraphx.save(self.model, mxr_path)
logger.info("AMD/ROCm: model loaded")
def detect_raw(self, tensor_input):
model_input_name = self.model.get_parameter_names()[0]
model_input_shape = tuple(
self.model.get_parameter_shapes()[model_input_name].lens()
)
tensor_input = preprocess(tensor_input, model_input_shape, np.float32)
detector_result = self.model.run({model_input_name: tensor_input})[0]
addr = ctypes.cast(detector_result.data_ptr(), ctypes.POINTER(ctypes.c_float))
tensor_output = np.ctypeslib.as_array(
addr, shape=detector_result.get_shape().lens()
)
return yolov8_postprocess(model_input_shape, tensor_output)

frigate/detectors/util.py Normal file
View File

@ -0,0 +1,83 @@
import logging
import cv2
import numpy as np
logger = logging.getLogger(__name__)
def preprocess(tensor_input, model_input_shape, model_input_element_type):
model_input_shape = tuple(model_input_shape)
assert tensor_input.dtype == np.uint8, f"tensor_input.dtype: {tensor_input.dtype}"
if len(tensor_input.shape) == 3:
tensor_input = tensor_input[np.newaxis, :]
if model_input_element_type == np.uint8:
# nothing to do for uint8 model input
assert (
model_input_shape == tensor_input.shape
), f"model_input_shape: {model_input_shape}, tensor_input.shape: {tensor_input.shape}"
return tensor_input
assert (
model_input_element_type == np.float32
), f"model_input_element_type: {model_input_element_type}"
# tensor_input must be nhwc
assert tensor_input.shape[3] == 3, f"tensor_input.shape: {tensor_input.shape}"
if tensor_input.shape[1:3] != model_input_shape[2:4]:
logger.warning(
f"preprocess: tensor_input.shape {tensor_input.shape} and model_input_shape {model_input_shape} do not match!"
)
# cv2.dnn.blobFromImage is faster than numpying it
return cv2.dnn.blobFromImage(
tensor_input[0],
1.0 / 255,
(model_input_shape[3], model_input_shape[2]),
None,
swapRB=False,
)
def yolov8_postprocess(
model_input_shape,
tensor_output,
box_count=20,
score_threshold=0.5,
nms_threshold=0.5,
):
model_box_count = tensor_output.shape[2]
probs = tensor_output[0, 4:, :]
all_ids = np.argmax(probs, axis=0)
all_confidences = probs.T[np.arange(model_box_count), all_ids]
all_boxes = tensor_output[0, 0:4, :].T
mask = all_confidences > score_threshold
class_ids = all_ids[mask]
confidences = all_confidences[mask]
cx, cy, w, h = all_boxes[mask].T
if model_input_shape[3] == 3:
scale_y, scale_x = 1 / model_input_shape[1], 1 / model_input_shape[2]
else:
scale_y, scale_x = 1 / model_input_shape[2], 1 / model_input_shape[3]
detections = np.stack(
(
class_ids,
confidences,
scale_y * (cy - h / 2),
scale_x * (cx - w / 2),
scale_y * (cy + h / 2),
scale_x * (cx + w / 2),
),
axis=1,
)
if detections.shape[0] > box_count:
# if too many detections, do nms filtering to suppress overlapping boxes
boxes = np.stack((cx - w / 2, cy - h / 2, w, h), axis=1)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, score_threshold, nms_threshold)
detections = detections[indexes]
# if still too many, trim the rest by confidence
if detections.shape[0] > box_count:
detections = detections[
np.argpartition(detections[:, 1], -box_count)[-box_count:]
]
detections = detections.copy()
detections.resize((box_count, 6))
return detections
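As a rough illustration of how these two helpers are meant to be chained by a detector; the shapes and the all-zero stand-in output tensor below are assumptions, not part of this change.
import numpy as np
from frigate.detectors.util import preprocess, yolov8_postprocess   # the helpers defined in this file

frame = np.zeros((320, 320, 3), dtype=np.uint8)                      # nhwc uint8 frame as Frigate provides it
model_input_shape = (1, 3, 320, 320)                                 # nchw float32 model input
tensor_input = preprocess(frame, model_input_shape, np.float32)     # -> (1, 3, 320, 320) float32 blob
dummy_output = np.zeros((1, 84, 8400), dtype=np.float32)             # stand-in for a yolov8 head (4 box rows + 80 classes)
detections = yolov8_postprocess(model_input_shape, dummy_output)
# detections: (20, 6) rows of [class_id, confidence, y_min, x_min, y_max, x_max], normalized 0-1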

View File

@ -13,7 +13,9 @@ import numpy as np
import requests import requests
from setproctitle import setproctitle from setproctitle import setproctitle
from frigate.comms.inter_process import InterProcessCommunicator from frigate.comms.config_updater import ConfigSubscriber
from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import CameraConfig, CameraInput, FfmpegConfig, FrigateConfig from frigate.config import CameraConfig, CameraInput, FfmpegConfig, FrigateConfig
from frigate.const import ( from frigate.const import (
AUDIO_DURATION, AUDIO_DURATION,
@ -26,7 +28,7 @@ from frigate.const import (
from frigate.ffmpeg_presets import parse_preset_input from frigate.ffmpeg_presets import parse_preset_input
from frigate.log import LogPipe from frigate.log import LogPipe
from frigate.object_detection import load_labels from frigate.object_detection import load_labels
from frigate.types import CameraMetricsTypes, FeatureMetricsTypes from frigate.types import CameraMetricsTypes
from frigate.util.builtin import get_ffmpeg_arg_list from frigate.util.builtin import get_ffmpeg_arg_list
from frigate.util.services import listen from frigate.util.services import listen
from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg
@ -67,10 +69,7 @@ def get_ffmpeg_command(ffmpeg: FfmpegConfig) -> list[str]:
def listen_to_audio( def listen_to_audio(
config: FrigateConfig, config: FrigateConfig,
recordings_info_queue: mp.Queue,
camera_metrics: dict[str, CameraMetricsTypes], camera_metrics: dict[str, CameraMetricsTypes],
process_info: dict[str, FeatureMetricsTypes],
inter_process_communicator: InterProcessCommunicator,
) -> None: ) -> None:
stop_event = mp.Event() stop_event = mp.Event()
audio_threads: list[threading.Thread] = [] audio_threads: list[threading.Thread] = []
@ -96,11 +95,8 @@ def listen_to_audio(
if camera.enabled and camera.audio.enabled_in_config: if camera.enabled and camera.audio.enabled_in_config:
audio = AudioEventMaintainer( audio = AudioEventMaintainer(
camera, camera,
recordings_info_queue,
camera_metrics, camera_metrics,
process_info,
stop_event, stop_event,
inter_process_communicator,
) )
audio_threads.append(audio) audio_threads.append(audio)
audio.start() audio.start()
@ -170,19 +166,13 @@ class AudioEventMaintainer(threading.Thread):
def __init__( def __init__(
self, self,
camera: CameraConfig, camera: CameraConfig,
recordings_info_queue: mp.Queue,
camera_metrics: dict[str, CameraMetricsTypes], camera_metrics: dict[str, CameraMetricsTypes],
feature_metrics: dict[str, FeatureMetricsTypes],
stop_event: mp.Event, stop_event: mp.Event,
inter_process_communicator: InterProcessCommunicator,
) -> None: ) -> None:
threading.Thread.__init__(self) threading.Thread.__init__(self)
self.name = f"{camera.name}_audio_event_processor" self.name = f"{camera.name}_audio_event_processor"
self.config = camera self.config = camera
self.recordings_info_queue = recordings_info_queue
self.camera_metrics = camera_metrics self.camera_metrics = camera_metrics
self.feature_metrics = feature_metrics
self.inter_process_communicator = inter_process_communicator
self.detections: dict[dict[str, any]] = {} self.detections: dict[dict[str, any]] = {}
self.stop_event = stop_event self.stop_event = stop_event
self.detector = AudioTfl(stop_event, self.config.audio.num_threads) self.detector = AudioTfl(stop_event, self.config.audio.num_threads)
@ -193,8 +183,13 @@ class AudioEventMaintainer(threading.Thread):
self.logpipe = LogPipe(f"ffmpeg.{self.config.name}.audio") self.logpipe = LogPipe(f"ffmpeg.{self.config.name}.audio")
self.audio_listener = None self.audio_listener = None
# create communication for audio detections
self.requestor = InterProcessRequestor()
self.config_subscriber = ConfigSubscriber(f"config/audio/{camera.name}")
self.detection_publisher = DetectionPublisher(DetectionTypeEnum.audio)
def detect_audio(self, audio) -> None: def detect_audio(self, audio) -> None:
if not self.feature_metrics[self.config.name]["audio_enabled"].value: if not self.config.audio.enabled or self.stop_event.is_set():
return return
audio_as_float = audio.astype(np.float32) audio_as_float = audio.astype(np.float32)
@ -222,8 +217,8 @@ class AudioEventMaintainer(threading.Thread):
self.handle_detection(label, score) self.handle_detection(label, score)
audio_detections.append(label) audio_detections.append(label)
# add audio info to recordings queue # send audio detection data
self.recordings_info_queue.put( self.detection_publisher.send_data(
( (
self.config.name, self.config.name,
datetime.datetime.now().timestamp(), datetime.datetime.now().timestamp(),
@ -245,24 +240,18 @@ class AudioEventMaintainer(threading.Thread):
else: else:
dBFS = 0 dBFS = 0
self.inter_process_communicator.queue.put( self.requestor.send_data(f"{self.config.name}/audio/dBFS", float(dBFS))
(f"{self.config.name}/audio/dBFS", float(dBFS)) self.requestor.send_data(f"{self.config.name}/audio/rms", float(rms))
)
self.inter_process_communicator.queue.put(
(f"{self.config.name}/audio/rms", float(rms))
)
return float(rms), float(dBFS) return float(rms), float(dBFS)
def handle_detection(self, label: str, score: float) -> None: def handle_detection(self, label: str, score: float) -> None:
if self.detections.get(label): if self.detections.get(label):
self.detections[label][ self.detections[label]["last_detection"] = (
"last_detection" datetime.datetime.now().timestamp()
] = datetime.datetime.now().timestamp()
else:
self.inter_process_communicator.queue.put(
(f"{self.config.name}/audio/{label}", "ON")
) )
else:
self.requestor.send_data(f"{self.config.name}/audio/{label}", "ON")
resp = requests.post( resp = requests.post(
f"{FRIGATE_LOCALHOST}/api/events/{self.config.name}/{label}/create", f"{FRIGATE_LOCALHOST}/api/events/{self.config.name}/{label}/create",
@ -288,8 +277,8 @@ class AudioEventMaintainer(threading.Thread):
now - detection.get("last_detection", now) now - detection.get("last_detection", now)
> self.config.audio.max_not_heard > self.config.audio.max_not_heard
): ):
self.inter_process_communicator.queue.put( self.requestor.send_data(
(f"{self.config.name}/audio/{detection['label']}", "OFF") f"{self.config.name}/audio/{detection['label']}", "OFF"
) )
resp = requests.put( resp = requests.put(
@ -346,7 +335,19 @@ class AudioEventMaintainer(threading.Thread):
self.start_or_restart_ffmpeg() self.start_or_restart_ffmpeg()
while not self.stop_event.is_set(): while not self.stop_event.is_set():
# check if there is an updated config
(
updated_topic,
updated_audio_config,
) = self.config_subscriber.check_for_update()
if updated_topic:
self.config.audio = updated_audio_config
self.read_audio() self.read_audio()
stop_ffmpeg(self.audio_listener, self.logger) stop_ffmpeg(self.audio_listener, self.logger)
self.logpipe.close() self.logpipe.close()
self.requestor.stop()
self.config_subscriber.stop()
self.detection_publisher.stop()
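The maintainer now talks over three small messaging helpers instead of shared multiprocessing queues; a condensed sketch of that pattern (the camera name and the empty loop body are placeholders, the APIs are the ones used above):
import multiprocessing as mp
from frigate.comms.config_updater import ConfigSubscriber
from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum
from frigate.comms.inter_process import InterProcessRequestor

stop_event = mp.Event()
requestor = InterProcessRequestor()                                # key/value updates (dBFS, rms, ON/OFF)
config_subscriber = ConfigSubscriber("config/audio/front_door")    # hypothetical camera name
detection_publisher = DetectionPublisher(DetectionTypeEnum.audio)

while not stop_event.is_set():
    updated_topic, updated_audio_config = config_subscriber.check_for_update()
    if updated_topic:
        pass   # swap the camera's audio config in place, as run() does above
    break      # placeholder body; the real loop reads audio and publishes detections

requestor.stop()
config_subscriber.stop()
detection_publisher.stop()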

View File

@ -69,8 +69,8 @@ PRESETS_HW_ACCEL_DECODE = {
FFMPEG_HWACCEL_NVIDIA: "-hwaccel cuda -hwaccel_output_format cuda", FFMPEG_HWACCEL_NVIDIA: "-hwaccel cuda -hwaccel_output_format cuda",
"preset-jetson-h264": "-c:v h264_nvmpi -resize {1}x{2}", "preset-jetson-h264": "-c:v h264_nvmpi -resize {1}x{2}",
"preset-jetson-h265": "-c:v hevc_nvmpi -resize {1}x{2}", "preset-jetson-h265": "-c:v hevc_nvmpi -resize {1}x{2}",
"preset-rk-h264": "-c:v h264_rkmpp_decoder", "preset-rk-h264": "-hwaccel rkmpp -hwaccel_output_format drm_prime",
"preset-rk-h265": "-c:v hevc_rkmpp_decoder", "preset-rk-h265": "-hwaccel rkmpp -hwaccel_output_format drm_prime",
} }
PRESETS_HW_ACCEL_DECODE["preset-nvidia-h264"] = PRESETS_HW_ACCEL_DECODE[ PRESETS_HW_ACCEL_DECODE["preset-nvidia-h264"] = PRESETS_HW_ACCEL_DECODE[
FFMPEG_HWACCEL_NVIDIA FFMPEG_HWACCEL_NVIDIA
@ -91,8 +91,8 @@ PRESETS_HW_ACCEL_SCALE = {
FFMPEG_HWACCEL_NVIDIA: "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p", FFMPEG_HWACCEL_NVIDIA: "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-jetson-h264": "-r {0}", # scaled in decoder "preset-jetson-h264": "-r {0}", # scaled in decoder
"preset-jetson-h265": "-r {0}", # scaled in decoder "preset-jetson-h265": "-r {0}", # scaled in decoder
"preset-rk-h264": "-r {0} -vf fps={0},scale={1}:{2}", "preset-rk-h264": "-r {0} -vf scale_rkrga=w={1}:h={2}:format=yuv420p:force_original_aspect_ratio=0,hwmap=mode=read,format=yuv420p",
"preset-rk-h265": "-r {0} -vf fps={0},scale={1}:{2}", "preset-rk-h265": "-r {0} -vf scale_rkrga=w={1}:h={2}:format=yuv420p:force_original_aspect_ratio=0,hwmap=mode=read,format=yuv420p",
"default": "-r {0} -vf fps={0},scale={1}:{2}", "default": "-r {0} -vf fps={0},scale={1}:{2}",
} }
PRESETS_HW_ACCEL_SCALE["preset-nvidia-h264"] = PRESETS_HW_ACCEL_SCALE[ PRESETS_HW_ACCEL_SCALE["preset-nvidia-h264"] = PRESETS_HW_ACCEL_SCALE[
@ -111,16 +111,16 @@ PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = {
FFMPEG_HWACCEL_NVIDIA: "ffmpeg -hide_banner {0} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {1}", FFMPEG_HWACCEL_NVIDIA: "ffmpeg -hide_banner {0} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {1}",
"preset-jetson-h264": "ffmpeg -hide_banner {0} -c:v h264_nvmpi -profile high {1}", "preset-jetson-h264": "ffmpeg -hide_banner {0} -c:v h264_nvmpi -profile high {1}",
"preset-jetson-h265": "ffmpeg -hide_banner {0} -c:v h264_nvmpi -profile high {1}", "preset-jetson-h265": "ffmpeg -hide_banner {0} -c:v h264_nvmpi -profile high {1}",
"preset-rk-h264": "ffmpeg -hide_banner {0} -c:v h264_rkmpp_encoder -profile high {1}", "preset-rk-h264": "ffmpeg -hide_banner {0} -c:v h264_rkmpp -profile:v high {1}",
"preset-rk-h265": "ffmpeg -hide_banner {0} -c:v hevc_rkmpp_encoder -profile high {1}", "preset-rk-h265": "ffmpeg -hide_banner {0} -c:v hevc_rkmpp -profile:v high {1}",
"default": "ffmpeg -hide_banner {0} -c:v libx264 -g 50 -profile:v high -level:v 4.1 -preset:v superfast -tune:v zerolatency {1}", "default": "ffmpeg -hide_banner {0} -c:v libx264 -g 50 -profile:v high -level:v 4.1 -preset:v superfast -tune:v zerolatency {1}",
} }
PRESETS_HW_ACCEL_ENCODE_BIRDSEYE[ PRESETS_HW_ACCEL_ENCODE_BIRDSEYE["preset-nvidia-h264"] = (
"preset-nvidia-h264" PRESETS_HW_ACCEL_ENCODE_BIRDSEYE[FFMPEG_HWACCEL_NVIDIA]
] = PRESETS_HW_ACCEL_ENCODE_BIRDSEYE[FFMPEG_HWACCEL_NVIDIA] )
PRESETS_HW_ACCEL_ENCODE_BIRDSEYE[ PRESETS_HW_ACCEL_ENCODE_BIRDSEYE["preset-nvidia-h265"] = (
"preset-nvidia-h265" PRESETS_HW_ACCEL_ENCODE_BIRDSEYE[FFMPEG_HWACCEL_NVIDIA]
] = PRESETS_HW_ACCEL_ENCODE_BIRDSEYE[FFMPEG_HWACCEL_NVIDIA] )
PRESETS_HW_ACCEL_ENCODE_TIMELAPSE = { PRESETS_HW_ACCEL_ENCODE_TIMELAPSE = {
"preset-rpi-64-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m -pix_fmt yuv420p {1}", "preset-rpi-64-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m -pix_fmt yuv420p {1}",
@ -132,13 +132,13 @@ PRESETS_HW_ACCEL_ENCODE_TIMELAPSE = {
"preset-nvidia-h265": "ffmpeg -hide_banner -hwaccel cuda -hwaccel_output_format cuda -extra_hw_frames 8 {0} -c:v hevc_nvenc {1}", "preset-nvidia-h265": "ffmpeg -hide_banner -hwaccel cuda -hwaccel_output_format cuda -extra_hw_frames 8 {0} -c:v hevc_nvenc {1}",
"preset-jetson-h264": "ffmpeg -hide_banner {0} -c:v h264_nvmpi -profile high {1}", "preset-jetson-h264": "ffmpeg -hide_banner {0} -c:v h264_nvmpi -profile high {1}",
"preset-jetson-h265": "ffmpeg -hide_banner {0} -c:v hevc_nvmpi -profile high {1}", "preset-jetson-h265": "ffmpeg -hide_banner {0} -c:v hevc_nvmpi -profile high {1}",
"preset-rk-h264": "ffmpeg -hide_banner {0} -c:v h264_rkmpp_encoder -profile high {1}", "preset-rk-h264": "ffmpeg -hide_banner {0} -c:v h264_rkmpp -profile:v high {1}",
"preset-rk-h265": "ffmpeg -hide_banner {0} -c:v hevc_rkmpp_encoder -profile high {1}", "preset-rk-h265": "ffmpeg -hide_banner {0} -c:v hevc_rkmpp -profile:v high {1}",
"default": "ffmpeg -hide_banner {0} -c:v libx264 -preset:v ultrafast -tune:v zerolatency {1}", "default": "ffmpeg -hide_banner {0} -c:v libx264 -preset:v ultrafast -tune:v zerolatency {1}",
} }
PRESETS_HW_ACCEL_ENCODE_TIMELAPSE[ PRESETS_HW_ACCEL_ENCODE_TIMELAPSE["preset-nvidia-h264"] = (
"preset-nvidia-h264" PRESETS_HW_ACCEL_ENCODE_TIMELAPSE[FFMPEG_HWACCEL_NVIDIA]
] = PRESETS_HW_ACCEL_ENCODE_TIMELAPSE[FFMPEG_HWACCEL_NVIDIA] )
# encoding of previews is only done on CPU due to comparable encode times and better quality from libx264 # encoding of previews is only done on CPU due to comparable encode times and better quality from libx264
PRESETS_HW_ACCEL_ENCODE_PREVIEW = { PRESETS_HW_ACCEL_ENCODE_PREVIEW = {
@ -175,7 +175,7 @@ def parse_preset_hardware_acceleration_scale(
if not isinstance(arg, str) or " " in arg: if not isinstance(arg, str) or " " in arg:
scale = PRESETS_HW_ACCEL_SCALE["default"] scale = PRESETS_HW_ACCEL_SCALE["default"]
else: else:
scale = PRESETS_HW_ACCEL_SCALE.get(arg, "") scale = PRESETS_HW_ACCEL_SCALE.get(arg, PRESETS_HW_ACCEL_SCALE["default"])
scale = scale.format(fps, width, height).split(" ") scale = scale.format(fps, width, height).split(" ")
scale.extend(detect_args) scale.extend(detect_args)
@ -461,9 +461,18 @@ PRESETS_RECORD_OUTPUT = {
} }
def parse_preset_output_record(arg: Any) -> list[str]: def parse_preset_output_record(arg: Any, force_record_hvc1: bool) -> list[str]:
"""Return the correct preset if in preset format otherwise return None.""" """Return the correct preset if in preset format otherwise return None."""
if not isinstance(arg, str): if not isinstance(arg, str):
return None return None
return PRESETS_RECORD_OUTPUT.get(arg, None) preset = PRESETS_RECORD_OUTPUT.get(arg, None)
if not preset:
return None
if force_record_hvc1:
# Apple only supports HEVC if it is hvc1 (vs. hev1)
preset = preset + ["-tag:v", "hvc1"]  # copy, so the shared list in PRESETS_RECORD_OUTPUT is not mutated
return preset
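An illustrative call of the new flag; "preset-record-generic" is assumed here to be one of the keys in PRESETS_RECORD_OUTPUT.
args = parse_preset_output_record("preset-record-generic", force_record_hvc1=True)
# -> the preset's usual output arguments followed by "-tag:v", "hvc1"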

View File

@ -24,11 +24,11 @@ from flask import (
Flask, Flask,
Response, Response,
current_app, current_app,
escape,
jsonify, jsonify,
make_response, make_response,
request, request,
) )
from markupsafe import escape
from peewee import DoesNotExist, fn, operator from peewee import DoesNotExist, fn, operator
from playhouse.shortcuts import model_to_dict from playhouse.shortcuts import model_to_dict
from playhouse.sqliteq import SqliteQueueDatabase from playhouse.sqliteq import SqliteQueueDatabase
@ -45,12 +45,12 @@ from frigate.const import (
RECORD_DIR, RECORD_DIR,
) )
from frigate.events.external import ExternalEventProcessor from frigate.events.external import ExternalEventProcessor
from frigate.models import Event, Previews, Recordings, Regions, Timeline from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment, Timeline
from frigate.object_processing import TrackedObject from frigate.object_processing import TrackedObject
from frigate.plus import PlusApi from frigate.plus import PlusApi
from frigate.ptz.onvif import OnvifController from frigate.ptz.onvif import OnvifController
from frigate.record.export import PlaybackFactorEnum, RecordingExporter from frigate.record.export import PlaybackFactorEnum, RecordingExporter
from frigate.stats import stats_snapshot from frigate.stats.emitter import StatsEmitter
from frigate.storage import StorageMaintainer from frigate.storage import StorageMaintainer
from frigate.util.builtin import ( from frigate.util.builtin import (
clean_camera_user_pass, clean_camera_user_pass,
@ -70,12 +70,12 @@ bp = Blueprint("frigate", __name__)
def create_app( def create_app(
frigate_config, frigate_config,
database: SqliteQueueDatabase, database: SqliteQueueDatabase,
stats_tracking,
detected_frames_processor, detected_frames_processor,
storage_maintainer: StorageMaintainer, storage_maintainer: StorageMaintainer,
onvif: OnvifController, onvif: OnvifController,
external_processor: ExternalEventProcessor, external_processor: ExternalEventProcessor,
plus_api: PlusApi, plus_api: PlusApi,
stats_emitter: StatsEmitter,
): ):
app = Flask(__name__) app = Flask(__name__)
@ -97,14 +97,13 @@ def create_app(
database.close() database.close()
app.frigate_config = frigate_config app.frigate_config = frigate_config
app.stats_tracking = stats_tracking
app.detected_frames_processor = detected_frames_processor app.detected_frames_processor = detected_frames_processor
app.storage_maintainer = storage_maintainer app.storage_maintainer = storage_maintainer
app.onvif = onvif app.onvif = onvif
app.external_processor = external_processor app.external_processor = external_processor
app.plus_api = plus_api app.plus_api = plus_api
app.camera_error_image = None app.camera_error_image = None
app.hwaccel_errors = [] app.stats_emitter = stats_emitter
app.register_blueprint(bp) app.register_blueprint(bp)
@ -277,6 +276,13 @@ def send_to_plus(id):
box, box,
event.label, event.label,
) )
except ValueError:
message = "Error uploading annotation, unsupported label provided."
logger.error(message)
return make_response(
jsonify({"success": False, "message": message}),
400,
)
except Exception as ex: except Exception as ex:
logger.exception(ex) logger.exception(ex)
return make_response( return make_response(
@ -348,6 +354,13 @@ def false_positive(id):
event.model_type, event.model_type,
event.detector_type, event.detector_type,
) )
except ValueError:
message = "Error uploading false positive, unsupported label provided."
logger.error(message)
return make_response(
jsonify({"success": False, "message": message}),
400,
)
except Exception as ex: except Exception as ex:
logger.exception(ex) logger.exception(ex)
return make_response( return make_response(
@ -593,6 +606,22 @@ def event_thumbnail(id, max_cache_age=2592000):
return response return response
@bp.route("/events/<id>/preview.gif")
def event_preview(id: str):
try:
event: Event = Event.get(Event.id == id)
except DoesNotExist:
return make_response(
jsonify({"success": False, "message": "Event not found"}), 404
)
start_ts = event.start_time
end_ts = start_ts + (
min(event.end_time - event.start_time, 20) if event.end_time else 20
)
return preview_gif(event.camera, start_ts, end_ts)
@bp.route("/timeline") @bp.route("/timeline")
def timeline(): def timeline():
camera = request.args.get("camera", "all") camera = request.args.get("camera", "all")
@ -904,9 +933,9 @@ def event_snapshot(id):
else: else:
response.headers["Cache-Control"] = "no-store" response.headers["Cache-Control"] = "no-store"
if download: if download:
response.headers[ response.headers["Content-Disposition"] = (
"Content-Disposition" f"attachment; filename=snapshot-{id}.jpg"
] = f"attachment; filename=snapshot-{id}.jpg" )
return response return response
@ -1093,9 +1122,9 @@ def event_clip(id):
if download: if download:
response.headers["Content-Disposition"] = "attachment; filename=%s" % file_name response.headers["Content-Disposition"] = "attachment; filename=%s" % file_name
response.headers["Content-Length"] = os.path.getsize(clip_path) response.headers["Content-Length"] = os.path.getsize(clip_path)
response.headers[ response.headers["X-Accel-Redirect"] = (
"X-Accel-Redirect" f"/clips/{file_name}" # nginx: https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers
] = f"/clips/{file_name}" # nginx: https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers )
return response return response
@ -1371,7 +1400,7 @@ def end_event(event_id):
@bp.route("/config") @bp.route("/config")
def config(): def config():
config = current_app.frigate_config.dict() config = current_app.frigate_config.model_dump(mode="json", exclude_none=True)
# remove the mqtt password # remove the mqtt password
config["mqtt"].pop("password", None) config["mqtt"].pop("password", None)
@ -1391,9 +1420,9 @@ def config():
config["plus"] = {"enabled": current_app.plus_api.is_active()} config["plus"] = {"enabled": current_app.plus_api.is_active()}
for detector, detector_config in config["detectors"].items(): for detector, detector_config in config["detectors"].items():
detector_config["model"][ detector_config["model"]["labelmap"] = (
"labelmap" current_app.frigate_config.model.merged_labelmap
] = current_app.frigate_config.model.merged_labelmap )
return jsonify(config) return jsonify(config)
@ -1587,12 +1616,12 @@ def version():
@bp.route("/stats") @bp.route("/stats")
def stats(): def stats():
stats = stats_snapshot( return jsonify(current_app.stats_emitter.get_latest_stats())
current_app.frigate_config,
current_app.stats_tracking,
current_app.hwaccel_errors, @bp.route("/stats/history")
) def stats_history():
return jsonify(stats) return jsonify(current_app.stats_emitter.get_stats_history())
@bp.route("/<camera_name>") @bp.route("/<camera_name>")
@ -1789,20 +1818,18 @@ def get_snapshot_from_recording(camera_name: str, frame_time: str):
@bp.route("/recordings/storage", methods=["GET"]) @bp.route("/recordings/storage", methods=["GET"])
def get_recordings_storage_usage(): def get_recordings_storage_usage():
recording_stats = stats_snapshot( recording_stats = current_app.stats_emitter.get_latest_stats()["service"][
current_app.frigate_config, "storage"
current_app.stats_tracking, ][RECORD_DIR]
current_app.hwaccel_errors,
)["service"]["storage"][RECORD_DIR]
if not recording_stats: if not recording_stats:
return jsonify({}) return jsonify({})
total_mb = recording_stats["total"] total_mb = recording_stats["total"]
camera_usages: dict[ camera_usages: dict[str, dict] = (
str, dict current_app.storage_maintainer.calculate_camera_usages()
] = current_app.storage_maintainer.calculate_camera_usages() )
for camera_name in camera_usages.keys(): for camera_name in camera_usages.keys():
if camera_usages.get(camera_name, {}).get("usage"): if camera_usages.get(camera_name, {}).get("usage"):
@ -1990,9 +2017,9 @@ def recording_clip(camera_name, start_ts, end_ts):
if download: if download:
response.headers["Content-Disposition"] = "attachment; filename=%s" % file_name response.headers["Content-Disposition"] = "attachment; filename=%s" % file_name
response.headers["Content-Length"] = os.path.getsize(path) response.headers["Content-Length"] = os.path.getsize(path)
response.headers[ response.headers["X-Accel-Redirect"] = (
"X-Accel-Redirect" f"/cache/{file_name}" # nginx: https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers
] = f"/cache/{file_name}" # nginx: https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers )
return response return response
@ -2148,32 +2175,13 @@ def preview_hour(year_month, day, hour, camera_name, tz_name):
return preview_ts(camera_name, start_ts, end_ts) return preview_ts(camera_name, start_ts, end_ts)
@bp.route("/preview/<camera_name>/<frame_time>/thumbnail.jpg") @bp.route("/preview/<file_name>/thumbnail.jpg")
def preview_thumbnail(camera_name, frame_time): def preview_thumbnail(file_name: str):
"""Get a thumbnail from the cached preview jpgs.""" """Get a thumbnail from the cached preview jpgs."""
safe_file_name_current = secure_filename(file_name)
preview_dir = os.path.join(CACHE_DIR, "preview_frames") preview_dir = os.path.join(CACHE_DIR, "preview_frames")
file_start = f"preview_{camera_name}"
file_check = f"{file_start}-{frame_time}.jpg"
selected_preview = None
for file in os.listdir(preview_dir): with open(os.path.join(preview_dir, safe_file_name_current), "rb") as image_file:
if file.startswith(file_start):
if file < file_check:
selected_preview = file
break
if selected_preview is None:
return make_response(
jsonify(
{
"success": False,
"message": "Could not find valid preview jpg.",
}
),
404,
)
with open(os.path.join(preview_dir, selected_preview), "rb") as image_file:
jpg_bytes = image_file.read() jpg_bytes = image_file.read()
response = make_response(jpg_bytes) response = make_response(jpg_bytes)
@ -2182,6 +2190,172 @@ def preview_thumbnail(camera_name, frame_time):
return response return response
@bp.route("/preview/<camera_name>/start/<int:start_ts>/end/<int:end_ts>/frames")
@bp.route("/preview/<camera_name>/start/<float:start_ts>/end/<float:end_ts>/frames")
def get_preview_frames_from_cache(camera_name: str, start_ts, end_ts):
"""Get list of cached preview frames"""
preview_dir = os.path.join(CACHE_DIR, "preview_frames")
file_start = f"preview_{camera_name}"
start_file = f"{file_start}-{start_ts}.jpg"
end_file = f"{file_start}-{end_ts}.jpg"
selected_previews = []
for file in sorted(os.listdir(preview_dir)):
if not file.startswith(file_start):
continue
if file < start_file:
continue
if file > end_file:
break
selected_previews.append(file)
return jsonify(selected_previews)
@bp.route("/<camera_name>/start/<int:start_ts>/end/<int:end_ts>/preview.gif")
@bp.route("/<camera_name>/start/<float:start_ts>/end/<float:end_ts>/preview.gif")
def preview_gif(camera_name: str, start_ts, end_ts, max_cache_age=2592000):
if datetime.fromtimestamp(start_ts) < datetime.now().replace(minute=0, second=0):
# has preview mp4
preview: Previews = (
Previews.select(
Previews.camera,
Previews.path,
Previews.duration,
Previews.start_time,
Previews.end_time,
)
.where(
Previews.start_time.between(start_ts, end_ts)
| Previews.end_time.between(start_ts, end_ts)
| ((start_ts > Previews.start_time) & (end_ts < Previews.end_time))
)
.where(Previews.camera == camera_name)
.limit(1)
.get()
)
if not preview:
return make_response(
jsonify({"success": False, "message": "Preview not found"}), 404
)
diff = start_ts - preview.start_time
minutes = int(diff / 60)
seconds = int(diff % 60)
ffmpeg_cmd = [
"ffmpeg",
"-hide_banner",
"-loglevel",
"warning",
"-ss",
f"00:{minutes}:{seconds}",
"-t",
f"{end_ts - start_ts}",
"-i",
preview.path,
"-r",
"8",
"-vf",
"setpts=0.12*PTS",
"-loop",
"0",
"-c:v",
"gif",
"-f",
"gif",
"-",
]
process = sp.run(
ffmpeg_cmd,
capture_output=True,
)
if process.returncode != 0:
logger.error(process.stderr)
return make_response(
jsonify({"success": False, "message": "Unable to create preview gif"}),
500,
)
gif_bytes = process.stdout
else:
# need to generate from existing images
preview_dir = os.path.join(CACHE_DIR, "preview_frames")
file_start = f"preview_{camera_name}"
start_file = f"{file_start}-{start_ts}.jpg"
end_file = f"{file_start}-{end_ts}.jpg"
selected_previews = []
for file in sorted(os.listdir(preview_dir)):
if not file.startswith(file_start):
continue
if file < start_file:
continue
if file > end_file:
break
selected_previews.append(f"file '{os.path.join(preview_dir, file)}'")
selected_previews.append("duration 0.12")
if not selected_previews:
return make_response(
jsonify({"success": False, "message": "Preview not found"}), 404
)
last_file = selected_previews[-2]
selected_previews.append(last_file)
ffmpeg_cmd = [
"ffmpeg",
"-hide_banner",
"-loglevel",
"warning",
"-f",
"concat",
"-y",
"-protocol_whitelist",
"pipe,file",
"-safe",
"0",
"-i",
"/dev/stdin",
"-loop",
"0",
"-c:v",
"gif",
"-f",
"gif",
"-",
]
process = sp.run(
ffmpeg_cmd,
input=str.encode("\n".join(selected_previews)),
capture_output=True,
)
if process.returncode != 0:
logger.error(process.stderr)
return make_response(
jsonify({"success": False, "message": "Unable to create preview gif"}),
500,
)
gif_bytes = process.stdout
response = make_response(gif_bytes)
response.headers["Content-Type"] = "image/gif"
response.headers["Cache-Control"] = f"private, max-age={max_cache_age}"
return response
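For clarity, a sketch of what the concat demuxer receives on stdin in the image-based branch above; the camera name, timestamps, and cache path are hypothetical, while the file/duration line format matches what the code builds.
concat_doc = "\n".join(
    [
        "file '/tmp/cache/preview_frames/preview_front_door-1700000000.0.jpg'",
        "duration 0.12",
        "file '/tmp/cache/preview_frames/preview_front_door-1700000012.0.jpg'",
        "duration 0.12",
    ]
)
# this string is what gets piped to ffmpeg's /dev/stdin via input=str.encode(...)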
@bp.route("/vod/event/<id>") @bp.route("/vod/event/<id>")
def vod_event(id): def vod_event(id):
try: try:
@ -2238,6 +2412,138 @@ def vod_event(id):
) )
@bp.route("/review")
def review():
cameras = request.args.get("cameras", "all")
labels = request.args.get("labels", "all")
reviewed = request.args.get("reviewed", type=int, default=0)
limit = request.args.get("limit", 100)
severity = request.args.get("severity", None)
before = request.args.get("before", type=float, default=datetime.now().timestamp())
after = request.args.get(
"after", type=float, default=(datetime.now() - timedelta(hours=18)).timestamp()
)
clauses = [((ReviewSegment.start_time > after) & (ReviewSegment.end_time < before))]
if cameras != "all":
camera_list = cameras.split(",")
clauses.append((ReviewSegment.camera << camera_list))
if labels != "all":
# use matching so segments with multiple labels
# still match on a search where any label matches
label_clauses = []
filtered_labels = labels.split(",")
for label in filtered_labels:
label_clauses.append(
(ReviewSegment.data["objects"].cast("text") % f'*"{label}"*')
)
label_clause = reduce(operator.or_, label_clauses)
clauses.append((label_clause))
if reviewed == 0:
clauses.append((ReviewSegment.has_been_reviewed == False))
if severity:
clauses.append((ReviewSegment.severity == severity))
review = (
ReviewSegment.select()
.where(reduce(operator.and_, clauses))
.order_by(ReviewSegment.severity.asc())
.order_by(ReviewSegment.start_time.desc())
.limit(limit)
.dicts()
)
return jsonify([r for r in review])
@bp.route("/review/<id>/viewed", methods=("POST",))
def set_reviewed(id):
try:
review: ReviewSegment = ReviewSegment.get(ReviewSegment.id == id)
except DoesNotExist:
return make_response(
jsonify({"success": False, "message": "Review " + id + " not found"}), 404
)
review.has_been_reviewed = True
review.save()
return make_response(
jsonify({"success": True, "message": "Reviewed " + id + " viewed"}), 200
)
@bp.route("/reviews/<ids>/viewed", methods=("POST",))
def set_multiple_reviewed(ids: str):
list_of_ids = ids.split(",")
if not list_of_ids or len(list_of_ids) == 0:
return make_response(
jsonify({"success": False, "message": "Not a valid list of ids"}), 404
)
ReviewSegment.update(has_been_reviewed=True).where(
ReviewSegment.id << list_of_ids
).execute()
return make_response(
jsonify({"success": True, "message": "Reviewed multiple items"}), 200
)
@bp.route("/review/<id>/viewed", methods=("DELETE",))
def set_not_reviewed(id):
try:
review: ReviewSegment = ReviewSegment.get(ReviewSegment.id == id)
except DoesNotExist:
return make_response(
jsonify({"success": False, "message": "Review " + id + " not found"}), 404
)
review.has_been_reviewed = False
review.save()
return make_response(
jsonify({"success": True, "message": "Reviewed " + id + " not viewed"}), 200
)
@bp.route("/reviews/<ids>", methods=("DELETE",))
def delete_reviews(ids: str):
list_of_ids = ids.split(",")
if not list_of_ids or len(list_of_ids) == 0:
return make_response(
jsonify({"success": False, "message": "Not a valid list of ids"}), 404
)
ReviewSegment.delete().where(ReviewSegment.id << list_of_ids).execute()
return make_response(jsonify({"success": True, "message": "Delete reviews"}), 200)
@bp.route("/review/<id>/preview.gif")
def review_preview(id: str):
try:
review: ReviewSegment = ReviewSegment.get(ReviewSegment.id == id)
except DoesNotExist:
return make_response(
jsonify({"success": False, "message": "Review segment not found"}), 404
)
padding = 8
start_ts = review.start_time - padding
end_ts = review.end_time + padding
return preview_gif(review.camera, start_ts, end_ts)
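A hedged client-side sketch of the review endpoints added above; the routes come from this diff, while the host, port, and /api prefix reflect a typical Frigate install and are assumptions here.
import requests

base = "http://frigate.local:5000/api"
items = requests.get(
    f"{base}/review",
    params={"cameras": "front_door", "severity": "alert", "reviewed": 0, "limit": 25},
).json()
if items:
    ids = ",".join(item["id"] for item in items)
    requests.post(f"{base}/reviews/{ids}/viewed")   # mark everything returned as reviewed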
@bp.route( @bp.route(
"/export/<camera_name>/start/<int:start_time>/end/<int:end_time>", methods=["POST"] "/export/<camera_name>/start/<int:start_time>/end/<int:end_time>", methods=["POST"]
) )
@ -2281,9 +2587,11 @@ def export_recording(camera_name: str, start_time, end_time):
camera_name, camera_name,
int(start_time), int(start_time),
int(end_time), int(end_time),
PlaybackFactorEnum[playback_factor] (
if playback_factor in PlaybackFactorEnum.__members__.values() PlaybackFactorEnum[playback_factor]
else PlaybackFactorEnum.realtime, if playback_factor in PlaybackFactorEnum.__members__.values()
else PlaybackFactorEnum.realtime
),
) )
exporter.start() exporter.start()
return make_response( return make_response(
@ -2439,12 +2747,16 @@ def ffprobe():
output.append( output.append(
{ {
"return_code": ffprobe.returncode, "return_code": ffprobe.returncode,
"stderr": ffprobe.stderr.decode("unicode_escape").strip() "stderr": (
if ffprobe.returncode != 0 ffprobe.stderr.decode("unicode_escape").strip()
else "", if ffprobe.returncode != 0
"stdout": json.loads(ffprobe.stdout.decode("unicode_escape").strip()) else ""
if ffprobe.returncode == 0 ),
else "", "stdout": (
json.loads(ffprobe.stdout.decode("unicode_escape").strip())
if ffprobe.returncode == 0
else ""
),
} }
) )
@ -2457,12 +2769,16 @@ def vainfo():
return jsonify( return jsonify(
{ {
"return_code": vainfo.returncode, "return_code": vainfo.returncode,
"stderr": vainfo.stderr.decode("unicode_escape").strip() "stderr": (
if vainfo.returncode != 0 vainfo.stderr.decode("unicode_escape").strip()
else "", if vainfo.returncode != 0
"stdout": vainfo.stdout.decode("unicode_escape").strip() else ""
if vainfo.returncode == 0 ),
else "", "stdout": (
vainfo.stdout.decode("unicode_escape").strip()
if vainfo.returncode == 0
else ""
),
} }
) )

View File

@ -76,6 +76,17 @@ class Recordings(Model): # type: ignore[misc]
segment_size = FloatField(default=0) # this should be stored as MB segment_size = FloatField(default=0) # this should be stored as MB
class ReviewSegment(Model): # type: ignore[misc]
id = CharField(null=False, primary_key=True, max_length=30)
camera = CharField(index=True, max_length=20)
start_time = DateTimeField()
end_time = DateTimeField()
has_been_reviewed = BooleanField(default=False)
severity = CharField(max_length=30) # alert, detection, significant_motion
thumb_path = CharField(unique=True)
data = JSONField() # additional data about detection like list of labels, zone, areas of significant motion
class Previews(Model): # type: ignore[misc] class Previews(Model): # type: ignore[misc]
id = CharField(null=False, primary_key=True, max_length=30) id = CharField(null=False, primary_key=True, max_length=30)
camera = CharField(index=True, max_length=20) camera = CharField(index=True, max_length=20)

View File

@ -24,3 +24,7 @@ class MotionDetector(ABC):
@abstractmethod @abstractmethod
def is_calibrating(self): def is_calibrating(self):
pass pass
@abstractmethod
def stop(self):
pass

View File

@ -5,6 +5,7 @@ import imutils
import numpy as np import numpy as np
from scipy.ndimage import gaussian_filter from scipy.ndimage import gaussian_filter
from frigate.comms.config_updater import ConfigSubscriber
from frigate.config import MotionConfig from frigate.config import MotionConfig
from frigate.motion import MotionDetector from frigate.motion import MotionDetector
@ -17,9 +18,6 @@ class ImprovedMotionDetector(MotionDetector):
frame_shape, frame_shape,
config: MotionConfig, config: MotionConfig,
fps: int, fps: int,
improve_contrast,
threshold,
contour_area,
name="improved", name="improved",
blur_radius=1, blur_radius=1,
interpolation=cv2.INTER_NEAREST, interpolation=cv2.INTER_NEAREST,
@ -44,14 +42,12 @@ class ImprovedMotionDetector(MotionDetector):
self.mask = np.where(resized_mask == [0]) self.mask = np.where(resized_mask == [0])
self.save_images = False self.save_images = False
self.calibrating = True self.calibrating = True
self.improve_contrast = improve_contrast
self.threshold = threshold
self.contour_area = contour_area
self.blur_radius = blur_radius self.blur_radius = blur_radius
self.interpolation = interpolation self.interpolation = interpolation
self.contrast_values = np.zeros((contrast_frame_history, 2), np.uint8) self.contrast_values = np.zeros((contrast_frame_history, 2), np.uint8)
self.contrast_values[:, 1:2] = 255 self.contrast_values[:, 1:2] = 255
self.contrast_values_index = 0 self.contrast_values_index = 0
self.config_subscriber = ConfigSubscriber(f"config/motion/{name}")
def is_calibrating(self): def is_calibrating(self):
return self.calibrating return self.calibrating
@ -59,6 +55,15 @@ class ImprovedMotionDetector(MotionDetector):
def detect(self, frame): def detect(self, frame):
motion_boxes = [] motion_boxes = []
# check for updated motion config
_, updated_motion_config = self.config_subscriber.check_for_update()
if updated_motion_config:
self.config = updated_motion_config
if not self.config.enabled:
return motion_boxes
gray = frame[0 : self.frame_shape[0], 0 : self.frame_shape[1]] gray = frame[0 : self.frame_shape[0], 0 : self.frame_shape[1]]
# resize frame # resize frame
@ -72,7 +77,7 @@ class ImprovedMotionDetector(MotionDetector):
resized_saved = resized_frame.copy() resized_saved = resized_frame.copy()
# Improve contrast # Improve contrast
if self.improve_contrast.value: if self.config.improve_contrast:
# TODO tracking moving average of min/max to avoid sudden contrast changes # TODO tracking moving average of min/max to avoid sudden contrast changes
minval = np.percentile(resized_frame, 4).astype(np.uint8) minval = np.percentile(resized_frame, 4).astype(np.uint8)
maxval = np.percentile(resized_frame, 96).astype(np.uint8) maxval = np.percentile(resized_frame, 96).astype(np.uint8)
@ -96,7 +101,8 @@ class ImprovedMotionDetector(MotionDetector):
# mask frame # mask frame
# this has to come after contrast improvement # this has to come after contrast improvement
resized_frame[self.mask] = [255] # Setting masked pixels to zero, to match the average frame at startup
resized_frame[self.mask] = [0]
resized_frame = gaussian_filter(resized_frame, sigma=1, radius=self.blur_radius) resized_frame = gaussian_filter(resized_frame, sigma=1, radius=self.blur_radius)
@ -110,7 +116,7 @@ class ImprovedMotionDetector(MotionDetector):
# compute the threshold image for the current frame # compute the threshold image for the current frame
thresh = cv2.threshold( thresh = cv2.threshold(
frameDelta, self.threshold.value, 255, cv2.THRESH_BINARY frameDelta, self.config.threshold, 255, cv2.THRESH_BINARY
)[1] )[1]
# dilate the thresholded image to fill in holes, then find contours # dilate the thresholded image to fill in holes, then find contours
@ -127,7 +133,7 @@ class ImprovedMotionDetector(MotionDetector):
# if the contour is big enough, count it as motion # if the contour is big enough, count it as motion
contour_area = cv2.contourArea(c) contour_area = cv2.contourArea(c)
total_contour_area += contour_area total_contour_area += contour_area
if contour_area > self.contour_area.value: if contour_area > self.config.contour_area:
x, y, w, h = cv2.boundingRect(c) x, y, w, h = cv2.boundingRect(c)
motion_boxes.append( motion_boxes.append(
( (
@ -170,9 +176,11 @@ class ImprovedMotionDetector(MotionDetector):
] ]
cv2.imwrite( cv2.imwrite(
f"debug/frames/{self.name}-{self.frame_counter}.jpg", f"debug/frames/{self.name}-{self.frame_counter}.jpg",
cv2.hconcat(frames) (
if self.frame_shape[0] > self.frame_shape[1] cv2.hconcat(frames)
else cv2.vconcat(frames), if self.frame_shape[0] > self.frame_shape[1]
else cv2.vconcat(frames)
),
) )
if len(motion_boxes) > 0: if len(motion_boxes) > 0:
@ -194,3 +202,7 @@ class ImprovedMotionDetector(MotionDetector):
self.motion_frame_count = 0 self.motion_frame_count = 0
return motion_boxes return motion_boxes
def stop(self) -> None:
"""stop the motion detector."""
self.config_subscriber.stop()

View File

@ -12,6 +12,7 @@ from typing import Callable
import cv2 import cv2
import numpy as np import numpy as np
from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum
from frigate.comms.dispatcher import Dispatcher from frigate.comms.dispatcher import Dispatcher
from frigate.config import ( from frigate.config import (
CameraConfig, CameraConfig,
@ -488,8 +489,12 @@ class CameraState:
# draw the bounding boxes on the frame # draw the bounding boxes on the frame
for obj in tracked_objects.values(): for obj in tracked_objects.values():
if obj["frame_time"] == frame_time: if obj["frame_time"] == frame_time:
thickness = 2 if obj["stationary"]:
color = self.config.model.colormap[obj["label"]] color = (220, 220, 220)
thickness = 1
else:
thickness = 2
color = self.config.model.colormap[obj["label"]]
else: else:
thickness = 1 thickness = 1
color = (255, 0, 0) color = (255, 0, 0)
@ -813,8 +818,6 @@ class TrackedObjectProcessor(threading.Thread):
tracked_objects_queue, tracked_objects_queue,
event_queue, event_queue,
event_processed_queue, event_processed_queue,
video_output_queue,
recordings_info_queue,
ptz_autotracker_thread, ptz_autotracker_thread,
stop_event, stop_event,
): ):
@ -825,13 +828,12 @@ class TrackedObjectProcessor(threading.Thread):
self.tracked_objects_queue = tracked_objects_queue self.tracked_objects_queue = tracked_objects_queue
self.event_queue = event_queue self.event_queue = event_queue
self.event_processed_queue = event_processed_queue self.event_processed_queue = event_processed_queue
self.video_output_queue = video_output_queue
self.recordings_info_queue = recordings_info_queue
self.stop_event = stop_event self.stop_event = stop_event
self.camera_states: dict[str, CameraState] = {} self.camera_states: dict[str, CameraState] = {}
self.frame_manager = SharedMemoryFrameManager() self.frame_manager = SharedMemoryFrameManager()
self.last_motion_detected: dict[str, float] = {} self.last_motion_detected: dict[str, float] = {}
self.ptz_autotracker_thread = ptz_autotracker_thread self.ptz_autotracker_thread = ptz_autotracker_thread
self.detection_publisher = DetectionPublisher(DetectionTypeEnum.video)
def start(camera, obj: TrackedObject, current_frame_time): def start(camera, obj: TrackedObject, current_frame_time):
self.event_queue.put( self.event_queue.put(
@ -1116,18 +1118,8 @@ class TrackedObjectProcessor(threading.Thread):
o.to_dict() for o in camera_state.tracked_objects.values() o.to_dict() for o in camera_state.tracked_objects.values()
] ]
self.video_output_queue.put( # publish info on this frame
( self.detection_publisher.send_data(
camera,
frame_time,
tracked_objects,
motion_boxes,
regions,
)
)
# send info on this frame to the recordings maintainer
self.recordings_info_queue.put(
( (
camera, camera,
frame_time, frame_time,
@ -1212,4 +1204,5 @@ class TrackedObjectProcessor(threading.Thread):
event_id, camera = self.event_processed_queue.get() event_id, camera = self.event_processed_queue.get()
self.camera_states[camera].finished(event_id) self.camera_states[camera].finished(event_id)
self.detection_publisher.stop()
logger.info("Exiting object processor...") logger.info("Exiting object processor...")

View File

@ -14,9 +14,9 @@ import traceback
import cv2 import cv2
import numpy as np import numpy as np
from frigate.comms.config_updater import ConfigSubscriber
from frigate.config import BirdseyeModeEnum, FrigateConfig from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.const import BASE_DIR, BIRDSEYE_PIPE from frigate.const import BASE_DIR, BIRDSEYE_PIPE
from frigate.types import CameraMetricsTypes
from frigate.util.image import ( from frigate.util.image import (
SharedMemoryFrameManager, SharedMemoryFrameManager,
copy_yuv_to_position, copy_yuv_to_position,
@ -33,11 +33,13 @@ def get_standard_aspect_ratio(width: int, height: int) -> tuple[int, int]:
(16, 9), (16, 9),
(9, 16), (9, 16),
(20, 10), (20, 10),
(16, 3), # max wide camera
(16, 6), # reolink duo 2 (16, 6), # reolink duo 2
(32, 9), # panoramic cameras (32, 9), # panoramic cameras
(12, 9), (12, 9),
(9, 12), (9, 12),
(22, 15), # Amcrest, NTSC DVT (22, 15), # Amcrest, NTSC DVT
(1, 1), # fisheye
] # aspects are scaled to have common relative size ] # aspects are scaled to have common relative size
known_aspects_ratios = list( known_aspects_ratios = list(
map(lambda aspect: aspect[0] / aspect[1], known_aspects) map(lambda aspect: aspect[0] / aspect[1], known_aspects)
@@ -66,7 +68,13 @@ def get_canvas_shape(width: int, height: int) -> tuple[int, int]:

 class Canvas:
-    def __init__(self, canvas_width: int, canvas_height: int) -> None:
+    def __init__(
+        self,
+        canvas_width: int,
+        canvas_height: int,
+        scaling_factor: int,
+    ) -> None:
+        self.scaling_factor = scaling_factor
         gcd = math.gcd(canvas_width, canvas_height)
         self.aspect = get_standard_aspect_ratio(
             (canvas_width / gcd), (canvas_height / gcd)
@@ -80,7 +88,7 @@ class Canvas:
         return (self.aspect[0] * coefficient, self.aspect[1] * coefficient)

     def get_coefficient(self, camera_count: int) -> int:
-        return self.coefficient_cache.get(camera_count, 2)
+        return self.coefficient_cache.get(camera_count, self.scaling_factor)

     def set_coefficient(self, camera_count: int, coefficient: int) -> None:
         self.coefficient_cache[camera_count] = coefficient
@ -259,7 +267,6 @@ class BirdsEyeFrameManager:
config: FrigateConfig, config: FrigateConfig,
frame_manager: SharedMemoryFrameManager, frame_manager: SharedMemoryFrameManager,
stop_event: mp.Event, stop_event: mp.Event,
camera_metrics: dict[str, CameraMetricsTypes],
): ):
self.config = config self.config = config
self.mode = config.birdseye.mode self.mode = config.birdseye.mode
@ -268,9 +275,12 @@ class BirdsEyeFrameManager:
self.frame_shape = (height, width) self.frame_shape = (height, width)
self.yuv_shape = (height * 3 // 2, width) self.yuv_shape = (height * 3 // 2, width)
self.frame = np.ndarray(self.yuv_shape, dtype=np.uint8) self.frame = np.ndarray(self.yuv_shape, dtype=np.uint8)
self.canvas = Canvas(width, height) self.canvas = Canvas(width, height, config.birdseye.layout.scaling_factor)
self.stop_event = stop_event self.stop_event = stop_event
self.camera_metrics = camera_metrics self.inactivity_threshold = config.birdseye.inactivity_threshold
if config.birdseye.layout.max_cameras:
self.last_refresh_time = 0
# initialize the frame as black and with the Frigate logo # initialize the frame as black and with the Frigate logo
self.blank_frame = np.zeros(self.yuv_shape, np.uint8) self.blank_frame = np.zeros(self.yuv_shape, np.uint8)
@@ -376,16 +386,39 @@ class BirdsEyeFrameManager:
     def update_frame(self):
         """Update to a new frame for birdseye."""

-        # determine how many cameras are tracking objects within the last 30 seconds
-        active_cameras = set(
+        # determine how many cameras are tracking objects within the last inactivity_threshold seconds
+        active_cameras: set[str] = set(
             [
                 cam
                 for cam, cam_data in self.cameras.items()
                 if cam_data["last_active_frame"] > 0
-                and cam_data["current_frame"] - cam_data["last_active_frame"] < 30
+                and cam_data["current_frame"] - cam_data["last_active_frame"]
+                < self.inactivity_threshold
             ]
         )

+        max_cameras = self.config.birdseye.layout.max_cameras
+        max_camera_refresh = False
+
+        if max_cameras:
+            now = datetime.datetime.now().timestamp()
+
+            if len(active_cameras) == max_cameras and now - self.last_refresh_time < 10:
+                # don't refresh cameras too often
+                active_cameras = self.active_cameras
+            else:
+                limited_active_cameras = sorted(
+                    active_cameras,
+                    key=lambda active_camera: (
+                        self.cameras[active_camera]["current_frame"]
+                        - self.cameras[active_camera]["last_active_frame"]
+                    ),
+                )
+                active_cameras = limited_active_cameras[
+                    : self.config.birdseye.layout.max_cameras
+                ]
+                max_camera_refresh = True
+                self.last_refresh_time = now
# if there are no active cameras # if there are no active cameras
if len(active_cameras) == 0: if len(active_cameras) == 0:
# if the layout is already cleared # if the layout is already cleared
@ -399,7 +432,15 @@ class BirdsEyeFrameManager:
return True return True
# check if we need to reset the layout because there is a different number of cameras # check if we need to reset the layout because there is a different number of cameras
reset_layout = len(self.active_cameras) - len(active_cameras) != 0 if len(self.active_cameras) - len(active_cameras) == 0:
if len(self.active_cameras) == 1 and self.active_cameras != active_cameras:
reset_layout = True
elif max_camera_refresh:
reset_layout = True
else:
reset_layout = False
else:
reset_layout = True
# reset the layout if it needs to be different # reset the layout if it needs to be different
if reset_layout: if reset_layout:
@@ -423,17 +464,23 @@ class BirdsEyeFrameManager:
             camera = active_cameras_to_add[0]
             camera_dims = self.cameras[camera]["dimensions"].copy()
             scaled_width = int(self.canvas.height * camera_dims[0] / camera_dims[1])
-            coefficient = (
-                1
-                if scaled_width <= self.canvas.width
-                else self.canvas.width / scaled_width
-            )
+
+            # center camera view in canvas and ensure that it fits
+            if scaled_width < self.canvas.width:
+                coefficient = 1
+                x_offset = int((self.canvas.width - scaled_width) / 2)
+            else:
+                coefficient = self.canvas.width / scaled_width
+                x_offset = int(
+                    (self.canvas.width - (scaled_width * coefficient)) / 2
+                )
+
             self.camera_layout = [
                 [
                     (
                         camera,
                         (
-                            0,
+                            x_offset,
                             0,
                             int(scaled_width * coefficient),
                             int(self.canvas.height * coefficient),
@@ -477,7 +524,11 @@ class BirdsEyeFrameManager:

         return True

-    def calculate_layout(self, cameras_to_add: list[str], coefficient) -> tuple[any]:
+    def calculate_layout(
+        self,
+        cameras_to_add: list[str],
+        coefficient: float,
+    ) -> tuple[any]:
         """Calculate the optimal layout for 2+ cameras."""

         def map_layout(camera_layout: list[list[any]], row_height: int):
@@ -619,15 +670,12 @@ class BirdsEyeFrameManager:
     def update(self, camera, object_count, motion_count, frame_time, frame) -> bool:
         # don't process if birdseye is disabled for this camera
         camera_config = self.config.cameras[camera].birdseye

        if not camera_config.enabled:
            return False

-        # get our metrics (sync'd across processes)
-        # which allows us to control it via mqtt (or any other dispatcher)
-        camera_metrics = self.camera_metrics[camera]
-
         # disabling birdseye is a little tricky
-        if not camera_metrics["birdseye_enabled"].value:
+        if not camera_config.enabled:
             # if we've rendered a frame (we have a value for last_active_frame)
             # then we need to set it to zero
             if self.cameras[camera]["last_active_frame"] > 0:
@@ -635,12 +683,9 @@ class BirdsEyeFrameManager:

             return False

-        # get the birdseye mode state from camera metrics
-        birdseye_mode = BirdseyeModeEnum.get(camera_metrics["birdseye_mode"].value)
-
         # update the last active frame for the camera
         self.cameras[camera]["current_frame"] = frame_time
-        if self.camera_active(birdseye_mode, object_count, motion_count):
+        if self.camera_active(camera_config.mode, object_count, motion_count):
             self.cameras[camera]["last_active_frame"] = frame_time
now = datetime.datetime.now().timestamp() now = datetime.datetime.now().timestamp()
@ -669,7 +714,6 @@ class Birdseye:
self, self,
config: FrigateConfig, config: FrigateConfig,
frame_manager: SharedMemoryFrameManager, frame_manager: SharedMemoryFrameManager,
camera_metrics: dict[str, CameraMetricsTypes],
stop_event: mp.Event, stop_event: mp.Event,
websocket_server, websocket_server,
) -> None: ) -> None:
@@ -689,9 +733,8 @@ class Birdseye:
         self.broadcaster = BroadcastThread(
             "birdseye", self.converter, websocket_server, stop_event
         )
-        self.birdseye_manager = BirdsEyeFrameManager(
-            config, frame_manager, stop_event, camera_metrics
-        )
+        self.birdseye_manager = BirdsEyeFrameManager(config, frame_manager, stop_event)
+        self.config_subscriber = ConfigSubscriber("config/birdseye/")
if config.birdseye.restream: if config.birdseye.restream:
self.birdseye_buffer = frame_manager.create( self.birdseye_buffer = frame_manager.create(
@ -710,6 +753,19 @@ class Birdseye:
frame_time: float, frame_time: float,
frame, frame,
) -> None: ) -> None:
# check if there is an updated config
while True:
(
updated_topic,
updated_birdseye_config,
) = self.config_subscriber.check_for_update()
if not updated_topic:
break
camera_name = updated_topic.rpartition("/")[-1]
self.config.cameras[camera_name].birdseye = updated_birdseye_config
if self.birdseye_manager.update( if self.birdseye_manager.update(
camera, camera,
len([o for o in current_tracked_objects if not o["stationary"]]), len([o for o in current_tracked_objects if not o["stationary"]]),
@ -729,5 +785,6 @@ class Birdseye:
pass pass
def stop(self) -> None: def stop(self) -> None:
self.config_subscriber.stop()
self.converter.join() self.converter.join()
self.broadcaster.join() self.broadcaster.join()
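The same ConfigSubscriber drain pattern shows up in several consumers in this commit (birdseye, the recording maintainer, the review maintainer): pull every pending per-camera config update before handling the next frame. A small sketch of the idiom, assuming check_for_update() returns (topic, config) with a falsy topic once the backlog is empty; drain_config_updates and the callback are hypothetical names used only for illustration:

from frigate.comms.config_updater import ConfigSubscriber

def drain_config_updates(subscriber: ConfigSubscriber, apply_update) -> None:
    # pull every queued update off the socket before processing the next frame
    while True:
        (updated_topic, updated_config) = subscriber.check_for_update()

        if not updated_topic:
            break

        # topics look like "config/birdseye/<camera_name>"
        camera_name = updated_topic.rpartition("/")[-1]
        apply_update(camera_name, updated_config)

# usage mirroring the loop added to Birdseye above
birdseye_subscriber = ConfigSubscriber("config/birdseye/")
drain_config_updates(
    birdseye_subscriber,
    lambda camera, cfg: print(f"updated birdseye config for {camera}"),
)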
@ -2,7 +2,6 @@
import logging import logging
import multiprocessing as mp import multiprocessing as mp
import queue
import signal import signal
import threading import threading
from typing import Optional from typing import Optional
@ -16,12 +15,12 @@ from ws4py.server.wsgirefserver import (
) )
from ws4py.server.wsgiutils import WebSocketWSGIApplication from ws4py.server.wsgiutils import WebSocketWSGIApplication
from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum
from frigate.comms.ws import WebSocket from frigate.comms.ws import WebSocket
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
from frigate.output.birdseye import Birdseye from frigate.output.birdseye import Birdseye
from frigate.output.camera import JsmpegCamera from frigate.output.camera import JsmpegCamera
from frigate.output.preview import PreviewRecorder from frigate.output.preview import PreviewRecorder
from frigate.types import CameraMetricsTypes
from frigate.util.image import SharedMemoryFrameManager from frigate.util.image import SharedMemoryFrameManager
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -29,9 +28,6 @@ logger = logging.getLogger(__name__)
def output_frames( def output_frames(
config: FrigateConfig, config: FrigateConfig,
video_output_queue: mp.Queue,
inter_process_queue: mp.Queue,
camera_metrics: dict[str, CameraMetricsTypes],
): ):
threading.current_thread().name = "output" threading.current_thread().name = "output"
setproctitle("frigate.output") setproctitle("frigate.output")
@ -59,6 +55,8 @@ def output_frames(
websocket_server.initialize_websockets_manager() websocket_server.initialize_websockets_manager()
websocket_thread = threading.Thread(target=websocket_server.serve_forever) websocket_thread = threading.Thread(target=websocket_server.serve_forever)
detection_subscriber = DetectionSubscriber(DetectionTypeEnum.video)
jsmpeg_cameras: dict[str, JsmpegCamera] = {} jsmpeg_cameras: dict[str, JsmpegCamera] = {}
birdseye: Optional[Birdseye] = None birdseye: Optional[Birdseye] = None
preview_recorders: dict[str, PreviewRecorder] = {} preview_recorders: dict[str, PreviewRecorder] = {}
@ -68,27 +66,27 @@ def output_frames(
continue continue
jsmpeg_cameras[camera] = JsmpegCamera(cam_config, stop_event, websocket_server) jsmpeg_cameras[camera] = JsmpegCamera(cam_config, stop_event, websocket_server)
preview_recorders[camera] = PreviewRecorder(cam_config, inter_process_queue) preview_recorders[camera] = PreviewRecorder(cam_config)
if config.birdseye.enabled: if config.birdseye.enabled:
birdseye = Birdseye( birdseye = Birdseye(config, frame_manager, stop_event, websocket_server)
config, frame_manager, camera_metrics, stop_event, websocket_server
)
websocket_thread.start() websocket_thread.start()
    while not stop_event.is_set():
-        try:
-            (
-                camera,
-                frame_time,
-                current_tracked_objects,
-                motion_boxes,
-                regions,
-            ) = video_output_queue.get(True, 1)
-        except queue.Empty:
+        (topic, data) = detection_subscriber.get_data(timeout=10)
+
+        if not topic:
             continue
+
+        (
+            camera,
+            frame_time,
+            current_tracked_objects,
+            motion_boxes,
+            regions,
+        ) = data

         frame_id = f"{camera}{frame_time}"
         frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)
@@ -127,19 +125,26 @@ def output_frames(
         previous_frames[camera] = frame_time

-    while not video_output_queue.empty():
+    while True:
+        (topic, data) = detection_subscriber.get_data(timeout=0)
+
+        if not topic:
+            break
+
         (
             camera,
             frame_time,
             current_tracked_objects,
             motion_boxes,
             regions,
-        ) = video_output_queue.get(True, 10)
+        ) = data
         frame_id = f"{camera}{frame_time}"
         frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)
         frame_manager.delete(frame_id)

+    detection_subscriber.stop()
+
     for jsmpeg in jsmpeg_cameras.values():
         jsmpeg.stop()
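The main loop above blocks for up to 10 seconds per read, and on shutdown it drains whatever is still queued with timeout=0 so the shared-memory frames can be released. A condensed sketch of that shutdown drain, under the same get_data contract assumed above:

def drain_remaining_frames(detection_subscriber, frame_manager, config) -> None:
    # on shutdown, consume everything left so shared-memory frames are freed
    while True:
        (topic, data) = detection_subscriber.get_data(timeout=0)
        if not topic:
            break
        camera, frame_time, _objects, _motion, _regions = data
        frame_id = f"{camera}{frame_time}"
        frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)
        frame_manager.delete(frame_id)
    detection_subscriber.stop()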
@ -2,7 +2,6 @@
import datetime import datetime
import logging import logging
import multiprocessing as mp
import os import os
import shutil import shutil
import subprocess as sp import subprocess as sp
@ -12,6 +11,7 @@ from pathlib import Path
import cv2 import cv2
import numpy as np import numpy as np
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import CameraConfig, RecordQualityEnum from frigate.config import CameraConfig, RecordQualityEnum
from frigate.const import CACHE_DIR, CLIPS_DIR, INSERT_PREVIEW from frigate.const import CACHE_DIR, CLIPS_DIR, INSERT_PREVIEW
from frigate.ffmpeg_presets import ( from frigate.ffmpeg_presets import (
@ -20,21 +20,21 @@ from frigate.ffmpeg_presets import (
parse_preset_hardware_acceleration_encode, parse_preset_hardware_acceleration_encode,
) )
from frigate.models import Previews from frigate.models import Previews
from frigate.object_processing import TrackedObject
from frigate.util.image import copy_yuv_to_position, get_yuv_crop from frigate.util.image import copy_yuv_to_position, get_yuv_crop
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
FOLDER_PREVIEW_FRAMES = "preview_frames" FOLDER_PREVIEW_FRAMES = "preview_frames"
PREVIEW_OUTPUT_FPS = 1
PREVIEW_SEGMENT_DURATION = 3600 # one hour PREVIEW_SEGMENT_DURATION = 3600 # one hour
# important to have lower keyframe to maintain scrubbing performance # important to have lower keyframe to maintain scrubbing performance
PREVIEW_KEYFRAME_INTERVAL = 60 PREVIEW_KEYFRAME_INTERVAL = 60
PREVIEW_BIT_RATES = { PREVIEW_BIT_RATES = {
RecordQualityEnum.very_low: 4096, RecordQualityEnum.very_low: 5120,
RecordQualityEnum.low: 6144, RecordQualityEnum.low: 7168,
RecordQualityEnum.medium: 8192, RecordQualityEnum.medium: 9216,
RecordQualityEnum.high: 12288, RecordQualityEnum.high: 13312,
RecordQualityEnum.very_high: 16384, RecordQualityEnum.very_high: 17408,
} }
@ -53,13 +53,13 @@ class FFMpegConverter(threading.Thread):
self, self,
config: CameraConfig, config: CameraConfig,
frame_times: list[float], frame_times: list[float],
inter_process_queue: mp.Queue, requestor: InterProcessRequestor,
): ):
threading.Thread.__init__(self) threading.Thread.__init__(self)
self.name = f"{config.name}_preview_converter" self.name = f"{config.name}_preview_converter"
self.config = config self.config = config
self.frame_times = frame_times self.frame_times = frame_times
self.inter_process_queue = inter_process_queue self.requestor = requestor
self.path = os.path.join( self.path = os.path.join(
CLIPS_DIR, CLIPS_DIR,
f"previews/{self.config.name}/{self.frame_times[0]}-{self.frame_times[-1]}.mp4", f"previews/{self.config.name}/{self.frame_times[0]}-{self.frame_times[-1]}.mp4",
@@ -69,7 +69,7 @@ class FFMpegConverter(threading.Thread):
         self.ffmpeg_cmd = parse_preset_hardware_acceleration_encode(
             config.ffmpeg.hwaccel_args,
             input="-f concat -y -protocol_whitelist pipe,file -safe 0 -i /dev/stdin",
-            output=f"-g {PREVIEW_KEYFRAME_INTERVAL} -fpsmax {PREVIEW_OUTPUT_FPS} -bf 0 -b:v {PREVIEW_BIT_RATES[self.config.record.preview.quality]} {FPS_VFR_PARAM} -movflags +faststart -pix_fmt yuv420p {self.path}",
+            output=f"-g {PREVIEW_KEYFRAME_INTERVAL} -fpsmax 2 -bf 0 -b:v {PREVIEW_BIT_RATES[self.config.record.preview.quality]} {FPS_VFR_PARAM} -movflags +faststart -pix_fmt yuv420p {self.path}",
             type=EncodeTypeEnum.preview,
         )
@@ -105,18 +105,16 @@ class FFMpegConverter(threading.Thread):
         if p.returncode == 0:
             logger.debug("successfully saved preview")
-            self.inter_process_queue.put_nowait(
-                (
-                    INSERT_PREVIEW,
-                    {
-                        Previews.id: f"{self.config.name}_{end}",
-                        Previews.camera: self.config.name,
-                        Previews.path: self.path,
-                        Previews.start_time: start,
-                        Previews.end_time: end,
-                        Previews.duration: end - start,
-                    },
-                )
+            self.requestor.send_data(
+                INSERT_PREVIEW,
+                {
+                    Previews.id: f"{self.config.name}_{end}",
+                    Previews.camera: self.config.name,
+                    Previews.path: self.path,
+                    Previews.start_time: start,
+                    Previews.end_time: end,
+                    Previews.duration: end - start,
+                },
             )
         else:
             logger.error(f"Error saving preview for {self.config.name} :: {p.stderr}")
@@ -128,17 +126,19 @@ class FFMpegConverter(threading.Thread):

 class PreviewRecorder:
-    def __init__(self, config: CameraConfig, inter_process_queue: mp.Queue) -> None:
+    def __init__(self, config: CameraConfig) -> None:
         self.config = config
-        self.inter_process_queue = inter_process_queue
         self.start_time = 0
         self.last_output_time = 0
         self.output_frames = []
-        self.out_height = 160
+        self.out_height = 180
         self.out_width = (
             int((config.detect.width / config.detect.height) * self.out_height) // 4 * 4
         )

+        # create communication for finished previews
+        self.requestor = InterProcessRequestor()
+
         y, u1, u2, v1, v2 = get_yuv_crop(
self.config.frame_shape_yuv, self.config.frame_shape_yuv,
( (
@ -175,8 +175,19 @@ class PreviewRecorder:
frame_time: float, frame_time: float,
) -> bool: ) -> bool:
"""Decide if this frame should be added to PREVIEW.""" """Decide if this frame should be added to PREVIEW."""
preview_output_fps = (
2
if any(
o["label"] == "car"
for o in get_active_objects(
frame_time, self.config, current_tracked_objects
)
)
else 1
)
# limit output to 1 fps # limit output to 1 fps
if (frame_time - self.last_output_time) < 1 / PREVIEW_OUTPUT_FPS: if (frame_time - self.last_output_time) < 1 / preview_output_fps:
return False return False
# send frame if a non-stationary object is in a zone # send frame if a non-stationary object is in a zone
@ -237,7 +248,7 @@ class PreviewRecorder:
FFMpegConverter( FFMpegConverter(
self.config, self.config,
self.output_frames, self.output_frames,
self.inter_process_queue, self.requestor,
).start() ).start()
# reset frame cache # reset frame cache
@ -262,3 +273,19 @@ class PreviewRecorder:
shutil.rmtree(os.path.join(CACHE_DIR, FOLDER_PREVIEW_FRAMES)) shutil.rmtree(os.path.join(CACHE_DIR, FOLDER_PREVIEW_FRAMES))
except FileNotFoundError: except FileNotFoundError:
pass pass
self.requestor.stop()
def get_active_objects(
frame_time: float, camera_config: CameraConfig, all_objects: list[TrackedObject]
) -> list[TrackedObject]:
"""get active objects for detection."""
return [
o
for o in all_objects
if o["motionless_count"] < camera_config.detect.stationary.threshold
and o["position_changes"] > 0
and o["frame_time"] == frame_time
and not o["false_positive"]
]
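The preview recorder now doubles its output rate while a car is active (2 fps instead of 1 fps), using the get_active_objects helper above to filter out stationary and false-positive objects. A self-contained sketch of that decision, with the tracked-object dicts reduced to the one key the check reads; the helper name here is illustrative only:

def should_write_preview_frame(frame_time, last_output_time, active_objects) -> bool:
    """Return True if this frame should be written to the preview."""
    preview_output_fps = (
        2 if any(o["label"] == "car" for o in active_objects) else 1
    )
    return (frame_time - last_output_time) >= 1 / preview_output_fps

# a car was active 0.6s after the last preview frame: 2 fps -> keep it
print(should_write_preview_frame(100.6, 100.0, [{"label": "car"}]))    # True
# only a person is active: 1 fps -> skip until a full second has passed
print(should_write_preview_frame(100.6, 100.0, [{"label": "person"}]))  # False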
@ -37,8 +37,10 @@ class PlusApi:
self.key = None self.key = None
if PLUS_ENV_VAR in os.environ: if PLUS_ENV_VAR in os.environ:
self.key = os.environ.get(PLUS_ENV_VAR) self.key = os.environ.get(PLUS_ENV_VAR)
-        elif os.path.isdir("/run/secrets") and PLUS_ENV_VAR in os.listdir(
-            "/run/secrets"
+        elif (
+            os.path.isdir("/run/secrets")
+            and os.access("/run/secrets", os.R_OK)
+            and PLUS_ENV_VAR in os.listdir("/run/secrets")
         ):
self.key = Path(os.path.join("/run/secrets", PLUS_ENV_VAR)).read_text() self.key = Path(os.path.join("/run/secrets", PLUS_ENV_VAR)).read_text()
# check for the addon options file # check for the addon options file
@ -171,6 +173,17 @@ class PlusApi:
) )
if not r.ok: if not r.ok:
try:
error_response = r.json()
errors = error_response.get("errors", [])
for error in errors:
if (
error.get("param") == "label"
and error.get("type") == "invalid_enum_value"
):
raise ValueError(f"Unsupported label value provided: {label}")
except ValueError as e:
raise e
raise Exception(r.text) raise Exception(r.text)
def add_annotation( def add_annotation(
@ -193,6 +206,17 @@ class PlusApi:
) )
if not r.ok: if not r.ok:
try:
error_response = r.json()
errors = error_response.get("errors", [])
for error in errors:
if (
error.get("param") == "label"
and error.get("type") == "invalid_enum_value"
):
raise ValueError(f"Unsupported label value provided: {label}")
except ValueError as e:
raise e
raise Exception(r.text) raise Exception(r.text)
def get_model_download_url( def get_model_download_url(
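Both upload paths now surface a clearer error when Frigate+ rejects the label instead of raising the raw response text. A standalone sketch of that response parsing, assuming the error payload shape checked above ({"errors": [{"param": ..., "type": ...}]}); the helper name is illustrative only:

def raise_for_invalid_label(response_json: dict, label: str) -> None:
    # a param == "label" / invalid_enum_value error means Frigate+ does not
    # support the submitted label
    for error in response_json.get("errors", []):
        if (
            error.get("param") == "label"
            and error.get("type") == "invalid_enum_value"
        ):
            raise ValueError(f"Unsupported label value provided: {label}")

try:
    raise_for_invalid_label(
        {"errors": [{"param": "label", "type": "invalid_enum_value"}]}, "unicorn"
    )
except ValueError as e:
    print(e)  # Unsupported label value provided: unicorn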
@ -297,12 +297,12 @@ class PtzAutoTracker:
self.ptz_metrics[camera][ self.ptz_metrics[camera][
"ptz_max_zoom" "ptz_max_zoom"
].value = camera_config.onvif.autotracking.movement_weights[1] ].value = camera_config.onvif.autotracking.movement_weights[1]
self.intercept[ self.intercept[camera] = (
camera camera_config.onvif.autotracking.movement_weights[2]
] = camera_config.onvif.autotracking.movement_weights[2] )
self.move_coefficients[ self.move_coefficients[camera] = (
camera camera_config.onvif.autotracking.movement_weights[3:]
] = camera_config.onvif.autotracking.movement_weights[3:] )
else: else:
camera_config.onvif.autotracking.enabled = False camera_config.onvif.autotracking.enabled = False
self.ptz_metrics[camera]["ptz_autotracker_enabled"].value = False self.ptz_metrics[camera]["ptz_autotracker_enabled"].value = False
@ -603,9 +603,9 @@ class PtzAutoTracker:
) ** self.zoom_factor[camera] ) ** self.zoom_factor[camera]
if "original_target_box" not in self.tracked_object_metrics[camera]: if "original_target_box" not in self.tracked_object_metrics[camera]:
self.tracked_object_metrics[camera][ self.tracked_object_metrics[camera]["original_target_box"] = (
"original_target_box" self.tracked_object_metrics[camera]["target_box"]
] = self.tracked_object_metrics[camera]["target_box"] )
( (
self.tracked_object_metrics[camera]["valid_velocity"], self.tracked_object_metrics[camera]["valid_velocity"],
@ -6,6 +6,7 @@ from enum import Enum
import numpy import numpy
from onvif import ONVIFCamera, ONVIFError from onvif import ONVIFCamera, ONVIFError
from zeep.exceptions import Fault, TransportError
from frigate.config import FrigateConfig, ZoomingModeEnum from frigate.config import FrigateConfig, ZoomingModeEnum
from frigate.types import PTZMetricsTypes from frigate.types import PTZMetricsTypes
@ -66,28 +67,68 @@ class OnvifController:
# create init services # create init services
media = onvif.create_media_service() media = onvif.create_media_service()
logger.debug(f"Onvif media xaddr for {camera_name}: {media.xaddr}")
try: try:
profile = media.GetProfiles()[0] # this will fire an exception if camera is not a ptz
except ONVIFError as e: capabilities = onvif.get_definition("ptz")
logger.error(f"Unable to connect to camera: {camera_name}: {e}") logger.debug(f"Onvif capabilities for {camera_name}: {capabilities}")
except (ONVIFError, Fault, TransportError) as e:
logger.error(
f"Unable to get Onvif capabilities for camera: {camera_name}: {e}"
)
return False
try:
profiles = media.GetProfiles()
except (ONVIFError, Fault, TransportError) as e:
logger.error(
f"Unable to get Onvif media profiles for camera: {camera_name}: {e}"
)
return False
profile = None
for key, onvif_profile in enumerate(profiles):
if (
onvif_profile.VideoEncoderConfiguration
and onvif_profile.VideoEncoderConfiguration.Encoding == "H264"
and onvif_profile.PTZConfiguration
and onvif_profile.PTZConfiguration.DefaultContinuousPanTiltVelocitySpace
is not None
):
profile = onvif_profile
logger.debug(f"Selected Onvif profile for {camera_name}: {profile}")
break
if profile is None:
logger.error(
f"No appropriate Onvif profiles found for camera: {camera_name}."
)
return False
# get the PTZ config for the profile
try:
configs = profile.PTZConfiguration
logger.debug(
f"Onvif ptz config for media profile in {camera_name}: {configs}"
)
except Exception as e:
logger.error(
f"Invalid Onvif PTZ configuration for camera: {camera_name}: {e}"
)
return False return False
ptz = onvif.create_ptz_service() ptz = onvif.create_ptz_service()
request = ptz.create_type("GetConfigurations")
configs = ptz.GetConfigurations(request)[0]
logger.debug(f"Onvif configs for {camera_name}: {configs}")
request = ptz.create_type("GetConfigurationOptions") request = ptz.create_type("GetConfigurationOptions")
request.ConfigurationToken = profile.PTZConfiguration.token request.ConfigurationToken = profile.PTZConfiguration.token
ptz_config = ptz.GetConfigurationOptions(request) ptz_config = ptz.GetConfigurationOptions(request)
logger.debug(f"Onvif config for {camera_name}: {ptz_config}") logger.debug(f"Onvif config for {camera_name}: {ptz_config}")
service_capabilities_request = ptz.create_type("GetServiceCapabilities") service_capabilities_request = ptz.create_type("GetServiceCapabilities")
self.cams[camera_name][ self.cams[camera_name]["service_capabilities_request"] = (
"service_capabilities_request" service_capabilities_request
] = service_capabilities_request )
fov_space_id = next( fov_space_id = next(
( (
@ -113,7 +154,10 @@ class OnvifController:
# autoracking relative panning/tilting needs a relative zoom value set to 0 # autoracking relative panning/tilting needs a relative zoom value set to 0
# if camera supports relative movement # if camera supports relative movement
if self.config.cameras[camera_name].onvif.autotracking.zooming: if (
self.config.cameras[camera_name].onvif.autotracking.zooming
!= ZoomingModeEnum.disabled
):
zoom_space_id = next( zoom_space_id = next(
( (
i i
@ -144,23 +188,21 @@ class OnvifController:
try: try:
if ( if (
self.config.cameras[camera_name].onvif.autotracking.zooming self.config.cameras[camera_name].onvif.autotracking.zooming
== ZoomingModeEnum.relative != ZoomingModeEnum.disabled
): ):
if zoom_space_id is not None: if zoom_space_id is not None:
move_request.Translation.Zoom.space = ptz_config["Spaces"][ move_request.Translation.Zoom.space = ptz_config["Spaces"][
"RelativeZoomTranslationSpace" "RelativeZoomTranslationSpace"
][0]["URI"] ][zoom_space_id]["URI"]
else:
move_request.Translation.Zoom = []
except Exception: except Exception:
if ( self.config.cameras[
self.config.cameras[camera_name].onvif.autotracking.zooming camera_name
== ZoomingModeEnum.relative ].onvif.autotracking.zooming = ZoomingModeEnum.disabled
): logger.warning(
self.config.cameras[ f"Disabling autotracking zooming for {camera_name}: Relative zoom not supported"
camera_name )
].onvif.autotracking.zooming = ZoomingModeEnum.disabled
logger.warning(
f"Disabling autotracking zooming for {camera_name}: Relative zoom not supported"
)
if move_request.Speed is None: if move_request.Speed is None:
move_request.Speed = configs.DefaultPTZSpeed if configs else None move_request.Speed = configs.DefaultPTZSpeed if configs else None
@ -187,25 +229,24 @@ class OnvifController:
] = preset["token"] ] = preset["token"]
# get list of supported features # get list of supported features
ptz_config = ptz.GetConfigurationOptions(request)
supported_features = [] supported_features = []
if ptz_config.Spaces and ptz_config.Spaces.ContinuousPanTiltVelocitySpace: if configs.DefaultContinuousPanTiltVelocitySpace:
supported_features.append("pt") supported_features.append("pt")
if ptz_config.Spaces and ptz_config.Spaces.ContinuousZoomVelocitySpace: if configs.DefaultContinuousZoomVelocitySpace:
supported_features.append("zoom") supported_features.append("zoom")
if ptz_config.Spaces and ptz_config.Spaces.RelativePanTiltTranslationSpace: if configs.DefaultRelativePanTiltTranslationSpace:
supported_features.append("pt-r") supported_features.append("pt-r")
if ptz_config.Spaces and ptz_config.Spaces.RelativeZoomTranslationSpace: if configs.DefaultRelativeZoomTranslationSpace:
supported_features.append("zoom-r") supported_features.append("zoom-r")
try: try:
# get camera's zoom limits from onvif config # get camera's zoom limits from onvif config
self.cams[camera_name][ self.cams[camera_name]["relative_zoom_range"] = (
"relative_zoom_range" ptz_config.Spaces.RelativeZoomTranslationSpace[0]
] = ptz_config.Spaces.RelativeZoomTranslationSpace[0] )
except Exception: except Exception:
if ( if (
self.config.cameras[camera_name].onvif.autotracking.zooming self.config.cameras[camera_name].onvif.autotracking.zooming
@ -218,13 +259,13 @@ class OnvifController:
f"Disabling autotracking zooming for {camera_name}: Relative zoom not supported" f"Disabling autotracking zooming for {camera_name}: Relative zoom not supported"
) )
if ptz_config.Spaces and ptz_config.Spaces.AbsoluteZoomPositionSpace: if configs.DefaultAbsoluteZoomPositionSpace:
supported_features.append("zoom-a") supported_features.append("zoom-a")
try: try:
# get camera's zoom limits from onvif config # get camera's zoom limits from onvif config
self.cams[camera_name][ self.cams[camera_name]["absolute_zoom_range"] = (
"absolute_zoom_range" ptz_config.Spaces.AbsoluteZoomPositionSpace[0]
] = ptz_config.Spaces.AbsoluteZoomPositionSpace[0] )
self.cams[camera_name]["zoom_limits"] = configs.ZoomLimits self.cams[camera_name]["zoom_limits"] = configs.ZoomLimits
except Exception: except Exception:
if self.config.cameras[camera_name].onvif.autotracking.zooming: if self.config.cameras[camera_name].onvif.autotracking.zooming:
@ -236,11 +277,14 @@ class OnvifController:
) )
# set relative pan/tilt space for autotracker # set relative pan/tilt space for autotracker
if fov_space_id is not None: if (
fov_space_id is not None
and configs.DefaultRelativePanTiltTranslationSpace is not None
):
supported_features.append("pt-r-fov") supported_features.append("pt-r-fov")
self.cams[camera_name][ self.cams[camera_name]["relative_fov_range"] = (
"relative_fov_range" ptz_config.Spaces.RelativePanTiltTranslationSpace[fov_space_id]
] = ptz_config.Spaces.RelativePanTiltTranslationSpace[fov_space_id] )
self.cams[camera_name]["features"] = supported_features self.cams[camera_name]["features"] = supported_features
@ -347,7 +391,11 @@ class OnvifController:
move_request.Translation.PanTilt.x = pan move_request.Translation.PanTilt.x = pan
move_request.Translation.PanTilt.y = tilt move_request.Translation.PanTilt.y = tilt
if "zoom-r" in self.cams[camera_name]["features"]: if (
"zoom-r" in self.cams[camera_name]["features"]
and self.config.cameras[camera_name].onvif.autotracking.zooming
== ZoomingModeEnum.relative
):
move_request.Speed = { move_request.Speed = {
"PanTilt": { "PanTilt": {
"x": speed, "x": speed,
@ -363,7 +411,11 @@ class OnvifController:
move_request.Translation.PanTilt.x = 0 move_request.Translation.PanTilt.x = 0
move_request.Translation.PanTilt.y = 0 move_request.Translation.PanTilt.y = 0
if "zoom-r" in self.cams[camera_name]["features"]: if (
"zoom-r" in self.cams[camera_name]["features"]
and self.config.cameras[camera_name].onvif.autotracking.zooming
== ZoomingModeEnum.relative
):
move_request.Translation.Zoom.x = 0 move_request.Translation.Zoom.x = 0
self.cams[camera_name]["active"] = False self.cams[camera_name]["active"] = False
@ -9,7 +9,7 @@ from pathlib import Path
from frigate.config import CameraConfig, FrigateConfig, RetainModeEnum from frigate.config import CameraConfig, FrigateConfig, RetainModeEnum
from frigate.const import CACHE_DIR, RECORD_DIR from frigate.const import CACHE_DIR, RECORD_DIR
from frigate.models import Event, Previews, Recordings from frigate.models import Event, Previews, Recordings, ReviewSegment
from frigate.record.util import remove_empty_directories, sync_recordings from frigate.record.util import remove_empty_directories, sync_recordings
from frigate.util.builtin import clear_and_unlink, get_tomorrow_at_time from frigate.util.builtin import clear_and_unlink, get_tomorrow_at_time
@ -174,6 +174,65 @@ class RecordingCleanup(threading.Thread):
Previews.id << deleted_previews_list[i : i + max_deletes] Previews.id << deleted_previews_list[i : i + max_deletes]
).execute() ).execute()
review_segments: list[ReviewSegment] = (
ReviewSegment.select(
ReviewSegment.id,
ReviewSegment.start_time,
ReviewSegment.end_time,
ReviewSegment.thumb_path,
)
.where(
ReviewSegment.camera == config.name,
ReviewSegment.end_time < expire_date,
)
.order_by(ReviewSegment.start_time)
.namedtuples()
.iterator()
)
# expire review segments
recording_start = 0
deleted_segments = set()
for segment in review_segments:
keep = False
# look for a reason to keep this segment
for idx in range(recording_start, len(kept_recordings)):
start_time, end_time = kept_recordings[idx]
# if the recording starts in the future, stop checking recordings
# and let this segment expire
if start_time > segment.end_time:
keep = False
break
# if the recording ends after the segment starts, keep it
# and stop looking at recordings
if end_time >= segment.start_time:
keep = True
break
# if the recording ends before this segment starts, skip
# this recording and check the next recording for an overlap.
# since the kept recordings and segments are sorted, we can skip recordings
# that end before the current segment started
if end_time < segment.start_time:
recording_start = idx
# Delete segments without any relevant recordings
if not keep:
Path(segment.thumb_path).unlink(missing_ok=True)
deleted_segments.add(segment.id)
# expire segments
logger.debug(f"Expiring {len(deleted_segments)} segments")
# delete up to 100,000 at a time
max_deletes = 100000
deleted_segments_list = list(deleted_segments)
for i in range(0, len(deleted_segments_list), max_deletes):
ReviewSegment.delete().where(
ReviewSegment.id << deleted_segments_list[i : i + max_deletes]
).execute()
def expire_recordings(self) -> None: def expire_recordings(self) -> None:
"""Delete recordings based on retention config.""" """Delete recordings based on retention config."""
logger.debug("Start expire recordings.") logger.debug("Start expire recordings.")
@ -3,9 +3,7 @@
import asyncio import asyncio
import datetime import datetime
import logging import logging
import multiprocessing as mp
import os import os
import queue
import random import random
import string import string
import threading import threading
@ -17,6 +15,9 @@ from typing import Any, Optional, Tuple
import numpy as np import numpy as np
import psutil import psutil
from frigate.comms.config_updater import ConfigSubscriber
from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import FrigateConfig, RetainModeEnum from frigate.config import FrigateConfig, RetainModeEnum
from frigate.const import ( from frigate.const import (
CACHE_DIR, CACHE_DIR,
@ -27,7 +28,6 @@ from frigate.const import (
RECORD_DIR, RECORD_DIR,
) )
from frigate.models import Event, Recordings from frigate.models import Event, Recordings
from frigate.types import FeatureMetricsTypes
from frigate.util.image import area from frigate.util.image import area
from frigate.util.services import get_video_properties from frigate.util.services import get_video_properties
@@ -56,22 +56,16 @@ class SegmentInfo:

 class RecordingMaintainer(threading.Thread):
-    def __init__(
-        self,
-        config: FrigateConfig,
-        inter_process_queue: mp.Queue,
-        object_recordings_info_queue: mp.Queue,
-        audio_recordings_info_queue: Optional[mp.Queue],
-        process_info: dict[str, FeatureMetricsTypes],
-        stop_event: MpEvent,
-    ):
+    def __init__(self, config: FrigateConfig, stop_event: MpEvent):
         threading.Thread.__init__(self)
         self.name = "recording_maintainer"
         self.config = config
-        self.inter_process_queue = inter_process_queue
-        self.object_recordings_info_queue = object_recordings_info_queue
-        self.audio_recordings_info_queue = audio_recordings_info_queue
-        self.process_info = process_info
+
+        # create communication for retained recordings
+        self.requestor = InterProcessRequestor()
+        self.config_subscriber = ConfigSubscriber("config/record/")
+        self.detection_subscriber = DetectionSubscriber(DetectionTypeEnum.all)
+
         self.stop_event = stop_event
         self.object_recordings_info: dict[str, list] = defaultdict(list)
         self.audio_recordings_info: dict[str, list] = defaultdict(list)
@ -183,8 +177,9 @@ class RecordingMaintainer(threading.Thread):
recordings_to_insert: list[Optional[Recordings]] = await asyncio.gather(*tasks) recordings_to_insert: list[Optional[Recordings]] = await asyncio.gather(*tasks)
# fire and forget recordings entries # fire and forget recordings entries
self.inter_process_queue.put( self.requestor.send_data(
(INSERT_MANY_RECORDINGS, [r for r in recordings_to_insert if r is not None]) INSERT_MANY_RECORDINGS,
[r for r in recordings_to_insert if r is not None],
) )
async def validate_and_move_segment( async def validate_and_move_segment(
@ -196,7 +191,7 @@ class RecordingMaintainer(threading.Thread):
# Just delete files if recordings are turned off # Just delete files if recordings are turned off
if ( if (
camera not in self.config.cameras camera not in self.config.cameras
or not self.process_info[camera]["record_enabled"].value or not self.config.cameras[camera].record.enabled
): ):
Path(cache_path).unlink(missing_ok=True) Path(cache_path).unlink(missing_ok=True)
self.end_time_cache.pop(cache_path, None) self.end_time_cache.pop(cache_path, None)
@ -433,30 +428,45 @@ class RecordingMaintainer(threading.Thread):
return None return None
def run(self) -> None: def run(self) -> None:
camera_count = sum(camera.enabled for camera in self.config.cameras.values())
# Check for new files every 5 seconds # Check for new files every 5 seconds
wait_time = 0.0 wait_time = 0.0
while not self.stop_event.wait(wait_time): while not self.stop_event.wait(wait_time):
run_start = datetime.datetime.now().timestamp() run_start = datetime.datetime.now().timestamp()
# check if there is an updated config
while True:
(
updated_topic,
updated_record_config,
) = self.config_subscriber.check_for_update()
if not updated_topic:
break
camera_name = updated_topic.rpartition("/")[-1]
self.config.cameras[camera_name].record = updated_record_config
stale_frame_count = 0 stale_frame_count = 0
stale_frame_count_threshold = 10 stale_frame_count_threshold = 10
# empty the object recordings info queue # empty the object recordings info queue
while True: while True:
try: (topic, data) = self.detection_subscriber.get_data(
timeout=QUEUE_READ_TIMEOUT
)
if not topic:
break
if topic == DetectionTypeEnum.video:
( (
camera, camera,
frame_time, frame_time,
current_tracked_objects, current_tracked_objects,
motion_boxes, motion_boxes,
regions, regions,
) = self.object_recordings_info_queue.get( ) = data
True, timeout=QUEUE_READ_TIMEOUT
)
if frame_time < run_start - stale_frame_count_threshold: if self.config.cameras[camera].record.enabled:
stale_frame_count += 1
if self.process_info[camera]["record_enabled"].value:
self.object_recordings_info[camera].append( self.object_recordings_info[camera].append(
( (
frame_time, frame_time,
@ -465,56 +475,29 @@ class RecordingMaintainer(threading.Thread):
regions, regions,
) )
) )
except queue.Empty: elif topic == DetectionTypeEnum.audio:
q_size = self.object_recordings_info_queue.qsize() (
if q_size > camera_count: camera,
logger.debug( frame_time,
f"object_recordings_info loop queue not empty ({q_size})." dBFS,
audio_detections,
) = data
if self.config.cameras[camera].record.enabled:
self.audio_recordings_info[camera].append(
(
frame_time,
dBFS,
audio_detections,
)
) )
break
if frame_time < run_start - stale_frame_count_threshold:
stale_frame_count += 1
if stale_frame_count > 0: if stale_frame_count > 0:
logger.debug(f"Found {stale_frame_count} old frames.") logger.debug(f"Found {stale_frame_count} old frames.")
# empty the audio recordings info queue if audio is enabled
if self.audio_recordings_info_queue:
stale_frame_count = 0
while True:
try:
(
camera,
frame_time,
dBFS,
audio_detections,
) = self.audio_recordings_info_queue.get(
True, timeout=QUEUE_READ_TIMEOUT
)
if frame_time < run_start - stale_frame_count_threshold:
stale_frame_count += 1
if self.process_info[camera]["record_enabled"].value:
self.audio_recordings_info[camera].append(
(
frame_time,
dBFS,
audio_detections,
)
)
except queue.Empty:
q_size = self.audio_recordings_info_queue.qsize()
if q_size > camera_count:
logger.debug(
f"object_recordings_info loop audio queue not empty ({q_size})."
)
break
if stale_frame_count > 0:
logger.error(
f"Found {stale_frame_count} old audio frames, segments from recordings may be missing"
)
try: try:
asyncio.run(self.move_files()) asyncio.run(self.move_files())
except Exception as e: except Exception as e:
@ -525,4 +508,7 @@ class RecordingMaintainer(threading.Thread):
duration = datetime.datetime.now().timestamp() - run_start duration = datetime.datetime.now().timestamp() - run_start
wait_time = max(0, 5 - duration) wait_time = max(0, 5 - duration)
self.requestor.stop()
self.config_subscriber.stop()
self.detection_subscriber.stop()
logger.info("Exiting recording maintenance...") logger.info("Exiting recording maintenance...")

View File

@ -13,19 +13,12 @@ from setproctitle import setproctitle
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
from frigate.models import Event, Recordings from frigate.models import Event, Recordings
from frigate.record.maintainer import RecordingMaintainer from frigate.record.maintainer import RecordingMaintainer
from frigate.types import FeatureMetricsTypes
from frigate.util.services import listen from frigate.util.services import listen
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
-def manage_recordings(
-    config: FrigateConfig,
-    inter_process_queue: mp.Queue,
-    object_recordings_info_queue: mp.Queue,
-    audio_recordings_info_queue: mp.Queue,
-    process_info: dict[str, FeatureMetricsTypes],
-) -> None:
+def manage_recordings(config: FrigateConfig) -> None:
stop_event = mp.Event() stop_event = mp.Event()
def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None: def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None:
@ -52,10 +45,6 @@ def manage_recordings(
maintainer = RecordingMaintainer( maintainer = RecordingMaintainer(
config, config,
inter_process_queue,
object_recordings_info_queue,
audio_recordings_info_queue,
process_info,
stop_event, stop_event,
) )
maintainer.start() maintainer.start()
@ -0,0 +1,351 @@
"""Maintain review segments in db."""
import json
import logging
import os
import random
import string
import threading
from enum import Enum
from multiprocessing.synchronize import Event as MpEvent
from typing import Optional
import cv2
import numpy as np
from frigate.comms.config_updater import ConfigSubscriber
from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import CameraConfig, FrigateConfig
from frigate.const import CLIPS_DIR, UPSERT_REVIEW_SEGMENT
from frigate.models import ReviewSegment
from frigate.object_processing import TrackedObject
from frigate.util.image import SharedMemoryFrameManager, calculate_16_9_crop
logger = logging.getLogger(__name__)
THUMB_HEIGHT = 180
THUMB_WIDTH = 320
class SeverityEnum(str, Enum):
alert = "alert"
detection = "detection"
signification_motion = "significant_motion"
class PendingReviewSegment:
def __init__(
self,
camera: str,
frame_time: float,
severity: SeverityEnum,
detections: set[str] = set(),
objects: set[str] = set(),
sub_labels: set[str] = set(),
zones: set[str] = set(),
audio: set[str] = set(),
motion: list[int] = [],
):
rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
self.id = f"{frame_time}-{rand_id}"
self.camera = camera
self.start_time = frame_time
self.severity = severity
self.detections = detections
self.objects = objects
self.sub_labels = sub_labels
self.zones = zones
self.audio = audio
self.sig_motion_areas = motion
self.last_update = frame_time
# thumbnail
self.frame = np.zeros((THUMB_HEIGHT * 3 // 2, THUMB_WIDTH), np.uint8)
self.frame_active_count = 0
def update_frame(
self, camera_config: CameraConfig, frame, objects: list[TrackedObject]
):
min_x = camera_config.frame_shape[1]
min_y = camera_config.frame_shape[0]
max_x = 0
max_y = 0
# find bounds for all boxes
for o in objects:
min_x = min(o["box"][0], min_x)
min_y = min(o["box"][1], min_y)
max_x = max(o["box"][2], max_x)
max_y = max(o["box"][3], max_y)
region = calculate_16_9_crop(
camera_config.frame_shape, min_x, min_y, max_x, max_y
)
# could not find suitable 16:9 region
if not region:
return
self.frame_active_count = len(objects)
color_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
color_frame = color_frame[region[1] : region[3], region[0] : region[2]]
width = int(THUMB_HEIGHT * color_frame.shape[1] / color_frame.shape[0])
self.frame = cv2.resize(
color_frame, dsize=(width, THUMB_HEIGHT), interpolation=cv2.INTER_AREA
)
def end(self) -> dict:
path = os.path.join(CLIPS_DIR, f"thumb-{self.camera}-{self.id}.jpg")
if self.frame is not None:
cv2.imwrite(path, self.frame)
return {
ReviewSegment.id: self.id,
ReviewSegment.camera: self.camera,
ReviewSegment.start_time: self.start_time,
ReviewSegment.end_time: self.last_update,
ReviewSegment.severity: self.severity.value,
ReviewSegment.thumb_path: path,
ReviewSegment.data: {
"detections": list(self.detections),
"objects": list(self.objects),
"sub_labels": list(self.sub_labels),
"zones": list(self.zones),
"audio": list(self.audio),
"significant_motion_areas": self.sig_motion_areas,
},
}
class ReviewSegmentMaintainer(threading.Thread):
"""Maintain review segments."""
def __init__(self, config: FrigateConfig, stop_event: MpEvent):
threading.Thread.__init__(self)
self.name = "review_segment_maintainer"
self.config = config
self.active_review_segments: dict[str, Optional[PendingReviewSegment]] = {}
self.frame_manager = SharedMemoryFrameManager()
# create communication for review segments
self.requestor = InterProcessRequestor()
self.config_subscriber = ConfigSubscriber("config/record/")
self.detection_subscriber = DetectionSubscriber(DetectionTypeEnum.all)
self.stop_event = stop_event
def end_segment(self, segment: PendingReviewSegment) -> None:
"""End segment."""
seg_data = segment.end()
self.requestor.send_data(UPSERT_REVIEW_SEGMENT, seg_data)
self.requestor.send_data(
"reviews",
json.dumps(
{"type": "end", "review": {k.name: v for k, v in seg_data.items()}}
),
)
self.active_review_segments[segment.camera] = None
def update_existing_segment(
self,
segment: PendingReviewSegment,
frame_time: float,
objects: list[TrackedObject],
motion: list,
) -> None:
"""Validate if existing review segment should continue."""
camera_config = self.config.cameras[segment.camera]
active_objects = get_active_objects(frame_time, camera_config, objects)
if len(active_objects) > 0:
segment.last_update = frame_time
# update type for this segment now that active objects are detected
if segment.severity == SeverityEnum.signification_motion:
segment.severity = SeverityEnum.detection
if len(active_objects) > segment.frame_active_count:
frame_id = f"{camera_config.name}{frame_time}"
yuv_frame = self.frame_manager.get(
frame_id, camera_config.frame_shape_yuv
)
segment.update_frame(camera_config, yuv_frame, active_objects)
self.frame_manager.close(frame_id)
for object in active_objects:
segment.detections.add(object["id"])
segment.objects.add(object["label"])
if object["sub_label"]:
segment.sub_labels.add(object["sub_label"][0])
# if object is alert label and has qualified for recording
# mark this review as alert
if (
segment.severity == SeverityEnum.detection
and object["has_clip"]
and object["label"] in camera_config.objects.alert
):
segment.severity = SeverityEnum.alert
# keep zones up to date
if len(object["current_zones"]) > 0:
segment.zones.update(object["current_zones"])
elif (
segment.severity == SeverityEnum.signification_motion and len(motion) >= 20
):
segment.last_update = frame_time
else:
if segment.severity == SeverityEnum.alert and frame_time > (
segment.last_update + 60
):
self.end_segment(segment)
elif frame_time > (segment.last_update + 10):
self.end_segment(segment)
def check_if_new_segment(
self,
camera: str,
frame_time: float,
objects: list[TrackedObject],
motion: list,
) -> None:
"""Check if a new review segment should be created."""
camera_config = self.config.cameras[camera]
active_objects = get_active_objects(frame_time, camera_config, objects)
if len(active_objects) > 0:
has_sig_object = False
detections: set = set()
objects: set = set()
sub_labels: set = set()
zones: set = set()
for object in active_objects:
if (
not has_sig_object
and object["has_clip"]
and object["label"] in camera_config.objects.alert
):
has_sig_object = True
detections.add(object["id"])
objects.add(object["label"])
if object["sub_label"]:
sub_labels.add(object["sub_label"][0])
zones.update(object["current_zones"])
self.active_review_segments[camera] = PendingReviewSegment(
camera,
frame_time,
SeverityEnum.alert if has_sig_object else SeverityEnum.detection,
detections,
objects=objects,
sub_labels=sub_labels,
audio=set(),
zones=zones,
motion=[],
)
frame_id = f"{camera_config.name}{frame_time}"
yuv_frame = self.frame_manager.get(frame_id, camera_config.frame_shape_yuv)
self.active_review_segments[camera].update_frame(
camera_config, yuv_frame, active_objects
)
self.frame_manager.close(frame_id)
elif len(motion) >= 20:
self.active_review_segments[camera] = PendingReviewSegment(
camera, frame_time, SeverityEnum.signification_motion, motion=motion
)
def run(self) -> None:
while not self.stop_event.is_set():
# check if there is an updated config
while True:
(
updated_topic,
updated_record_config,
) = self.config_subscriber.check_for_update()
if not updated_topic:
break
camera_name = updated_topic.rpartition("/")[-1]
self.config.cameras[camera_name].record = updated_record_config
(topic, data) = self.detection_subscriber.get_data(timeout=1)
if not topic:
continue
if topic == DetectionTypeEnum.video:
(
camera,
frame_time,
current_tracked_objects,
motion_boxes,
regions,
) = data
elif topic == DetectionTypeEnum.audio:
(
camera,
frame_time,
dBFS,
audio_detections,
) = data
if not self.config.cameras[camera].record.enabled:
continue
current_segment = self.active_review_segments.get(camera)
if current_segment is not None:
if topic == DetectionTypeEnum.video:
self.update_existing_segment(
current_segment,
frame_time,
current_tracked_objects,
motion_boxes,
)
elif topic == DetectionTypeEnum.audio and len(audio_detections) > 0:
current_segment.last_update = frame_time
current_segment.audio.update(audio_detections)
else:
if topic == DetectionTypeEnum.video:
self.check_if_new_segment(
camera,
frame_time,
current_tracked_objects,
motion_boxes,
)
elif topic == DetectionTypeEnum.audio and len(audio_detections) > 0:
self.active_review_segments[camera] = PendingReviewSegment(
camera,
frame_time,
SeverityEnum.detection,
set(),
set(),
set(),
set(),
set(audio_detections),
[],
)
def get_active_objects(
frame_time: float, camera_config: CameraConfig, all_objects: list[TrackedObject]
) -> list[TrackedObject]:
"""get active objects for detection."""
return [
o
for o in all_objects
if o["motionless_count"] < camera_config.detect.stationary.threshold
and o["position_changes"] > 0
and o["frame_time"] == frame_time
and not o["false_positive"]
]
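A review segment's severity only ever moves upward: significant motion becomes a detection once an active object appears, and a detection becomes an alert once an object with a clip matches the camera's alert labels. A tiny self-contained sketch of that escalation, re-declaring the SeverityEnum values defined above; the alert_labels set stands in for camera_config.objects.alert:

from enum import Enum

class SeverityEnum(str, Enum):
    alert = "alert"
    detection = "detection"
    signification_motion = "significant_motion"

def escalate(severity, active_objects, alert_labels):
    # any active object upgrades significant motion to a detection
    if active_objects and severity == SeverityEnum.signification_motion:
        severity = SeverityEnum.detection
    # an object that qualified for recording and matches an alert label upgrades to alert
    if severity == SeverityEnum.detection and any(
        o["has_clip"] and o["label"] in alert_labels for o in active_objects
    ):
        severity = SeverityEnum.alert
    return severity

print(escalate(
    SeverityEnum.signification_motion,
    [{"label": "person", "has_clip": True}],
    {"person"},
))  # SeverityEnum.alert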
frigate/review/review.py Normal file
@ -0,0 +1,36 @@
"""Run recording maintainer and cleanup."""
import logging
import multiprocessing as mp
import signal
import threading
from types import FrameType
from typing import Optional
from setproctitle import setproctitle
from frigate.config import FrigateConfig
from frigate.review.maintainer import ReviewSegmentMaintainer
from frigate.util.services import listen
logger = logging.getLogger(__name__)
def manage_review_segments(config: FrigateConfig) -> None:
stop_event = mp.Event()
def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None:
stop_event.set()
signal.signal(signal.SIGTERM, receiveSignal)
signal.signal(signal.SIGINT, receiveSignal)
threading.current_thread().name = "process:review_segment_manager"
setproctitle("frigate.review_segment_manager")
listen()
maintainer = ReviewSegmentMaintainer(
config,
stop_event,
)
maintainer.start()
frigate/stats/emitter.py Normal file
@ -0,0 +1,61 @@
"""Emit stats to listeners."""
import json
import logging
import threading
import time
from multiprocessing.synchronize import Event as MpEvent
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import FrigateConfig
from frigate.stats.util import stats_snapshot
from frigate.types import StatsTrackingTypes
logger = logging.getLogger(__name__)
class StatsEmitter(threading.Thread):
def __init__(
self,
config: FrigateConfig,
stats_tracking: StatsTrackingTypes,
stop_event: MpEvent,
):
threading.Thread.__init__(self)
self.name = "frigate_stats_emitter"
self.config = config
self.stats_tracking = stats_tracking
self.stop_event = stop_event
self.hwaccel_errors: list[str] = []
self.stats_history: list[dict[str, any]] = []
# create communication for stats
self.requestor = InterProcessRequestor()
def get_latest_stats(self) -> dict[str, any]:
"""Get latest stats."""
if len(self.stats_history) > 0:
return self.stats_history[-1]
else:
stats = stats_snapshot(
self.config, self.stats_tracking, self.hwaccel_errors
)
self.stats_history.append(stats)
return stats
def get_stats_history(self) -> list[dict[str, any]]:
"""Get stats history."""
return self.stats_history
def run(self) -> None:
time.sleep(10)
while not self.stop_event.wait(self.config.mqtt.stats_interval):
logger.debug("Starting stats collection")
stats = stats_snapshot(
self.config, self.stats_tracking, self.hwaccel_errors
)
self.stats_history.append(stats)
self.stats_history = self.stats_history[-10:]
self.requestor.send_data("stats", json.dumps(stats))
logger.debug("Finished stats collection")
logger.info("Exiting stats emitter...")
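
Note (illustration, not part of the diff): StatsEmitter keeps a rolling window of the last ten snapshots and publishes each one through the inter-process requestor; get_latest_stats only takes a fresh snapshot when no history exists yet. A minimal standalone sketch of that rolling-window behaviour, with take_snapshot standing in for stats_snapshot:

import time

MAX_HISTORY = 10  # matches the [-10:] slice above

history: list[dict] = []


def take_snapshot() -> dict:
    # stand-in for stats_snapshot(config, stats_tracking, hwaccel_errors)
    return {"ts": time.time()}


def record_snapshot() -> dict:
    history.append(take_snapshot())
    del history[:-MAX_HISTORY]  # keep only the most recent MAX_HISTORY entries
    return history[-1]


def latest() -> dict:
    # same fallback as get_latest_stats: reuse history if present, else snapshot now
    return history[-1] if history else record_snapshot()


for _ in range(25):
    record_snapshot()
assert len(history) == MAX_HISTORY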


@@ -1,20 +1,17 @@
+"""Utilities for stats."""
+
 import asyncio
-import json
-import logging
 import os
 import shutil
-import threading
 import time
-from multiprocessing.synchronize import Event as MpEvent
 from typing import Any, Optional

 import psutil
 import requests
 from requests.exceptions import RequestException

-from frigate.comms.dispatcher import Dispatcher
 from frigate.config import FrigateConfig
-from frigate.const import CACHE_DIR, CLIPS_DIR, DRIVER_AMD, DRIVER_ENV_VAR, RECORD_DIR
+from frigate.const import CACHE_DIR, CLIPS_DIR, RECORD_DIR
 from frigate.object_detection import ObjectDetectProcess
 from frigate.types import CameraMetricsTypes, StatsTrackingTypes
 from frigate.util.services import (
@@ -24,11 +21,10 @@ from frigate.util.services import (
     get_intel_gpu_stats,
     get_jetson_stats,
     get_nvidia_gpu_stats,
+    is_vaapi_amd_driver,
 )
 from frigate.version import VERSION

-logger = logging.getLogger(__name__)
-

 def get_latest_version(config: FrigateConfig) -> str:
     if not config.telemetry.version_check:
@@ -205,9 +201,7 @@ async def set_gpu_stats(
             stats["intel-qsv"] = {"gpu": -1, "mem": -1}
             hwaccel_errors.append(args)
     elif "vaapi" in args:
-        driver = os.environ.get(DRIVER_ENV_VAR)
-
-        if driver == DRIVER_AMD:
+        if is_vaapi_amd_driver():
             if not config.telemetry.stats.amd_gpu_stats:
                 continue
@@ -265,7 +259,7 @@ def stats_snapshot(
             "process_fps": round(camera_stats["process_fps"].value, 2),
             "skipped_fps": round(camera_stats["skipped_fps"].value, 2),
             "detection_fps": round(camera_stats["detection_fps"].value, 2),
-            "detection_enabled": camera_stats["detection_enabled"].value,
+            "detection_enabled": config.cameras[name].detect.enabled,
             "pid": pid,
             "capture_pid": cpid,
             "ffmpeg_pid": ffmpeg_pid,
@@ -319,31 +313,3 @@ def stats_snapshot(
     }

     return stats
-
-
-class StatsEmitter(threading.Thread):
-    def __init__(
-        self,
-        config: FrigateConfig,
-        stats_tracking: StatsTrackingTypes,
-        dispatcher: Dispatcher,
-        stop_event: MpEvent,
-    ):
-        threading.Thread.__init__(self)
-        self.name = "frigate_stats_emitter"
-        self.config = config
-        self.stats_tracking = stats_tracking
-        self.dispatcher = dispatcher
-        self.stop_event = stop_event
-        self.hwaccel_errors: list[str] = []
-
-    def run(self) -> None:
-        time.sleep(10)
-        while not self.stop_event.wait(self.config.mqtt.stats_interval):
-            logger.debug("Starting stats collection")
-            stats = stats_snapshot(
-                self.config, self.stats_tracking, self.hwaccel_errors
-            )
-            self.dispatcher.publish("stats", json.dumps(stats), retain=False)
-            logger.debug("Finished stats collection")
-
-        logger.info("Exiting stats emitter...")


@@ -43,7 +43,7 @@ class TestUserPassMasking(unittest.TestCase):
         self.rtsp_log_message = "Did you mean file:rtsp://user:password@192.168.1.3:554"

     def test_rtsp_in_log_message(self):
-        """Test that the rtsp url in a log message is espaced."""
+        """Test that the rtsp url in a log message is escaped."""
         escaped = clean_camera_user_pass(self.rtsp_log_message)
         print(f"The escaped is {escaped}")
         assert escaped == "Did you mean file:rtsp://*:*@192.168.1.3:554"
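
Note (illustration, not part of the diff): the test above asserts that credentials embedded in an RTSP URL are masked before the message is logged. A rough sketch of that masking idea with a plain regular expression (not Frigate's actual clean_camera_user_pass implementation):

import re


def mask_user_pass(line: str) -> str:
    # replace "user:password@" in any rtsp:// URL with "*:*@"
    return re.sub(r"(rtsp://)[^:/]+:[^@/]+@", r"\1*:*@", line)


print(mask_user_pass("Did you mean file:rtsp://user:password@192.168.1.3:554"))
# -> Did you mean file:rtsp://*:*@192.168.1.3:554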


@@ -1,6 +1,7 @@
 import json
 import os
 import unittest
+from unittest.mock import patch

 import numpy as np
 from pydantic import ValidationError
@@ -70,7 +71,9 @@ class TestConfig(unittest.TestCase):
         assert runtime_config.detectors["cpu"].type == DetectorTypeEnum.cpu
         assert runtime_config.detectors["cpu"].model.width == 320

-    def test_detector_custom_model_path(self):
+    @patch("frigate.detectors.detector_config.load_labels")
+    def test_detector_custom_model_path(self, mock_labels):
+        mock_labels.return_value = {}
         config = {
             "detectors": {
                 "cpu": {
@@ -110,7 +113,7 @@ class TestConfig(unittest.TestCase):
         assert runtime_config.detectors["openvino"].model.path == "/etc/hosts"

         assert runtime_config.model.width == 512
-        assert runtime_config.detectors["cpu"].model.width == 512
+        assert runtime_config.detectors["cpu"].model.width == 320
         assert runtime_config.detectors["edgetpu"].model.width == 160
         assert runtime_config.detectors["openvino"].model.width == 512


@@ -41,9 +41,9 @@ class TestFfmpegPresets(unittest.TestCase):
         assert self.default_ffmpeg == frigate_config.dict(exclude_unset=True)

     def test_ffmpeg_hwaccel_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
-            "hwaccel_args"
-        ] = "preset-rpi-64-h264"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["hwaccel_args"] = (
+            "preset-rpi-64-h264"
+        )
         frigate_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         assert "preset-rpi-64-h264" not in (
@@ -54,9 +54,9 @@ class TestFfmpegPresets(unittest.TestCase):
         )

     def test_ffmpeg_hwaccel_not_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
-            "hwaccel_args"
-        ] = "-other-hwaccel args"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["hwaccel_args"] = (
+            "-other-hwaccel args"
+        )
         frigate_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         assert "-other-hwaccel args" in (
@@ -64,9 +64,9 @@ class TestFfmpegPresets(unittest.TestCase):
         )

     def test_ffmpeg_hwaccel_scale_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
-            "hwaccel_args"
-        ] = "preset-nvidia-h264"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["hwaccel_args"] = (
+            "preset-nvidia-h264"
+        )
         self.default_ffmpeg["cameras"]["back"]["detect"] = {
             "height": 1920,
             "width": 2560,
@@ -85,9 +85,9 @@ class TestFfmpegPresets(unittest.TestCase):

     def test_default_ffmpeg_input_arg_preset(self):
         frigate_config = FrigateConfig(**self.default_ffmpeg)
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
-            "input_args"
-        ] = "preset-rtsp-generic"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["input_args"] = (
+            "preset-rtsp-generic"
+        )
         frigate_preset_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         frigate_preset_config.cameras["back"].create_ffmpeg_cmds()
@@ -98,9 +98,9 @@ class TestFfmpegPresets(unittest.TestCase):
         )

     def test_ffmpeg_input_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
-            "input_args"
-        ] = "preset-rtmp-generic"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["input_args"] = (
+            "preset-rtmp-generic"
+        )
         frigate_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         assert "preset-rtmp-generic" not in (
@@ -131,9 +131,9 @@ class TestFfmpegPresets(unittest.TestCase):
         )

     def test_ffmpeg_output_record_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
-            "record"
-        ] = "preset-record-generic-audio-aac"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"]["record"] = (
+            "preset-record-generic-audio-aac"
+        )
         frigate_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         assert "preset-record-generic-audio-aac" not in (
@@ -144,9 +144,9 @@ class TestFfmpegPresets(unittest.TestCase):
         )

     def test_ffmpeg_output_record_not_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
-            "record"
-        ] = "-some output"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"]["record"] = (
+            "-some output"
+        )
         frigate_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         assert "-some output" in (


@@ -3,7 +3,7 @@ import json
 import logging
 import os
 import unittest
-from unittest.mock import patch
+from unittest.mock import Mock

 from peewee_migrate import Router
 from playhouse.shortcuts import model_to_dict
@@ -14,6 +14,7 @@ from frigate.config import FrigateConfig
 from frigate.http import create_app
 from frigate.models import Event, Recordings
 from frigate.plus import PlusApi
+from frigate.stats.emitter import StatsEmitter
 from frigate.test.const import TEST_DB, TEST_DB_CLEANUPS
@@ -119,8 +120,8 @@ class TestHttp(unittest.TestCase):
             None,
             None,
             None,
-            None,
             PlusApi(),
+            None,
         )
         id = "123456.random"
         id2 = "7890.random"
@@ -155,8 +156,8 @@ class TestHttp(unittest.TestCase):
             None,
             None,
             None,
-            None,
             PlusApi(),
+            None,
         )
         id = "123456.random"
@@ -176,8 +177,8 @@ class TestHttp(unittest.TestCase):
             None,
             None,
             None,
-            None,
             PlusApi(),
+            None,
         )
         id = "123456.random"
         bad_id = "654321.other"
@@ -196,8 +197,8 @@ class TestHttp(unittest.TestCase):
             None,
             None,
             None,
-            None,
             PlusApi(),
+            None,
         )
         id = "123456.random"
@@ -218,8 +219,8 @@ class TestHttp(unittest.TestCase):
             None,
             None,
             None,
-            None,
             PlusApi(),
+            None,
         )
         id = "123456.random"
@@ -244,8 +245,8 @@ class TestHttp(unittest.TestCase):
             None,
             None,
             None,
-            None,
             PlusApi(),
+            None,
         )
         morning_id = "123456.random"
         evening_id = "654321.random"
@@ -282,8 +283,8 @@ class TestHttp(unittest.TestCase):
             None,
             None,
             None,
-            None,
             PlusApi(),
+            None,
         )
         id = "123456.random"
         sub_label = "sub"
@@ -317,8 +318,8 @@ class TestHttp(unittest.TestCase):
             None,
             None,
             None,
-            None,
             PlusApi(),
+            None,
         )
         id = "123456.random"
         sub_label = "sub"
@@ -342,8 +343,8 @@ class TestHttp(unittest.TestCase):
             None,
             None,
             None,
-            None,
             PlusApi(),
+            None,
         )

         with app.test_client() as client:
@@ -359,8 +360,8 @@ class TestHttp(unittest.TestCase):
             None,
             None,
             None,
-            None,
             PlusApi(),
+            None,
         )
         id = "123456.random"
@@ -370,8 +371,9 @@ class TestHttp(unittest.TestCase):
         assert recording
         assert recording[0]["id"] == id

-    @patch("frigate.http.stats_snapshot")
-    def test_stats(self, mock_stats):
+    def test_stats(self):
+        stats = Mock(spec=StatsEmitter)
+        stats.get_latest_stats.return_value = self.test_stats
         app = create_app(
             FrigateConfig(**self.minimal_config).runtime_config(),
             self.db,
@@ -379,14 +381,13 @@
             None,
             None,
             None,
             None,
-            None,
             PlusApi(),
+            stats,
         )
-        mock_stats.return_value = self.test_stats

         with app.test_client() as client:
-            stats = client.get("/stats").json
-            assert stats == self.test_stats
+            full_stats = client.get("/stats").json
+            assert full_stats == self.test_stats

     def _insert_mock_event(


@@ -148,8 +148,8 @@ class TestRegion(unittest.TestCase):
     def test_combine_boxes(self):
         boxes = [
-            (460, 0, 561, 144),
-            (565, 0, 586, 71),
+            (480, 0, 540, 128),
+            (536, 0, 558, 99),
         ]

         # boundary_boxes = [get_cluster_boundary(box) for box in boxes]
@@ -167,8 +167,32 @@
         # save_clusters_image("combine", boxes, cluster_candidates, regions)

         assert len(regions) == 1

+    def test_dont_combine_smaller_boxes(self):
+        boxes = [
+            (460, 0, 561, 144),
+            (565, 0, 586, 71),
+        ]
+
+        # boundary_boxes = [get_cluster_boundary(box) for box in boxes]
+        # save_cluster_boundary_image("combine_bound", boxes, boundary_boxes)
+
+        cluster_candidates = get_cluster_candidates(
+            self.frame_shape, self.min_region_size, boxes
+        )
+
+        regions = [
+            get_cluster_region(self.frame_shape, self.min_region_size, candidate, boxes)
+            for candidate in cluster_candidates
+        ]
+
+        # save_clusters_image("combine", boxes, cluster_candidates, regions)
+
+        assert len(regions) == 2
+
     def test_dont_combine_boxes(self):
-        boxes = [(460, 0, 532, 129), (586, 0, 606, 46)]
+        boxes = [
+            (460, 0, 532, 129),
+            (586, 0, 606, 46),
+        ]

         # boundary_boxes = [get_cluster_boundary(box) for box in boxes]
         # save_cluster_boundary_image("dont_combine_bound", boxes, boundary_boxes)
@@ -287,6 +311,15 @@ class TestObjectBoundingBoxes(unittest.TestCase):
         consolidated_detections = reduce_detections(frame_shape, detections)
         assert len(consolidated_detections) == len(detections)

+    def test_vert_stacked_cars_not_reduced(self):
+        detections = [
+            ("car", 0.8, (954, 312, 1247, 475), 498512, 1.48, (800, 200, 1400, 600)),
+            ("car", 0.85, (970, 380, 1273, 610), 698752, 1.56, (800, 200, 1400, 700)),
+        ]
+        frame_shape = (720, 1280)
+
+        consolidated_detections = reduce_detections(frame_shape, detections)
+        assert len(consolidated_detections) == len(detections)
+
 class TestRegionGrid(unittest.TestCase):
     def setUp(self) -> None:


@@ -5,9 +5,9 @@ from frigate.config import DetectConfig

 class ObjectTracker(ABC):
     @abstractmethod
-    def __init__(self, config: DetectConfig):
+    def __init__(self, config: DetectConfig) -> None:
         pass

     @abstractmethod
-    def match_and_update(self, detections):
+    def match_and_update(self, frame_time: float, detections) -> None:
         pass


@@ -17,10 +17,15 @@ from frigate.ptz.autotrack import PtzMotionEstimator
 from frigate.track import ObjectTracker
 from frigate.types import PTZMetricsTypes
 from frigate.util.image import intersection_over_union
+from frigate.util.object import average_boxes

 logger = logging.getLogger(__name__)

+THRESHOLD_STATIONARY_IOU_AVERAGE = 0.6
+MAX_STATIONARY_HISTORY = 10
+
 # Normalizes distance from estimate relative to object size
 # Other ideas:
 # - if estimates are inaccurate for first N detections, compare with last_detection (may be fine)
@@ -74,6 +79,7 @@ class NorfairTracker(ObjectTracker):
         self.untracked_object_boxes: list[list[int]] = []
         self.disappeared = {}
         self.positions = {}
+        self.stationary_box_history: dict[str, list[list[int, int, int, int]]] = {}
         self.camera_config = config
         self.detect_config = config.detect
         self.ptz_metrics = ptz_metrics
@@ -127,6 +133,7 @@ class NorfairTracker(ObjectTracker):
             "xmax": self.detect_config.width,
             "ymax": self.detect_config.height,
         }
+        self.stationary_box_history[id] = []

     def deregister(self, id, track_id):
         del self.tracked_objects[id]
@@ -138,22 +145,24 @@ class NorfairTracker(ObjectTracker):
     # tracks the current position of the object based on the last N bounding boxes
     # returns False if the object has moved outside its previous position
-    def update_position(self, id, box):
+    def update_position(self, id: str, box: list[int, int, int, int]):
         position = self.positions[id]
-        position_box = (
-            position["xmin"],
-            position["ymin"],
-            position["xmax"],
-            position["ymax"],
-        )
+        self.stationary_box_history[id].append(box)
+
+        if len(self.stationary_box_history[id]) > MAX_STATIONARY_HISTORY:
+            self.stationary_box_history[id] = self.stationary_box_history[id][
+                -MAX_STATIONARY_HISTORY:
+            ]
+
+        avg_iou = intersection_over_union(
+            box, average_boxes(self.stationary_box_history[id])
+        )

         xmin, ymin, xmax, ymax = box

-        iou = intersection_over_union(position_box, box)
-
         # if the iou drops below the threshold
         # assume the object has moved to a new position and reset the computed box
-        if iou < 0.6:
+        if avg_iou < THRESHOLD_STATIONARY_IOU_AVERAGE:
             self.positions[id] = {
                 "xmins": [xmin],
                 "ymins": [ymin],
@@ -220,6 +229,7 @@ class NorfairTracker(ObjectTracker):
         ):
             self.tracked_objects[id]["position_changes"] += 1
             self.tracked_objects[id]["motionless_count"] = 0
+            self.stationary_box_history[id] = []

         self.tracked_objects[id].update(obj)
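
Note (illustration, not part of the diff): the change above replaces the comparison against a single stored position box with a comparison against the average of the last MAX_STATIONARY_HISTORY boxes. A standalone sketch of that idea, with average_boxes and intersection_over_union reimplemented here on plain tuples rather than imported from frigate.util:

MAX_STATIONARY_HISTORY = 10
THRESHOLD_STATIONARY_IOU_AVERAGE = 0.6


def average_boxes(boxes: list[tuple]) -> tuple:
    # element-wise mean of (xmin, ymin, xmax, ymax) boxes
    n = len(boxes)
    return tuple(sum(b[i] for b in boxes) / n for i in range(4))


def intersection_over_union(a: tuple, b: tuple) -> float:
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
    return inter / union if union else 0.0


history: list[tuple] = []


def is_still_stationary(box: tuple) -> bool:
    history.append(box)
    del history[:-MAX_STATIONARY_HISTORY]  # keep only the last N boxes
    avg_iou = intersection_over_union(box, average_boxes(history))
    return avg_iou >= THRESHOLD_STATIONARY_IOU_AVERAGE


print(is_still_stationary((100, 100, 200, 200)))  # True: identical to its own average
print(is_still_stationary((400, 100, 500, 200)))  # False: far from the averaged position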


@@ -10,23 +10,16 @@ from frigate.object_detection import ObjectDetectProcess
 class CameraMetricsTypes(TypedDict):
     camera_fps: Synchronized
     capture_process: Optional[Process]
-    detection_enabled: Synchronized
     detection_fps: Synchronized
     detection_frame: Synchronized
     ffmpeg_pid: Synchronized
     frame_queue: Queue
-    motion_enabled: Synchronized
-    improve_contrast_enabled: Synchronized
-    motion_threshold: Synchronized
-    motion_contour_area: Synchronized
     process: Optional[Process]
     process_fps: Synchronized
     read_start: Synchronized
     skipped_fps: Synchronized
     audio_rms: Synchronized
     audio_dBFS: Synchronized
-    birdseye_enabled: Synchronized
-    birdseye_mode: Synchronized


 class PTZMetricsTypes(TypedDict):
@@ -42,11 +35,6 @@ class PTZMetricsTypes(TypedDict):
     ptz_min_zoom: Synchronized


-class FeatureMetricsTypes(TypedDict):
-    audio_enabled: Synchronized
-    record_enabled: Synchronized
-
-
 class StatsTrackingTypes(TypedDict):
     camera_metrics: dict[str, CameraMetricsTypes]
     detectors: dict[str, ObjectDetectProcess]

Some files were not shown because too many files have changed in this diff.