diff --git a/.devcontainer/post_create.sh b/.devcontainer/post_create.sh index ec33ffb86..fcf7ca693 100755 --- a/.devcontainer/post_create.sh +++ b/.devcontainer/post_create.sh @@ -19,7 +19,7 @@ sudo chown -R "$(id -u):$(id -g)" /media/frigate # When started as a service, LIBAVFORMAT_VERSION_MAJOR is defined in the # s6 service file. For dev, where frigate is started from an interactive # shell, we define it in .bashrc instead. -echo 'export LIBAVFORMAT_VERSION_MAJOR=$(/usr/lib/ffmpeg/7.0/bin/ffmpeg -version | grep -Po "libavformat\W+\K\d+")' >> $HOME/.bashrc +echo 'export LIBAVFORMAT_VERSION_MAJOR=$("$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)" -version | grep -Po "libavformat\W+\K\d+")' >> "$HOME/.bashrc" make version diff --git a/docker/main/Dockerfile b/docker/main/Dockerfile index 0bafeab80..8dee8e642 100644 --- a/docker/main/Dockerfile +++ b/docker/main/Dockerfile @@ -14,16 +14,18 @@ ARG BASE_HOOK= FROM ${BASE_IMAGE} AS base ARG PIP_BREAK_SYSTEM_PACKAGES +ARG BASE_HOOK -RUN ${BASE_HOOK} +RUN sh -c "$BASE_HOOK" FROM --platform=${BUILDPLATFORM} debian:12 AS base_host ARG PIP_BREAK_SYSTEM_PACKAGES FROM ${SLIM_BASE} AS slim-base ARG PIP_BREAK_SYSTEM_PACKAGES +ARG BASE_HOOK -RUN ${BASE_HOOK} +RUN sh -c "$BASE_HOOK" FROM slim-base AS wget ARG DEBIAN_FRONTEND @@ -229,8 +231,13 @@ ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PA RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \ /deps/install_deps.sh +ENV DEFAULT_FFMPEG_VERSION="7.0" +ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:5.0" + +RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ + && python3 get-pip.py "pip" + RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \ - python3 -m pip install --upgrade pip && \ pip3 install -U /deps/wheels/*.whl COPY --from=deps-rootfs / / diff --git a/docker/main/install_deps.sh b/docker/main/install_deps.sh index a7b7789c0..9684199f8 100755 --- a/docker/main/install_deps.sh +++ b/docker/main/install_deps.sh @@ -6,13 +6,13 @@ apt-get -qq update apt-get -qq install --no-install-recommends -y \ apt-transport-https \ + ca-certificates \ gnupg \ wget \ lbzip2 \ procps vainfo \ unzip locales tzdata libxml2 xz-utils \ python3.11 \ - python3-pip \ curl \ lsof \ jq \ @@ -31,28 +31,28 @@ unset DEBIAN_FRONTEND yes | dpkg -i /tmp/libedgetpu1-max.deb && export DEBIAN_FRONTEND=noninteractive rm /tmp/libedgetpu1-max.deb -# btbn-ffmpeg -> amd64 +# ffmpeg -> amd64 if [[ "${TARGETARCH}" == "amd64" ]]; then mkdir -p /usr/lib/ffmpeg/5.0 + wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linux64-gpl-5.1.tar.xz" + tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1 amd64/bin/ffmpeg amd64/bin/ffprobe + rm -rf ffmpeg.tar.xz mkdir -p /usr/lib/ffmpeg/7.0 - wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linux64-gpl-5.1.tar.xz" - tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1 - rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/5.0/doc /usr/lib/ffmpeg/5.0/bin/ffplay - wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linux64-gpl-7.0.tar.xz" - tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1 - rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/7.0/doc /usr/lib/ffmpeg/7.0/bin/ffplay + wget -qO 
ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linux64-gpl-7.0.tar.xz" + tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1 amd64/bin/ffmpeg amd64/bin/ffprobe + rm -rf ffmpeg.tar.xz fi # ffmpeg -> arm64 if [[ "${TARGETARCH}" == "arm64" ]]; then mkdir -p /usr/lib/ffmpeg/5.0 + wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linuxarm64-gpl-5.1.tar.xz" + tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1 arm64/bin/ffmpeg arm64/bin/ffprobe + rm -f ffmpeg.tar.xz mkdir -p /usr/lib/ffmpeg/7.0 - wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linuxarm64-gpl-5.1.tar.xz" - tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1 - rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/5.0/doc /usr/lib/ffmpeg/5.0/bin/ffplay - wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linuxarm64-gpl-7.0.tar.xz" - tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1 - rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/7.0/doc /usr/lib/ffmpeg/7.0/bin/ffplay + wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linuxarm64-gpl-7.0.tar.xz" + tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1 arm64/bin/ffmpeg arm64/bin/ffprobe + rm -f ffmpeg.tar.xz fi # arch specific packages diff --git a/docker/main/requirements-wheels.txt b/docker/main/requirements-wheels.txt index f06f82d88..320ce3334 100644 --- a/docker/main/requirements-wheels.txt +++ b/docker/main/requirements-wheels.txt @@ -70,5 +70,5 @@ verboselogs==1.7.* virtualenv==20.17.* prometheus-client == 0.21.* # TFLite -tflite_runtime @ https://github.com/feranick/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_x86_64.whl; platform_machine == 'x86_64' +tflite_runtime @ https://github.com/frigate-nvr/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_x86_64.whl; platform_machine == 'x86_64' tflite_runtime @ https://github.com/feranick/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_aarch64.whl; platform_machine == 'aarch64' diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run index e4a1b20e5..f764fd6b0 100755 --- a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run @@ -43,8 +43,10 @@ function migrate_db_path() { } function set_libva_version() { - local ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py) - export LIBAVFORMAT_VERSION_MAJOR=$($ffmpeg_path -version | grep -Po "libavformat\W+\K\d+") + local ffmpeg_path + ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py) + LIBAVFORMAT_VERSION_MAJOR=$("$ffmpeg_path" -version | grep -Po "libavformat\W+\K\d+") + export LIBAVFORMAT_VERSION_MAJOR } echo "[INFO] Preparing Frigate..." 
diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run index 90c26ceff..2c3a7ab6f 100755 --- a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run @@ -44,10 +44,14 @@ function get_ip_and_port_from_supervisor() { } function set_libva_version() { - local ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py) - export LIBAVFORMAT_VERSION_MAJOR=$($ffmpeg_path -version | grep -Po "libavformat\W+\K\d+") + local ffmpeg_path + ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py) + LIBAVFORMAT_VERSION_MAJOR=$("$ffmpeg_path" -version | grep -Po "libavformat\W+\K\d+") + export LIBAVFORMAT_VERSION_MAJOR } +set_libva_version + if [[ -f "/dev/shm/go2rtc.yaml" ]]; then echo "[INFO] Removing stale config from last run..." rm /dev/shm/go2rtc.yaml @@ -66,8 +70,6 @@ else echo "[WARNING] Unable to remove existing go2rtc config. Changes made to your frigate config file may not be recognized. Please remove the /dev/shm/go2rtc.yaml from your docker host manually." fi -set_libva_version - readonly config_path="/config" if [[ -x "${config_path}/go2rtc" ]]; then diff --git a/docker/main/rootfs/usr/local/ffmpeg/get_ffmpeg_path.py b/docker/main/rootfs/usr/local/ffmpeg/get_ffmpeg_path.py index 27034bff9..ed7f6a891 100644 --- a/docker/main/rootfs/usr/local/ffmpeg/get_ffmpeg_path.py +++ b/docker/main/rootfs/usr/local/ffmpeg/get_ffmpeg_path.py @@ -1,6 +1,5 @@ import json import os -import shutil import sys from ruamel.yaml import YAML @@ -35,10 +34,7 @@ except FileNotFoundError: path = config.get("ffmpeg", {}).get("path", "default") if path == "default": - if shutil.which("ffmpeg") is None: - print(f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg") - else: - print("ffmpeg") + print(f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg") elif path in INCLUDED_FFMPEG_VERSIONS: print(f"/usr/lib/ffmpeg/{path}/bin/ffmpeg") else: diff --git a/docker/main/rootfs/usr/local/go2rtc/create_config.py b/docker/main/rootfs/usr/local/go2rtc/create_config.py index 0e4aa7bd1..4fe26775e 100644 --- a/docker/main/rootfs/usr/local/go2rtc/create_config.py +++ b/docker/main/rootfs/usr/local/go2rtc/create_config.py @@ -2,7 +2,6 @@ import json import os -import shutil import sys from pathlib import Path @@ -13,6 +12,7 @@ from frigate.const import ( BIRDSEYE_PIPE, DEFAULT_FFMPEG_VERSION, INCLUDED_FFMPEG_VERSIONS, + LIBAVFORMAT_VERSION_MAJOR, ) from frigate.ffmpeg_presets import parse_preset_hardware_acceleration_encode @@ -115,10 +115,7 @@ else: # ensure ffmpeg path is set correctly path = config.get("ffmpeg", {}).get("path", "default") if path == "default": - if shutil.which("ffmpeg") is None: - ffmpeg_path = f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg" - else: - ffmpeg_path = "ffmpeg" + ffmpeg_path = f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg" elif path in INCLUDED_FFMPEG_VERSIONS: ffmpeg_path = f"/usr/lib/ffmpeg/{path}/bin/ffmpeg" else: @@ -130,14 +127,12 @@ elif go2rtc_config["ffmpeg"].get("bin") is None: go2rtc_config["ffmpeg"]["bin"] = ffmpeg_path # need to replace ffmpeg command when using ffmpeg4 -if int(os.environ.get("LIBAVFORMAT_VERSION_MAJOR", "59") or "59") < 59: - if go2rtc_config["ffmpeg"].get("rtsp") is None: - go2rtc_config["ffmpeg"]["rtsp"] = ( - "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}" - ) -else: +if LIBAVFORMAT_VERSION_MAJOR < 59: + rtsp_args = "-fflags nobuffer -flags low_delay -stimeout 5000000 
-user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}" if go2rtc_config.get("ffmpeg") is None: - go2rtc_config["ffmpeg"] = {"path": ""} + go2rtc_config["ffmpeg"] = {"rtsp": rtsp_args} + elif go2rtc_config["ffmpeg"].get("rtsp") is None: + go2rtc_config["ffmpeg"]["rtsp"] = rtsp_args for name in go2rtc_config.get("streams", {}): stream = go2rtc_config["streams"][name] diff --git a/docker/rockchip/Dockerfile b/docker/rockchip/Dockerfile index 09380dfb3..59c8ad791 100644 --- a/docker/rockchip/Dockerfile +++ b/docker/rockchip/Dockerfile @@ -3,6 +3,9 @@ # https://askubuntu.com/questions/972516/debian-frontend-environment-variable ARG DEBIAN_FRONTEND=noninteractive +# Globally set pip break-system-packages option to avoid having to specify it every time +ARG PIP_BREAK_SYSTEM_PACKAGES=1 + FROM wheels as rk-wheels COPY docker/main/requirements-wheels.txt /requirements-wheels.txt COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt @@ -13,6 +16,7 @@ RUN rm -rf /rk-wheels/opencv_python-* FROM deps AS rk-frigate ARG TARGETARCH +ARG PIP_BREAK_SYSTEM_PACKAGES RUN --mount=type=bind,from=rk-wheels,source=/rk-wheels,target=/deps/rk-wheels \ pip3 install --no-deps -U /deps/rk-wheels/*.whl @@ -24,8 +28,7 @@ COPY docker/rockchip/conv2rknn.py /opt/conv2rknn.py ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/librknnrt.so /usr/lib/ -RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg -RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-7/ffmpeg /usr/lib/ffmpeg/6.0/bin/ ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-7/ffprobe /usr/lib/ffmpeg/6.0/bin/ -ENV PATH="/usr/lib/ffmpeg/6.0/bin/:${PATH}" +ENV DEFAULT_FFMPEG_VERSION="6.0" +ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:${INCLUDED_FFMPEG_VERSIONS}" diff --git a/docker/rpi/Dockerfile b/docker/rpi/Dockerfile index 581ca7ff8..35a225227 100644 --- a/docker/rpi/Dockerfile +++ b/docker/rpi/Dockerfile @@ -6,11 +6,12 @@ ARG DEBIAN_FRONTEND=noninteractive FROM deps AS rpi-deps ARG TARGETARCH -RUN rm -rf /usr/lib/btbn-ffmpeg/ - # Install dependencies RUN --mount=type=bind,source=docker/rpi/install_deps.sh,target=/deps/install_deps.sh \ /deps/install_deps.sh +ENV DEFAULT_FFMPEG_VERSION="rpi" +ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:${INCLUDED_FFMPEG_VERSIONS}" + WORKDIR /opt/frigate/ COPY --from=rootfs / / diff --git a/docker/rpi/install_deps.sh b/docker/rpi/install_deps.sh index ed34389e5..bf537d503 100755 --- a/docker/rpi/install_deps.sh +++ b/docker/rpi/install_deps.sh @@ -28,4 +28,7 @@ if [[ "${TARGETARCH}" == "arm64" ]]; then echo "deb [signed-by=/usr/share/keyrings/raspbian.gpg] https://archive.raspberrypi.org/debian/ bookworm main" | tee /etc/apt/sources.list.d/raspi.list apt-get -qq update apt-get -qq install --no-install-recommends --no-install-suggests -y ffmpeg + mkdir -p /usr/lib/ffmpeg/rpi/bin + ln -svf /usr/bin/ffmpeg /usr/lib/ffmpeg/rpi/bin/ffmpeg + ln -svf /usr/bin/ffprobe /usr/lib/ffmpeg/rpi/bin/ffprobe fi diff --git a/docker/tensorrt/Dockerfile.arm64 b/docker/tensorrt/Dockerfile.arm64 index 33fd8182a..7a88a03a6 100644 --- a/docker/tensorrt/Dockerfile.arm64 +++ b/docker/tensorrt/Dockerfile.arm64 @@ -76,8 +76,9 @@ RUN apt-get update \ && apt-get install -y python-is-python3 libprotobuf23 \ && rm -rf /var/lib/apt/lists/* -RUN rm -rf /usr/lib/btbn-ffmpeg/ COPY --from=jetson-ffmpeg /rootfs / +ENV DEFAULT_FFMPEG_VERSION="jetson" +ENV 
INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:${INCLUDED_FFMPEG_VERSIONS}" # ffmpeg runtime dependencies RUN apt-get -qq update \ diff --git a/docker/tensorrt/build_jetson_ffmpeg.sh b/docker/tensorrt/build_jetson_ffmpeg.sh index 692612137..fb29eb214 100755 --- a/docker/tensorrt/build_jetson_ffmpeg.sh +++ b/docker/tensorrt/build_jetson_ffmpeg.sh @@ -5,7 +5,7 @@ set -euxo pipefail -INSTALL_PREFIX=/rootfs/usr/local +INSTALL_PREFIX=/rootfs/usr/lib/ffmpeg/jetson apt-get -qq update apt-get -qq install -y --no-install-recommends build-essential ccache clang cmake pkg-config diff --git a/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run b/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run index a88da89d6..e3440e7ac 100755 --- a/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run +++ b/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run @@ -20,7 +20,7 @@ FIRST_MODEL=true MODEL_DOWNLOAD="" MODEL_CONVERT="" -if [ -z "$YOLO_MODELS"]; then +if [ -z "$YOLO_MODELS" ]; then echo "tensorrt model preparation disabled" exit 0 fi @@ -64,7 +64,7 @@ fi # order to run libyolo here. # On Jetpack 5.0, these libraries are not mounted by the runtime and are supplied by the image. if [[ "$(arch)" == "aarch64" ]]; then - if [[ ! -e /usr/lib/aarch64-linux-gnu/tegra ]]; then + if [[ ! -e /usr/lib/aarch64-linux-gnu/tegra && ! -e /usr/lib/aarch64-linux-gnu/tegra-egl ]]; then echo "ERROR: Container must be launched with nvidia runtime" exit 1 elif [[ ! -e /usr/lib/aarch64-linux-gnu/libnvinfer.so.8 || diff --git a/docker/tensorrt/requirements-amd64.txt b/docker/tensorrt/requirements-amd64.txt index c81851506..8d520d9f9 100644 --- a/docker/tensorrt/requirements-amd64.txt +++ b/docker/tensorrt/requirements-amd64.txt @@ -1,12 +1,14 @@ # NVidia TensorRT Support (amd64 only) --extra-index-url 'https://pypi.nvidia.com' numpy < 1.24; platform_machine == 'x86_64' -tensorrt == 8.6.1.*; platform_machine == 'x86_64' +tensorrt == 8.6.1; platform_machine == 'x86_64' +tensorrt_bindings == 8.6.1; platform_machine == 'x86_64' cuda-python == 11.8.*; platform_machine == 'x86_64' cython == 3.0.*; platform_machine == 'x86_64' nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64' nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64' nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64' +nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64' nvidia-cudnn-cu12 == 9.5.0.*; platform_machine == 'x86_64' nvidia-cufft-cu11==10.*; platform_machine == 'x86_64' onnx==1.16.*; platform_machine == 'x86_64' diff --git a/docker/tensorrt/trt.hcl b/docker/tensorrt/trt.hcl index 730f54053..ba3b93244 100644 --- a/docker/tensorrt/trt.hcl +++ b/docker/tensorrt/trt.hcl @@ -14,12 +14,17 @@ variable "COMPUTE_LEVEL" { default = "" } variable "BASE_HOOK" { - # Ensure an up-to-date python 3.11 is available in tensorrt/jetson image + # Ensure an up-to-date python 3.11 is available in jetson images default = <> /etc/apt/sources.list.d/deadsnakes.list + echo "deb-src https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu $VERSION_CODENAME main" >> /etc/apt/sources.list.d/deadsnakes.list + + # Add deadsnakes signing key + apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F23C5A6CF475977595C89F51BA6932366A755776 fi EOT } diff --git a/docs/docs/configuration/advanced.md b/docs/docs/configuration/advanced.md index c1f12ee08..b037c0768 100644 --- a/docs/docs/configuration/advanced.md +++ b/docs/docs/configuration/advanced.md @@ 
-37,7 +37,7 @@ See [the go2rtc docs](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#modul ```yaml go2rtc: streams: - ... + # ... log: exec: trace ``` @@ -176,15 +176,13 @@ listen [::]:5000 ipv6only=off; ### Custom ffmpeg build -Included with Frigate is a build of ffmpeg that works for the vast majority of users. However, there exists some hardware setups which have incompatibilities with the included build. In this case, statically built ffmpeg binary can be downloaded to /config and used. +Included with Frigate is a build of ffmpeg that works for the vast majority of users. However, there exist some hardware setups that have incompatibilities with the included build. In this case, statically built `ffmpeg` and `ffprobe` binaries can be placed in `/config/custom-ffmpeg/bin` for Frigate to use. To do this: -1. Download your ffmpeg build and uncompress to the Frigate config folder. -2. Update your docker-compose or docker CLI to include `'/home/appdata/frigate/custom-ffmpeg':'/usr/lib/btbn-ffmpeg':'ro'` in the volume mappings. -3. Restart Frigate and the custom version will be used if the mapping was done correctly. - -NOTE: The folder that is set for the config needs to be the folder that contains `/bin`. So if the full structure is `/home/appdata/frigate/custom-ffmpeg/bin/ffmpeg` then the `ffmpeg -> path` field should be `/config/custom-ffmpeg/bin`. +1. Download your ffmpeg build and uncompress it to the `/config/custom-ffmpeg` folder. Verify that both the `ffmpeg` and `ffprobe` binaries are located in `/config/custom-ffmpeg/bin`. +2. Update the `ffmpeg.path` in your Frigate config to `/config/custom-ffmpeg`. +3. Restart Frigate and the custom version will be used if the steps above were done correctly. ### Custom go2rtc version @@ -192,7 +190,7 @@ Frigate currently includes go2rtc v1.9.2, there may be certain cases where you w To do this: -1. Download the go2rtc build to the /config folder. +1. Download the go2rtc build to the `/config` folder. 2. Rename the build to `go2rtc`. 3. Give `go2rtc` execute permission. 4. Restart Frigate and the custom version will be used, you can verify by checking go2rtc logs. diff --git a/docs/docs/configuration/license_plate_recognition.md b/docs/docs/configuration/license_plate_recognition.md index d2a130c5e..4fd7aa568 100644 --- a/docs/docs/configuration/license_plate_recognition.md +++ b/docs/docs/configuration/license_plate_recognition.md @@ -3,13 +3,28 @@ id: license_plate_recognition title: License Plate Recognition (LPR) --- -Frigate can recognize license plates on vehicles and automatically add the detected characters as a `sub_label` to objects that are of type `car`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street with a dedicated LPR camera. +Frigate can recognize license plates on vehicles and automatically add the detected characters or recognized name as a `sub_label` to objects that are of type `car`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street. + +LPR works best when the license plate is clearly visible to the camera. For moving vehicles, Frigate continuously refines the recognition process, keeping the most confident result. However, LPR does not run on stationary vehicles. + +When a plate is recognized, the detected characters or recognized name is: + +- Added as a `sub_label` to the `car` tracked object.
+- Viewable in the Review Item Details pane in Review and the Tracked Object Details pane in Explore. +- Filterable through the More Filters menu in Explore. +- Published via the `frigate/events` MQTT topic as a `sub_label` for the tracked object. + +## Model Requirements Users running a Frigate+ model (or any custom model that natively detects license plates) should ensure that `license_plate` is added to the [list of objects to track](https://docs.frigate.video/plus/#available-label-types) either globally or for a specific camera. This will improve the accuracy and performance of the LPR model. -Users without a model that detects license plates can still run LPR. A small, CPU inference, YOLOv9 license plate detection model will be used instead. You should _not_ define `license_plate` in your list of objects to track. +Users without a model that detects license plates can still run LPR. Frigate uses a lightweight YOLOv9 license plate detection model that runs on your CPU. In this case, you should _not_ define `license_plate` in your list of objects to track. -LPR is most effective when the vehicle’s license plate is fully visible to the camera. For moving vehicles, Frigate will attempt to read the plate continuously, refining recognition and keeping the most confident result. LPR will not run on stationary vehicles. +:::note + +Frigate needs to first detect a `car` before it can recognize a license plate. If you're using a dedicated LPR camera or have a zoomed-in view, make sure the camera captures enough of the `car` for Frigate to detect it reliably. + +::: ## Minimum System Requirements @@ -24,6 +39,8 @@ lpr: enabled: True ``` +Ensure that your camera is configured to detect objects of type `car`, and that a car is actually being detected by Frigate. Otherwise, LPR will not run. + ## Advanced Configuration Fine-tune the LPR feature using these optional parameters: @@ -41,11 +58,12 @@ Fine-tune the LPR feature using these optional parameters: - **`recognition_threshold`**: Recognition confidence score required to add the plate to the object as a sub label. - Default: `0.9`. -- **`min_plate_length`**: Specifies the minimum number of characters a detected license plate must have to be added as a sub-label to an object. +- **`min_plate_length`**: Specifies the minimum number of characters a detected license plate must have to be added as a sub label to an object. - Use this to filter out short, incomplete, or incorrect detections. - **`format`**: A regular expression defining the expected format of detected plates. Plates that do not match this format will be discarded. - `"^[A-Z]{1,3} [A-Z]{1,2} [0-9]{1,4}$"` matches plates like "B AB 1234" or "M X 7" - `"^[A-Z]{2}[0-9]{2} [A-Z]{3}$"` matches plates like "AB12 XYZ" or "XY68 ABC" + - Websites like https://regex101.com/ can help test regular expressions for your plates. ### Matching @@ -53,9 +71,9 @@ Fine-tune the LPR feature using these optional parameters: - These labels appear in the UI, filters, and notifications. - **`match_distance`**: Allows for minor variations (missing/incorrect characters) when matching a detected plate to a known plate. - For example, setting `match_distance: 1` allows a plate `ABCDE` to match `ABCBE` or `ABCD`. - - This parameter will not operate on known plates that are defined as regular expressions. You should define the full string of your plate in `known_plates` in order to use `match_distance`. + - This parameter will _not_ operate on known plates that are defined as regular expressions. 
You should define the full string of your plate in `known_plates` in order to use `match_distance`. -### Examples +## Configuration Examples ```yaml lpr: @@ -69,7 +87,9 @@ lpr: Johnny: - "J*N-*234" # Matches JHN-1234 and JMN-I234, but also note that "*" matches any number of characters Sally: - - "[S5]LL-1234" # Matches both SLL-1234 and 5LL-1234 + - "[S5]LL 1234" # Matches both SLL 1234 and 5LL 1234 + Work Trucks: + - "EMP-[0-9]{3}[A-Z]" # Matches plates like EMP-123A, EMP-456Z ``` ```yaml @@ -77,12 +97,54 @@ lpr: enabled: True min_area: 4000 # Run recognition on larger plates only recognition_threshold: 0.85 - format: "^[A-Z]{3}-[0-9]{4}$" # Only recognize plates that are three letters, followed by a dash, followed by 4 numbers + format: "^[A-Z]{2} [A-Z][0-9]{4}$" # Only recognize plates that are two letters, followed by a space, followed by a single letter and 4 numbers match_distance: 1 # Allow one character variation in plate matching known_plates: Delivery Van: - - "RJK-5678" - - "UPS-1234" - Employee Parking: - - "EMP-[0-9]{3}[A-Z]" # Matches plates like EMP-123A, EMP-456Z + - "RJ K5678" + - "UP A1234" + Supervisor: + - "MN D3163" ``` + +## FAQ + +### Why isn't my license plate being detected and recognized? + +Ensure that: + +- Your camera has a clear, well-lit view of the plate. +- The plate is large enough in the image (try adjusting `min_area`). +- A `car` is detected first, as LPR only runs on recognized vehicles. + +If you are using a Frigate+ model or a custom model that detects license plates, ensure that `license_plate` is added to your list of objects to track. +If you are using the free model that ships with Frigate, you should _not_ add `license_plate` to the list of objects to track. + +### Can I run LPR without detecting `car` objects? + +No, Frigate requires a `car` to be detected first before recognizing a license plate. + +### How can I improve detection accuracy? + +- Use high-quality cameras with good resolution. +- Adjust `detection_threshold` and `recognition_threshold` values. +- Define a `format` regex to filter out invalid detections. + +### Does LPR work at night? + +Yes, but performance depends on camera quality, lighting, and infrared capabilities. Make sure your camera can capture clear images of plates at night. + +### How can I match known plates with minor variations? + +Use `match_distance` to allow small character mismatches. Alternatively, define multiple variations in `known_plates`. + +### How do I debug LPR issues? + +- View MQTT messages for `frigate/events` to verify detected plates. +- Adjust `detection_threshold` and `recognition_threshold` settings. +- If you are using a Frigate+ model or a model that detects license plates, watch the debug view (Settings --> Debug) to ensure that `license_plate` is being detected with a `car`. +- Enable debug logs for LPR by adding `frigate.data_processing.real_time.license_plate_processor: debug` to your `logger` configuration. These logs are _very_ verbose, so only enable this when necessary. + +### Will LPR slow down my system? + +LPR runs on the CPU, so performance impact depends on your hardware. Ensure you have at least 4GB RAM and a capable CPU for optimal results. 
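Taken together, `format` acts as a full-match regular-expression gate and `match_distance` as an edit-distance allowance against `known_plates`. A hedged sketch of those semantics (illustrative only, not Frigate's internal matching code; function names and the wildcard handling are assumptions based on the doc comments above):

```python
import re


def levenshtein(a: str, b: str) -> int:
    """Edit distance: minimum insertions, deletions, and substitutions."""
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1, cur[j - 1] + 1, prev[j - 1] + (ca != cb)))
        prev = cur
    return prev[-1]


def plate_accepted(detected: str, fmt: str) -> bool:
    """A detected plate must fully match the configured `format`, if set."""
    return re.fullmatch(fmt, detected) is not None


def matches_known_plate(detected: str, known: str, match_distance: int) -> bool:
    if any(ch in known for ch in "*[](){}"):
        # regex/wildcard entries: match_distance does not apply
        return re.fullmatch(known.replace("*", ".*"), detected) is not None
    return levenshtein(detected, known) <= match_distance


assert plate_accepted("MN D3163", r"^[A-Z]{2} [A-Z][0-9]{4}$")
assert matches_known_plate("MN D3I63", "MN D3163", match_distance=1)  # one bad character
```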
diff --git a/docs/docs/configuration/zones.md b/docs/docs/configuration/zones.md index 8dd63f0f3..0c6793d58 100644 --- a/docs/docs/configuration/zones.md +++ b/docs/docs/configuration/zones.md @@ -140,12 +140,12 @@ cameras: zones: street: coordinates: 0.033,0.306,0.324,0.138,0.439,0.185,0.042,0.428 - distances: 10,12,11,13.5 + distances: 10,12,11,13.5 # in meters or feet ``` Each number in the `distance` field represents the real-world distance between the points in the `coordinates` list. So in the example above, the distance between the first two points ([0.033,0.306] and [0.324,0.138]) is 10. The distance between the second and third set of points ([0.324,0.138] and [0.439,0.185]) is 12, and so on. The fastest and most accurate way to configure this is through the Zone Editor in the Frigate UI. -The `distance` values are measured in meters or feet, depending on how `unit_system` is configured in your `ui` config: +The `distance` values are measured in meters (metric) or feet (imperial), depending on how `unit_system` is configured in your `ui` config: ```yaml ui: @@ -153,7 +153,9 @@ ui: unit_system: metric ``` -The average speed of your object as it moved through your zone is saved in Frigate's database and can be seen in the UI in the Tracked Object Details pane in Explore. Current estimated speed can also be seen on the debug view as the third value in the object label (see the caveats below). Current estimated speed, average estimated speed, and velocity angle (the angle of the direction the object is moving relative to the frame) of tracked objects is also sent through the `events` MQTT topic. See the [MQTT docs](../integrations/mqtt.md#frigateevents). These speed values are output as a number in miles per hour (mph) or kilometers per hour (kph), depending on how `unit_system` is configured in your `ui` config. +The average speed of your object as it moved through your zone is saved in Frigate's database and can be seen in the UI in the Tracked Object Details pane in Explore. Current estimated speed can also be seen on the debug view as the third value in the object label (see the caveats below). Current estimated speed, average estimated speed, and velocity angle (the angle of the direction the object is moving relative to the frame) of tracked objects is also sent through the `events` MQTT topic. See the [MQTT docs](../integrations/mqtt.md#frigateevents). + +These speed values are output as a number in miles per hour (mph) or kilometers per hour (kph). For miles per hour, set `unit_system` to `imperial`. For kilometers per hour, set `unit_system` to `metric`. 
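The unit handling above boils down to distance-over-time with a per-system conversion. A small sketch of that arithmetic (not Frigate's implementation; the function name and sample numbers are illustrative):

```python
def estimated_speed(distance: float, seconds: float, unit_system: str = "metric") -> float:
    """distance is meters for metric (returns kph) or feet for imperial (returns mph)."""
    if unit_system == "metric":
        return distance / seconds * 3.6  # m/s -> kph
    return distance / seconds * 3600 / 5280  # ft/s -> mph


# A car covering the 10 m first segment of the example zone in 1.2 seconds:
print(estimated_speed(10, 1.2))                # 30.0 kph
print(estimated_speed(32.8, 1.2, "imperial"))  # ~18.6 mph (32.8 ft ≈ 10 m)
```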
#### Best practices and caveats diff --git a/frigate/api/defs/response/event_response.py b/frigate/api/defs/response/event_response.py index 17b9b166f..083849706 100644 --- a/frigate/api/defs/response/event_response.py +++ b/frigate/api/defs/response/event_response.py @@ -12,7 +12,7 @@ class EventResponse(BaseModel): end_time: Optional[float] false_positive: Optional[bool] zones: list[str] - thumbnail: str + thumbnail: Optional[str] has_clip: bool has_snapshot: bool retain_indefinitely: bool diff --git a/frigate/camera/activity_manager.py b/frigate/camera/activity_manager.py index 9c06cf6f9..a6e40f4ca 100644 --- a/frigate/camera/activity_manager.py +++ b/frigate/camera/activity_manager.py @@ -33,7 +33,11 @@ class CameraActivityManager: self.zone_active_object_counts[zone] = Counter() self.all_zone_labels[zone] = set() - self.all_zone_labels[zone].update(zone_config.objects) + self.all_zone_labels[zone].update( + zone_config.objects + if zone_config.objects + else camera_config.objects.track + ) def update_activity(self, new_activity: dict[str, dict[str, any]]) -> None: all_objects: list[dict[str, any]] = [] diff --git a/frigate/comms/config_updater.py b/frigate/comms/config_updater.py index 273103911..49be36c1e 100644 --- a/frigate/comms/config_updater.py +++ b/frigate/comms/config_updater.py @@ -32,7 +32,9 @@ class ConfigPublisher: class ConfigSubscriber: """Simplifies receiving an updated config.""" - def __init__(self, topic: str) -> None: + def __init__(self, topic: str, exact=False) -> None: + self.topic = topic + self.exact = exact self.context = zmq.Context() self.socket = self.context.socket(zmq.SUB) self.socket.setsockopt_string(zmq.SUBSCRIBE, topic) @@ -42,7 +44,12 @@ class ConfigSubscriber: """Returns updated config or None if no update.""" try: topic = self.socket.recv_string(flags=zmq.NOBLOCK) - return (topic, self.socket.recv_pyobj()) + obj = self.socket.recv_pyobj() + + if not self.exact or self.topic == topic: + return (topic, obj) + else: + return (None, None) except zmq.ZMQError: return (None, None) diff --git a/frigate/config/camera/ffmpeg.py b/frigate/config/camera/ffmpeg.py index 4ab93d7b9..0b1ec2331 100644 --- a/frigate/config/camera/ffmpeg.py +++ b/frigate/config/camera/ffmpeg.py @@ -1,4 +1,3 @@ -import shutil from enum import Enum from typing import Union @@ -71,10 +70,7 @@ class FfmpegConfig(FrigateBaseModel): @property def ffmpeg_path(self) -> str: if self.path == "default": - if shutil.which("ffmpeg") is None: - return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg" - else: - return "ffmpeg" + return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg" elif self.path in INCLUDED_FFMPEG_VERSIONS: return f"/usr/lib/ffmpeg/{self.path}/bin/ffmpeg" else: @@ -83,10 +79,7 @@ class FfmpegConfig(FrigateBaseModel): @property def ffprobe_path(self) -> str: if self.path == "default": - if shutil.which("ffprobe") is None: - return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffprobe" - else: - return "ffprobe" + return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffprobe" elif self.path in INCLUDED_FFMPEG_VERSIONS: return f"/usr/lib/ffmpeg/{self.path}/bin/ffprobe" else: diff --git a/frigate/const.py b/frigate/const.py index eb48e9bf9..866fa3d29 100644 --- a/frigate/const.py +++ b/frigate/const.py @@ -1,3 +1,4 @@ +import os import re CONFIG_DIR = "/config" @@ -61,8 +62,9 @@ MAX_WAL_SIZE = 10 # MB # Ffmpeg constants -DEFAULT_FFMPEG_VERSION = "7.0" -INCLUDED_FFMPEG_VERSIONS = ["7.0", "5.0"] +DEFAULT_FFMPEG_VERSION = os.environ.get("DEFAULT_FFMPEG_VERSION", "") 
+INCLUDED_FFMPEG_VERSIONS = os.environ.get("INCLUDED_FFMPEG_VERSIONS", "").split(":") +LIBAVFORMAT_VERSION_MAJOR = int(os.environ.get("LIBAVFORMAT_VERSION_MAJOR", "59")) FFMPEG_HWACCEL_NVIDIA = "preset-nvidia" FFMPEG_HWACCEL_VAAPI = "preset-vaapi" FFMPEG_HWACCEL_VULKAN = "preset-vulkan" diff --git a/frigate/data_processing/real_time/license_plate_processor.py b/frigate/data_processing/real_time/license_plate_processor.py index 2d64e5cdb..bd7441928 100644 --- a/frigate/data_processing/real_time/license_plate_processor.py +++ b/frigate/data_processing/real_time/license_plate_processor.py @@ -16,7 +16,12 @@ from shapely.geometry import Polygon from frigate.comms.inter_process import InterProcessRequestor from frigate.config import FrigateConfig from frigate.const import FRIGATE_LOCALHOST -from frigate.embeddings.functions.onnx import GenericONNXEmbedding, ModelTypeEnum +from frigate.embeddings.onnx.lpr_embedding import ( + LicensePlateDetector, + PaddleOCRClassification, + PaddleOCRDetection, + PaddleOCRRecognition, +) from frigate.util.image import area from ..types import DataProcessorMetrics @@ -52,49 +57,26 @@ class LicensePlateProcessor(RealTimeProcessorApi): self.lpr_recognition_model = None if self.config.lpr.enabled: - self.detection_model = GenericONNXEmbedding( - model_name="paddleocr-onnx", - model_file="detection.onnx", - download_urls={ - "detection.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/detection.onnx" - }, + self.detection_model = PaddleOCRDetection( model_size="large", - model_type=ModelTypeEnum.lpr_detect, requestor=self.requestor, device="CPU", ) - self.classification_model = GenericONNXEmbedding( - model_name="paddleocr-onnx", - model_file="classification.onnx", - download_urls={ - "classification.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/classification.onnx" - }, + self.classification_model = PaddleOCRClassification( model_size="large", - model_type=ModelTypeEnum.lpr_classify, requestor=self.requestor, device="CPU", ) - self.recognition_model = GenericONNXEmbedding( - model_name="paddleocr-onnx", - model_file="recognition.onnx", - download_urls={ - "recognition.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/recognition.onnx" - }, + self.recognition_model = PaddleOCRRecognition( model_size="large", - model_type=ModelTypeEnum.lpr_recognize, requestor=self.requestor, device="CPU", ) - self.yolov9_detection_model = GenericONNXEmbedding( - model_name="yolov9_license_plate", - model_file="yolov9-256-license-plates.onnx", - download_urls={ - "yolov9-256-license-plates.onnx": "https://github.com/hawkeye217/yolov9-license-plates/raw/refs/heads/master/models/yolov9-256-license-plates.onnx" - }, + + self.yolov9_detection_model = LicensePlateDetector( model_size="large", - model_type=ModelTypeEnum.yolov9_lpr_detect, requestor=self.requestor, device="CPU", ) diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py index 5ce7ba86d..c06f46ba4 100644 --- a/frigate/embeddings/embeddings.py +++ b/frigate/embeddings/embeddings.py @@ -22,7 +22,7 @@ from frigate.types import ModelStatusTypesEnum from frigate.util.builtin import serialize from frigate.util.path import get_event_thumbnail_bytes -from .functions.onnx import GenericONNXEmbedding, ModelTypeEnum +from .onnx.jina_v1_embedding import JinaV1ImageEmbedding, JinaV1TextEmbedding logger = logging.getLogger(__name__) @@ -97,36 +97,14 @@ class Embeddings: }, ) - self.text_embedding = 
GenericONNXEmbedding( - model_name="jinaai/jina-clip-v1", - model_file="text_model_fp16.onnx", - tokenizer_file="tokenizer", - download_urls={ - "text_model_fp16.onnx": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/text_model_fp16.onnx", - }, + self.text_embedding = JinaV1TextEmbedding( model_size=config.semantic_search.model_size, - model_type=ModelTypeEnum.text, requestor=self.requestor, device="CPU", ) - model_file = ( - "vision_model_fp16.onnx" - if self.config.semantic_search.model_size == "large" - else "vision_model_quantized.onnx" - ) - - download_urls = { - model_file: f"https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/{model_file}", - "preprocessor_config.json": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/preprocessor_config.json", - } - - self.vision_embedding = GenericONNXEmbedding( - model_name="jinaai/jina-clip-v1", - model_file=model_file, - download_urls=download_urls, + self.vision_embedding = JinaV1ImageEmbedding( model_size=config.semantic_search.model_size, - model_type=ModelTypeEnum.vision, requestor=self.requestor, device="GPU" if config.semantic_search.model_size == "large" else "CPU", ) diff --git a/frigate/embeddings/functions/onnx.py b/frigate/embeddings/functions/onnx.py deleted file mode 100644 index a8d52922b..000000000 --- a/frigate/embeddings/functions/onnx.py +++ /dev/null @@ -1,325 +0,0 @@ -import logging -import os -import warnings -from enum import Enum -from io import BytesIO -from typing import Dict, List, Optional, Union - -import cv2 -import numpy as np -import requests -from PIL import Image - -# importing this without pytorch or others causes a warning -# https://github.com/huggingface/transformers/issues/27214 -# suppressed by setting env TRANSFORMERS_NO_ADVISORY_WARNINGS=1 -from transformers import AutoFeatureExtractor, AutoTokenizer -from transformers.utils.logging import disable_progress_bar - -from frigate.comms.inter_process import InterProcessRequestor -from frigate.const import MODEL_CACHE_DIR, UPDATE_MODEL_STATE -from frigate.types import ModelStatusTypesEnum -from frigate.util.downloader import ModelDownloader -from frigate.util.model import ONNXModelRunner - -warnings.filterwarnings( - "ignore", - category=FutureWarning, - message="The class CLIPFeatureExtractor is deprecated", -) - -# disables the progress bar for downloading tokenizers and feature extractors -disable_progress_bar() -logger = logging.getLogger(__name__) - -FACE_EMBEDDING_SIZE = 160 -LPR_EMBEDDING_SIZE = 256 - - -class ModelTypeEnum(str, Enum): - face = "face" - vision = "vision" - text = "text" - lpr_detect = "lpr_detect" - lpr_classify = "lpr_classify" - lpr_recognize = "lpr_recognize" - yolov9_lpr_detect = "yolov9_lpr_detect" - - -class GenericONNXEmbedding: - """Generic embedding function for ONNX models (text and vision).""" - - def __init__( - self, - model_name: str, - model_file: str, - download_urls: Dict[str, str], - model_size: str, - model_type: ModelTypeEnum, - requestor: InterProcessRequestor, - tokenizer_file: Optional[str] = None, - device: str = "AUTO", - ): - self.model_name = model_name - self.model_file = model_file - self.tokenizer_file = tokenizer_file - self.requestor = requestor - self.download_urls = download_urls - self.model_type = model_type - self.model_size = model_size - self.device = device - self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name) - self.tokenizer = None - self.feature_extractor = None - self.runner = None - files_names = list(self.download_urls.keys()) + ( - 
[self.tokenizer_file] if self.tokenizer_file else [] - ) - - if not all( - os.path.exists(os.path.join(self.download_path, n)) for n in files_names - ): - logger.debug(f"starting model download for {self.model_name}") - self.downloader = ModelDownloader( - model_name=self.model_name, - download_path=self.download_path, - file_names=files_names, - download_func=self._download_model, - ) - self.downloader.ensure_model_files() - else: - self.downloader = None - ModelDownloader.mark_files_state( - self.requestor, - self.model_name, - files_names, - ModelStatusTypesEnum.downloaded, - ) - self._load_model_and_utils() - logger.debug(f"models are already downloaded for {self.model_name}") - - def _download_model(self, path: str): - try: - file_name = os.path.basename(path) - - if file_name in self.download_urls: - ModelDownloader.download_from_url(self.download_urls[file_name], path) - elif ( - file_name == self.tokenizer_file - and self.model_type == ModelTypeEnum.text - ): - if not os.path.exists(path + "/" + self.model_name): - logger.info(f"Downloading {self.model_name} tokenizer") - - tokenizer = AutoTokenizer.from_pretrained( - self.model_name, - trust_remote_code=True, - cache_dir=f"{MODEL_CACHE_DIR}/{self.model_name}/tokenizer", - clean_up_tokenization_spaces=True, - ) - tokenizer.save_pretrained(path) - - self.downloader.requestor.send_data( - UPDATE_MODEL_STATE, - { - "model": f"{self.model_name}-{file_name}", - "state": ModelStatusTypesEnum.downloaded, - }, - ) - except Exception: - self.downloader.requestor.send_data( - UPDATE_MODEL_STATE, - { - "model": f"{self.model_name}-{file_name}", - "state": ModelStatusTypesEnum.error, - }, - ) - - def _load_model_and_utils(self): - if self.runner is None: - if self.downloader: - self.downloader.wait_for_download() - if self.model_type == ModelTypeEnum.text: - self.tokenizer = self._load_tokenizer() - elif self.model_type == ModelTypeEnum.vision: - self.feature_extractor = self._load_feature_extractor() - elif self.model_type == ModelTypeEnum.face: - self.feature_extractor = [] - elif self.model_type == ModelTypeEnum.lpr_detect: - self.feature_extractor = [] - elif self.model_type == ModelTypeEnum.lpr_classify: - self.feature_extractor = [] - elif self.model_type == ModelTypeEnum.lpr_recognize: - self.feature_extractor = [] - elif self.model_type == ModelTypeEnum.yolov9_lpr_detect: - self.feature_extractor = [] - - self.runner = ONNXModelRunner( - os.path.join(self.download_path, self.model_file), - self.device, - self.model_size, - ) - - def _load_tokenizer(self): - tokenizer_path = os.path.join(f"{MODEL_CACHE_DIR}/{self.model_name}/tokenizer") - return AutoTokenizer.from_pretrained( - self.model_name, - cache_dir=tokenizer_path, - trust_remote_code=True, - clean_up_tokenization_spaces=True, - ) - - def _load_feature_extractor(self): - return AutoFeatureExtractor.from_pretrained( - f"{MODEL_CACHE_DIR}/{self.model_name}", - ) - - def _preprocess_inputs(self, raw_inputs: any) -> any: - if self.model_type == ModelTypeEnum.text: - max_length = max(len(self.tokenizer.encode(text)) for text in raw_inputs) - return [ - self.tokenizer( - text, - padding="max_length", - truncation=True, - max_length=max_length, - return_tensors="np", - ) - for text in raw_inputs - ] - elif self.model_type == ModelTypeEnum.vision: - processed_images = [self._process_image(img) for img in raw_inputs] - return [ - self.feature_extractor(images=image, return_tensors="np") - for image in processed_images - ] - elif self.model_type == ModelTypeEnum.face: - if 
isinstance(raw_inputs, list): - raise ValueError("Face embedding does not support batch inputs.") - - pil = self._process_image(raw_inputs) - - # handle images larger than input size - width, height = pil.size - if width != FACE_EMBEDDING_SIZE or height != FACE_EMBEDDING_SIZE: - if width > height: - new_height = int(((height / width) * FACE_EMBEDDING_SIZE) // 4 * 4) - pil = pil.resize((FACE_EMBEDDING_SIZE, new_height)) - else: - new_width = int(((width / height) * FACE_EMBEDDING_SIZE) // 4 * 4) - pil = pil.resize((new_width, FACE_EMBEDDING_SIZE)) - - og = np.array(pil).astype(np.float32) - - # Image must be FACE_EMBEDDING_SIZExFACE_EMBEDDING_SIZE - og_h, og_w, channels = og.shape - frame = np.full( - (FACE_EMBEDDING_SIZE, FACE_EMBEDDING_SIZE, channels), - (0, 0, 0), - dtype=np.float32, - ) - - # compute center offset - x_center = (FACE_EMBEDDING_SIZE - og_w) // 2 - y_center = (FACE_EMBEDDING_SIZE - og_h) // 2 - - # copy img image into center of result image - frame[y_center : y_center + og_h, x_center : x_center + og_w] = og - frame = np.expand_dims(frame, axis=0) - return [{"input_2": frame}] - elif self.model_type == ModelTypeEnum.lpr_detect: - preprocessed = [] - for x in raw_inputs: - preprocessed.append(x) - return [{"x": preprocessed[0]}] - elif self.model_type == ModelTypeEnum.lpr_classify: - processed = [] - for img in raw_inputs: - processed.append({"x": img}) - return processed - elif self.model_type == ModelTypeEnum.lpr_recognize: - processed = [] - for img in raw_inputs: - processed.append({"x": img}) - return processed - elif self.model_type == ModelTypeEnum.yolov9_lpr_detect: - if isinstance(raw_inputs, list): - raise ValueError( - "License plate embedding does not support batch inputs." - ) - # Get image as numpy array - img = self._process_image(raw_inputs) - height, width, channels = img.shape - - # Resize maintaining aspect ratio - if width > height: - new_height = int(((height / width) * LPR_EMBEDDING_SIZE) // 4 * 4) - img = cv2.resize(img, (LPR_EMBEDDING_SIZE, new_height)) - else: - new_width = int(((width / height) * LPR_EMBEDDING_SIZE) // 4 * 4) - img = cv2.resize(img, (new_width, LPR_EMBEDDING_SIZE)) - - # Get new dimensions after resize - og_h, og_w, channels = img.shape - - # Create black square frame - frame = np.full( - (LPR_EMBEDDING_SIZE, LPR_EMBEDDING_SIZE, channels), - (0, 0, 0), - dtype=np.float32, - ) - - # Center the resized image in the square frame - x_center = (LPR_EMBEDDING_SIZE - og_w) // 2 - y_center = (LPR_EMBEDDING_SIZE - og_h) // 2 - frame[y_center : y_center + og_h, x_center : x_center + og_w] = img - - # Normalize to 0-1 - frame = frame / 255.0 - - # Convert from HWC to CHW format and add batch dimension - frame = np.transpose(frame, (2, 0, 1)) - frame = np.expand_dims(frame, axis=0) - return [{"images": frame}] - else: - raise ValueError(f"Unable to preprocess inputs for {self.model_type}") - - def _process_image(self, image, output: str = "RGB") -> Image.Image: - if isinstance(image, str): - if image.startswith("http"): - response = requests.get(image) - image = Image.open(BytesIO(response.content)).convert(output) - elif isinstance(image, bytes): - image = Image.open(BytesIO(image)).convert(output) - - return image - - def __call__( - self, inputs: Union[List[str], List[Image.Image], List[str]] - ) -> List[np.ndarray]: - self._load_model_and_utils() - if self.runner is None or ( - self.tokenizer is None and self.feature_extractor is None - ): - logger.error( - f"{self.model_name} model or tokenizer/feature extractor is not loaded." 
- ) - return [] - - processed_inputs = self._preprocess_inputs(inputs) - input_names = self.runner.get_input_names() - onnx_inputs = {name: [] for name in input_names} - input: dict[str, any] - for input in processed_inputs: - for key, value in input.items(): - if key in input_names: - onnx_inputs[key].append(value[0]) - - for key in input_names: - if onnx_inputs.get(key): - onnx_inputs[key] = np.stack(onnx_inputs[key]) - else: - logger.warning(f"Expected input '{key}' not found in onnx_inputs") - - embeddings = self.runner.run(onnx_inputs)[0] - return [embedding for embedding in embeddings] diff --git a/frigate/embeddings/onnx/base_embedding.py b/frigate/embeddings/onnx/base_embedding.py new file mode 100644 index 000000000..6f74afa2a --- /dev/null +++ b/frigate/embeddings/onnx/base_embedding.py @@ -0,0 +1,95 @@ +"""Base class for onnx embedding implementations.""" + +import logging +import os +from abc import ABC, abstractmethod +from enum import Enum +from io import BytesIO + +import numpy as np +import requests +from PIL import Image + +from frigate.const import UPDATE_MODEL_STATE +from frigate.types import ModelStatusTypesEnum +from frigate.util.downloader import ModelDownloader + +logger = logging.getLogger(__name__) + + +class EmbeddingTypeEnum(str, Enum): + thumbnail = "thumbnail" + description = "description" + + +class BaseEmbedding(ABC): + """Base embedding class.""" + + def __init__(self, model_name: str, model_file: str, download_urls: dict[str, str]): + self.model_name = model_name + self.model_file = model_file + self.download_urls = download_urls + self.downloader: ModelDownloader = None + + def _download_model(self, path: str): + try: + file_name = os.path.basename(path) + + if file_name in self.download_urls: + ModelDownloader.download_from_url(self.download_urls[file_name], path) + + self.downloader.requestor.send_data( + UPDATE_MODEL_STATE, + { + "model": f"{self.model_name}-{file_name}", + "state": ModelStatusTypesEnum.downloaded, + }, + ) + except Exception: + self.downloader.requestor.send_data( + UPDATE_MODEL_STATE, + { + "model": f"{self.model_name}-{file_name}", + "state": ModelStatusTypesEnum.error, + }, + ) + + @abstractmethod + def _load_model_and_utils(self): + pass + + @abstractmethod + def _preprocess_inputs(self, raw_inputs: any) -> any: + pass + + def _process_image(self, image, output: str = "RGB") -> Image.Image: + if isinstance(image, str): + if image.startswith("http"): + response = requests.get(image) + image = Image.open(BytesIO(response.content)).convert(output) + elif isinstance(image, bytes): + image = Image.open(BytesIO(image)).convert(output) + + return image + + def __call__( + self, inputs: list[str] | list[Image.Image] + ) -> list[np.ndarray]: + self._load_model_and_utils() + processed = self._preprocess_inputs(inputs) + input_names = self.runner.get_input_names() + onnx_inputs = {name: [] for name in input_names} + input: dict[str, any] + for input in processed: + for key, value in input.items(): + if key in input_names: + onnx_inputs[key].append(value[0]) + + for key in input_names: + if onnx_inputs.get(key): + onnx_inputs[key] = np.stack(onnx_inputs[key]) + else: + logger.warning(f"Expected input '{key}' not found in onnx_inputs") + + embeddings = self.runner.run(onnx_inputs)[0] + return [embedding for embedding in embeddings] diff --git a/frigate/embeddings/onnx/jina_v1_embedding.py b/frigate/embeddings/onnx/jina_v1_embedding.py new file mode 100644 index 000000000..9924ff9e1 --- /dev/null +++ 
b/frigate/embeddings/onnx/jina_v1_embedding.py @@ -0,0 +1,216 @@ +"""JinaV1 Embeddings.""" + +import logging +import os +import warnings + +# importing this without pytorch or others causes a warning +# https://github.com/huggingface/transformers/issues/27214 +# suppressed by setting env TRANSFORMERS_NO_ADVISORY_WARNINGS=1 +from transformers import AutoFeatureExtractor, AutoTokenizer +from transformers.utils.logging import disable_progress_bar + +from frigate.comms.inter_process import InterProcessRequestor +from frigate.const import MODEL_CACHE_DIR, UPDATE_MODEL_STATE +from frigate.types import ModelStatusTypesEnum +from frigate.util.downloader import ModelDownloader + +from .base_embedding import BaseEmbedding +from .runner import ONNXModelRunner + +warnings.filterwarnings( + "ignore", + category=FutureWarning, + message="The class CLIPFeatureExtractor is deprecated", +) + +# disables the progress bar for downloading tokenizers and feature extractors +disable_progress_bar() +logger = logging.getLogger(__name__) + + +class JinaV1TextEmbedding(BaseEmbedding): + def __init__( + self, + model_size: str, + requestor: InterProcessRequestor, + device: str = "AUTO", + ): + super().__init__( + model_name="jinaai/jina-clip-v1", + model_file="text_model_fp16.onnx", + download_urls={ + "text_model_fp16.onnx": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/text_model_fp16.onnx", + }, + ) + self.tokenizer_file = "tokenizer" + self.requestor = requestor + self.model_size = model_size + self.device = device + self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name) + self.tokenizer = None + self.feature_extractor = None + self.runner = None + files_names = list(self.download_urls.keys()) + [self.tokenizer_file] + + if not all( + os.path.exists(os.path.join(self.download_path, n)) for n in files_names + ): + logger.debug(f"starting model download for {self.model_name}") + self.downloader = ModelDownloader( + model_name=self.model_name, + download_path=self.download_path, + file_names=files_names, + download_func=self._download_model, + ) + self.downloader.ensure_model_files() + else: + self.downloader = None + ModelDownloader.mark_files_state( + self.requestor, + self.model_name, + files_names, + ModelStatusTypesEnum.downloaded, + ) + self._load_model_and_utils() + logger.debug(f"models are already downloaded for {self.model_name}") + + def _download_model(self, path: str): + try: + file_name = os.path.basename(path) + + if file_name in self.download_urls: + ModelDownloader.download_from_url(self.download_urls[file_name], path) + elif file_name == self.tokenizer_file: + if not os.path.exists(path + "/" + self.model_name): + logger.info(f"Downloading {self.model_name} tokenizer") + + tokenizer = AutoTokenizer.from_pretrained( + self.model_name, + trust_remote_code=True, + cache_dir=f"{MODEL_CACHE_DIR}/{self.model_name}/tokenizer", + clean_up_tokenization_spaces=True, + ) + tokenizer.save_pretrained(path) + + self.downloader.requestor.send_data( + UPDATE_MODEL_STATE, + { + "model": f"{self.model_name}-{file_name}", + "state": ModelStatusTypesEnum.downloaded, + }, + ) + except Exception: + self.downloader.requestor.send_data( + UPDATE_MODEL_STATE, + { + "model": f"{self.model_name}-{file_name}", + "state": ModelStatusTypesEnum.error, + }, + ) + + def _load_model_and_utils(self): + if self.runner is None: + if self.downloader: + self.downloader.wait_for_download() + + tokenizer_path = os.path.join( + f"{MODEL_CACHE_DIR}/{self.model_name}/tokenizer" + ) + self.tokenizer = 
AutoTokenizer.from_pretrained( + self.model_name, + cache_dir=tokenizer_path, + trust_remote_code=True, + clean_up_tokenization_spaces=True, + ) + + self.runner = ONNXModelRunner( + os.path.join(self.download_path, self.model_file), + self.device, + self.model_size, + ) + + def _preprocess_inputs(self, raw_inputs): + max_length = max(len(self.tokenizer.encode(text)) for text in raw_inputs) + return [ + self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=max_length, + return_tensors="np", + ) + for text in raw_inputs + ] + + +class JinaV1ImageEmbedding(BaseEmbedding): + def __init__( + self, + model_size: str, + requestor: InterProcessRequestor, + device: str = "AUTO", + ): + model_file = ( + "vision_model_fp16.onnx" + if model_size == "large" + else "vision_model_quantized.onnx" + ) + super().__init__( + model_name="jinaai/jina-clip-v1", + model_file=model_file, + download_urls={ + model_file: f"https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/{model_file}", + "preprocessor_config.json": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/preprocessor_config.json", + }, + ) + self.requestor = requestor + self.model_size = model_size + self.device = device + self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name) + self.feature_extractor = None + self.runner: ONNXModelRunner | None = None + files_names = list(self.download_urls.keys()) + if not all( + os.path.exists(os.path.join(self.download_path, n)) for n in files_names + ): + logger.debug(f"starting model download for {self.model_name}") + self.downloader = ModelDownloader( + model_name=self.model_name, + download_path=self.download_path, + file_names=files_names, + download_func=self._download_model, + ) + self.downloader.ensure_model_files() + else: + self.downloader = None + ModelDownloader.mark_files_state( + self.requestor, + self.model_name, + files_names, + ModelStatusTypesEnum.downloaded, + ) + self._load_model_and_utils() + logger.debug(f"models are already downloaded for {self.model_name}") + + def _load_model_and_utils(self): + if self.runner is None: + if self.downloader: + self.downloader.wait_for_download() + + self.feature_extractor = AutoFeatureExtractor.from_pretrained( + f"{MODEL_CACHE_DIR}/{self.model_name}", + ) + + self.runner = ONNXModelRunner( + os.path.join(self.download_path, self.model_file), + self.device, + self.model_size, + ) + + def _preprocess_inputs(self, raw_inputs): + processed_images = [self._process_image(img) for img in raw_inputs] + return [ + self.feature_extractor(images=image, return_tensors="np") + for image in processed_images + ] diff --git a/frigate/embeddings/onnx/lpr_embedding.py b/frigate/embeddings/onnx/lpr_embedding.py new file mode 100644 index 000000000..c3b9a8771 --- /dev/null +++ b/frigate/embeddings/onnx/lpr_embedding.py @@ -0,0 +1,297 @@ +import logging +import os +import warnings + +import cv2 +import numpy as np + +from frigate.comms.inter_process import InterProcessRequestor +from frigate.const import MODEL_CACHE_DIR +from frigate.types import ModelStatusTypesEnum +from frigate.util.downloader import ModelDownloader + +from .base_embedding import BaseEmbedding +from .runner import ONNXModelRunner + +warnings.filterwarnings( + "ignore", + category=FutureWarning, + message="The class CLIPFeatureExtractor is deprecated", +) + +logger = logging.getLogger(__name__) + +LPR_EMBEDDING_SIZE = 256 + + +class PaddleOCRDetection(BaseEmbedding): + def __init__( + self, + model_size: str, + requestor: InterProcessRequestor, + device: 
str = "AUTO", + ): + super().__init__( + model_name="paddleocr-onnx", + model_file="detection.onnx", + download_urls={ + "detection.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/detection.onnx" + }, + ) + self.requestor = requestor + self.model_size = model_size + self.device = device + self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name) + self.runner: ONNXModelRunner | None = None + files_names = list(self.download_urls.keys()) + if not all( + os.path.exists(os.path.join(self.download_path, n)) for n in files_names + ): + logger.debug(f"starting model download for {self.model_name}") + self.downloader = ModelDownloader( + model_name=self.model_name, + download_path=self.download_path, + file_names=files_names, + download_func=self._download_model, + ) + self.downloader.ensure_model_files() + else: + self.downloader = None + ModelDownloader.mark_files_state( + self.requestor, + self.model_name, + files_names, + ModelStatusTypesEnum.downloaded, + ) + self._load_model_and_utils() + logger.debug(f"models are already downloaded for {self.model_name}") + + def _load_model_and_utils(self): + if self.runner is None: + if self.downloader: + self.downloader.wait_for_download() + + self.runner = ONNXModelRunner( + os.path.join(self.download_path, self.model_file), + self.device, + self.model_size, + ) + + def _preprocess_inputs(self, raw_inputs): + preprocessed = [] + for x in raw_inputs: + preprocessed.append(x) + return [{"x": preprocessed[0]}] + + +class PaddleOCRClassification(BaseEmbedding): + def __init__( + self, + model_size: str, + requestor: InterProcessRequestor, + device: str = "AUTO", + ): + super().__init__( + model_name="paddleocr-onnx", + model_file="classification.onnx", + download_urls={ + "classification.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/classification.onnx" + }, + ) + self.requestor = requestor + self.model_size = model_size + self.device = device + self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name) + self.runner: ONNXModelRunner | None = None + files_names = list(self.download_urls.keys()) + if not all( + os.path.exists(os.path.join(self.download_path, n)) for n in files_names + ): + logger.debug(f"starting model download for {self.model_name}") + self.downloader = ModelDownloader( + model_name=self.model_name, + download_path=self.download_path, + file_names=files_names, + download_func=self._download_model, + ) + self.downloader.ensure_model_files() + else: + self.downloader = None + ModelDownloader.mark_files_state( + self.requestor, + self.model_name, + files_names, + ModelStatusTypesEnum.downloaded, + ) + self._load_model_and_utils() + logger.debug(f"models are already downloaded for {self.model_name}") + + def _load_model_and_utils(self): + if self.runner is None: + if self.downloader: + self.downloader.wait_for_download() + + self.runner = ONNXModelRunner( + os.path.join(self.download_path, self.model_file), + self.device, + self.model_size, + ) + + def _preprocess_inputs(self, raw_inputs): + processed = [] + for img in raw_inputs: + processed.append({"x": img}) + return processed + + +class PaddleOCRRecognition(BaseEmbedding): + def __init__( + self, + model_size: str, + requestor: InterProcessRequestor, + device: str = "AUTO", + ): + super().__init__( + model_name="paddleocr-onnx", + model_file="recognition.onnx", + download_urls={ + "recognition.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/recognition.onnx" + }, + ) + 
self.requestor = requestor + self.model_size = model_size + self.device = device + self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name) + self.runner: ONNXModelRunner | None = None + files_names = list(self.download_urls.keys()) + if not all( + os.path.exists(os.path.join(self.download_path, n)) for n in files_names + ): + logger.debug(f"starting model download for {self.model_name}") + self.downloader = ModelDownloader( + model_name=self.model_name, + download_path=self.download_path, + file_names=files_names, + download_func=self._download_model, + ) + self.downloader.ensure_model_files() + else: + self.downloader = None + ModelDownloader.mark_files_state( + self.requestor, + self.model_name, + files_names, + ModelStatusTypesEnum.downloaded, + ) + self._load_model_and_utils() + logger.debug(f"models are already downloaded for {self.model_name}") + + def _load_model_and_utils(self): + if self.runner is None: + if self.downloader: + self.downloader.wait_for_download() + + self.runner = ONNXModelRunner( + os.path.join(self.download_path, self.model_file), + self.device, + self.model_size, + ) + + def _preprocess_inputs(self, raw_inputs): + processed = [] + for img in raw_inputs: + processed.append({"x": img}) + return processed + + +class LicensePlateDetector(BaseEmbedding): + def __init__( + self, + model_size: str, + requestor: InterProcessRequestor, + device: str = "AUTO", + ): + super().__init__( + model_name="yolov9_license_plate", + model_file="yolov9-256-license-plates.onnx", + download_urls={ + "yolov9-256-license-plates.onnx": "https://github.com/hawkeye217/yolov9-license-plates/raw/refs/heads/master/models/yolov9-256-license-plates.onnx" + }, + ) + + self.requestor = requestor + self.model_size = model_size + self.device = device + self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name) + self.runner: ONNXModelRunner | None = None + files_names = list(self.download_urls.keys()) + if not all( + os.path.exists(os.path.join(self.download_path, n)) for n in files_names + ): + logger.debug(f"starting model download for {self.model_name}") + self.downloader = ModelDownloader( + model_name=self.model_name, + download_path=self.download_path, + file_names=files_names, + download_func=self._download_model, + ) + self.downloader.ensure_model_files() + else: + self.downloader = None + ModelDownloader.mark_files_state( + self.requestor, + self.model_name, + files_names, + ModelStatusTypesEnum.downloaded, + ) + self._load_model_and_utils() + logger.debug(f"models are already downloaded for {self.model_name}") + + def _load_model_and_utils(self): + if self.runner is None: + if self.downloader: + self.downloader.wait_for_download() + + self.runner = ONNXModelRunner( + os.path.join(self.download_path, self.model_file), + self.device, + self.model_size, + ) + + def _preprocess_inputs(self, raw_inputs): + if isinstance(raw_inputs, list): + raise ValueError("License plate embedding does not support batch inputs.") + # Get image as numpy array + img = self._process_image(raw_inputs) + height, width, channels = img.shape + + # Resize maintaining aspect ratio + if width > height: + new_height = int(((height / width) * LPR_EMBEDDING_SIZE) // 4 * 4) + img = cv2.resize(img, (LPR_EMBEDDING_SIZE, new_height)) + else: + new_width = int(((width / height) * LPR_EMBEDDING_SIZE) // 4 * 4) + img = cv2.resize(img, (new_width, LPR_EMBEDDING_SIZE)) + + # Get new dimensions after resize + og_h, og_w, channels = img.shape + + # Create black square frame + frame = np.full( + 
(LPR_EMBEDDING_SIZE, LPR_EMBEDDING_SIZE, channels), + (0, 0, 0), + dtype=np.float32, + ) + + # Center the resized image in the square frame + x_center = (LPR_EMBEDDING_SIZE - og_w) // 2 + y_center = (LPR_EMBEDDING_SIZE - og_h) // 2 + frame[y_center : y_center + og_h, x_center : x_center + og_w] = img + + # Normalize to 0-1 + frame = frame / 255.0 + + # Convert from HWC to CHW format and add batch dimension + frame = np.transpose(frame, (2, 0, 1)) + frame = np.expand_dims(frame, axis=0) + return [{"images": frame}] diff --git a/frigate/embeddings/onnx/runner.py b/frigate/embeddings/onnx/runner.py new file mode 100644 index 000000000..d380f45c1 --- /dev/null +++ b/frigate/embeddings/onnx/runner.py @@ -0,0 +1,79 @@ +"""Convenience runner for onnx models.""" + +import logging +from typing import Any + +import onnxruntime as ort + +from frigate.util.model import get_ort_providers + +try: + import openvino as ov +except ImportError: + # openvino is not included + pass + +logger = logging.getLogger(__name__) + + +class ONNXModelRunner: + """Run onnx models optimally based on available hardware.""" + + def __init__(self, model_path: str, device: str, requires_fp16: bool = False): + self.model_path = model_path + self.ort: ort.InferenceSession = None + self.ov: ov.Core = None + providers, options = get_ort_providers(device == "CPU", device, requires_fp16) + self.interpreter = None + + if "OpenVINOExecutionProvider" in providers: + try: + # use OpenVINO directly + self.type = "ov" + self.ov = ov.Core() + self.ov.set_property( + {ov.properties.cache_dir: "/config/model_cache/openvino"} + ) + self.interpreter = self.ov.compile_model( + model=model_path, device_name=device + ) + except Exception as e: + logger.warning( + f"OpenVINO failed to build model, using CPU instead: {e}" + ) + self.interpreter = None + + # Use ONNXRuntime + if self.interpreter is None: + self.type = "ort" + self.ort = ort.InferenceSession( + model_path, + providers=providers, + provider_options=options, + ) + + def get_input_names(self) -> list[str]: + if self.type == "ov": + input_names = [] + + for input in self.interpreter.inputs: + input_names.extend(input.names) + + return input_names + elif self.type == "ort": + return [input.name for input in self.ort.get_inputs()] + + def run(self, input: dict[str, Any]) -> Any: + if self.type == "ov": + infer_request = self.interpreter.create_infer_request() + input_tensor = list(input.values()) + + if len(input_tensor) == 1: + input_tensor = ov.Tensor(array=input_tensor[0]) + else: + input_tensor = ov.Tensor(array=input_tensor) + + infer_request.infer(input_tensor) + return [infer_request.get_output_tensor().data] + elif self.type == "ort": + return self.ort.run(None, input) diff --git a/frigate/ffmpeg_presets.py b/frigate/ffmpeg_presets.py index 208948044..3c251b3b7 100644 --- a/frigate/ffmpeg_presets.py +++ b/frigate/ffmpeg_presets.py @@ -10,6 +10,7 @@ from frigate.const import ( FFMPEG_HWACCEL_NVIDIA, FFMPEG_HWACCEL_VAAPI, FFMPEG_HWACCEL_VULKAN, + LIBAVFORMAT_VERSION_MAJOR, ) from frigate.util.services import vainfo_hwaccel from frigate.version import VERSION @@ -51,9 +52,8 @@ class LibvaGpuSelector: return "" -LIBAV_VERSION = int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59") or "59") -FPS_VFR_PARAM = "-fps_mode vfr" if LIBAV_VERSION >= 59 else "-vsync 2" -TIMEOUT_PARAM = "-timeout" if LIBAV_VERSION >= 59 else "-stimeout" +FPS_VFR_PARAM = "-fps_mode vfr" if LIBAVFORMAT_VERSION_MAJOR >= 59 else "-vsync 2" +TIMEOUT_PARAM = "-timeout" if LIBAVFORMAT_VERSION_MAJOR >= 59 else "-stimeout" 
_gpu_selector = LibvaGpuSelector() _user_agent_args = [ @@ -65,8 +65,8 @@ PRESETS_HW_ACCEL_DECODE = { "preset-rpi-64-h264": "-c:v:1 h264_v4l2m2m", "preset-rpi-64-h265": "-c:v:1 hevc_v4l2m2m", FFMPEG_HWACCEL_VAAPI: f"-hwaccel_flags allow_profile_mismatch -hwaccel vaapi -hwaccel_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format vaapi", - "preset-intel-qsv-h264": f"-hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v h264_qsv{' -bsf:v dump_extra' if LIBAV_VERSION >= 61 else ''}", # https://trac.ffmpeg.org/ticket/9766#comment:17 - "preset-intel-qsv-h265": f"-load_plugin hevc_hw -hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv{' -bsf:v dump_extra' if LIBAV_VERSION >= 61 else ''}", # https://trac.ffmpeg.org/ticket/9766#comment:17 + "preset-intel-qsv-h264": f"-hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v h264_qsv{' -bsf:v dump_extra' if LIBAVFORMAT_VERSION_MAJOR >= 61 else ''}", # https://trac.ffmpeg.org/ticket/9766#comment:17 + "preset-intel-qsv-h265": f"-load_plugin hevc_hw -hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv{' -bsf:v dump_extra' if LIBAVFORMAT_VERSION_MAJOR >= 61 else ''}", # https://trac.ffmpeg.org/ticket/9766#comment:17 FFMPEG_HWACCEL_NVIDIA: "-hwaccel cuda -hwaccel_output_format cuda", "preset-jetson-h264": "-c:v h264_nvmpi -resize {1}x{2}", "preset-jetson-h265": "-c:v hevc_nvmpi -resize {1}x{2}", diff --git a/frigate/motion/improved_motion.py b/frigate/motion/improved_motion.py index 559b432e2..77135a690 100644 --- a/frigate/motion/improved_motion.py +++ b/frigate/motion/improved_motion.py @@ -49,7 +49,7 @@ class ImprovedMotionDetector(MotionDetector): self.contrast_values = np.zeros((contrast_frame_history, 2), np.uint8) self.contrast_values[:, 1:2] = 255 self.contrast_values_index = 0 - self.config_subscriber = ConfigSubscriber(f"config/motion/{name}") + self.config_subscriber = ConfigSubscriber(f"config/motion/{name}", True) self.ptz_metrics = ptz_metrics self.last_stop_time = None diff --git a/frigate/output/preview.py b/frigate/output/preview.py index ae2ba4591..4f8796d39 100644 --- a/frigate/output/preview.py +++ b/frigate/output/preview.py @@ -172,7 +172,9 @@ class PreviewRecorder: # create communication for finished previews self.requestor = InterProcessRequestor() - self.config_subscriber = ConfigSubscriber(f"config/record/{self.config.name}") + self.config_subscriber = ConfigSubscriber( + f"config/record/{self.config.name}", True + ) y, u1, u2, v1, v2 = get_yuv_crop( self.config.frame_shape_yuv, diff --git a/frigate/util/model.py b/frigate/util/model.py index 75b545cfb..da7b1a50a 100644 --- a/frigate/util/model.py +++ b/frigate/util/model.py @@ -2,18 +2,11 @@ import logging import os -from typing import Any import cv2 import numpy as np import onnxruntime as ort -try: - import openvino as ov -except ImportError: - # openvino is not included - pass - logger = logging.getLogger(__name__) ### Post Processing @@ -124,66 +117,3 @@ def get_ort_providers( options.append({}) return (providers, options) - - -class ONNXModelRunner: - """Run onnx models optimally based on available hardware.""" - - def __init__(self, model_path: str, device: str, requires_fp16: bool = False): - self.model_path = model_path - self.ort: ort.InferenceSession = None - self.ov: ov.Core = None - providers, options = get_ort_providers(device == "CPU", device, requires_fp16) - self.interpreter = None - - if 
"OpenVINOExecutionProvider" in providers: - try: - # use OpenVINO directly - self.type = "ov" - self.ov = ov.Core() - self.ov.set_property( - {ov.properties.cache_dir: "/config/model_cache/openvino"} - ) - self.interpreter = self.ov.compile_model( - model=model_path, device_name=device - ) - except Exception as e: - logger.warning( - f"OpenVINO failed to build model, using CPU instead: {e}" - ) - self.interpreter = None - - # Use ONNXRuntime - if self.interpreter is None: - self.type = "ort" - self.ort = ort.InferenceSession( - model_path, - providers=providers, - provider_options=options, - ) - - def get_input_names(self) -> list[str]: - if self.type == "ov": - input_names = [] - - for input in self.interpreter.inputs: - input_names.extend(input.names) - - return input_names - elif self.type == "ort": - return [input.name for input in self.ort.get_inputs()] - - def run(self, input: dict[str, Any]) -> Any: - if self.type == "ov": - infer_request = self.interpreter.create_infer_request() - input_tensor = list(input.values()) - - if len(input_tensor) == 1: - input_tensor = ov.Tensor(array=input_tensor[0]) - else: - input_tensor = ov.Tensor(array=input_tensor) - - infer_request.infer(input_tensor) - return [infer_request.get_output_tensor().data] - elif self.type == "ort": - return self.ort.run(None, input) diff --git a/frigate/video.py b/frigate/video.py index f82d86648..233cebb9e 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -539,7 +539,7 @@ def process_frames( exit_on_empty: bool = False, ): next_region_update = get_tomorrow_at_time(2) - config_subscriber = ConfigSubscriber(f"config/detect/{camera_name}") + config_subscriber = ConfigSubscriber(f"config/detect/{camera_name}", True) fps_tracker = EventsPerSecond() fps_tracker.start() diff --git a/web/package-lock.json b/web/package-lock.json index 119fc79ea..f2b186312 100644 --- a/web/package-lock.json +++ b/web/package-lock.json @@ -25,7 +25,7 @@ "@radix-ui/react-select": "^2.1.2", "@radix-ui/react-separator": "^1.1.0", "@radix-ui/react-slider": "^1.2.1", - "@radix-ui/react-slot": "^1.1.0", + "@radix-ui/react-slot": "^1.1.2", "@radix-ui/react-switch": "^1.1.1", "@radix-ui/react-tabs": "^1.1.1", "@radix-ui/react-toggle": "^1.1.0", @@ -1176,6 +1176,24 @@ } } }, + "node_modules/@radix-ui/react-alert-dialog/node_modules/@radix-ui/react-slot": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz", + "integrity": "sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-arrow": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.0.tgz", @@ -1293,6 +1311,24 @@ } } }, + "node_modules/@radix-ui/react-collection/node_modules/@radix-ui/react-slot": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz", + "integrity": "sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { 
+ "@types/react": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-compose-refs": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.0.tgz", @@ -1417,6 +1453,24 @@ } } }, + "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-slot": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz", + "integrity": "sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-direction": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@radix-ui/react-direction/-/react-direction-1.1.0.tgz", @@ -1685,6 +1739,24 @@ } } }, + "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-slot": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz", + "integrity": "sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-popover": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@radix-ui/react-popover/-/react-popover-1.1.2.tgz", @@ -1737,6 +1809,24 @@ } } }, + "node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-slot": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz", + "integrity": "sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-popper": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.0.tgz", @@ -1840,6 +1930,24 @@ } } }, + "node_modules/@radix-ui/react-primitive/node_modules/@radix-ui/react-slot": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz", + "integrity": "sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-radio-group": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/@radix-ui/react-radio-group/-/react-radio-group-1.2.1.tgz", @@ -2022,6 +2130,24 @@ } } }, + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-slot": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz", + "integrity": 
"sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-separator": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.0.tgz", @@ -2094,12 +2220,12 @@ } }, "node_modules/@radix-ui/react-slot": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz", - "integrity": "sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==", + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.2.tgz", + "integrity": "sha512-YAKxaiGsSQJ38VzKH86/BPRC4rh+b1Jpa+JneA5LRE7skmLPNAyeG8kPJj/oo4STLvlrs8vkf/iYyc3A5stYCQ==", "license": "MIT", "dependencies": { - "@radix-ui/react-compose-refs": "1.1.0" + "@radix-ui/react-compose-refs": "1.1.1" }, "peerDependencies": { "@types/react": "*", @@ -2111,6 +2237,21 @@ } } }, + "node_modules/@radix-ui/react-slot/node_modules/@radix-ui/react-compose-refs": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.1.tgz", + "integrity": "sha512-Y9VzoRDSJtgFMUCoiZBDVo084VQ5hfpXxVE+NgkdNsjiDBByiImMZKKhxMwCbdHvhlENG6a833CbFkOQvTricw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-switch": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/@radix-ui/react-switch/-/react-switch-1.1.1.tgz", @@ -2303,6 +2444,24 @@ } } }, + "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-slot": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz", + "integrity": "sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-use-callback-ref": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.0.tgz", diff --git a/web/package.json b/web/package.json index d0bdd01d4..700fd12d7 100644 --- a/web/package.json +++ b/web/package.json @@ -31,7 +31,7 @@ "@radix-ui/react-select": "^2.1.2", "@radix-ui/react-separator": "^1.1.0", "@radix-ui/react-slider": "^1.2.1", - "@radix-ui/react-slot": "^1.1.0", + "@radix-ui/react-slot": "^1.1.2", "@radix-ui/react-switch": "^1.1.1", "@radix-ui/react-tabs": "^1.1.1", "@radix-ui/react-toggle": "^1.1.0", diff --git a/web/src/components/overlay/detail/ObjectPath.tsx b/web/src/components/overlay/detail/ObjectPath.tsx index d85750ee7..80f454470 100644 --- a/web/src/components/overlay/detail/ObjectPath.tsx +++ b/web/src/components/overlay/detail/ObjectPath.tsx @@ -15,6 +15,7 @@ type ObjectPathProps = { pointRadius?: number; imgRef: React.RefObject; onPointClick?: (index: number) => void; + 
visible?: boolean;
 };
 
 const typeColorMap: Partial<
@@ -37,6 +38,7 @@ export function ObjectPath({
   pointRadius = 4,
   imgRef,
   onPointClick,
+  visible = true,
 }: ObjectPathProps) {
   const getAbsolutePositions = useCallback(() => {
     if (!imgRef.current || !positions) return [];
@@ -69,7 +71,7 @@
     return `rgb(${baseColor.map((c) => Math.max(0, c - 10)).join(",")})`;
   };
 
-  if (!imgRef.current) return null;
+  if (!imgRef.current || !visible) return null;
 
   const absolutePositions = getAbsolutePositions();
   const lineColor = `rgb(${color.join(",")})`;
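ObjectPath receives positions normalized to 0-1 and scales them against the rendered image at draw time (the getAbsolutePositions callback above). A minimal sketch of that mapping in Python; the helper name is illustrative, not part of the codebase:

def to_absolute(
    points: list[tuple[float, float]], width: int, height: int
) -> list[tuple[float, float]]:
    # Scale normalized (0-1) coordinates to pixel positions on the rendered frame.
    return [(x * width, y * height) for x, y in points]

# A point at the frame center of a 1280x720 render lands at (640, 360).
assert to_absolute([(0.5, 0.5)], 1280, 720) == [(640.0, 360.0)]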
diff --git a/web/src/components/overlay/detail/ObjectPathPlotter.tsx b/web/src/components/overlay/detail/ObjectPathPlotter.tsx
new file mode 100644
index 000000000..40cf1728e
--- /dev/null
+++ b/web/src/components/overlay/detail/ObjectPathPlotter.tsx
@@ -0,0 +1,281 @@
+import { useState, useEffect, useMemo, useRef } from "react";
+import useSWR from "swr";
+import { useApiHost } from "@/api";
+import type { SearchResult } from "@/types/search";
+import { ObjectPath } from "./ObjectPath";
+import type { FrigateConfig } from "@/types/frigateConfig";
+import {
+  Select,
+  SelectContent,
+  SelectItem,
+  SelectTrigger,
+  SelectValue,
+} from "@/components/ui/select";
+import { Card, CardContent } from "@/components/ui/card";
+import { formatUnixTimestampToDateTime } from "@/utils/dateUtil";
+import { useTimezone } from "@/hooks/use-date-utils";
+import { Button } from "@/components/ui/button";
+import { LuX } from "react-icons/lu";
+import {
+  Pagination,
+  PaginationContent,
+  PaginationItem,
+  PaginationLink,
+  PaginationNext,
+  PaginationPrevious,
+} from "@/components/ui/pagination";
+
+export default function ObjectPathPlotter() {
+  const apiHost = useApiHost();
+  const [timeRange, setTimeRange] = useState("1d");
+  const { data: config } = useSWR<FrigateConfig>("config");
+  const imgRef = useRef<HTMLImageElement>(null);
+  const timezone = useTimezone(config);
+  const [selectedCamera, setSelectedCamera] = useState("");
+  const [selectedEvent, setSelectedEvent] = useState<SearchResult | null>(null);
+  const [currentPage, setCurrentPage] = useState(1);
+  const eventsPerPage = 20;
+
+  useEffect(() => {
+    if (config && !selectedCamera) {
+      setSelectedCamera(Object.keys(config.cameras)[0]);
+    }
+  }, [config, selectedCamera]);
+
+  const searchQuery = useMemo(() => {
+    if (!selectedCamera) return null;
+    return [
+      "events",
+      {
+        cameras: selectedCamera,
+        after: Math.floor(Date.now() / 1000) - getTimeRangeInSeconds(timeRange),
+        before: Math.floor(Date.now() / 1000),
+        has_clip: 1,
+        include_thumbnails: 0,
+        limit: 1000,
+        timezone,
+      },
+    ];
+  }, [selectedCamera, timeRange, timezone]);
+
+  const { data: events } = useSWR<SearchResult[]>(searchQuery);
+
+  const aspectRatio = useMemo(() => {
+    if (!config || !selectedCamera) return 16 / 9;
+    return (
+      config.cameras[selectedCamera].detect.width /
+      config.cameras[selectedCamera].detect.height
+    );
+  }, [config, selectedCamera]);
+
+  const pathPoints = useMemo(() => {
+    if (!events) return [];
+    return events.flatMap(
+      (event) =>
+        event.data.path_data?.map(
+          ([coords, timestamp]: [number[], number]) => ({
+            x: coords[0],
+            y: coords[1],
+            timestamp,
+            event,
+          }),
+        ) || [],
+    );
+  }, [events]);
+
+  const getRandomColor = () => {
+    return [
+      Math.floor(Math.random() * 256),
+      Math.floor(Math.random() * 256),
+      Math.floor(Math.random() * 256),
+    ];
+  };
+
+  const eventColors = useMemo(() => {
+    if (!events) return {};
+    return events.reduce(
+      (acc, event) => {
+        acc[event.id] = getRandomColor();
+        return acc;
+      },
+      {} as Record<string, number[]>,
+    );
+  }, [events]);
+
+  const [imageLoaded, setImageLoaded] = useState(false);
+
+  useEffect(() => {
+    if (!selectedCamera) return;
+    const img = new Image();
+    img.src = selectedEvent
+      ? `${apiHost}api/${selectedCamera}/recordings/${selectedEvent.start_time}/snapshot.jpg`
+      : `${apiHost}api/${selectedCamera}/latest.jpg?h=500`;
+    img.onload = () => {
+      if (imgRef.current) {
+        imgRef.current.src = img.src;
+        setImageLoaded(true);
+      }
+    };
+  }, [apiHost, selectedCamera, selectedEvent]);
+
+  const handleEventClick = (event: SearchResult) => {
+    setSelectedEvent(event.id === selectedEvent?.id ? null : event);
+  };
+
+  const clearSelectedEvent = () => {
+    setSelectedEvent(null);
+  };
+
+  const totalPages = Math.ceil((events?.length || 0) / eventsPerPage);
+  const paginatedEvents = events?.slice(
+    (currentPage - 1) * eventsPerPage,
+    currentPage * eventsPerPage,
+  );
+
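+  // Layout: camera and time-range selectors, the frame with per-event SVG
+  // path overlays, a legend/event list for selection, and pagination.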
+  return (
+    <Card>
+      <CardContent>
+        <div>
+          <h2>
+            Tracked Object Paths
+          </h2>
+          <div>
+            <Select value={selectedCamera} onValueChange={setSelectedCamera}>
+              <SelectTrigger>
+                <SelectValue placeholder="Camera" />
+              </SelectTrigger>
+              <SelectContent>
+                {config &&
+                  Object.keys(config.cameras).map((camera) => (
+                    <SelectItem key={camera} value={camera}>
+                      {camera}
+                    </SelectItem>
+                  ))}
+              </SelectContent>
+            </Select>
+            <Select value={timeRange} onValueChange={setTimeRange}>
+              <SelectTrigger>
+                <SelectValue placeholder="Time range" />
+              </SelectTrigger>
+              <SelectContent>
+                <SelectItem value="1h">Last hour</SelectItem>
+                <SelectItem value="6h">Last 6 hours</SelectItem>
+                <SelectItem value="12h">Last 12 hours</SelectItem>
+                <SelectItem value="1d">Last 24 hours</SelectItem>
+              </SelectContent>
+            </Select>
+          </div>
+        </div>
+        <div className="relative" style={{ aspectRatio }}>
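+          {/* The frame is preloaded in the effect above; overlays mount only
+              after it loads, so path coordinates scale to the rendered size. */}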
+          <img
+            ref={imgRef}
+            className="h-full w-full object-contain"
+            alt={`Latest frame from ${selectedCamera}`}
+          />
+          {imgRef.current && imageLoaded && (
+            <svg className="absolute inset-0 h-full w-full">
+              {events?.map((event) => (
+                <ObjectPath
+                  key={event.id}
+                  positions={pathPoints.filter(
+                    (point) => point.event.id === event.id,
+                  )}
+                  color={eventColors[event.id]}
+                  width={2}
+                  imgRef={imgRef}
+                  visible={
+                    selectedEvent === null || selectedEvent.id === event.id
+                  }
+                />
+              ))}
+            </svg>
+          )}
+        </div>
+        <div>
+          <h3>
+            Legend
+          </h3>
+          {selectedEvent && (
+            <Button onClick={clearSelectedEvent}>
+              <LuX />
+            </Button>
+          )}
+        </div>
+        <div>
+          {paginatedEvents?.map((event) => (
+            <div
+              key={event.id}
+              onClick={() => handleEventClick(event)}
+            >
+              <div
+                className="h-3 w-3 rounded-full"
+                style={{
+                  backgroundColor: `rgb(${eventColors[event.id]?.join(",")})`,
+                }}
+              />
+              <span>
+                {event.label}{" "}
+                {formatUnixTimestampToDateTime(event.start_time, {
+                  timezone: config?.ui.timezone,
+                })}
+              </span>
+            </div>
+          ))}
+        </div>
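+        {/* Events page through 20 at a time (eventsPerPage above). */}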
+        <Pagination>
+          <PaginationContent>
+            <PaginationItem>
+              <PaginationPrevious
+                onClick={() =>
+                  setCurrentPage((prev) => Math.max(prev - 1, 1))
+                }
+              />
+            </PaginationItem>
+            {[...Array(totalPages)].map((_, index) => (
+              <PaginationItem key={index}>
+                <PaginationLink
+                  onClick={() => setCurrentPage(index + 1)}
+                  isActive={currentPage === index + 1}
+                >
+                  {index + 1}
+                </PaginationLink>
+              </PaginationItem>
+            ))}
+            <PaginationItem>
+              <PaginationNext
+                onClick={() =>
+                  setCurrentPage((prev) => Math.min(prev + 1, totalPages))
+                }
+              />
+            </PaginationItem>
+          </PaginationContent>
+        </Pagination>
+      </CardContent>
+    </Card>
+  );
+}
+
+function getTimeRangeInSeconds(range: string): number {
+  switch (range) {
+    case "1h":
+      return 60 * 60;
+    case "6h":
+      return 6 * 60 * 60;
+    case "12h":
+      return 12 * 60 * 60;
+    case "1d":
+      return 24 * 60 * 60;
+    default:
+      return 24 * 60 * 60;
+  }
+}
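Each tracked object's data.path_data arrives as [[x, y], timestamp] pairs, which the pathPoints memo above flattens into one point list tagged with its owning event. A standalone sketch of that flattening in Python; the function name and dict layout are illustrative, not part of the codebase:

def flatten_paths(events: list[dict]) -> list[dict]:
    # One flat list of points, each tagged with the id of the event it belongs to.
    points = []
    for event in events:
        for coords, timestamp in event.get("data", {}).get("path_data") or []:
            points.append(
                {
                    "x": coords[0],
                    "y": coords[1],
                    "timestamp": timestamp,
                    "event_id": event["id"],
                }
            )
    return points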
diff --git a/web/src/components/ui/pagination.tsx b/web/src/components/ui/pagination.tsx
new file mode 100644
index 000000000..ea40d196d
--- /dev/null
+++ b/web/src/components/ui/pagination.tsx
@@ -0,0 +1,117 @@
+import * as React from "react"
+import { ChevronLeft, ChevronRight, MoreHorizontal } from "lucide-react"
+
+import { cn } from "@/lib/utils"
+import { ButtonProps, buttonVariants } from "@/components/ui/button"
+
+const Pagination = ({ className, ...props }: React.ComponentProps<"nav">) => (
+  <nav