diff --git a/docker/main/Dockerfile b/docker/main/Dockerfile
index 83c8416ff..42002fc37 100644
--- a/docker/main/Dockerfile
+++ b/docker/main/Dockerfile
@@ -1,356 +1,377 @@
-# syntax=docker/dockerfile:1.6
-
-# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
-ARG DEBIAN_FRONTEND=noninteractive
-
-# Globally set pip break-system-packages option to avoid having to specify it every time
-ARG PIP_BREAK_SYSTEM_PACKAGES=1
-
-ARG BASE_IMAGE=debian:12
-ARG SLIM_BASE=debian:12-slim
-
-# A hook that allows us to inject commands right after the base images
-ARG BASE_HOOK=
-
-FROM ${BASE_IMAGE} AS base
-ARG PIP_BREAK_SYSTEM_PACKAGES
-ARG BASE_HOOK
-
-RUN sh -c "$BASE_HOOK"
-
-FROM --platform=${BUILDPLATFORM} debian:12 AS base_host
-ARG PIP_BREAK_SYSTEM_PACKAGES
-
-FROM ${SLIM_BASE} AS slim-base
-ARG PIP_BREAK_SYSTEM_PACKAGES
-ARG BASE_HOOK
-
-RUN sh -c "$BASE_HOOK"
-
-FROM slim-base AS wget
-ARG DEBIAN_FRONTEND
-RUN apt-get update \
- && apt-get install -y wget xz-utils \
- && rm -rf /var/lib/apt/lists/*
-WORKDIR /rootfs
-
-FROM base AS nginx
-ARG DEBIAN_FRONTEND
-ENV CCACHE_DIR /root/.ccache
-ENV CCACHE_MAXSIZE 2G
-
-RUN --mount=type=bind,source=docker/main/build_nginx.sh,target=/deps/build_nginx.sh \
- /deps/build_nginx.sh
-
-FROM wget AS sqlite-vec
-ARG DEBIAN_FRONTEND
-
-# Build sqlite_vec from source
-COPY docker/main/build_sqlite_vec.sh /deps/build_sqlite_vec.sh
-RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
- --mount=type=bind,source=docker/main/build_sqlite_vec.sh,target=/deps/build_sqlite_vec.sh \
- --mount=type=cache,target=/root/.ccache \
- /deps/build_sqlite_vec.sh
-
-FROM scratch AS go2rtc
-ARG TARGETARCH
-WORKDIR /rootfs/usr/local/go2rtc/bin
-ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.13/go2rtc_linux_${TARGETARCH}" go2rtc
-
-FROM wget AS tempio
-ARG TARGETARCH
-RUN --mount=type=bind,source=docker/main/install_tempio.sh,target=/deps/install_tempio.sh \
- /deps/install_tempio.sh
-
-####
-#
-# OpenVino Support
-#
-# 1. Download and convert a model from Intel's Public Open Model Zoo
-#
-####
-# Download and Convert OpenVino model
-FROM base_host AS ov-converter
-ARG DEBIAN_FRONTEND
-
-# Install OpenVino Runtime and Dev library
-COPY docker/main/requirements-ov.txt /requirements-ov.txt
-RUN apt-get -qq update \
- && apt-get -qq install -y wget python3 python3-dev python3-distutils gcc pkg-config libhdf5-dev \
- && wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
- && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
- && python3 get-pip.py "pip" \
- && pip3 install -r /requirements-ov.txt
-
-# Get OpenVino Model
-RUN --mount=type=bind,source=docker/main/build_ov_model.py,target=/build_ov_model.py \
- mkdir /models && cd /models \
- && wget http://download.tensorflow.org/models/object_detection/ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz \
- && tar -xvf ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz \
- && python3 /build_ov_model.py
-
-####
-#
-# Coral Compatibility
-#
-# Builds libusb without udev. Needed for synology and other devices with USB coral
-####
-# libUSB - No Udev
-FROM wget as libusb-build
-ARG TARGETARCH
-ARG DEBIAN_FRONTEND
-ENV CCACHE_DIR /root/.ccache
-ENV CCACHE_MAXSIZE 2G
-
-# Build libUSB without udev. Needed for Openvino NCS2 support
-WORKDIR /opt
-RUN apt-get update && apt-get install -y unzip build-essential automake libtool ccache pkg-config
-RUN --mount=type=cache,target=/root/.ccache wget -q https://github.com/libusb/libusb/archive/v1.0.26.zip -O v1.0.26.zip && \
- unzip v1.0.26.zip && cd libusb-1.0.26 && \
- ./bootstrap.sh && \
- ./configure CC='ccache gcc' CCX='ccache g++' --disable-udev --enable-shared && \
- make -j $(nproc --all)
-RUN apt-get update && \
- apt-get install -y --no-install-recommends libusb-1.0-0-dev && \
- rm -rf /var/lib/apt/lists/*
-WORKDIR /opt/libusb-1.0.26/libusb
-RUN /bin/mkdir -p '/usr/local/lib' && \
- /bin/bash ../libtool --mode=install /usr/bin/install -c libusb-1.0.la '/usr/local/lib' && \
- /bin/mkdir -p '/usr/local/include/libusb-1.0' && \
- /usr/bin/install -c -m 644 libusb.h '/usr/local/include/libusb-1.0' && \
- /bin/mkdir -p '/usr/local/lib/pkgconfig' && \
- cd /opt/libusb-1.0.26/ && \
- /usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \
- ldconfig
-
-FROM wget AS models
-
-# Get model and labels
-RUN wget -qO edgetpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite
-RUN wget -qO cpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite
-COPY labelmap.txt .
-# Copy OpenVino model
-COPY --from=ov-converter /models/ssdlite_mobilenet_v2.xml openvino-model/
-COPY --from=ov-converter /models/ssdlite_mobilenet_v2.bin openvino-model/
-RUN wget -q https://github.com/openvinotoolkit/open_model_zoo/raw/master/data/dataset_classes/coco_91cl_bkgr.txt -O openvino-model/coco_91cl_bkgr.txt && \
- sed -i 's/truck/car/g' openvino-model/coco_91cl_bkgr.txt
-# Get Audio Model and labels
-RUN wget -qO - https://www.kaggle.com/api/v1/models/google/yamnet/tfLite/classification-tflite/1/download | tar xvz && mv 1.tflite cpu_audio_model.tflite
-COPY audio-labelmap.txt .
-
-
-FROM wget AS s6-overlay
-ARG TARGETARCH
-RUN --mount=type=bind,source=docker/main/install_s6_overlay.sh,target=/deps/install_s6_overlay.sh \
- /deps/install_s6_overlay.sh
-
-
-FROM base AS wheels
-ARG DEBIAN_FRONTEND
-ARG TARGETARCH
-ARG DEBUG=false
-
-# Use a separate container to build wheels to prevent build dependencies in final image
-RUN apt-get -qq update \
- && apt-get -qq install -y \
- apt-transport-https wget unzip \
- && apt-get -qq update \
- && apt-get -qq install -y \
- python3.11 \
- python3.11-dev \
- # opencv dependencies
- build-essential cmake git pkg-config libgtk-3-dev \
- libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \
- libxvidcore-dev libx264-dev libjpeg-dev libpng-dev libtiff-dev \
- gfortran openexr libatlas-base-dev libssl-dev\
- libtbbmalloc2 libtbb-dev libdc1394-dev libopenexr-dev \
- libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \
- # sqlite3 dependencies
- tclsh \
- # scipy dependencies
- gcc gfortran libopenblas-dev liblapack-dev && \
- rm -rf /var/lib/apt/lists/*
-
-RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1
-
-RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
- && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
- && python3 get-pip.py "pip"
-
-COPY docker/main/requirements.txt /requirements.txt
-COPY docker/main/requirements-dev.txt /requirements-dev.txt
-
-RUN pip3 install -r /requirements.txt
-
-# Build pysqlite3 from source
-COPY docker/main/build_pysqlite3.sh /build_pysqlite3.sh
-RUN /build_pysqlite3.sh
-
-COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
-RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt && \
- if [ "$DEBUG" = "true" ]; then \
- pip3 wheel --wheel-dir=/wheels -r /requirements-dev.txt; \
- fi
-
-# Install HailoRT & Wheels
-RUN --mount=type=bind,source=docker/main/install_hailort.sh,target=/deps/install_hailort.sh \
- /deps/install_hailort.sh
-
-# Collect deps in a single layer
-FROM scratch AS deps-rootfs
-COPY --from=nginx /usr/local/nginx/ /usr/local/nginx/
-COPY --from=sqlite-vec /usr/local/lib/ /usr/local/lib/
-COPY --from=go2rtc /rootfs/ /
-COPY --from=libusb-build /usr/local/lib /usr/local/lib
-COPY --from=tempio /rootfs/ /
-COPY --from=s6-overlay /rootfs/ /
-COPY --from=models /rootfs/ /
-COPY --from=wheels /rootfs/ /
-COPY docker/main/rootfs/ /
-
-
-# Frigate deps (ffmpeg, python, nginx, go2rtc, s6-overlay, etc)
-FROM slim-base AS deps
-ARG TARGETARCH
-ARG BASE_IMAGE
-
-ARG DEBIAN_FRONTEND
-# http://stackoverflow.com/questions/48162574/ddg#49462622
-ARG APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn
-
-# https://github.com/NVIDIA/nvidia-docker/wiki/Installation-(Native-GPU-Support)
-ENV NVIDIA_VISIBLE_DEVICES=all
-ENV NVIDIA_DRIVER_CAPABILITIES="compute,video,utility"
-
-# Disable tokenizer parallelism warning
-# https://stackoverflow.com/questions/62691279/how-to-disable-tokenizers-parallelism-true-false-warning/72926996#72926996
-ENV TOKENIZERS_PARALLELISM=true
-# https://github.com/huggingface/transformers/issues/27214
-ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1
-
-# Set OpenCV ffmpeg loglevel to fatal: https://ffmpeg.org/doxygen/trunk/log_8h.html
-ENV OPENCV_FFMPEG_LOGLEVEL=8
-
-# Set NumPy to ignore getlimits warning
-ENV PYTHONWARNINGS="ignore:::numpy.core.getlimits"
-
-# Set HailoRT to disable logging
-ENV HAILORT_LOGGER_PATH=NONE
-
-# TensorFlow C++ logging suppression (must be set before import)
-# TF_CPP_MIN_LOG_LEVEL: 0=all, 1=INFO+, 2=WARNING+, 3=ERROR+ (we use 3 for errors only)
-ENV TF_CPP_MIN_LOG_LEVEL=3
-# Suppress verbose logging from TensorFlow C++ code
-ENV TF_CPP_MIN_VLOG_LEVEL=3
-# Disable oneDNN optimization messages ("optimized with oneDNN...")
-ENV TF_ENABLE_ONEDNN_OPTS=0
-# Suppress AutoGraph verbosity during conversion
-ENV AUTOGRAPH_VERBOSITY=0
-# Google Logging (GLOG) suppression for TensorFlow components
-ENV GLOG_minloglevel=3
-ENV GLOG_logtostderr=0
-
-ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
-
-# Install dependencies
-RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \
- /deps/install_deps.sh
-
-ENV DEFAULT_FFMPEG_VERSION="7.0"
-ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:5.0"
-
-RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
- && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
- && python3 get-pip.py "pip"
-
-RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
- pip3 install -U /deps/wheels/*.whl
-
-# Install Axera Engine
-RUN pip3 install https://github.com/AXERA-TECH/pyaxengine/releases/download/0.1.3-frigate/axengine-0.1.3-py3-none-any.whl
-
-ENV PATH="${PATH}:/usr/bin/axcl"
-ENV LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/usr/lib/axcl"
-
-# Install MemryX runtime (requires libgomp (OpenMP) in the final docker image)
-RUN --mount=type=bind,source=docker/main/install_memryx.sh,target=/deps/install_memryx.sh \
- bash -c "bash /deps/install_memryx.sh"
-
-COPY --from=deps-rootfs / /
-
-RUN ldconfig
-
-EXPOSE 5000
-EXPOSE 8554
-EXPOSE 8555/tcp 8555/udp
-
-# Configure logging to prepend timestamps, log to stdout, keep 0 archives and rotate on 10MB
-ENV S6_LOGGING_SCRIPT="T 1 n0 s10000000 T"
-# Do not fail on long-running download scripts
-ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
-
-ENTRYPOINT ["/init"]
-CMD []
-
-HEALTHCHECK --start-period=300s --start-interval=5s --interval=15s --timeout=5s --retries=3 \
- CMD test -f /dev/shm/.frigate-is-stopping && exit 0; curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1
-
-# Frigate deps with Node.js and NPM for devcontainer
-FROM deps AS devcontainer
-
-# Do not start the actual Frigate service on devcontainer as it will be started by VS Code
-# But start a fake service for simulating the logs
-COPY docker/main/fake_frigate_run /etc/s6-overlay/s6-rc.d/frigate/run
-
-# Create symbolic link to the frigate source code, as go2rtc's create_config.sh uses it
-RUN mkdir -p /opt/frigate \
- && ln -svf /workspace/frigate/frigate /opt/frigate/frigate
-
-# Install Node 20
-RUN curl -SLO https://deb.nodesource.com/nsolid_setup_deb.sh && \
- chmod 500 nsolid_setup_deb.sh && \
- ./nsolid_setup_deb.sh 20 && \
- apt-get install nodejs -y \
- && rm -rf /var/lib/apt/lists/* \
- && npm install -g npm@10
-
-WORKDIR /workspace/frigate
-
-RUN apt-get update \
- && apt-get install make -y \
- && rm -rf /var/lib/apt/lists/*
-
-RUN --mount=type=bind,source=./docker/main/requirements-dev.txt,target=/workspace/frigate/requirements-dev.txt \
- pip3 install -r requirements-dev.txt
-
-HEALTHCHECK NONE
-
-CMD ["sleep", "infinity"]
-
-
-# Frigate web build
-# This should be architecture agnostic, so speed up the build on multiarch by not using QEMU.
-FROM --platform=$BUILDPLATFORM node:20 AS web-build
-
-WORKDIR /work
-COPY web/package.json web/package-lock.json ./
-RUN npm install
-
-COPY web/ ./
-RUN npm run build \
- && mv dist/BASE_PATH/monacoeditorwork/* dist/assets/ \
- && rm -rf dist/BASE_PATH
-
-# Collect final files in a single layer
-FROM scratch AS rootfs
-
-WORKDIR /opt/frigate/
-COPY frigate frigate/
-COPY migrations migrations/
-COPY --from=web-build /work/dist/ web/
-
-# Frigate final container
-FROM deps AS frigate
-
-WORKDIR /opt/frigate/
-COPY --from=rootfs / /
+# syntax=docker/dockerfile:1.6
+
+# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
+ARG DEBIAN_FRONTEND=noninteractive
+
+# Globally set pip break-system-packages option to avoid having to specify it every time
+ARG PIP_BREAK_SYSTEM_PACKAGES=1
+
+ARG BASE_IMAGE=debian:12
+ARG SLIM_BASE=debian:12-slim
+
+# A hook that allows us to inject commands right after the base images
+ARG BASE_HOOK=
+
+FROM ${BASE_IMAGE} AS base
+ARG PIP_BREAK_SYSTEM_PACKAGES
+ARG BASE_HOOK
+
+RUN if [ -n "$BASE_HOOK" ]; then \
+ printf '%s\n' "$BASE_HOOK" | tr -d '\r' >/tmp/base_hook.sh && sh /tmp/base_hook.sh && rm -f /tmp/base_hook.sh; \
+ fi
+
+FROM --platform=${BUILDPLATFORM} debian:12 AS base_host
+ARG PIP_BREAK_SYSTEM_PACKAGES
+
+FROM ${SLIM_BASE} AS slim-base
+ARG PIP_BREAK_SYSTEM_PACKAGES
+ARG BASE_HOOK
+
+RUN if [ -n "$BASE_HOOK" ]; then \
+ printf '%s\n' "$BASE_HOOK" | tr -d '\r' >/tmp/base_hook.sh && sh /tmp/base_hook.sh && rm -f /tmp/base_hook.sh; \
+ fi
+
+FROM slim-base AS wget
+ARG DEBIAN_FRONTEND
+RUN apt-get update \
+ && apt-get install -y wget xz-utils \
+ && rm -rf /var/lib/apt/lists/*
+WORKDIR /rootfs
+
+FROM base AS nginx
+ARG DEBIAN_FRONTEND
+ENV CCACHE_DIR /root/.ccache
+ENV CCACHE_MAXSIZE 2G
+
+RUN --mount=type=bind,source=docker/main/build_nginx.sh,target=/deps/build_nginx.sh \
+    tr -d '\r' < /deps/build_nginx.sh > /tmp/build_nginx.sh \
+    && bash /tmp/build_nginx.sh
+
+FROM wget AS sqlite-vec
+ARG DEBIAN_FRONTEND
+
+# Build sqlite_vec from source
+COPY docker/main/build_sqlite_vec.sh /deps/build_sqlite_vec.sh
+RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
+    --mount=type=bind,source=docker/main/build_sqlite_vec.sh,target=/deps/build_sqlite_vec.sh \
+    --mount=type=cache,target=/root/.ccache \
+    tr -d '\r' < /deps/build_sqlite_vec.sh > /tmp/build_sqlite_vec.sh \
+    && bash /tmp/build_sqlite_vec.sh
+
+FROM scratch AS go2rtc
+ARG TARGETARCH
+WORKDIR /rootfs/usr/local/go2rtc/bin
+ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.13/go2rtc_linux_${TARGETARCH}" go2rtc
+
+FROM wget AS tempio
+ARG TARGETARCH
+RUN --mount=type=bind,source=docker/main/install_tempio.sh,target=/deps/install_tempio.sh \
+    tr -d '\r' < /deps/install_tempio.sh > /tmp/install_tempio.sh \
+    && bash /tmp/install_tempio.sh
+
+####
+#
+# OpenVino Support
+#
+# 1. Download and convert a model from Intel's Public Open Model Zoo
+#
+####
+# Download and Convert OpenVino model
+FROM base_host AS ov-converter
+ARG DEBIAN_FRONTEND
+
+# Install OpenVino Runtime and Dev library
+COPY docker/main/requirements-ov.txt /requirements-ov.txt
+RUN apt-get -qq update \
+ && apt-get -qq install -y wget python3 python3-dev python3-distutils gcc pkg-config libhdf5-dev \
+ && wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
+ && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
+ && python3 get-pip.py "pip" \
+ && pip3 install -r /requirements-ov.txt
+
+# Get OpenVino Model
+RUN --mount=type=bind,source=docker/main/build_ov_model.py,target=/build_ov_model.py \
+ mkdir /models && cd /models \
+ && wget http://download.tensorflow.org/models/object_detection/ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz \
+ && tar -xvf ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz \
+ && python3 /build_ov_model.py
+
+####
+#
+# Coral Compatibility
+#
+# Builds libusb without udev. Needed for synology and other devices with USB coral
+####
+# libUSB - No Udev
+FROM wget as libusb-build
+ARG TARGETARCH
+ARG DEBIAN_FRONTEND
+ENV CCACHE_DIR /root/.ccache
+ENV CCACHE_MAXSIZE 2G
+
+# Build libUSB without udev. Needed for Openvino NCS2 support
+WORKDIR /opt
+RUN apt-get update && apt-get install -y unzip build-essential automake libtool ccache pkg-config
+RUN --mount=type=cache,target=/root/.ccache wget -q https://github.com/libusb/libusb/archive/v1.0.26.zip -O v1.0.26.zip && \
+ unzip v1.0.26.zip && cd libusb-1.0.26 && \
+ ./bootstrap.sh && \
+ ./configure CC='ccache gcc' CCX='ccache g++' --disable-udev --enable-shared && \
+ make -j $(nproc --all)
+RUN apt-get update && \
+ apt-get install -y --no-install-recommends libusb-1.0-0-dev && \
+ rm -rf /var/lib/apt/lists/*
+WORKDIR /opt/libusb-1.0.26/libusb
+RUN /bin/mkdir -p '/usr/local/lib' && \
+ /bin/bash ../libtool --mode=install /usr/bin/install -c libusb-1.0.la '/usr/local/lib' && \
+ /bin/mkdir -p '/usr/local/include/libusb-1.0' && \
+ /usr/bin/install -c -m 644 libusb.h '/usr/local/include/libusb-1.0' && \
+ /bin/mkdir -p '/usr/local/lib/pkgconfig' && \
+ cd /opt/libusb-1.0.26/ && \
+ /usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \
+ ldconfig
+
+FROM wget AS models
+
+# Get model and labels
+RUN wget -qO edgetpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite
+RUN wget -qO cpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite
+COPY labelmap.txt .
+# Copy OpenVino model
+COPY --from=ov-converter /models/ssdlite_mobilenet_v2.xml openvino-model/
+COPY --from=ov-converter /models/ssdlite_mobilenet_v2.bin openvino-model/
+RUN wget -q https://github.com/openvinotoolkit/open_model_zoo/raw/master/data/dataset_classes/coco_91cl_bkgr.txt -O openvino-model/coco_91cl_bkgr.txt && \
+ sed -i 's/truck/car/g' openvino-model/coco_91cl_bkgr.txt
+# Get Audio Model and labels
+RUN wget -qO - https://www.kaggle.com/api/v1/models/google/yamnet/tfLite/classification-tflite/1/download | tar xvz && mv 1.tflite cpu_audio_model.tflite
+COPY audio-labelmap.txt .
+
+
+FROM wget AS s6-overlay
+ARG TARGETARCH
+RUN --mount=type=bind,source=docker/main/install_s6_overlay.sh,target=/deps/install_s6_overlay.sh \
+    tr -d '\r' < /deps/install_s6_overlay.sh > /tmp/install_s6_overlay.sh \
+    && bash /tmp/install_s6_overlay.sh
+
+
+FROM base AS wheels
+ARG DEBIAN_FRONTEND
+ARG TARGETARCH
+ARG DEBUG=false
+
+# Use a separate container to build wheels to prevent build dependencies in final image
+RUN apt-get -qq update \
+ && apt-get -qq install -y \
+ apt-transport-https wget unzip \
+ && apt-get -qq update \
+ && apt-get -qq install -y \
+ python3.11 \
+ python3.11-dev \
+ # opencv dependencies
+ build-essential cmake git pkg-config libgtk-3-dev \
+ libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \
+ libxvidcore-dev libx264-dev libjpeg-dev libpng-dev libtiff-dev \
+ gfortran openexr libatlas-base-dev libssl-dev\
+ libtbbmalloc2 libtbb-dev libdc1394-dev libopenexr-dev \
+ libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \
+ # sqlite3 dependencies
+ tclsh \
+ # scipy dependencies
+ gcc gfortran libopenblas-dev liblapack-dev && \
+ rm -rf /var/lib/apt/lists/*
+
+RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1
+
+RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
+ && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
+ && python3 get-pip.py "pip"
+
+COPY docker/main/requirements.txt /requirements.txt
+COPY docker/main/requirements-dev.txt /requirements-dev.txt
+
+RUN pip3 install -r /requirements.txt
+
+# Build pysqlite3 from source
+COPY docker/main/build_pysqlite3.sh /build_pysqlite3.sh
+RUN tr -d '\r' < /build_pysqlite3.sh > /tmp/build_pysqlite3.sh \
+    && bash /tmp/build_pysqlite3.sh
+
+COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
+RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt && \
+ if [ "$DEBUG" = "true" ]; then \
+ pip3 wheel --wheel-dir=/wheels -r /requirements-dev.txt; \
+ fi
+
+# Install HailoRT & Wheels
+RUN --mount=type=bind,source=docker/main/install_hailort.sh,target=/deps/install_hailort.sh \
+    tr -d '\r' < /deps/install_hailort.sh > /tmp/install_hailort.sh \
+    && bash /tmp/install_hailort.sh
+
+# Collect deps in a single layer
+FROM scratch AS deps-rootfs
+COPY --from=nginx /usr/local/nginx/ /usr/local/nginx/
+COPY --from=sqlite-vec /usr/local/lib/ /usr/local/lib/
+COPY --from=go2rtc /rootfs/ /
+COPY --from=libusb-build /usr/local/lib /usr/local/lib
+COPY --from=tempio /rootfs/ /
+COPY --from=s6-overlay /rootfs/ /
+COPY --from=models /rootfs/ /
+COPY --from=wheels /rootfs/ /
+COPY docker/main/rootfs/ /
+
+
+# Frigate deps (ffmpeg, python, nginx, go2rtc, s6-overlay, etc)
+FROM slim-base AS deps
+ARG TARGETARCH
+ARG BASE_IMAGE
+
+ARG DEBIAN_FRONTEND
+# http://stackoverflow.com/questions/48162574/ddg#49462622
+ARG APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn
+
+# https://github.com/NVIDIA/nvidia-docker/wiki/Installation-(Native-GPU-Support)
+ENV NVIDIA_VISIBLE_DEVICES=all
+ENV NVIDIA_DRIVER_CAPABILITIES="compute,video,utility"
+
+# Disable tokenizer parallelism warning
+# https://stackoverflow.com/questions/62691279/how-to-disable-tokenizers-parallelism-true-false-warning/72926996#72926996
+ENV TOKENIZERS_PARALLELISM=true
+# https://github.com/huggingface/transformers/issues/27214
+ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1
+
+# Set OpenCV ffmpeg loglevel to fatal: https://ffmpeg.org/doxygen/trunk/log_8h.html
+ENV OPENCV_FFMPEG_LOGLEVEL=8
+
+# Set NumPy to ignore getlimits warning
+ENV PYTHONWARNINGS="ignore:::numpy.core.getlimits"
+
+# Set HailoRT to disable logging
+ENV HAILORT_LOGGER_PATH=NONE
+
+# TensorFlow C++ logging suppression (must be set before import)
+# TF_CPP_MIN_LOG_LEVEL: 0=all, 1=INFO+, 2=WARNING+, 3=ERROR+ (we use 3 for errors only)
+ENV TF_CPP_MIN_LOG_LEVEL=3
+# Suppress verbose logging from TensorFlow C++ code
+ENV TF_CPP_MIN_VLOG_LEVEL=3
+# Disable oneDNN optimization messages ("optimized with oneDNN...")
+ENV TF_ENABLE_ONEDNN_OPTS=0
+# Suppress AutoGraph verbosity during conversion
+ENV AUTOGRAPH_VERBOSITY=0
+# Google Logging (GLOG) suppression for TensorFlow components
+ENV GLOG_minloglevel=3
+ENV GLOG_logtostderr=0
+
+ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
+
+# Install dependencies
+RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \
+    tr -d '\r' < /deps/install_deps.sh > /tmp/install_deps.sh \
+    && bash /tmp/install_deps.sh
+
+ENV DEFAULT_FFMPEG_VERSION="7.0"
+ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:5.0"
+
+RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
+ && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
+ && python3 get-pip.py "pip"
+
+RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
+ pip3 install -U /deps/wheels/*.whl
+
+# Install Axera Engine
+RUN pip3 install https://github.com/AXERA-TECH/pyaxengine/releases/download/0.1.3-frigate/axengine-0.1.3-py3-none-any.whl
+
+ENV PATH="${PATH}:/usr/bin/axcl"
+ENV LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/usr/lib/axcl"
+
+# Install MemryX runtime (requires libgomp (OpenMP) in the final docker image)
+RUN --mount=type=bind,source=docker/main/install_memryx.sh,target=/deps/install_memryx.sh \
+    tr -d '\r' < /deps/install_memryx.sh > /tmp/install_memryx.sh \
+    && bash /tmp/install_memryx.sh
+
+COPY --from=deps-rootfs / /
+
+RUN find /etc/s6-overlay/s6-rc.d -type f -exec sed -i 's/\r$//' {} +
+
+RUN find /etc/s6-overlay/s6-rc.d -type f \
+ \( -name run -o -name up \) \
+ -exec chmod +x {} +
+
+RUN ldconfig
+
+EXPOSE 5000
+EXPOSE 5010
+EXPOSE 8554
+EXPOSE 8555/tcp 8555/udp
+
+# Configure logging to prepend timestamps, log to stdout, keep 0 archives and rotate on 10MB
+ENV S6_LOGGING_SCRIPT="T 1 n0 s10000000 T"
+# Do not fail on long-running download scripts
+ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
+
+ENTRYPOINT ["/init"]
+CMD []
+
+HEALTHCHECK --start-period=300s --start-interval=5s --interval=15s --timeout=5s --retries=3 \
+ CMD test -f /dev/shm/.frigate-is-stopping && exit 0; curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1
+
+# Frigate deps with Node.js and NPM for devcontainer
+FROM deps AS devcontainer
+
+# Do not start the actual Frigate service on devcontainer as it will be started by VS Code
+# But start a fake service for simulating the logs
+COPY docker/main/fake_frigate_run /etc/s6-overlay/s6-rc.d/frigate/run
+
+# Create symbolic link to the frigate source code, as go2rtc's create_config.sh uses it
+RUN mkdir -p /opt/frigate \
+ && ln -svf /workspace/frigate/frigate /opt/frigate/frigate
+
+# Install Node 20
+RUN curl -SLO https://deb.nodesource.com/nsolid_setup_deb.sh && \
+ chmod 500 nsolid_setup_deb.sh && \
+ ./nsolid_setup_deb.sh 20 && \
+ apt-get install nodejs -y \
+ && rm -rf /var/lib/apt/lists/* \
+ && npm install -g npm@10
+
+WORKDIR /workspace/frigate
+
+RUN apt-get update \
+ && apt-get install make -y \
+ && rm -rf /var/lib/apt/lists/*
+
+RUN --mount=type=bind,source=./docker/main/requirements-dev.txt,target=/workspace/frigate/requirements-dev.txt \
+ pip3 install -r requirements-dev.txt
+
+HEALTHCHECK NONE
+
+CMD ["sleep", "infinity"]
+
+
+# Frigate web build
+# This should be architecture agnostic, so speed up the build on multiarch by not using QEMU.
+FROM --platform=$BUILDPLATFORM node:20 AS web-build
+
+WORKDIR /work
+COPY web/package.json web/package-lock.json ./
+RUN npm install
+
+COPY web/ ./
+RUN npm run build \
+ && mv dist/BASE_PATH/monacoeditorwork/* dist/assets/ \
+ && rm -rf dist/BASE_PATH
+
+# Collect final files in a single layer
+FROM scratch AS rootfs
+
+WORKDIR /opt/frigate/
+COPY frigate frigate/
+COPY migrations migrations/
+COPY transcode_proxy transcode_proxy/
+COPY --from=web-build /work/dist/ web/
+
+# Frigate final container
+FROM deps AS frigate
+
+WORKDIR /opt/frigate/
+COPY --from=rootfs / /
+RUN pip3 install --no-cache-dir -r /opt/frigate/transcode_proxy/requirements.txt
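
The Dockerfile above repeatedly normalizes bind-mounted helper scripts before executing them: BuildKit bind mounts are read-only, so a checkout with CRLF line endings has to be rewritten to an LF-only copy in a writable location first. A minimal sketch of the pattern, with illustrative paths:

    #!/bin/sh
    # Normalize-then-run: /deps is a read-only bind mount, so the LF-only
    # copy goes to a writable path such as /tmp before execution.
    set -eu
    tr -d '\r' < /deps/some_script.sh > /tmp/some_script.sh
    bash /tmp/some_script.sh
    rm -f /tmp/some_script.sh
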
diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run
index 9c84c20d5..0865b34ab 100755
--- a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run
+++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run
@@ -1,33 +1,56 @@
-#!/command/with-contenv bash
-# shellcheck shell=bash
-# Start the Frigate service
-
-set -o errexit -o nounset -o pipefail
-
-# opt out of openvino telemetry
-if [ -e /usr/local/bin/opt_in_out ]; then
- /usr/local/bin/opt_in_out --opt_out > /dev/null 2>&1
-fi
-
-# Logs should be sent to stdout so that s6 can collect them
-
-# Tell S6-Overlay not to restart this service
-s6-svc -O .
-
-function set_libva_version() {
- local ffmpeg_path
- ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
- LIBAVFORMAT_VERSION_MAJOR=$("$ffmpeg_path" -version | grep -Po "libavformat\W+\K\d+")
- export LIBAVFORMAT_VERSION_MAJOR
-}
-
-echo "[INFO] Preparing Frigate..."
-set_libva_version
-
-echo "[INFO] Starting Frigate..."
-
-cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate"
-
-# Replace the bash process with the Frigate process, redirecting stderr to stdout
-exec 2>&1
-exec python3 -u -m frigate
+#!/command/with-contenv bash
+# shellcheck shell=bash
+# Start the Frigate service
+
+set -o errexit -o nounset -o pipefail
+
+# opt out of openvino telemetry
+if [ -e /usr/local/bin/opt_in_out ]; then
+ /usr/local/bin/opt_in_out --opt_out > /dev/null 2>&1
+fi
+
+# Logs should be sent to stdout so that s6 can collect them
+
+# Tell S6-Overlay not to restart this service
+s6-svc -O .
+
+function set_libva_version() {
+ local ffmpeg_path
+ ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
+ LIBAVFORMAT_VERSION_MAJOR=$("$ffmpeg_path" -version | grep -Po "libavformat\W+\K\d+")
+ export LIBAVFORMAT_VERSION_MAJOR
+}
+
+function start_transcode_proxy() {
+ (
+ export TRANSCODE_PROXY_UPSTREAM="${TRANSCODE_PROXY_UPSTREAM:-http://127.0.0.1:5000}"
+ export PYTHONPATH="/opt/frigate:${PYTHONPATH:-}"
+
+ if [[ -z "${TRANSCODE_PROXY_FFMPEG:-}" ]]; then
+ TRANSCODE_PROXY_FFMPEG=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
+ export TRANSCODE_PROXY_FFMPEG
+ fi
+
+ until curl -sf -o /dev/null "${TRANSCODE_PROXY_UPSTREAM}/api/version"; do
+ sleep 1
+ done
+
+ echo "[INFO] Starting transcode proxy..."
+ exec python3 -m uvicorn transcode_proxy.main:app \
+ --host "${TRANSCODE_PROXY_HOST:-0.0.0.0}" \
+ --port "${TRANSCODE_PROXY_PORT:-5010}"
+ ) &
+}
+
+echo "[INFO] Preparing Frigate..."
+set_libva_version
+
+start_transcode_proxy
+
+echo "[INFO] Starting Frigate..."
+
+cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate"
+
+# Replace the bash process with the Frigate process, redirecting stderr to stdout
+exec 2>&1
+exec python3 -u -m frigate
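
For context on set_libva_version above: ffmpeg -version prints one line per bundled library, and the grep extracts the major version from it. A sketch, with an illustrative path and sample output:

    # ffmpeg -version emits library lines of the form:
    #   libavformat    60.  3.100 / 60.  3.100
    # \W+ consumes the padding and \K drops it from the match, so only the
    # major version number is printed.
    ffmpeg_path=/usr/lib/ffmpeg/7.0/bin/ffmpeg   # illustrative path
    "$ffmpeg_path" -version | grep -Po "libavformat\W+\K\d+"   # prints e.g. 60
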
diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/run
index c493e320e..d450472c9 100755
--- a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/run
+++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/run
@@ -1,11 +1,11 @@
-#!/command/with-contenv bash
-# shellcheck shell=bash
-# Prepare the logs folder for s6-log
-
-set -o errexit -o nounset -o pipefail
-
-dirs=(/dev/shm/logs/frigate /dev/shm/logs/go2rtc /dev/shm/logs/nginx /dev/shm/logs/certsync)
-
-mkdir -p "${dirs[@]}"
-chown nobody:nogroup "${dirs[@]}"
-chmod 02755 "${dirs[@]}"
+#!/command/with-contenv bash
+# shellcheck shell=bash
+# Prepare the logs folder for s6-log
+
+set -o errexit -o nounset -o pipefail
+
+dirs=(/dev/shm/logs/frigate /dev/shm/logs/go2rtc /dev/shm/logs/nginx /dev/shm/logs/certsync /dev/shm/logs/transcode-proxy)
+
+mkdir -p "${dirs[@]}"
+chown nobody:nogroup "${dirs[@]}"
+chmod 02755 "${dirs[@]}"
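
Mode 02755 above is the setgid bit on top of rwxr-xr-x, so files created inside the log directories inherit the nogroup group that s6-log's unprivileged writer relies on. A quick way to verify the result for the new directory:

    # Reproduce what the script does for the new directory and inspect it:
    mkdir -p /dev/shm/logs/transcode-proxy
    chown nobody:nogroup /dev/shm/logs/transcode-proxy
    chmod 02755 /dev/shm/logs/transcode-proxy
    stat -c '%A %U:%G' /dev/shm/logs/transcode-proxy   # drwxr-sr-x nobody:nogroup
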
diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy-log/consumer-for b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy-log/consumer-for
new file mode 100644
index 000000000..80d5087e0
--- /dev/null
+++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy-log/consumer-for
@@ -0,0 +1 @@
+transcode-proxy
diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy-log/pipeline-name b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy-log/pipeline-name
new file mode 100644
index 000000000..975eafe7e
--- /dev/null
+++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy-log/pipeline-name
@@ -0,0 +1 @@
+transcode-proxy-pipeline
diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy-log/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy-log/run
new file mode 100644
index 000000000..04e821993
--- /dev/null
+++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy-log/run
@@ -0,0 +1,4 @@
+#!/command/with-contenv bash
+# shellcheck shell=bash
+
+exec logutil-service /dev/shm/logs/transcode-proxy
diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy-log/type b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy-log/type
new file mode 100644
index 000000000..5b9368f7c
--- /dev/null
+++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy-log/type
@@ -0,0 +1 @@
+longrun
diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy/dependencies.d/nginx b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy/dependencies.d/nginx
new file mode 100644
index 000000000..d3f5a12fa
--- /dev/null
+++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy/dependencies.d/nginx
@@ -0,0 +1 @@
+
diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy/producer-for b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy/producer-for
new file mode 100644
index 000000000..1a26f6ba1
--- /dev/null
+++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy/producer-for
@@ -0,0 +1 @@
+transcode-proxy-log
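
Taken together, the one-line files above wire the new service and its logger into an s6-rc pipeline; this assumes the pipeline is also registered in a bundle's contents.d directory, which this diff does not show:

    # Resulting s6-rc.d layout (file contents after '->'):
    # transcode-proxy/type                 -> longrun
    # transcode-proxy/producer-for         -> transcode-proxy-log
    # transcode-proxy/dependencies.d/nginx -> (empty file; start after nginx)
    # transcode-proxy-log/type             -> longrun
    # transcode-proxy-log/consumer-for     -> transcode-proxy
    # transcode-proxy-log/pipeline-name    -> transcode-proxy-pipeline
    # transcode-proxy-log/run              -> exec logutil-service /dev/shm/logs/transcode-proxy
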
diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy/run
new file mode 100644
index 000000000..c441bdbb4
--- /dev/null
+++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy/run
@@ -0,0 +1,32 @@
+#!/command/with-contenv bash
+# shellcheck shell=bash
+# Start the transcode proxy (runs alongside Frigate in the same container)
+
+set -o errexit -o nounset -o pipefail
+
+# Logs should be sent to stdout so that s6 can collect them
+
+echo "[INFO] Starting transcode proxy..."
+
+# Default upstream to nginx internal port when not set
+export TRANSCODE_PROXY_UPSTREAM="${TRANSCODE_PROXY_UPSTREAM:-http://127.0.0.1:5000}"
+
+# Use Frigate's FFmpeg when not set
+if [ -z "${TRANSCODE_PROXY_FFMPEG:-}" ]; then
+ export TRANSCODE_PROXY_FFMPEG="$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)"
+fi
+
+# Wait for nginx/API to be ready so proxy can reach upstream
+until curl -sf -o /dev/null "${TRANSCODE_PROXY_UPSTREAM}/api/version"; do
+ echo "[INFO] Waiting for upstream ${TRANSCODE_PROXY_UPSTREAM}..."
+ sleep 1
+done
+
+echo "[INFO] Upstream ready, starting transcode proxy on port ${TRANSCODE_PROXY_PORT:-5010}"
+
+export PYTHONPATH="/opt/frigate:${PYTHONPATH:-}"
+
+exec 2>&1
+exec python3 -m uvicorn transcode_proxy.main:app \
+ --host "${TRANSCODE_PROXY_HOST:-0.0.0.0}" \
+ --port "${TRANSCODE_PROXY_PORT:-5010}"
diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy/type b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy/type
new file mode 100644
index 000000000..5b9368f7c
--- /dev/null
+++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/transcode-proxy/type
@@ -0,0 +1 @@
+longrun
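
Because the run script reads everything from environment variables with defaults, the proxy can be repointed without rebuilding the image; an illustrative override at container start (image tag and port values are examples only):

    docker run -d --name frigate \
      -e TRANSCODE_PROXY_PORT=5011 \
      -e TRANSCODE_PROXY_UPSTREAM=http://127.0.0.1:5000 \
      -p 5011:5011 \
      ghcr.io/blakeblackshear/frigate:dev
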
diff --git a/docker/main/rootfs/usr/local/nginx/conf/nginx.conf b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf
index bcd3c6bda..726ad4108 100644
--- a/docker/main/rootfs/usr/local/nginx/conf/nginx.conf
+++ b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf
@@ -1,365 +1,375 @@
-daemon off;
-user root;
-worker_processes auto;
-
-error_log /dev/stdout warn;
-pid /var/run/nginx.pid;
-
-events {
- worker_connections 1024;
-}
-
-http {
- map_hash_bucket_size 256;
-
- include mime.types;
- default_type application/octet-stream;
-
- log_format main '$remote_addr - $remote_user [$time_local] "$request" '
- '$status $body_bytes_sent "$http_referer" '
- '"$http_user_agent" "$http_x_forwarded_for" '
- 'request_time="$request_time" upstream_response_time="$upstream_response_time"';
-
-
- access_log /dev/stdout main;
-
- # send headers in one piece, it is better than sending them one by one
- tcp_nopush on;
-
- sendfile on;
-
- keepalive_timeout 65;
-
- gzip on;
- gzip_comp_level 6;
- gzip_types text/plain text/css application/json application/x-javascript application/javascript text/javascript image/svg+xml image/x-icon image/bmp;
- gzip_proxied no-cache no-store private expired auth;
- gzip_vary on;
-
- proxy_cache_path /dev/shm/nginx_cache levels=1:2 keys_zone=api_cache:10m max_size=10m inactive=1m use_temp_path=off;
-
- map $sent_http_content_type $should_not_cache {
- 'application/json' 0;
- default 1;
- }
-
- upstream frigate_api {
- server 127.0.0.1:5001;
- keepalive 1024;
- }
-
- upstream mqtt_ws {
- server 127.0.0.1:5002;
- keepalive 1024;
- }
-
- upstream jsmpeg {
- server 127.0.0.1:8082;
- keepalive 1024;
- }
-
- include go2rtc_upstream.conf;
-
- server {
- include listen.conf;
-
- # enable HTTP/2 for TLS connections to eliminate browser 6-connection limit
- http2 on;
-
- # vod settings
- vod_base_url '';
- vod_segments_base_url '';
- vod_mode mapped;
- vod_max_mapping_response_size 1m;
- vod_upstream_location /api;
- vod_align_segments_to_key_frames on;
- vod_manifest_segment_durations_mode accurate;
- vod_ignore_edit_list on;
- vod_segment_duration 10000;
-
- # MPEG-TS settings (not used when fMP4 is enabled, kept for reference)
- vod_hls_mpegts_align_frames off;
- vod_hls_mpegts_interleave_frames on;
-
- # file handle caching / aio
- open_file_cache max=1000 inactive=5m;
- open_file_cache_valid 2m;
- open_file_cache_min_uses 1;
- open_file_cache_errors on;
- aio on;
-
- # file upload size
- client_max_body_size 20M;
-
- # https://github.com/kaltura/nginx-vod-module#vod_open_file_thread_pool
- vod_open_file_thread_pool default;
-
- # vod caches
- vod_metadata_cache metadata_cache 512m;
- vod_mapping_cache mapping_cache 5m 10m;
-
- # gzip manifests
- gzip on;
- gzip_types application/vnd.apple.mpegurl;
-
- include auth_location.conf;
- include base_path.conf;
-
- location /vod/ {
- include auth_request.conf;
- aio threads;
- vod hls;
-
- # Use fMP4 (fragmented MP4) instead of MPEG-TS for better performance
- # Smaller segments, faster generation, better browser compatibility
- vod_hls_container_format fmp4;
-
- secure_token $args;
- secure_token_types application/vnd.apple.mpegurl;
-
- add_header Cache-Control "no-store";
- expires off;
-
- keepalive_disable safari;
-
- # vod module returns 502 for non-existent media
- # https://github.com/kaltura/nginx-vod-module/issues/468
- error_page 502 =404 /vod-not-found;
- }
-
- location = /vod-not-found {
- return 404;
- }
-
- location /stream/ {
- include auth_request.conf;
- add_header Cache-Control "no-store";
- expires off;
-
- types {
- application/dash+xml mpd;
- application/vnd.apple.mpegurl m3u8;
- video/mp2t ts;
- image/jpeg jpg;
- }
-
- root /tmp;
- }
-
- location /clips/ {
- include auth_request.conf;
- types {
- video/mp4 mp4;
- image/jpeg jpg;
- }
-
- expires 7d;
- add_header Cache-Control "public";
- autoindex on;
- root /media/frigate;
- }
-
- location /cache/ {
- internal; # This tells nginx it's not accessible from the outside
- alias /tmp/cache/;
- }
-
- location /recordings/ {
- include auth_request.conf;
- types {
- video/mp4 mp4;
- }
-
- autoindex on;
- autoindex_format json;
- root /media/frigate;
- }
-
- location /exports/ {
- include auth_request.conf;
- types {
- video/mp4 mp4;
- }
-
- autoindex on;
- autoindex_format json;
- root /media/frigate;
- }
-
- location /ws {
- include auth_request.conf;
- proxy_pass http://mqtt_ws/;
- include proxy.conf;
- }
-
- location /live/jsmpeg/ {
- include auth_request.conf;
- proxy_pass http://jsmpeg/;
- include proxy.conf;
- }
-
- # frigate lovelace card uses this path
- location /live/mse/api/ws {
- include auth_request.conf;
- limit_except GET {
- deny all;
- }
- proxy_pass http://go2rtc/api/ws;
- include proxy.conf;
- }
-
- location /live/webrtc/api/ws {
- include auth_request.conf;
- limit_except GET {
- deny all;
- }
- proxy_pass http://go2rtc/api/ws;
- include proxy.conf;
- }
-
- # pass through go2rtc player
- location /live/webrtc/webrtc.html {
- include auth_request.conf;
- limit_except GET {
- deny all;
- }
- proxy_pass http://go2rtc/webrtc.html;
- include proxy.conf;
- }
-
- # frontend uses this to fetch the version
- location /api/go2rtc/api {
- include auth_request.conf;
- limit_except GET {
- deny all;
- }
- proxy_pass http://go2rtc/api;
- include proxy.conf;
- }
-
- # integration uses this to add webrtc candidate
- location /api/go2rtc/webrtc {
- include auth_request.conf;
- limit_except POST {
- deny all;
- }
- proxy_pass http://go2rtc/api/webrtc;
- include proxy.conf;
- }
-
- location ~* /api/.*\.(jpg|jpeg|png|webp|gif)$ {
- include auth_request.conf;
- rewrite ^/api/(.*)$ /$1 break;
- proxy_pass http://frigate_api;
- include proxy.conf;
- }
-
- location /api/ {
- include auth_request.conf;
- add_header Cache-Control "no-store";
- expires off;
- proxy_pass http://frigate_api/;
- include proxy.conf;
-
- proxy_cache api_cache;
- proxy_cache_lock on;
- proxy_cache_use_stale updating;
- proxy_cache_valid 200 5s;
- proxy_cache_bypass $http_x_cache_bypass;
- proxy_no_cache $should_not_cache;
- add_header X-Cache-Status $upstream_cache_status;
-
- location /api/vod/ {
- include auth_request.conf;
- proxy_pass http://frigate_api/vod/;
- include proxy.conf;
- proxy_cache off;
- }
-
- location /api/login {
- auth_request off;
- rewrite ^/api(/.*)$ $1 break;
- proxy_pass http://frigate_api;
- include proxy.conf;
- }
-
- # Allow unauthenticated access to the first_time_login endpoint
- # so the login page can load help text before authentication.
- location /api/auth/first_time_login {
- auth_request off;
- limit_except GET {
- deny all;
- }
- rewrite ^/api(/.*)$ $1 break;
- proxy_pass http://frigate_api;
- include proxy.conf;
- }
-
- location /api/stats {
- include auth_request.conf;
- access_log off;
- rewrite ^/api(/.*)$ $1 break;
- proxy_pass http://frigate_api;
- include proxy.conf;
- }
-
- location /api/version {
- include auth_request.conf;
- access_log off;
- rewrite ^/api(/.*)$ $1 break;
- proxy_pass http://frigate_api;
- include proxy.conf;
- }
- }
-
- location / {
- # do not require auth for static assets
- add_header Cache-Control "no-store";
- expires off;
-
- location /assets/ {
- access_log off;
- expires 1y;
- add_header Cache-Control "public";
- }
-
- location /fonts/ {
- access_log off;
- expires 1y;
- add_header Cache-Control "public";
- }
-
- location /locales/ {
- access_log off;
- add_header Cache-Control "public";
- }
-
- location ~ ^/.*-([A-Za-z0-9]+)\.webmanifest$ {
- access_log off;
- expires 1y;
- add_header Cache-Control "public";
- default_type application/json;
- proxy_set_header Accept-Encoding "";
- sub_filter_once off;
- sub_filter_types application/json;
- sub_filter '"start_url": "/BASE_PATH/"' '"start_url" : "$http_x_ingress_path/"';
- sub_filter '"src": "/BASE_PATH/' '"src": "$http_x_ingress_path/';
- }
-
- sub_filter 'href="/BASE_PATH/' 'href="$http_x_ingress_path/';
- sub_filter 'url(/BASE_PATH/' 'url($http_x_ingress_path/';
- sub_filter '"/BASE_PATH/dist/' '"$http_x_ingress_path/dist/';
- sub_filter '"/BASE_PATH/js/' '"$http_x_ingress_path/js/';
- sub_filter '"/BASE_PATH/assets/' '"$http_x_ingress_path/assets/';
- sub_filter '"/BASE_PATH/locales/' '"$http_x_ingress_path/locales/';
- sub_filter '"/BASE_PATH/monacoeditorwork/' '"$http_x_ingress_path/assets/';
- sub_filter 'return"/BASE_PATH/"' 'return window.baseUrl';
- sub_filter '<body>' '<body><script>window.baseUrl="$http_x_ingress_path/"</script>';
- sub_filter_types text/css application/javascript;
- sub_filter_once off;
-
- root /opt/frigate/web;
- try_files $uri $uri.html $uri/ /index.html;
- }
- }
-}
+daemon off;
+user root;
+worker_processes auto;
+
+error_log /dev/stdout warn;
+pid /var/run/nginx.pid;
+
+events {
+ worker_connections 1024;
+}
+
+http {
+ map_hash_bucket_size 256;
+
+ include mime.types;
+ default_type application/octet-stream;
+
+ log_format main '$remote_addr - $remote_user [$time_local] "$request" '
+ '$status $body_bytes_sent "$http_referer" '
+ '"$http_user_agent" "$http_x_forwarded_for" '
+ 'request_time="$request_time" upstream_response_time="$upstream_response_time"';
+
+
+ access_log /dev/stdout main;
+
+ # send headers in one piece, it is better than sending them one by one
+ tcp_nopush on;
+
+ sendfile on;
+
+ keepalive_timeout 65;
+
+ gzip on;
+ gzip_comp_level 6;
+ gzip_types text/plain text/css application/json application/x-javascript application/javascript text/javascript image/svg+xml image/x-icon image/bmp;
+ gzip_proxied no-cache no-store private expired auth;
+ gzip_vary on;
+
+ proxy_cache_path /dev/shm/nginx_cache levels=1:2 keys_zone=api_cache:10m max_size=10m inactive=1m use_temp_path=off;
+
+ map $sent_http_content_type $should_not_cache {
+ 'application/json' 0;
+ default 1;
+ }
+
+ upstream frigate_api {
+ server 127.0.0.1:5001;
+ keepalive 1024;
+ }
+
+ upstream mqtt_ws {
+ server 127.0.0.1:5002;
+ keepalive 1024;
+ }
+
+ upstream jsmpeg {
+ server 127.0.0.1:8082;
+ keepalive 1024;
+ }
+
+ include go2rtc_upstream.conf;
+
+ server {
+ include listen.conf;
+
+ # enable HTTP/2 for TLS connections to eliminate browser 6-connection limit
+ http2 on;
+
+ # vod settings
+ vod_base_url '';
+ vod_segments_base_url '';
+ vod_mode mapped;
+ vod_max_mapping_response_size 1m;
+ vod_upstream_location /api;
+ vod_align_segments_to_key_frames on;
+ vod_manifest_segment_durations_mode accurate;
+ vod_ignore_edit_list on;
+ vod_segment_duration 10000;
+
+ # MPEG-TS settings (not used when fMP4 is enabled, kept for reference)
+ vod_hls_mpegts_align_frames off;
+ vod_hls_mpegts_interleave_frames on;
+
+ # file handle caching / aio
+ open_file_cache max=1000 inactive=5m;
+ open_file_cache_valid 2m;
+ open_file_cache_min_uses 1;
+ open_file_cache_errors on;
+ aio on;
+
+ # file upload size
+ client_max_body_size 20M;
+
+ # https://github.com/kaltura/nginx-vod-module#vod_open_file_thread_pool
+ vod_open_file_thread_pool default;
+
+ # vod caches
+ vod_metadata_cache metadata_cache 512m;
+ vod_mapping_cache mapping_cache 5m 10m;
+
+ # gzip manifests
+ gzip on;
+ gzip_types application/vnd.apple.mpegurl;
+
+ include auth_location.conf;
+ include base_path.conf;
+
+ location = /vod-transcoded {
+ return 302 /vod-transcoded/;
+ }
+
+ location /vod-transcoded/ {
+ include auth_request.conf;
+ proxy_pass http://127.0.0.1:5010;
+ include proxy.conf;
+ }
+
+ location /vod/ {
+ include auth_request.conf;
+ aio threads;
+ vod hls;
+
+ # Use fMP4 (fragmented MP4) instead of MPEG-TS for better performance
+ # Smaller segments, faster generation, better browser compatibility
+ vod_hls_container_format fmp4;
+
+ secure_token $args;
+ secure_token_types application/vnd.apple.mpegurl;
+
+ add_header Cache-Control "no-store";
+ expires off;
+
+ keepalive_disable safari;
+
+ # vod module returns 502 for non-existent media
+ # https://github.com/kaltura/nginx-vod-module/issues/468
+ error_page 502 =404 /vod-not-found;
+ }
+
+ location = /vod-not-found {
+ return 404;
+ }
+
+ location /stream/ {
+ include auth_request.conf;
+ add_header Cache-Control "no-store";
+ expires off;
+
+ types {
+ application/dash+xml mpd;
+ application/vnd.apple.mpegurl m3u8;
+ video/mp2t ts;
+ image/jpeg jpg;
+ }
+
+ root /tmp;
+ }
+
+ location /clips/ {
+ include auth_request.conf;
+ types {
+ video/mp4 mp4;
+ image/jpeg jpg;
+ }
+
+ expires 7d;
+ add_header Cache-Control "public";
+ autoindex on;
+ root /media/frigate;
+ }
+
+ location /cache/ {
+ internal; # This tells nginx it's not accessible from the outside
+ alias /tmp/cache/;
+ }
+
+ location /recordings/ {
+ include auth_request.conf;
+ types {
+ video/mp4 mp4;
+ }
+
+ autoindex on;
+ autoindex_format json;
+ root /media/frigate;
+ }
+
+ location /exports/ {
+ include auth_request.conf;
+ types {
+ video/mp4 mp4;
+ }
+
+ autoindex on;
+ autoindex_format json;
+ root /media/frigate;
+ }
+
+ location /ws {
+ include auth_request.conf;
+ proxy_pass http://mqtt_ws/;
+ include proxy.conf;
+ }
+
+ location /live/jsmpeg/ {
+ include auth_request.conf;
+ proxy_pass http://jsmpeg/;
+ include proxy.conf;
+ }
+
+ # frigate lovelace card uses this path
+ location /live/mse/api/ws {
+ include auth_request.conf;
+ limit_except GET {
+ deny all;
+ }
+ proxy_pass http://go2rtc/api/ws;
+ include proxy.conf;
+ }
+
+ location /live/webrtc/api/ws {
+ include auth_request.conf;
+ limit_except GET {
+ deny all;
+ }
+ proxy_pass http://go2rtc/api/ws;
+ include proxy.conf;
+ }
+
+ # pass through go2rtc player
+ location /live/webrtc/webrtc.html {
+ include auth_request.conf;
+ limit_except GET {
+ deny all;
+ }
+ proxy_pass http://go2rtc/webrtc.html;
+ include proxy.conf;
+ }
+
+ # frontend uses this to fetch the version
+ location /api/go2rtc/api {
+ include auth_request.conf;
+ limit_except GET {
+ deny all;
+ }
+ proxy_pass http://go2rtc/api;
+ include proxy.conf;
+ }
+
+ # integration uses this to add webrtc candidate
+ location /api/go2rtc/webrtc {
+ include auth_request.conf;
+ limit_except POST {
+ deny all;
+ }
+ proxy_pass http://go2rtc/api/webrtc;
+ include proxy.conf;
+ }
+
+ location ~* /api/.*\.(jpg|jpeg|png|webp|gif)$ {
+ include auth_request.conf;
+ rewrite ^/api/(.*)$ /$1 break;
+ proxy_pass http://frigate_api;
+ include proxy.conf;
+ }
+
+ location /api/ {
+ include auth_request.conf;
+ add_header Cache-Control "no-store";
+ expires off;
+ proxy_pass http://frigate_api/;
+ include proxy.conf;
+
+ proxy_cache api_cache;
+ proxy_cache_lock on;
+ proxy_cache_use_stale updating;
+ proxy_cache_valid 200 5s;
+ proxy_cache_bypass $http_x_cache_bypass;
+ proxy_no_cache $should_not_cache;
+ add_header X-Cache-Status $upstream_cache_status;
+
+ location /api/vod/ {
+ include auth_request.conf;
+ proxy_pass http://frigate_api/vod/;
+ include proxy.conf;
+ proxy_cache off;
+ }
+
+ location /api/login {
+ auth_request off;
+ rewrite ^/api(/.*)$ $1 break;
+ proxy_pass http://frigate_api;
+ include proxy.conf;
+ }
+
+ # Allow unauthenticated access to the first_time_login endpoint
+ # so the login page can load help text before authentication.
+ location /api/auth/first_time_login {
+ auth_request off;
+ limit_except GET {
+ deny all;
+ }
+ rewrite ^/api(/.*)$ $1 break;
+ proxy_pass http://frigate_api;
+ include proxy.conf;
+ }
+
+ location /api/stats {
+ include auth_request.conf;
+ access_log off;
+ rewrite ^/api(/.*)$ $1 break;
+ proxy_pass http://frigate_api;
+ include proxy.conf;
+ }
+
+ location /api/version {
+ include auth_request.conf;
+ access_log off;
+ rewrite ^/api(/.*)$ $1 break;
+ proxy_pass http://frigate_api;
+ include proxy.conf;
+ }
+ }
+
+ location / {
+ # do not require auth for static assets
+ add_header Cache-Control "no-store";
+ expires off;
+
+ location /assets/ {
+ access_log off;
+ expires 1y;
+ add_header Cache-Control "public";
+ }
+
+ location /fonts/ {
+ access_log off;
+ expires 1y;
+ add_header Cache-Control "public";
+ }
+
+ location /locales/ {
+ access_log off;
+ add_header Cache-Control "public";
+ }
+
+ location ~ ^/.*-([A-Za-z0-9]+)\.webmanifest$ {
+ access_log off;
+ expires 1y;
+ add_header Cache-Control "public";
+ default_type application/json;
+ proxy_set_header Accept-Encoding "";
+ sub_filter_once off;
+ sub_filter_types application/json;
+ sub_filter '"start_url": "/BASE_PATH/"' '"start_url" : "$http_x_ingress_path/"';
+ sub_filter '"src": "/BASE_PATH/' '"src": "$http_x_ingress_path/';
+ }
+
+ sub_filter 'href="/BASE_PATH/' 'href="$http_x_ingress_path/';
+ sub_filter 'url(/BASE_PATH/' 'url($http_x_ingress_path/';
+ sub_filter '"/BASE_PATH/dist/' '"$http_x_ingress_path/dist/';
+ sub_filter '"/BASE_PATH/js/' '"$http_x_ingress_path/js/';
+ sub_filter '"/BASE_PATH/assets/' '"$http_x_ingress_path/assets/';
+ sub_filter '"/BASE_PATH/locales/' '"$http_x_ingress_path/locales/';
+ sub_filter '"/BASE_PATH/monacoeditorwork/' '"$http_x_ingress_path/assets/';
+ sub_filter 'return"/BASE_PATH/"' 'return window.baseUrl';
+ sub_filter '<body>' '<body><script>window.baseUrl="$http_x_ingress_path/"</script>';
+ sub_filter_types text/css application/javascript;
+ sub_filter_once off;
+
+ root /opt/frigate/web;
+ try_files $uri $uri.html $uri/ /index.html;
+ }
+ }
+}
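
The new /vod-transcoded routing can be smoke-tested from inside the container; the sub-paths the proxy actually serves are defined by transcode_proxy.main, which is not part of this diff, so only the nginx plumbing is exercised here:

    # Bare path redirects to the trailing-slash form:
    curl -i http://127.0.0.1:5000/vod-transcoded
    # Trailing-slash form is auth-checked, then proxied to uvicorn on 127.0.0.1:5010:
    curl -i http://127.0.0.1:5000/vod-transcoded/
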
diff --git a/docker/tensorrt/Dockerfile.amd64 b/docker/tensorrt/Dockerfile.amd64
index cdf5df9ff..d62670dcc 100644
--- a/docker/tensorrt/Dockerfile.amd64
+++ b/docker/tensorrt/Dockerfile.amd64
@@ -1,37 +1,38 @@
-# syntax=docker/dockerfile:1.4
-
-# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
-ARG DEBIAN_FRONTEND=noninteractive
-
-# Globally set pip break-system-packages option to avoid having to specify it every time
-ARG PIP_BREAK_SYSTEM_PACKAGES=1
-
-FROM wheels AS trt-wheels
-ARG PIP_BREAK_SYSTEM_PACKAGES
-
-# Install TensorRT wheels
-COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
-COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
-
-# remove dependencies from the requirements that have type constraints
-RUN sed -i '/\[.*\]/d' /requirements-wheels.txt \
- && pip3 wheel --wheel-dir=/trt-wheels -c /requirements-wheels.txt -r /requirements-tensorrt.txt
-
-FROM deps AS frigate-tensorrt
-ARG PIP_BREAK_SYSTEM_PACKAGES
-
-RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
- pip3 uninstall -y onnxruntime \
- && pip3 install -U /deps/trt-wheels/*.whl
-
-COPY --from=rootfs / /
-COPY docker/tensorrt/detector/rootfs/etc/ld.so.conf.d /etc/ld.so.conf.d
-RUN ldconfig
-
-WORKDIR /opt/frigate/
-
-# Dev Container w/ TRT
-FROM devcontainer AS devcontainer-trt
-
-RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
- pip3 install -U /deps/trt-wheels/*.whl
+# syntax=docker/dockerfile:1.4
+
+# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
+ARG DEBIAN_FRONTEND=noninteractive
+
+# Globally set pip break-system-packages option to avoid having to specify it every time
+ARG PIP_BREAK_SYSTEM_PACKAGES=1
+
+FROM wheels AS trt-wheels
+ARG PIP_BREAK_SYSTEM_PACKAGES
+
+# Install TensorRT wheels
+COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
+COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
+
+# remove dependencies from the requirements that have type constraints
+RUN sed -i '/\[.*\]/d' /requirements-wheels.txt \
+ && pip3 wheel --wheel-dir=/trt-wheels -c /requirements-wheels.txt -r /requirements-tensorrt.txt
+
+FROM deps AS frigate-tensorrt
+ARG PIP_BREAK_SYSTEM_PACKAGES
+
+RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
+ pip3 uninstall -y onnxruntime \
+ && pip3 install -U /deps/trt-wheels/*.whl
+
+COPY --from=rootfs / /
+RUN pip3 install --no-cache-dir -r /opt/frigate/transcode_proxy/requirements.txt
+COPY docker/tensorrt/detector/rootfs/etc/ld.so.conf.d /etc/ld.so.conf.d
+RUN ldconfig
+
+WORKDIR /opt/frigate/
+
+# Dev Container w/ TRT
+FROM devcontainer AS devcontainer-trt
+
+RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
+ pip3 install -U /deps/trt-wheels/*.whl
diff --git a/docker/tensorrt/trt.hcl b/docker/tensorrt/trt.hcl
index 501e871e9..73b1b79b2 100644
--- a/docker/tensorrt/trt.hcl
+++ b/docker/tensorrt/trt.hcl
@@ -1,105 +1,105 @@
-variable "ARCH" {
- default = "amd64"
-}
-variable "BASE_IMAGE" {
- default = null
-}
-variable "SLIM_BASE" {
- default = null
-}
-variable "TRT_BASE" {
- default = null
-}
-variable "COMPUTE_LEVEL" {
- default = ""
-}
-variable "BASE_HOOK" {
- # Ensure an up-to-date python 3.11 is available in jetson images
- default = <> /etc/apt/sources.list.d/deadsnakes.list
- echo "deb-src https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu $VERSION_CODENAME main" >> /etc/apt/sources.list.d/deadsnakes.list
-
- # Add deadsnakes signing key
- apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F23C5A6CF475977595C89F51BA6932366A755776
-fi
-EOT
-}
-
-target "_build_args" {
- args = {
- BASE_IMAGE = BASE_IMAGE,
- SLIM_BASE = SLIM_BASE,
- TRT_BASE = TRT_BASE,
- COMPUTE_LEVEL = COMPUTE_LEVEL,
- BASE_HOOK = BASE_HOOK
- }
- platforms = ["linux/${ARCH}"]
-}
-
-target wget {
- dockerfile = "docker/main/Dockerfile"
- target = "wget"
- inherits = ["_build_args"]
-}
-
-target deps {
- dockerfile = "docker/main/Dockerfile"
- target = "deps"
- inherits = ["_build_args"]
-}
-
-target rootfs {
- dockerfile = "docker/main/Dockerfile"
- target = "rootfs"
- inherits = ["_build_args"]
-}
-
-target wheels {
- dockerfile = "docker/main/Dockerfile"
- target = "wheels"
- inherits = ["_build_args"]
-}
-
-target devcontainer {
- dockerfile = "docker/main/Dockerfile"
- platforms = ["linux/amd64"]
- target = "devcontainer"
-}
-
-target "trt-deps" {
- dockerfile = "docker/tensorrt/Dockerfile.base"
- context = "."
- contexts = {
- deps = "target:deps",
- }
- inherits = ["_build_args"]
-}
-
-target "tensorrt" {
- dockerfile = "docker/tensorrt/Dockerfile.${ARCH}"
- context = "."
- contexts = {
- wget = "target:wget",
- wheels = "target:wheels",
- deps = "target:deps",
- rootfs = "target:rootfs"
- }
- target = "frigate-tensorrt"
- inherits = ["_build_args"]
-}
-
-target "devcontainer-trt" {
- dockerfile = "docker/tensorrt/Dockerfile.amd64"
- context = "."
- contexts = {
- wheels = "target:wheels",
- trt-deps = "target:trt-deps",
- devcontainer = "target:devcontainer"
- }
- platforms = ["linux/amd64"]
- target = "devcontainer-trt"
-}
+variable "ARCH" {
+ default = "amd64"
+}
+variable "BASE_IMAGE" {
+ default = null
+}
+variable "SLIM_BASE" {
+ default = null
+}
+variable "TRT_BASE" {
+ default = null
+}
+variable "COMPUTE_LEVEL" {
+ default = ""
+}
+variable "BASE_HOOK" {
+ # Ensure an up-to-date python 3.11 is available in jetson images
+ default = <> /etc/apt/sources.list.d/deadsnakes.list
+ echo "deb-src https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu $VERSION_CODENAME main" >> /etc/apt/sources.list.d/deadsnakes.list
+
+ # Add deadsnakes signing key
+ apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F23C5A6CF475977595C89F51BA6932366A755776
+fi
+EOT
+}
+
+target "_build_args" {
+ args = {
+ BASE_IMAGE = BASE_IMAGE,
+ SLIM_BASE = SLIM_BASE,
+ TRT_BASE = TRT_BASE,
+ COMPUTE_LEVEL = COMPUTE_LEVEL,
+ BASE_HOOK = BASE_HOOK
+ }
+ platforms = ["linux/${ARCH}"]
+}
+
+target wget {
+ dockerfile = "docker/main/Dockerfile"
+ target = "wget"
+ inherits = ["_build_args"]
+}
+
+target deps {
+ dockerfile = "docker/main/Dockerfile"
+ target = "deps"
+ inherits = ["_build_args"]
+}
+
+target rootfs {
+ dockerfile = "docker/main/Dockerfile"
+ target = "rootfs"
+ inherits = ["_build_args"]
+}
+
+target wheels {
+ dockerfile = "docker/main/Dockerfile"
+ target = "wheels"
+ inherits = ["_build_args"]
+}
+
+target devcontainer {
+ dockerfile = "docker/main/Dockerfile"
+ platforms = ["linux/amd64"]
+ target = "devcontainer"
+}
+
+target "trt-deps" {
+ dockerfile = "docker/tensorrt/Dockerfile.base"
+ context = "."
+ contexts = {
+ deps = "target:deps",
+ }
+ inherits = ["_build_args"]
+}
+
+target "tensorrt" {
+ dockerfile = "docker/tensorrt/Dockerfile.${ARCH}"
+ context = "."
+ contexts = {
+ wget = "target:wget",
+ wheels = "target:wheels",
+ deps = "target:deps",
+ rootfs = "target:rootfs"
+ }
+ target = "frigate-tensorrt"
+ inherits = ["_build_args"]
+}
+
+target "devcontainer-trt" {
+ dockerfile = "docker/tensorrt/Dockerfile.amd64"
+ context = "."
+ contexts = {
+ wheels = "target:wheels",
+ trt-deps = "target:trt-deps",
+ devcontainer = "target:devcontainer"
+ }
+ platforms = ["linux/amd64"]
+ target = "devcontainer-trt"
+}
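+
+# Example (assumed usage): bake variables like ARCH come from the environment,
+# e.g. `ARCH=amd64 docker buildx bake -f docker/tensorrt/trt.hcl tensorrt`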
diff --git a/frigate/api/media.py b/frigate/api/media.py
index 903cf60c0..fc422c9b3 100644
--- a/frigate/api/media.py
+++ b/frigate/api/media.py
@@ -1,1702 +1,1725 @@
-"""Image and video apis."""
-
-import asyncio
-import glob
-import logging
-import math
-import os
-import subprocess as sp
-import time
-from datetime import datetime, timedelta, timezone
-from pathlib import Path as FilePath
-from typing import Any
-from urllib.parse import unquote
-
-import cv2
-import numpy as np
-import pytz
-from fastapi import APIRouter, Depends, Path, Query, Request, Response
-from fastapi.responses import FileResponse, JSONResponse, StreamingResponse
-from pathvalidate import sanitize_filename
-from peewee import DoesNotExist, fn
-from tzlocal import get_localzone_name
-
-from frigate.api.auth import (
- allow_any_authenticated,
- require_camera_access,
- require_role,
-)
-from frigate.api.defs.query.media_query_parameters import (
- Extension,
- MediaEventsSnapshotQueryParams,
- MediaLatestFrameQueryParams,
- MediaMjpegFeedQueryParams,
-)
-from frigate.api.defs.tags import Tags
-from frigate.camera.state import CameraState
-from frigate.config import FrigateConfig
-from frigate.const import (
- CACHE_DIR,
- CLIPS_DIR,
- INSTALL_DIR,
- MAX_SEGMENT_DURATION,
- PREVIEW_FRAME_TYPE,
-)
-from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
-from frigate.output.preview import get_most_recent_preview_frame
-from frigate.track.object_processing import TrackedObjectProcessor
-from frigate.util.file import get_event_thumbnail_bytes
-from frigate.util.image import get_image_from_recording
-
-logger = logging.getLogger(__name__)
-
-router = APIRouter(tags=[Tags.media])
-
-
-@router.get("/{camera_name}", dependencies=[Depends(require_camera_access)])
-async def mjpeg_feed(
- request: Request,
- camera_name: str,
- params: MediaMjpegFeedQueryParams = Depends(),
-):
- draw_options = {
- "bounding_boxes": params.bbox,
- "timestamp": params.timestamp,
- "zones": params.zones,
- "mask": params.mask,
- "motion_boxes": params.motion,
- "regions": params.regions,
- }
- if camera_name in request.app.frigate_config.cameras:
- # return a multipart response
- return StreamingResponse(
- imagestream(
- request.app.detected_frames_processor,
- camera_name,
- params.fps,
- params.height,
- draw_options,
- ),
- media_type="multipart/x-mixed-replace;boundary=frame",
- )
- else:
- return JSONResponse(
- content={"success": False, "message": "Camera not found"},
- status_code=404,
- )
-
-
-def imagestream(
- detected_frames_processor: TrackedObjectProcessor,
- camera_name: str,
- fps: int,
- height: int,
- draw_options: dict[str, Any],
-):
- while True:
- # max out at specified FPS
- time.sleep(1 / fps)
- frame = detected_frames_processor.get_current_frame(camera_name, draw_options)
- if frame is None:
- frame = np.zeros((height, int(height * 16 / 9), 3), np.uint8)
-
- width = int(height * frame.shape[1] / frame.shape[0])
- frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_LINEAR)
-
- ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
- yield (
- b"--frame\r\n"
- b"Content-Type: image/jpeg\r\n\r\n" + bytearray(jpg.tobytes()) + b"\r\n\r\n"
- )
-
-
-@router.get("/{camera_name}/ptz/info", dependencies=[Depends(require_camera_access)])
-async def camera_ptz_info(request: Request, camera_name: str):
- if camera_name in request.app.frigate_config.cameras:
- # Schedule get_camera_info in the OnvifController's event loop
- future = asyncio.run_coroutine_threadsafe(
- request.app.onvif.get_camera_info(camera_name), request.app.onvif.loop
- )
- result = future.result()
- return JSONResponse(content=result)
- else:
- return JSONResponse(
- content={"success": False, "message": "Camera not found"},
- status_code=404,
- )
-
-
-@router.get(
- "/{camera_name}/latest.{extension}",
- dependencies=[Depends(require_camera_access)],
- description="Returns the latest frame from the specified camera in the requested format (jpg, png, webp). Falls back to preview frames if the camera is offline.",
-)
-async def latest_frame(
- request: Request,
- camera_name: str,
- extension: Extension,
- params: MediaLatestFrameQueryParams = Depends(),
-):
- frame_processor: TrackedObjectProcessor = request.app.detected_frames_processor
- draw_options = {
- "bounding_boxes": params.bbox,
- "timestamp": params.timestamp,
- "zones": params.zones,
- "mask": params.mask,
- "motion_boxes": params.motion,
- "paths": params.paths,
- "regions": params.regions,
- }
- quality = params.quality
-
- if extension == Extension.png:
- quality_params = None
- elif extension == Extension.webp:
- quality_params = [int(cv2.IMWRITE_WEBP_QUALITY), quality]
- else: # jpg or jpeg
- quality_params = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
-
- if camera_name in request.app.frigate_config.cameras:
- frame = frame_processor.get_current_frame(camera_name, draw_options)
- retry_interval = float(
- request.app.frigate_config.cameras.get(camera_name).ffmpeg.retry_interval
- or 10
- )
-
- is_offline = False
- if frame is None or datetime.now().timestamp() > (
- frame_processor.get_current_frame_time(camera_name) + retry_interval
- ):
- last_frame_time = frame_processor.get_current_frame_time(camera_name)
- preview_path = get_most_recent_preview_frame(
- camera_name, before=last_frame_time
- )
-
- if preview_path:
- logger.debug(f"Using most recent preview frame for {camera_name}")
- frame = cv2.imread(preview_path, cv2.IMREAD_UNCHANGED)
-
- if frame is not None:
- is_offline = True
-
- if frame is None or not is_offline:
- logger.debug(
- f"No live or preview frame available for {camera_name}. Using error image."
- )
- if request.app.camera_error_image is None:
- error_image = glob.glob(
- os.path.join(INSTALL_DIR, "frigate/images/camera-error.jpg")
- )
-
- if len(error_image) > 0:
- request.app.camera_error_image = cv2.imread(
- error_image[0], cv2.IMREAD_UNCHANGED
- )
-
- frame = request.app.camera_error_image
-
- height = int(params.height or str(frame.shape[0]))
- width = int(height * frame.shape[1] / frame.shape[0])
-
- if frame is None:
- return JSONResponse(
- content={"success": False, "message": "Unable to get valid frame"},
- status_code=500,
- )
-
- if height < 1 or width < 1:
- return JSONResponse(
- content="Invalid height / width requested :: {} / {}".format(
- height, width
- ),
- status_code=400,
- )
-
- frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
-
- _, img = cv2.imencode(f".{extension.value}", frame, quality_params)
-
- headers = {
- "Cache-Control": "no-store" if not params.store else "private, max-age=60",
- }
-
- if is_offline:
- headers["X-Frigate-Offline"] = "true"
-
- return Response(
- content=img.tobytes(),
- media_type=extension.get_mime_type(),
- headers=headers,
- )
- elif (
- camera_name == "birdseye"
- and request.app.frigate_config.birdseye.enabled
- and request.app.frigate_config.birdseye.restream
- ):
- frame = cv2.cvtColor(
- frame_processor.get_current_frame(camera_name),
- cv2.COLOR_YUV2BGR_I420,
- )
-
- height = int(params.height or str(frame.shape[0]))
- width = int(height * frame.shape[1] / frame.shape[0])
-
- frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
-
- _, img = cv2.imencode(f".{extension.value}", frame, quality_params)
- return Response(
- content=img.tobytes(),
- media_type=extension.get_mime_type(),
- headers={
- "Cache-Control": "no-store"
- if not params.store
- else "private, max-age=60",
- },
- )
- else:
- return JSONResponse(
- content={"success": False, "message": "Camera not found"},
- status_code=404,
- )
-
-
-@router.get(
- "/{camera_name}/recordings/{frame_time}/snapshot.{format}",
- dependencies=[Depends(require_camera_access)],
-)
-async def get_snapshot_from_recording(
- request: Request,
- camera_name: str,
- frame_time: float,
- format: str = Path(enum=["png", "jpg"]),
- height: int = None,
-):
- if camera_name not in request.app.frigate_config.cameras:
- return JSONResponse(
- content={"success": False, "message": "Camera not found"},
- status_code=404,
- )
- recording: Recordings | None = None
-
- try:
- recording = (
- Recordings.select(
- Recordings.path,
- Recordings.start_time,
- )
- .where(
- (
- (frame_time >= Recordings.start_time)
- & (frame_time <= Recordings.end_time)
- )
- )
- .where(Recordings.camera == camera_name)
- .order_by(Recordings.start_time.desc())
- .limit(1)
- .get()
- )
- except DoesNotExist:
- # try again with a rounded frame time as it may be between
- # the rounded segment start time
- frame_time = math.ceil(frame_time)
- try:
- recording = (
- Recordings.select(
- Recordings.path,
- Recordings.start_time,
- )
- .where(
- (
- (frame_time >= Recordings.start_time)
- & (frame_time <= Recordings.end_time)
- )
- )
- .where(Recordings.camera == camera_name)
- .order_by(Recordings.start_time.desc())
- .limit(1)
- .get()
- )
- except DoesNotExist:
- pass
-
- if recording is not None:
- time_in_segment = frame_time - recording.start_time
- codec = "png" if format == "png" else "mjpeg"
- mime_type = "png" if format == "png" else "jpeg"
- config: FrigateConfig = request.app.frigate_config
-
- image_data = get_image_from_recording(
- config.ffmpeg, recording.path, time_in_segment, codec, height
- )
-
- if not image_data:
- return JSONResponse(
- content=(
- {
- "success": False,
- "message": f"Unable to parse frame at time {frame_time}",
- }
- ),
- status_code=404,
- )
- return Response(image_data, headers={"Content-Type": f"image/{mime_type}"})
- else:
- return JSONResponse(
- content={
- "success": False,
- "message": "Recording not found at {}".format(frame_time),
- },
- status_code=404,
- )
-
-
-@router.post(
- "/{camera_name}/plus/{frame_time}", dependencies=[Depends(require_camera_access)]
-)
-async def submit_recording_snapshot_to_plus(
- request: Request, camera_name: str, frame_time: str
-):
- if camera_name not in request.app.frigate_config.cameras:
- return JSONResponse(
- content={"success": False, "message": "Camera not found"},
- status_code=404,
- )
-
- frame_time = float(frame_time)
- recording_query = (
- Recordings.select(
- Recordings.path,
- Recordings.start_time,
- )
- .where(
- (
- (frame_time >= Recordings.start_time)
- & (frame_time <= Recordings.end_time)
- )
- )
- .where(Recordings.camera == camera_name)
- .order_by(Recordings.start_time.desc())
- .limit(1)
- )
-
- try:
- config: FrigateConfig = request.app.frigate_config
- recording: Recordings = recording_query.get()
- time_in_segment = frame_time - recording.start_time
- image_data = get_image_from_recording(
- config.ffmpeg, recording.path, time_in_segment, "png"
- )
-
- if not image_data:
- return JSONResponse(
- content={
- "success": False,
- "message": f"Unable to parse frame at time {frame_time}",
- },
- status_code=404,
- )
-
- nd = cv2.imdecode(np.frombuffer(image_data, dtype=np.int8), cv2.IMREAD_COLOR)
- request.app.frigate_config.plus_api.upload_image(nd, camera_name)
-
- return JSONResponse(
- content={
- "success": True,
- "message": "Successfully submitted image.",
- },
- status_code=200,
- )
- except DoesNotExist:
- return JSONResponse(
- content={
- "success": False,
- "message": "Recording not found at {}".format(frame_time),
- },
- status_code=404,
- )
-
-
-@router.get(
- "/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4",
- dependencies=[Depends(require_camera_access)],
- description="For iOS devices, use the master.m3u8 HLS link instead of clip.mp4. Safari does not reliably process progressive mp4 files.",
-)
-async def recording_clip(
- request: Request,
- camera_name: str,
- start_ts: float,
- end_ts: float,
-):
- def run_download(ffmpeg_cmd: list[str], file_path: str):
- with sp.Popen(
- ffmpeg_cmd,
- stderr=sp.PIPE,
- stdout=sp.PIPE,
- text=False,
- ) as ffmpeg:
- while True:
- data = ffmpeg.stdout.read(8192)
- if data is not None and len(data) > 0:
- yield data
- else:
- if ffmpeg.returncode and ffmpeg.returncode != 0:
- logger.error(
- f"Failed to generate clip, ffmpeg logs: {ffmpeg.stderr.read()}"
- )
- else:
- FilePath(file_path).unlink(missing_ok=True)
- break
-
- recordings = (
- Recordings.select(
- Recordings.path,
- Recordings.start_time,
- Recordings.end_time,
- )
- .where(
- (Recordings.start_time.between(start_ts, end_ts))
- | (Recordings.end_time.between(start_ts, end_ts))
- | ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time))
- )
- .where(Recordings.camera == camera_name)
- .order_by(Recordings.start_time.asc())
- )
-
- if recordings.count() == 0:
- return JSONResponse(
- content={
- "success": False,
- "message": "No recordings found for the specified time range",
- },
- status_code=400,
- )
-
- file_name = sanitize_filename(f"playlist_{camera_name}_{start_ts}-{end_ts}.txt")
- file_path = os.path.join(CACHE_DIR, file_name)
- with open(file_path, "w") as file:
- clip: Recordings
- for clip in recordings:
- file.write(f"file '{clip.path}'\n")
-
- # if this is the starting clip, add an inpoint
- if clip.start_time < start_ts:
- file.write(f"inpoint {int(start_ts - clip.start_time)}\n")
-
- # if this is the ending clip, add an outpoint
- if clip.end_time > end_ts:
- file.write(f"outpoint {int(end_ts - clip.start_time)}\n")
-
- if len(file_name) > 1000:
- return JSONResponse(
- content={
- "success": False,
- "message": "Filename exceeded max length of 1000",
- },
- status_code=403,
- )
-
- config: FrigateConfig = request.app.frigate_config
-
- ffmpeg_cmd = [
- config.ffmpeg.ffmpeg_path,
- "-hide_banner",
- "-y",
- "-protocol_whitelist",
- "pipe,file",
- "-f",
- "concat",
- "-safe",
- "0",
- "-i",
- file_path,
- "-c",
- "copy",
- "-movflags",
- "frag_keyframe+empty_moov",
- "-f",
- "mp4",
- "pipe:",
- ]
-
- return StreamingResponse(
- run_download(ffmpeg_cmd, file_path),
- media_type="video/mp4",
- )
-
-
-@router.get(
- "/vod/{camera_name}/start/{start_ts}/end/{end_ts}",
- dependencies=[Depends(require_camera_access)],
- description="Returns an HLS playlist for the specified timestamp-range on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
-)
-async def vod_ts(
- camera_name: str,
- start_ts: float,
- end_ts: float,
- force_discontinuity: bool = False,
-):
- logger.debug(
- "VOD: Generating VOD for %s from %s to %s with force_discontinuity=%s",
- camera_name,
- start_ts,
- end_ts,
- force_discontinuity,
- )
- recordings = (
- Recordings.select(
- Recordings.path,
- Recordings.duration,
- Recordings.end_time,
- Recordings.start_time,
- )
- .where(
- Recordings.start_time.between(start_ts, end_ts)
- | Recordings.end_time.between(start_ts, end_ts)
- | ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time))
- )
- .where(Recordings.camera == camera_name)
- .order_by(Recordings.start_time.asc())
- .iterator()
- )
-
- clips = []
- durations = []
- min_duration_ms = 100 # Minimum 100ms to ensure at least one video frame
- max_duration_ms = MAX_SEGMENT_DURATION * 1000
-
- recording: Recordings
- for recording in recordings:
- logger.debug(
- "VOD: processing recording: %s start=%s end=%s duration=%s",
- recording.path,
- recording.start_time,
- recording.end_time,
- recording.duration,
- )
-
- clip = {"type": "source", "path": recording.path}
- duration = int(recording.duration * 1000)
-
- # adjust start offset if start_ts is after recording.start_time
- if start_ts > recording.start_time:
- inpoint = int((start_ts - recording.start_time) * 1000)
- clip["clipFrom"] = inpoint
- duration -= inpoint
- logger.debug(
- "VOD: applied clipFrom %sms to %s",
- inpoint,
- recording.path,
- )
-
- # adjust end if recording.end_time is after end_ts
- if recording.end_time > end_ts:
- duration -= int((recording.end_time - end_ts) * 1000)
-
- if duration < min_duration_ms:
- # skip if the clip has no valid duration (too short to contain frames)
- logger.debug(
- "VOD: skipping recording %s - resulting duration %sms too short",
- recording.path,
- duration,
- )
- continue
-
- if min_duration_ms <= duration < max_duration_ms:
- clip["keyFrameDurations"] = [duration]
- clips.append(clip)
- durations.append(duration)
- logger.debug(
- "VOD: added clip %s duration_ms=%s clipFrom=%s",
- recording.path,
- duration,
- clip.get("clipFrom"),
- )
- else:
- logger.warning(f"Recording clip is missing or empty: {recording.path}")
-
- if not clips:
- logger.error(
- f"No recordings found for {camera_name} during the requested time range"
- )
- return JSONResponse(
- content={
- "success": False,
- "message": "No recordings found.",
- },
- status_code=404,
- )
-
- hour_ago = datetime.now() - timedelta(hours=1)
- return JSONResponse(
- content={
- "cache": hour_ago.timestamp() > start_ts,
- "discontinuity": force_discontinuity,
- "consistentSequenceMediaInfo": True,
- "durations": durations,
- "segment_duration": max(durations),
- "sequences": [{"clips": clips}],
- }
- )
-
-
-@router.get(
- "/vod/{year_month}/{day}/{hour}/{camera_name}",
- dependencies=[Depends(require_camera_access)],
- description="Returns an HLS playlist for the specified date-time on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
-)
-async def vod_hour_no_timezone(year_month: str, day: int, hour: int, camera_name: str):
- """VOD for specific hour. Uses the default timezone (UTC)."""
- return await vod_hour(
- year_month, day, hour, camera_name, get_localzone_name().replace("/", ",")
- )
-
-
-@router.get(
- "/vod/{year_month}/{day}/{hour}/{camera_name}/{tz_name}",
- dependencies=[Depends(require_camera_access)],
- description="Returns an HLS playlist for the specified date-time (with timezone) on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
-)
-async def vod_hour(
- year_month: str, day: int, hour: int, camera_name: str, tz_name: str
-):
- parts = year_month.split("-")
- start_date = (
- datetime(int(parts[0]), int(parts[1]), day, hour, tzinfo=timezone.utc)
- - datetime.now(pytz.timezone(tz_name.replace(",", "/"))).utcoffset()
- )
- end_date = start_date + timedelta(hours=1) - timedelta(milliseconds=1)
- start_ts = start_date.timestamp()
- end_ts = end_date.timestamp()
-
- return await vod_ts(camera_name, start_ts, end_ts)
-
-
-@router.get(
- "/vod/event/{event_id}",
- dependencies=[Depends(allow_any_authenticated())],
- description="Returns an HLS playlist for the specified object. Append /master.m3u8 or /index.m3u8 for HLS playback.",
-)
-async def vod_event(
- request: Request,
- event_id: str,
- padding: int = Query(0, description="Padding to apply to the vod."),
-):
- try:
- event: Event = Event.get(Event.id == event_id)
- except DoesNotExist:
- logger.error(f"Event not found: {event_id}")
- return JSONResponse(
- content={
- "success": False,
- "message": "Event not found.",
- },
- status_code=404,
- )
-
- await require_camera_access(event.camera, request=request)
-
- end_ts = (
- datetime.now().timestamp()
- if event.end_time is None
- else (event.end_time + padding)
- )
- vod_response = await vod_ts(event.camera, event.start_time - padding, end_ts)
-
- # If the recordings are not found and the event started more than 5 minutes ago, set has_clip to false
- if (
- event.start_time < datetime.now().timestamp() - 300
- and type(vod_response) is tuple
- and len(vod_response) == 2
- and vod_response[1] == 404
- ):
- Event.update(has_clip=False).where(Event.id == event_id).execute()
-
- return vod_response
-
-
-@router.get(
- "/vod/clip/{camera_name}/start/{start_ts}/end/{end_ts}",
- dependencies=[Depends(require_camera_access)],
- description="Returns an HLS playlist for a timestamp range with HLS discontinuity enabled. Append /master.m3u8 or /index.m3u8 for HLS playback.",
-)
-async def vod_clip(
- camera_name: str,
- start_ts: float,
- end_ts: float,
-):
- return await vod_ts(camera_name, start_ts, end_ts, force_discontinuity=True)
-
-
-@router.get(
- "/events/{event_id}/snapshot.jpg",
- description="Returns a snapshot image for the specified object id. NOTE: The query params only take affect while the event is in-progress. Once the event has ended the snapshot configuration is used.",
-)
-async def event_snapshot(
- request: Request,
- event_id: str,
- params: MediaEventsSnapshotQueryParams = Depends(),
-):
- event_complete = False
- jpg_bytes = None
- frame_time = 0
- try:
- event = Event.get(Event.id == event_id, Event.end_time != None)
- event_complete = True
- await require_camera_access(event.camera, request=request)
- if not event.has_snapshot:
- return JSONResponse(
- content={"success": False, "message": "Snapshot not available"},
- status_code=404,
- )
- # read snapshot from disk
- with open(
- os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg"), "rb"
- ) as image_file:
- jpg_bytes = image_file.read()
- except DoesNotExist:
- # see if the object is currently being tracked
- try:
- camera_states: list[CameraState] = (
- request.app.detected_frames_processor.camera_states.values()
- )
- for camera_state in camera_states:
- if event_id in camera_state.tracked_objects:
- tracked_obj = camera_state.tracked_objects.get(event_id)
- if tracked_obj is not None:
- jpg_bytes, frame_time = tracked_obj.get_img_bytes(
- ext="jpg",
- timestamp=params.timestamp,
- bounding_box=params.bbox,
- crop=params.crop,
- height=params.height,
- quality=params.quality,
- )
- await require_camera_access(camera_state.name, request=request)
- except Exception:
- return JSONResponse(
- content={"success": False, "message": "Ongoing event not found"},
- status_code=404,
- )
- except Exception:
- return JSONResponse(
- content={"success": False, "message": "Unknown error occurred"},
- status_code=404,
- )
-
- if jpg_bytes is None:
- return JSONResponse(
- content={"success": False, "message": "Live frame not available"},
- status_code=404,
- )
-
- headers = {
- "Content-Type": "image/jpeg",
- "Cache-Control": "private, max-age=31536000" if event_complete else "no-store",
- "X-Frame-Time": str(frame_time),
- }
-
- if params.download:
- headers["Content-Disposition"] = f"attachment; filename=snapshot-{event_id}.jpg"
-
- return Response(
- jpg_bytes,
- media_type="image/jpeg",
- headers=headers,
- )
-
-
-@router.get(
- "/events/{event_id}/thumbnail.{extension}",
- dependencies=[Depends(require_camera_access)],
-)
-async def event_thumbnail(
- request: Request,
- event_id: str,
- extension: Extension,
- max_cache_age: int = Query(
- 2592000, description="Max cache age in seconds. Default 30 days in seconds."
- ),
- format: str = Query(default="ios", enum=["ios", "android"]),
-):
- thumbnail_bytes = None
- event_complete = False
- try:
- event: Event = Event.get(Event.id == event_id)
- await require_camera_access(event.camera, request=request)
- if event.end_time is not None:
- event_complete = True
-
- thumbnail_bytes = get_event_thumbnail_bytes(event)
- except DoesNotExist:
- thumbnail_bytes = None
-
- if thumbnail_bytes is None:
- # see if the object is currently being tracked
- try:
- camera_states = request.app.detected_frames_processor.camera_states.values()
- for camera_state in camera_states:
- if event_id in camera_state.tracked_objects:
- tracked_obj = camera_state.tracked_objects.get(event_id)
- if tracked_obj is not None:
- thumbnail_bytes = tracked_obj.get_thumbnail(extension.value)
- except Exception:
- return JSONResponse(
- content={"success": False, "message": "Event not found"},
- status_code=404,
- )
-
- if thumbnail_bytes is None:
- return JSONResponse(
- content={"success": False, "message": "Event not found"},
- status_code=404,
- )
-
- # android notifications prefer a 2:1 ratio
- if format == "android":
- img_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8)
- img = cv2.imdecode(img_as_np, flags=1)
- thumbnail = cv2.copyMakeBorder(
- img,
- 0,
- 0,
- int(img.shape[1] * 0.5),
- int(img.shape[1] * 0.5),
- cv2.BORDER_CONSTANT,
- (0, 0, 0),
- )
-
- quality_params = None
- if extension in (Extension.jpg, Extension.jpeg):
- quality_params = [int(cv2.IMWRITE_JPEG_QUALITY), 70]
- elif extension == Extension.webp:
- quality_params = [int(cv2.IMWRITE_WEBP_QUALITY), 60]
-
- _, img = cv2.imencode(f".{extension.value}", thumbnail, quality_params)
- thumbnail_bytes = img.tobytes()
-
- return Response(
- thumbnail_bytes,
- media_type=extension.get_mime_type(),
- headers={
- "Cache-Control": f"private, max-age={max_cache_age}"
- if event_complete
- else "no-store",
- },
- )
-
-
-@router.get("/{camera_name}/grid.jpg", dependencies=[Depends(require_camera_access)])
-def grid_snapshot(
- request: Request, camera_name: str, color: str = "green", font_scale: float = 0.5
-):
- if camera_name in request.app.frigate_config.cameras:
- detect = request.app.frigate_config.cameras[camera_name].detect
- frame_processor: TrackedObjectProcessor = request.app.detected_frames_processor
- frame = frame_processor.get_current_frame(camera_name, {})
- retry_interval = float(
- request.app.frigate_config.cameras.get(camera_name).ffmpeg.retry_interval
- or 10
- )
-
- if frame is None or datetime.now().timestamp() > (
- frame_processor.get_current_frame_time(camera_name) + retry_interval
- ):
- return JSONResponse(
- content={"success": False, "message": "Unable to get valid frame"},
- status_code=500,
- )
-
- try:
- grid = (
- Regions.select(Regions.grid)
- .where(Regions.camera == camera_name)
- .get()
- .grid
- )
- except DoesNotExist:
- return JSONResponse(
- content={"success": False, "message": "Unable to get region grid"},
- status_code=500,
- )
-
- color_arg = color.lower()
-
- if color_arg == "red":
- draw_color = (0, 0, 255)
- elif color_arg == "blue":
- draw_color = (255, 0, 0)
- elif color_arg == "black":
- draw_color = (0, 0, 0)
- elif color_arg == "white":
- draw_color = (255, 255, 255)
- else:
- draw_color = (0, 255, 0) # green
-
- grid_size = len(grid)
- grid_coef = 1.0 / grid_size
- width = detect.width
- height = detect.height
- for x in range(grid_size):
- for y in range(grid_size):
- cell = grid[x][y]
-
- if len(cell["sizes"]) == 0:
- continue
-
- std_dev = round(cell["std_dev"] * width, 2)
- mean = round(cell["mean"] * width, 2)
- cv2.rectangle(
- frame,
- (int(x * grid_coef * width), int(y * grid_coef * height)),
- (
- int((x + 1) * grid_coef * width),
- int((y + 1) * grid_coef * height),
- ),
- draw_color,
- 2,
- )
- cv2.putText(
- frame,
- f"#: {len(cell['sizes'])}",
- (
- int(x * grid_coef * width + 10),
- int((y * grid_coef + 0.02) * height),
- ),
- cv2.FONT_HERSHEY_SIMPLEX,
- fontScale=font_scale,
- color=draw_color,
- thickness=2,
- )
- cv2.putText(
- frame,
- f"std: {std_dev}",
- (
- int(x * grid_coef * width + 10),
- int((y * grid_coef + 0.05) * height),
- ),
- cv2.FONT_HERSHEY_SIMPLEX,
- fontScale=font_scale,
- color=draw_color,
- thickness=2,
- )
- cv2.putText(
- frame,
- f"avg: {mean}",
- (
- int(x * grid_coef * width + 10),
- int((y * grid_coef + 0.08) * height),
- ),
- cv2.FONT_HERSHEY_SIMPLEX,
- fontScale=font_scale,
- color=draw_color,
- thickness=2,
- )
-
- ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
-
- return Response(
- jpg.tobytes(),
- media_type="image/jpeg",
- headers={"Cache-Control": "no-store"},
- )
- else:
- return JSONResponse(
- content={"success": False, "message": "Camera not found"},
- status_code=404,
- )
-
-
-@router.delete(
- "/{camera_name}/region_grid", dependencies=[Depends(require_role("admin"))]
-)
-def clear_region_grid(request: Request, camera_name: str):
- """Clear the region grid for a camera."""
- if camera_name not in request.app.frigate_config.cameras:
- return JSONResponse(
- content={"success": False, "message": "Camera not found"},
- status_code=404,
- )
-
- Regions.delete().where(Regions.camera == camera_name).execute()
- return JSONResponse(
- content={"success": True, "message": "Region grid cleared"},
- )
-
-
-@router.get(
- "/events/{event_id}/snapshot-clean.webp",
- dependencies=[Depends(require_camera_access)],
-)
-def event_snapshot_clean(request: Request, event_id: str, download: bool = False):
- webp_bytes = None
- try:
- event = Event.get(Event.id == event_id)
- snapshot_config = request.app.frigate_config.cameras[event.camera].snapshots
- if not (snapshot_config.enabled and event.has_snapshot):
- return JSONResponse(
- content={
- "success": False,
- "message": "Snapshots and clean_copy must be enabled in the config",
- },
- status_code=404,
- )
- if event.end_time is None:
- # see if the object is currently being tracked
- try:
- camera_states = (
- request.app.detected_frames_processor.camera_states.values()
- )
- for camera_state in camera_states:
- if event_id in camera_state.tracked_objects:
- tracked_obj = camera_state.tracked_objects.get(event_id)
- if tracked_obj is not None:
- webp_bytes = tracked_obj.get_clean_webp()
- break
- except Exception:
- return JSONResponse(
- content={"success": False, "message": "Event not found"},
- status_code=404,
- )
- elif not event.has_snapshot:
- return JSONResponse(
- content={"success": False, "message": "Snapshot not available"},
- status_code=404,
- )
- except DoesNotExist:
- return JSONResponse(
- content={"success": False, "message": "Event not found"}, status_code=404
- )
- if webp_bytes is None:
- try:
- # webp
- clean_snapshot_path_webp = os.path.join(
- CLIPS_DIR, f"{event.camera}-{event.id}-clean.webp"
- )
- # png (legacy)
- clean_snapshot_path_png = os.path.join(
- CLIPS_DIR, f"{event.camera}-{event.id}-clean.png"
- )
-
- if os.path.exists(clean_snapshot_path_webp):
- with open(clean_snapshot_path_webp, "rb") as image_file:
- webp_bytes = image_file.read()
- elif os.path.exists(clean_snapshot_path_png):
- # convert png to webp and save for future use
- png_image = cv2.imread(clean_snapshot_path_png, cv2.IMREAD_UNCHANGED)
- if png_image is None:
- return JSONResponse(
- content={
- "success": False,
- "message": "Invalid png snapshot",
- },
- status_code=400,
- )
-
- ret, webp_data = cv2.imencode(
- ".webp", png_image, [int(cv2.IMWRITE_WEBP_QUALITY), 60]
- )
- if not ret:
- return JSONResponse(
- content={
- "success": False,
- "message": "Unable to convert png to webp",
- },
- status_code=400,
- )
-
- webp_bytes = webp_data.tobytes()
-
- # save the converted webp for future requests
- try:
- with open(clean_snapshot_path_webp, "wb") as f:
- f.write(webp_bytes)
- except Exception as e:
- logger.warning(
- f"Failed to save converted webp for event {event.id}: {e}"
- )
- # continue since we now have the data to return
- else:
- return JSONResponse(
- content={
- "success": False,
- "message": "Clean snapshot not available",
- },
- status_code=404,
- )
- except Exception:
- logger.error(f"Unable to load clean snapshot for event: {event.id}")
- return JSONResponse(
- content={
- "success": False,
- "message": "Unable to load clean snapshot for event",
- },
- status_code=400,
- )
-
- headers = {
- "Content-Type": "image/webp",
- "Cache-Control": "private, max-age=31536000",
- }
-
- if download:
- headers["Content-Disposition"] = (
- f"attachment; filename=snapshot-{event_id}-clean.webp"
- )
-
- return Response(
- webp_bytes,
- media_type="image/webp",
- headers=headers,
- )
-
-
-@router.get(
- "/events/{event_id}/clip.mp4", dependencies=[Depends(require_camera_access)]
-)
-async def event_clip(
- request: Request,
- event_id: str,
- padding: int = Query(0, description="Padding to apply to clip."),
-):
- try:
- event: Event = Event.get(Event.id == event_id)
- except DoesNotExist:
- return JSONResponse(
- content={"success": False, "message": "Event not found"}, status_code=404
- )
-
- if not event.has_clip:
- return JSONResponse(
- content={"success": False, "message": "Clip not available"}, status_code=404
- )
-
- end_ts = (
- datetime.now().timestamp()
- if event.end_time is None
- else event.end_time + padding
- )
- return await recording_clip(
- request, event.camera, event.start_time - padding, end_ts
- )
-
-
-@router.get(
- "/events/{event_id}/preview.gif", dependencies=[Depends(require_camera_access)]
-)
-def event_preview(request: Request, event_id: str):
- try:
- event: Event = Event.get(Event.id == event_id)
- except DoesNotExist:
- return JSONResponse(
- content={"success": False, "message": "Event not found"}, status_code=404
- )
-
- start_ts = event.start_time
- end_ts = start_ts + (
- min(event.end_time - event.start_time, 20) if event.end_time else 20
- )
- return preview_gif(request, event.camera, start_ts, end_ts)
-
-
-@router.get(
- "/{camera_name}/start/{start_ts}/end/{end_ts}/preview.gif",
- dependencies=[Depends(require_camera_access)],
-)
-def preview_gif(
- request: Request,
- camera_name: str,
- start_ts: float,
- end_ts: float,
- max_cache_age: int = Query(
- 2592000, description="Max cache age in seconds. Default 30 days in seconds."
- ),
-):
- if datetime.fromtimestamp(start_ts) < datetime.now().replace(minute=0, second=0):
- # has preview mp4
- preview: Previews = (
- Previews.select(
- Previews.camera,
- Previews.path,
- Previews.duration,
- Previews.start_time,
- Previews.end_time,
- )
- .where(
- Previews.start_time.between(start_ts, end_ts)
- | Previews.end_time.between(start_ts, end_ts)
- | ((start_ts > Previews.start_time) & (end_ts < Previews.end_time))
- )
- .where(Previews.camera == camera_name)
- .limit(1)
- .get()
- )
-
- if not preview:
- return JSONResponse(
- content={"success": False, "message": "Preview not found"},
- status_code=404,
- )
-
- diff = start_ts - preview.start_time
- minutes = int(diff / 60)
- seconds = int(diff % 60)
- config: FrigateConfig = request.app.frigate_config
- ffmpeg_cmd = [
- config.ffmpeg.ffmpeg_path,
- "-hide_banner",
- "-loglevel",
- "warning",
- "-ss",
- f"00:{minutes}:{seconds}",
- "-t",
- f"{end_ts - start_ts}",
- "-i",
- preview.path,
- "-r",
- "8",
- "-vf",
- "setpts=0.12*PTS",
- "-loop",
- "0",
- "-c:v",
- "gif",
- "-f",
- "gif",
- "-",
- ]
-
- process = sp.run(
- ffmpeg_cmd,
- capture_output=True,
- )
-
- if process.returncode != 0:
- logger.error(process.stderr)
- return JSONResponse(
- content={"success": False, "message": "Unable to create preview gif"},
- status_code=500,
- )
-
- gif_bytes = process.stdout
- else:
- # need to generate from existing images
- preview_dir = os.path.join(CACHE_DIR, "preview_frames")
-
- if not os.path.isdir(preview_dir):
- return JSONResponse(
- content={"success": False, "message": "Preview not found"},
- status_code=404,
- )
-
- file_start = f"preview_{camera_name}"
- start_file = f"{file_start}-{start_ts}.{PREVIEW_FRAME_TYPE}"
- end_file = f"{file_start}-{end_ts}.{PREVIEW_FRAME_TYPE}"
- selected_previews = []
-
- for file in sorted(os.listdir(preview_dir)):
- if not file.startswith(file_start):
- continue
-
- if file < start_file:
- continue
-
- if file > end_file:
- break
-
- selected_previews.append(f"file '{os.path.join(preview_dir, file)}'")
- selected_previews.append("duration 0.12")
-
- if not selected_previews:
- return JSONResponse(
- content={"success": False, "message": "Preview not found"},
- status_code=404,
- )
-
- last_file = selected_previews[-2]
- selected_previews.append(last_file)
- config: FrigateConfig = request.app.frigate_config
-
- ffmpeg_cmd = [
- config.ffmpeg.ffmpeg_path,
- "-hide_banner",
- "-loglevel",
- "warning",
- "-f",
- "concat",
- "-y",
- "-protocol_whitelist",
- "pipe,file",
- "-safe",
- "0",
- "-i",
- "/dev/stdin",
- "-loop",
- "0",
- "-c:v",
- "gif",
- "-f",
- "gif",
- "-",
- ]
-
- process = sp.run(
- ffmpeg_cmd,
- input=str.encode("\n".join(selected_previews)),
- capture_output=True,
- )
-
- if process.returncode != 0:
- logger.error(process.stderr)
- return JSONResponse(
- content={"success": False, "message": "Unable to create preview gif"},
- status_code=500,
- )
-
- gif_bytes = process.stdout
-
- return Response(
- gif_bytes,
- media_type="image/gif",
- headers={
- "Cache-Control": f"private, max-age={max_cache_age}",
- "Content-Type": "image/gif",
- },
- )
-
-
-@router.get(
- "/{camera_name}/start/{start_ts}/end/{end_ts}/preview.mp4",
- dependencies=[Depends(require_camera_access)],
-)
-def preview_mp4(
- request: Request,
- camera_name: str,
- start_ts: float,
- end_ts: float,
- max_cache_age: int = Query(
- 604800, description="Max cache age in seconds. Default 7 days in seconds."
- ),
-):
- file_name = sanitize_filename(f"preview_{camera_name}_{start_ts}-{end_ts}.mp4")
-
- if len(file_name) > 1000:
- return JSONResponse(
- content=(
- {
- "success": False,
- "message": "Filename exceeded max length of 1000 characters.",
- }
- ),
- status_code=403,
- )
-
- path = os.path.join(CACHE_DIR, file_name)
-
- if datetime.fromtimestamp(start_ts) < datetime.now().replace(minute=0, second=0):
- # has preview mp4
- try:
- preview: Previews = (
- Previews.select(
- Previews.camera,
- Previews.path,
- Previews.duration,
- Previews.start_time,
- Previews.end_time,
- )
- .where(
- Previews.start_time.between(start_ts, end_ts)
- | Previews.end_time.between(start_ts, end_ts)
- | ((start_ts > Previews.start_time) & (end_ts < Previews.end_time))
- )
- .where(Previews.camera == camera_name)
- .limit(1)
- .get()
- )
- except DoesNotExist:
- preview = None
-
- if not preview:
- return JSONResponse(
- content={"success": False, "message": "Preview not found"},
- status_code=404,
- )
-
- diff = start_ts - preview.start_time
- minutes = int(diff / 60)
- seconds = int(diff % 60)
- config: FrigateConfig = request.app.frigate_config
- ffmpeg_cmd = [
- config.ffmpeg.ffmpeg_path,
- "-hide_banner",
- "-loglevel",
- "warning",
- "-y",
- "-ss",
- f"00:{minutes}:{seconds}",
- "-t",
- f"{end_ts - start_ts}",
- "-i",
- preview.path,
- "-r",
- "8",
- "-vf",
- "setpts=0.12*PTS",
- "-c:v",
- "libx264",
- "-movflags",
- "+faststart",
- path,
- ]
-
- process = sp.run(
- ffmpeg_cmd,
- capture_output=True,
- )
-
- if process.returncode != 0:
- logger.error(process.stderr)
- return JSONResponse(
- content={"success": False, "message": "Unable to create preview gif"},
- status_code=500,
- )
-
- else:
- # need to generate from existing images
- preview_dir = os.path.join(CACHE_DIR, "preview_frames")
-
- if not os.path.isdir(preview_dir):
- return JSONResponse(
- content={"success": False, "message": "Preview not found"},
- status_code=404,
- )
-
- file_start = f"preview_{camera_name}"
- start_file = f"{file_start}-{start_ts}.{PREVIEW_FRAME_TYPE}"
- end_file = f"{file_start}-{end_ts}.{PREVIEW_FRAME_TYPE}"
- selected_previews = []
-
- for file in sorted(os.listdir(preview_dir)):
- if not file.startswith(file_start):
- continue
-
- if file < start_file:
- continue
-
- if file > end_file:
- break
-
- selected_previews.append(f"file '{os.path.join(preview_dir, file)}'")
- selected_previews.append("duration 0.12")
-
- if not selected_previews:
- return JSONResponse(
- content={"success": False, "message": "Preview not found"},
- status_code=404,
- )
-
- last_file = selected_previews[-2]
- selected_previews.append(last_file)
- config: FrigateConfig = request.app.frigate_config
-
- ffmpeg_cmd = [
- config.ffmpeg.ffmpeg_path,
- "-hide_banner",
- "-loglevel",
- "warning",
- "-f",
- "concat",
- "-y",
- "-protocol_whitelist",
- "pipe,file",
- "-safe",
- "0",
- "-i",
- "/dev/stdin",
- "-c:v",
- "libx264",
- "-movflags",
- "+faststart",
- path,
- ]
-
- process = sp.run(
- ffmpeg_cmd,
- input=str.encode("\n".join(selected_previews)),
- capture_output=True,
- )
-
- if process.returncode != 0:
- logger.error(process.stderr)
- return JSONResponse(
- content={"success": False, "message": "Unable to create preview gif"},
- status_code=500,
- )
-
- headers = {
- "Content-Description": "File Transfer",
- "Cache-Control": f"private, max-age={max_cache_age}",
- "Content-Type": "video/mp4",
- "Content-Length": str(os.path.getsize(path)),
- # nginx: https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers
- "X-Accel-Redirect": f"/cache/{file_name}",
- }
-
- return FileResponse(
- path,
- media_type="video/mp4",
- filename=file_name,
- headers=headers,
- )
-
-
-@router.get("/review/{event_id}/preview", dependencies=[Depends(require_camera_access)])
-def review_preview(
- request: Request,
- event_id: str,
- format: str = Query(default="gif", enum=["gif", "mp4"]),
-):
- try:
- review: ReviewSegment = ReviewSegment.get(ReviewSegment.id == event_id)
- except DoesNotExist:
- return JSONResponse(
- content=({"success": False, "message": "Review segment not found"}),
- status_code=404,
- )
-
- padding = 8
- start_ts = review.start_time - padding
- end_ts = (
- review.end_time + padding if review.end_time else datetime.now().timestamp()
- )
-
- if format == "gif":
- return preview_gif(request, review.camera, start_ts, end_ts)
- else:
- return preview_mp4(request, review.camera, start_ts, end_ts)
-
-
-@router.get(
- "/preview/{file_name}/thumbnail.jpg", dependencies=[Depends(require_camera_access)]
-)
-@router.get(
- "/preview/{file_name}/thumbnail.webp", dependencies=[Depends(require_camera_access)]
-)
-def preview_thumbnail(file_name: str):
- """Get a thumbnail from the cached preview frames."""
- if len(file_name) > 1000:
- return JSONResponse(
- content=(
- {"success": False, "message": "Filename exceeded max length of 1000"}
- ),
- status_code=403,
- )
-
- safe_file_name_current = sanitize_filename(file_name)
- preview_dir = os.path.join(CACHE_DIR, "preview_frames")
-
- try:
- with open(
- os.path.join(preview_dir, safe_file_name_current), "rb"
- ) as image_file:
- jpg_bytes = image_file.read()
- except FileNotFoundError:
- return JSONResponse(
- content=({"success": False, "message": "Image file not found"}),
- status_code=404,
- )
-
- return Response(
- jpg_bytes,
- media_type="image/webp",
- headers={
- "Content-Type": "image/webp",
- "Cache-Control": "private, max-age=31536000",
- },
- )
-
-
-####################### dynamic routes ###########################
-
-
-@router.get(
- "/{camera_name}/{label}/best.jpg", dependencies=[Depends(require_camera_access)]
-)
-@router.get(
- "/{camera_name}/{label}/thumbnail.jpg",
- dependencies=[Depends(require_camera_access)],
-)
-async def label_thumbnail(request: Request, camera_name: str, label: str):
- label = unquote(label)
- event_query = Event.select(fn.MAX(Event.id)).where(Event.camera == camera_name)
- if label != "any":
- event_query = event_query.where(Event.label == label)
-
- try:
- event_id = event_query.scalar()
-
- return await event_thumbnail(request, event_id, Extension.jpg, 60)
- except DoesNotExist:
- frame = np.zeros((175, 175, 3), np.uint8)
- ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
-
- return Response(
- jpg.tobytes(),
- media_type="image/jpeg",
- headers={"Cache-Control": "no-store"},
- )
-
-
-@router.get(
- "/{camera_name}/{label}/clip.mp4", dependencies=[Depends(require_camera_access)]
-)
-async def label_clip(request: Request, camera_name: str, label: str):
- label = unquote(label)
- event_query = Event.select(fn.MAX(Event.id)).where(
- Event.camera == camera_name, Event.has_clip == True
- )
- if label != "any":
- event_query = event_query.where(Event.label == label)
-
- try:
- event = event_query.get()
-
- return await event_clip(request, event.id, 0)
- except DoesNotExist:
- return JSONResponse(
- content={"success": False, "message": "Event not found"}, status_code=404
- )
-
-
-@router.get(
- "/{camera_name}/{label}/snapshot.jpg", dependencies=[Depends(require_camera_access)]
-)
-async def label_snapshot(request: Request, camera_name: str, label: str):
- """Returns the snapshot image from the latest event for the given camera and label combo"""
- label = unquote(label)
- if label == "any":
- event_query = (
- Event.select(Event.id)
- .where(Event.camera == camera_name)
- .where(Event.has_snapshot == True)
- .order_by(Event.start_time.desc())
- )
- else:
- event_query = (
- Event.select(Event.id)
- .where(Event.camera == camera_name)
- .where(Event.label == label)
- .where(Event.has_snapshot == True)
- .order_by(Event.start_time.desc())
- )
-
- try:
- event: Event = event_query.get()
- return await event_snapshot(request, event.id, MediaEventsSnapshotQueryParams())
- except DoesNotExist:
- frame = np.zeros((720, 1280, 3), np.uint8)
- _, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
-
- return Response(
- jpg.tobytes(),
- media_type="image/jpeg",
- )
+"""Image and video apis."""
+
+import asyncio
+import glob
+import logging
+import math
+import os
+import subprocess as sp
+import time
+from datetime import datetime, timedelta, timezone
+from pathlib import Path as FilePath
+from typing import Any
+from urllib.parse import unquote
+
+import cv2
+import numpy as np
+import pytz
+from fastapi import APIRouter, Depends, Path, Query, Request, Response
+from fastapi.responses import FileResponse, JSONResponse, StreamingResponse
+from pathvalidate import sanitize_filename
+from peewee import DoesNotExist, fn
+from tzlocal import get_localzone_name
+
+from frigate.api.auth import (
+ allow_any_authenticated,
+ require_camera_access,
+ require_role,
+)
+from frigate.api.defs.query.media_query_parameters import (
+ Extension,
+ MediaEventsSnapshotQueryParams,
+ MediaLatestFrameQueryParams,
+ MediaMjpegFeedQueryParams,
+)
+from frigate.api.defs.tags import Tags
+from frigate.camera.state import CameraState
+from frigate.config import FrigateConfig
+from frigate.const import (
+ CACHE_DIR,
+ CLIPS_DIR,
+ INSTALL_DIR,
+ MAX_SEGMENT_DURATION,
+ PREVIEW_FRAME_TYPE,
+)
+from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
+from frigate.output.preview import get_most_recent_preview_frame
+from frigate.track.object_processing import TrackedObjectProcessor
+from frigate.util.file import get_event_thumbnail_bytes
+from frigate.util.image import get_image_from_recording
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter(tags=[Tags.media])
+
+
+@router.get("/{camera_name}", dependencies=[Depends(require_camera_access)])
+async def mjpeg_feed(
+ request: Request,
+ camera_name: str,
+ params: MediaMjpegFeedQueryParams = Depends(),
+):
+ draw_options = {
+ "bounding_boxes": params.bbox,
+ "timestamp": params.timestamp,
+ "zones": params.zones,
+ "mask": params.mask,
+ "motion_boxes": params.motion,
+ "regions": params.regions,
+ }
+ if camera_name in request.app.frigate_config.cameras:
+ # return a multipart response
+ return StreamingResponse(
+ imagestream(
+ request.app.detected_frames_processor,
+ camera_name,
+ params.fps,
+ params.height,
+ draw_options,
+ ),
+ media_type="multipart/x-mixed-replace;boundary=frame",
+ )
+ else:
+ return JSONResponse(
+ content={"success": False, "message": "Camera not found"},
+ status_code=404,
+ )
+
+
+def imagestream(
+ detected_frames_processor: TrackedObjectProcessor,
+ camera_name: str,
+ fps: int,
+ height: int,
+ draw_options: dict[str, Any],
+):
+ while True:
+ # max out at specified FPS
+ time.sleep(1 / fps)
+ frame = detected_frames_processor.get_current_frame(camera_name, draw_options)
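+ # a missing frame falls back to a black 16:9 placeholder so the stream keeps flowing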
+ if frame is None:
+ frame = np.zeros((height, int(height * 16 / 9), 3), np.uint8)
+
+ width = int(height * frame.shape[1] / frame.shape[0])
+ frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_LINEAR)
+
+ ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
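+ # multipart/x-mixed-replace framing: each part is one JPEG delimited by --frame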
+ yield (
+ b"--frame\r\n"
+ b"Content-Type: image/jpeg\r\n\r\n" + bytearray(jpg.tobytes()) + b"\r\n\r\n"
+ )
+
+
+@router.get("/{camera_name}/ptz/info", dependencies=[Depends(require_camera_access)])
+async def camera_ptz_info(request: Request, camera_name: str):
+ if camera_name in request.app.frigate_config.cameras:
+ # Schedule get_camera_info in the OnvifController's event loop
+ future = asyncio.run_coroutine_threadsafe(
+ request.app.onvif.get_camera_info(camera_name), request.app.onvif.loop
+ )
+ result = future.result()
+ return JSONResponse(content=result)
+ else:
+ return JSONResponse(
+ content={"success": False, "message": "Camera not found"},
+ status_code=404,
+ )
+
+
+@router.get(
+ "/{camera_name}/latest.{extension}",
+ dependencies=[Depends(require_camera_access)],
+ description="Returns the latest frame from the specified camera in the requested format (jpg, png, webp). Falls back to preview frames if the camera is offline.",
+)
+async def latest_frame(
+ request: Request,
+ camera_name: str,
+ extension: Extension,
+ params: MediaLatestFrameQueryParams = Depends(),
+):
+ frame_processor: TrackedObjectProcessor = request.app.detected_frames_processor
+ draw_options = {
+ "bounding_boxes": params.bbox,
+ "timestamp": params.timestamp,
+ "zones": params.zones,
+ "mask": params.mask,
+ "motion_boxes": params.motion,
+ "paths": params.paths,
+ "regions": params.regions,
+ }
+ quality = params.quality
+
+ if extension == Extension.png:
+ quality_params = None
+ elif extension == Extension.webp:
+ quality_params = [int(cv2.IMWRITE_WEBP_QUALITY), quality]
+ else: # jpg or jpeg
+ quality_params = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
+
+ if camera_name in request.app.frigate_config.cameras:
+ frame = frame_processor.get_current_frame(camera_name, draw_options)
+ retry_interval = float(
+ request.app.frigate_config.cameras.get(camera_name).ffmpeg.retry_interval
+ or 10
+ )
+
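+ # treat the camera as offline when its newest frame is older than the retry interval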
+ is_offline = False
+ if frame is None or datetime.now().timestamp() > (
+ frame_processor.get_current_frame_time(camera_name) + retry_interval
+ ):
+ last_frame_time = frame_processor.get_current_frame_time(camera_name)
+ preview_path = get_most_recent_preview_frame(
+ camera_name, before=last_frame_time
+ )
+
+ if preview_path:
+ logger.debug(f"Using most recent preview frame for {camera_name}")
+ frame = cv2.imread(preview_path, cv2.IMREAD_UNCHANGED)
+
+ if frame is not None:
+ is_offline = True
+
+ if frame is None or not is_offline:
+ logger.debug(
+ f"No live or preview frame available for {camera_name}. Using error image."
+ )
+ if request.app.camera_error_image is None:
+ error_image = glob.glob(
+ os.path.join(INSTALL_DIR, "frigate/images/camera-error.jpg")
+ )
+
+ if len(error_image) > 0:
+ request.app.camera_error_image = cv2.imread(
+ error_image[0], cv2.IMREAD_UNCHANGED
+ )
+
+ frame = request.app.camera_error_image
+
+ if frame is None:
+ return JSONResponse(
+ content={"success": False, "message": "Unable to get valid frame"},
+ status_code=500,
+ )
+
+ # only compute the output size once a frame is known to exist
+ height = int(params.height or str(frame.shape[0]))
+ width = int(height * frame.shape[1] / frame.shape[0])
+
+ if height < 1 or width < 1:
+ return JSONResponse(
+ content="Invalid height / width requested :: {} / {}".format(
+ height, width
+ ),
+ status_code=400,
+ )
+
+ frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
+
+ _, img = cv2.imencode(f".{extension.value}", frame, quality_params)
+
+ headers = {
+ "Cache-Control": "no-store" if not params.store else "private, max-age=60",
+ }
+
+ if is_offline:
+ headers["X-Frigate-Offline"] = "true"
+
+ return Response(
+ content=img.tobytes(),
+ media_type=extension.get_mime_type(),
+ headers=headers,
+ )
+ elif (
+ camera_name == "birdseye"
+ and request.app.frigate_config.birdseye.enabled
+ and request.app.frigate_config.birdseye.restream
+ ):
+ frame = cv2.cvtColor(
+ frame_processor.get_current_frame(camera_name),
+ cv2.COLOR_YUV2BGR_I420,
+ )
+
+ height = int(params.height or str(frame.shape[0]))
+ width = int(height * frame.shape[1] / frame.shape[0])
+
+ frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
+
+ _, img = cv2.imencode(f".{extension.value}", frame, quality_params)
+ return Response(
+ content=img.tobytes(),
+ media_type=extension.get_mime_type(),
+ headers={
+ "Cache-Control": "no-store"
+ if not params.store
+ else "private, max-age=60",
+ },
+ )
+ else:
+ return JSONResponse(
+ content={"success": False, "message": "Camera not found"},
+ status_code=404,
+ )
+
+
+@router.get(
+ "/{camera_name}/recordings/{frame_time}/snapshot.{format}",
+ dependencies=[Depends(require_camera_access)],
+)
+async def get_snapshot_from_recording(
+ request: Request,
+ camera_name: str,
+ frame_time: float,
+ format: str = Path(enum=["png", "jpg"]),
+ height: int = None,
+):
+ if camera_name not in request.app.frigate_config.cameras:
+ return JSONResponse(
+ content={"success": False, "message": "Camera not found"},
+ status_code=404,
+ )
+ recording: Recordings | None = None
+
+ try:
+ recording = (
+ Recordings.select(
+ Recordings.path,
+ Recordings.start_time,
+ )
+ .where(
+ (
+ (frame_time >= Recordings.start_time)
+ & (frame_time <= Recordings.end_time)
+ )
+ )
+ .where(Recordings.camera == camera_name)
+ .order_by(Recordings.start_time.desc())
+ .limit(1)
+ .get()
+ )
+ except DoesNotExist:
+ # try again with a rounded frame time as it may be between
+ # the rounded segment start time
+ frame_time = math.ceil(frame_time)
+ try:
+ recording = (
+ Recordings.select(
+ Recordings.path,
+ Recordings.start_time,
+ )
+ .where(
+ (
+ (frame_time >= Recordings.start_time)
+ & (frame_time <= Recordings.end_time)
+ )
+ )
+ .where(Recordings.camera == camera_name)
+ .order_by(Recordings.start_time.desc())
+ .limit(1)
+ .get()
+ )
+ except DoesNotExist:
+ pass
+
+ if recording is not None:
+ time_in_segment = frame_time - recording.start_time
+ codec = "png" if format == "png" else "mjpeg"
+ mime_type = "png" if format == "png" else "jpeg"
+ config: FrigateConfig = request.app.frigate_config
+
+ image_data = get_image_from_recording(
+ config.ffmpeg, recording.path, time_in_segment, codec, height
+ )
+
+ if not image_data:
+ return JSONResponse(
+ content=(
+ {
+ "success": False,
+ "message": f"Unable to parse frame at time {frame_time}",
+ }
+ ),
+ status_code=404,
+ )
+ return Response(image_data, headers={"Content-Type": f"image/{mime_type}"})
+ else:
+ return JSONResponse(
+ content={
+ "success": False,
+ "message": "Recording not found at {}".format(frame_time),
+ },
+ status_code=404,
+ )
+
+
+@router.post(
+ "/{camera_name}/plus/{frame_time}", dependencies=[Depends(require_camera_access)]
+)
+async def submit_recording_snapshot_to_plus(
+ request: Request, camera_name: str, frame_time: str
+):
+ if camera_name not in request.app.frigate_config.cameras:
+ return JSONResponse(
+ content={"success": False, "message": "Camera not found"},
+ status_code=404,
+ )
+
+ frame_time = float(frame_time)
+ recording_query = (
+ Recordings.select(
+ Recordings.path,
+ Recordings.start_time,
+ )
+ .where(
+ (
+ (frame_time >= Recordings.start_time)
+ & (frame_time <= Recordings.end_time)
+ )
+ )
+ .where(Recordings.camera == camera_name)
+ .order_by(Recordings.start_time.desc())
+ .limit(1)
+ )
+
+ try:
+ config: FrigateConfig = request.app.frigate_config
+ recording: Recordings = recording_query.get()
+ time_in_segment = frame_time - recording.start_time
+ image_data = get_image_from_recording(
+ config.ffmpeg, recording.path, time_in_segment, "png"
+ )
+
+ if not image_data:
+ return JSONResponse(
+ content={
+ "success": False,
+ "message": f"Unable to parse frame at time {frame_time}",
+ },
+ status_code=404,
+ )
+
+ # decode the raw image bytes as an unsigned byte buffer for OpenCV
+ nd = cv2.imdecode(np.frombuffer(image_data, dtype=np.uint8), cv2.IMREAD_COLOR)
+ request.app.frigate_config.plus_api.upload_image(nd, camera_name)
+
+ return JSONResponse(
+ content={
+ "success": True,
+ "message": "Successfully submitted image.",
+ },
+ status_code=200,
+ )
+ except DoesNotExist:
+ return JSONResponse(
+ content={
+ "success": False,
+ "message": "Recording not found at {}".format(frame_time),
+ },
+ status_code=404,
+ )
+
+
+@router.get(
+ "/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4",
+ dependencies=[Depends(require_camera_access)],
+ description="For iOS devices, use the master.m3u8 HLS link instead of clip.mp4. Safari does not reliably process progressive mp4 files.",
+)
+async def recording_clip(
+ request: Request,
+ camera_name: str,
+ start_ts: float,
+ end_ts: float,
+ variant: str = Query("main", description="Recording variant to use for playback."),
+):
+ def run_download(ffmpeg_cmd: list[str], file_path: str):
+ with sp.Popen(
+ ffmpeg_cmd,
+ stderr=sp.PIPE,
+ stdout=sp.PIPE,
+ text=False,
+ ) as ffmpeg:
+ while True:
+ data = ffmpeg.stdout.read(8192)
+ if data is not None and len(data) > 0:
+ yield data
+                else:
+                    # stdout hit EOF; drain stderr and reap the process so
+                    # returncode is populated before it is checked
+                    stderr_output = ffmpeg.stderr.read()
+                    ffmpeg.wait()
+                    if ffmpeg.returncode != 0:
+                        logger.error(
+                            f"Failed to generate clip, ffmpeg logs: {stderr_output}"
+                        )
+                    else:
+                        FilePath(file_path).unlink(missing_ok=True)
+                    break
+
+ recordings = (
+ Recordings.select(
+ Recordings.path,
+ Recordings.start_time,
+ Recordings.end_time,
+ )
+ .where(
+ (Recordings.start_time.between(start_ts, end_ts))
+ | (Recordings.end_time.between(start_ts, end_ts))
+ | ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time))
+ )
+ .where(Recordings.camera == camera_name)
+ .where(Recordings.variant == variant)
+ .order_by(Recordings.start_time.asc())
+ )
+
+ if recordings.count() == 0:
+ return JSONResponse(
+ content={
+ "success": False,
+ "message": "No recordings found for the specified time range",
+ },
+ status_code=400,
+ )
+
+    file_name = sanitize_filename(f"playlist_{camera_name}_{start_ts}-{end_ts}.txt")
+
+    # validate the filename length before writing anything to the cache dir
+    if len(file_name) > 1000:
+        return JSONResponse(
+            content={
+                "success": False,
+                "message": "Filename exceeded max length of 1000",
+            },
+            status_code=403,
+        )
+
+    file_path = os.path.join(CACHE_DIR, file_name)
+    with open(file_path, "w") as file:
+        clip: Recordings
+        for clip in recordings:
+            file.write(f"file '{clip.path}'\n")
+
+            # if this is the starting clip, add an inpoint
+            if clip.start_time < start_ts:
+                file.write(f"inpoint {int(start_ts - clip.start_time)}\n")
+
+            # if this is the ending clip, add an outpoint
+            if clip.end_time > end_ts:
+                file.write(f"outpoint {int(end_ts - clip.start_time)}\n")
+
+ config: FrigateConfig = request.app.frigate_config
+
+ ffmpeg_cmd = [
+ config.ffmpeg.ffmpeg_path,
+ "-hide_banner",
+ "-y",
+ "-protocol_whitelist",
+ "pipe,file",
+ "-f",
+ "concat",
+ "-safe",
+ "0",
+ "-i",
+ file_path,
+ "-c",
+ "copy",
+ "-movflags",
+ "frag_keyframe+empty_moov",
+ "-f",
+ "mp4",
+ "pipe:",
+ ]
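+
+    # the playlist consumed via -f concat uses ffmpeg's concat demuxer syntax,
+    # e.g.:
+    #   file '/path/to/segment.mp4'
+    #   inpoint 12
+    # inpoint/outpoint (written above) trim the first/last segments to the
+    # requested range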
+
+ return StreamingResponse(
+ run_download(ffmpeg_cmd, file_path),
+ media_type="video/mp4",
+ )
+
+
+@router.get(
+ "/vod/{camera_name}/start/{start_ts}/end/{end_ts}",
+ dependencies=[Depends(require_camera_access)],
+ description="Returns an HLS playlist for the specified timestamp-range on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
+)
+async def vod_ts(
+ camera_name: str,
+ start_ts: float,
+ end_ts: float,
+ force_discontinuity: bool = False,
+ variant: str = "main",
+):
+ logger.debug(
+ "VOD: Generating VOD for %s from %s to %s with force_discontinuity=%s variant=%s",
+ camera_name,
+ start_ts,
+ end_ts,
+ force_discontinuity,
+ variant,
+ )
+ recordings = (
+ Recordings.select(
+ Recordings.path,
+ Recordings.duration,
+ Recordings.end_time,
+ Recordings.start_time,
+ )
+ .where(
+ Recordings.start_time.between(start_ts, end_ts)
+ | Recordings.end_time.between(start_ts, end_ts)
+ | ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time))
+ )
+ .where(Recordings.camera == camera_name)
+ .where(Recordings.variant == variant)
+ .order_by(Recordings.start_time.asc())
+ .iterator()
+ )
+
+ clips = []
+ durations = []
+ min_duration_ms = 100 # Minimum 100ms to ensure at least one video frame
+ max_duration_ms = MAX_SEGMENT_DURATION * 1000
+
+ recording: Recordings
+ for recording in recordings:
+ logger.debug(
+ "VOD: processing recording: %s start=%s end=%s duration=%s",
+ recording.path,
+ recording.start_time,
+ recording.end_time,
+ recording.duration,
+ )
+
+ clip = {"type": "source", "path": recording.path}
+ duration = int(recording.duration * 1000)
+
+ # adjust start offset if start_ts is after recording.start_time
+ if start_ts > recording.start_time:
+ inpoint = int((start_ts - recording.start_time) * 1000)
+ clip["clipFrom"] = inpoint
+ duration -= inpoint
+ logger.debug(
+ "VOD: applied clipFrom %sms to %s",
+ inpoint,
+ recording.path,
+ )
+
+ # adjust end if recording.end_time is after end_ts
+ if recording.end_time > end_ts:
+ duration -= int((recording.end_time - end_ts) * 1000)
+
+ if duration < min_duration_ms:
+ # skip if the clip has no valid duration (too short to contain frames)
+ logger.debug(
+ "VOD: skipping recording %s - resulting duration %sms too short",
+ recording.path,
+ duration,
+ )
+ continue
+
+ if min_duration_ms <= duration < max_duration_ms:
+ clip["keyFrameDurations"] = [duration]
+ clips.append(clip)
+ durations.append(duration)
+ logger.debug(
+ "VOD: added clip %s duration_ms=%s clipFrom=%s",
+ recording.path,
+ duration,
+ clip.get("clipFrom"),
+ )
+ else:
+ logger.warning(f"Recording clip is missing or empty: {recording.path}")
+
+ if not clips:
+ logger.error(
+ f"No recordings found for {camera_name} during the requested time range"
+ )
+ return JSONResponse(
+ content={
+ "success": False,
+ "message": "No recordings found.",
+ },
+ status_code=404,
+ )
+
+ hour_ago = datetime.now() - timedelta(hours=1)
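+    # this JSON is consumed by nginx-vod-module in mapped mode: "sequences"/
+    # "clips" describe the source mp4 segments and all durations are in ms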
+ return JSONResponse(
+ content={
+ "cache": hour_ago.timestamp() > start_ts,
+ "discontinuity": force_discontinuity,
+ "consistentSequenceMediaInfo": True,
+ "durations": durations,
+ "segment_duration": max(durations),
+ "sequences": [{"clips": clips}],
+ }
+ )
+
+
+@router.get(
+ "/vod/{year_month}/{day}/{hour}/{camera_name}",
+ dependencies=[Depends(require_camera_access)],
+ description="Returns an HLS playlist for the specified date-time on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
+)
+async def vod_hour_no_timezone(
+ year_month: str, day: int, hour: int, camera_name: str, variant: str = "main"
+):
+ """VOD for specific hour. Uses the default timezone (UTC)."""
+ return await vod_hour(
+ year_month,
+ day,
+ hour,
+ camera_name,
+ get_localzone_name().replace("/", ","),
+ variant,
+ )
+
+
+@router.get(
+ "/vod/{year_month}/{day}/{hour}/{camera_name}/{tz_name}",
+ dependencies=[Depends(require_camera_access)],
+ description="Returns an HLS playlist for the specified date-time (with timezone) on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
+)
+async def vod_hour(
+ year_month: str,
+ day: int,
+ hour: int,
+ camera_name: str,
+ tz_name: str,
+ variant: str = "main",
+):
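+    # tz_name arrives with "/" encoded as "," so it survives as a single path
+    # segment (see vod_hour_no_timezone)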
+ parts = year_month.split("-")
+ start_date = (
+ datetime(int(parts[0]), int(parts[1]), day, hour, tzinfo=timezone.utc)
+ - datetime.now(pytz.timezone(tz_name.replace(",", "/"))).utcoffset()
+ )
+ end_date = start_date + timedelta(hours=1) - timedelta(milliseconds=1)
+ start_ts = start_date.timestamp()
+ end_ts = end_date.timestamp()
+
+ return await vod_ts(camera_name, start_ts, end_ts, variant=variant)
+
+
+@router.get(
+ "/vod/event/{event_id}",
+ dependencies=[Depends(allow_any_authenticated())],
+ description="Returns an HLS playlist for the specified object. Append /master.m3u8 or /index.m3u8 for HLS playback.",
+)
+async def vod_event(
+ request: Request,
+ event_id: str,
+ padding: int = Query(0, description="Padding to apply to the vod."),
+ variant: str = Query("main", description="Recording variant to use for playback."),
+):
+ try:
+ event: Event = Event.get(Event.id == event_id)
+ except DoesNotExist:
+ logger.error(f"Event not found: {event_id}")
+ return JSONResponse(
+ content={
+ "success": False,
+ "message": "Event not found.",
+ },
+ status_code=404,
+ )
+
+ await require_camera_access(event.camera, request=request)
+
+ end_ts = (
+ datetime.now().timestamp()
+ if event.end_time is None
+ else (event.end_time + padding)
+ )
+ vod_response = await vod_ts(
+ event.camera, event.start_time - padding, end_ts, variant=variant
+ )
+
+ # If the recordings are not found and the event started more than 5 minutes ago, set has_clip to false
+ if (
+ event.start_time < datetime.now().timestamp() - 300
+        and isinstance(vod_response, JSONResponse)
+        and vod_response.status_code == 404
+ ):
+ Event.update(has_clip=False).where(Event.id == event_id).execute()
+
+ return vod_response
+
+
+@router.get(
+ "/vod/clip/{camera_name}/start/{start_ts}/end/{end_ts}",
+ dependencies=[Depends(require_camera_access)],
+ description="Returns an HLS playlist for a timestamp range with HLS discontinuity enabled. Append /master.m3u8 or /index.m3u8 for HLS playback.",
+)
+async def vod_clip(
+ camera_name: str,
+ start_ts: float,
+ end_ts: float,
+ variant: str = Query("main", description="Recording variant to use for playback."),
+):
+ return await vod_ts(
+ camera_name, start_ts, end_ts, force_discontinuity=True, variant=variant
+ )
+
+
+@router.get(
+ "/events/{event_id}/snapshot.jpg",
+ description="Returns a snapshot image for the specified object id. NOTE: The query params only take affect while the event is in-progress. Once the event has ended the snapshot configuration is used.",
+)
+async def event_snapshot(
+ request: Request,
+ event_id: str,
+ params: MediaEventsSnapshotQueryParams = Depends(),
+):
+ event_complete = False
+ jpg_bytes = None
+ frame_time = 0
+ try:
+ event = Event.get(Event.id == event_id, Event.end_time != None)
+ event_complete = True
+ await require_camera_access(event.camera, request=request)
+ if not event.has_snapshot:
+ return JSONResponse(
+ content={"success": False, "message": "Snapshot not available"},
+ status_code=404,
+ )
+ # read snapshot from disk
+ with open(
+ os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg"), "rb"
+ ) as image_file:
+ jpg_bytes = image_file.read()
+ except DoesNotExist:
+ # see if the object is currently being tracked
+ try:
+ camera_states: list[CameraState] = (
+ request.app.detected_frames_processor.camera_states.values()
+ )
+ for camera_state in camera_states:
+ if event_id in camera_state.tracked_objects:
+ tracked_obj = camera_state.tracked_objects.get(event_id)
+ if tracked_obj is not None:
+ jpg_bytes, frame_time = tracked_obj.get_img_bytes(
+ ext="jpg",
+ timestamp=params.timestamp,
+ bounding_box=params.bbox,
+ crop=params.crop,
+ height=params.height,
+ quality=params.quality,
+ )
+ await require_camera_access(camera_state.name, request=request)
+ except Exception:
+ return JSONResponse(
+ content={"success": False, "message": "Ongoing event not found"},
+ status_code=404,
+ )
+ except Exception:
+ return JSONResponse(
+ content={"success": False, "message": "Unknown error occurred"},
+ status_code=404,
+ )
+
+ if jpg_bytes is None:
+ return JSONResponse(
+ content={"success": False, "message": "Live frame not available"},
+ status_code=404,
+ )
+
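+    # mark live snapshots no-store so browsers don't cache a stale frame;
+    # X-Frame-Time reports the capture time of the returned frame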
+ headers = {
+ "Content-Type": "image/jpeg",
+ "Cache-Control": "private, max-age=31536000" if event_complete else "no-store",
+ "X-Frame-Time": str(frame_time),
+ }
+
+ if params.download:
+ headers["Content-Disposition"] = f"attachment; filename=snapshot-{event_id}.jpg"
+
+ return Response(
+ jpg_bytes,
+ media_type="image/jpeg",
+ headers=headers,
+ )
+
+
+@router.get(
+ "/events/{event_id}/thumbnail.{extension}",
+ dependencies=[Depends(require_camera_access)],
+)
+async def event_thumbnail(
+ request: Request,
+ event_id: str,
+ extension: Extension,
+ max_cache_age: int = Query(
+ 2592000, description="Max cache age in seconds. Default 30 days in seconds."
+ ),
+ format: str = Query(default="ios", enum=["ios", "android"]),
+):
+ thumbnail_bytes = None
+ event_complete = False
+ try:
+ event: Event = Event.get(Event.id == event_id)
+ await require_camera_access(event.camera, request=request)
+ if event.end_time is not None:
+ event_complete = True
+
+ thumbnail_bytes = get_event_thumbnail_bytes(event)
+ except DoesNotExist:
+ thumbnail_bytes = None
+
+ if thumbnail_bytes is None:
+ # see if the object is currently being tracked
+ try:
+ camera_states = request.app.detected_frames_processor.camera_states.values()
+ for camera_state in camera_states:
+ if event_id in camera_state.tracked_objects:
+ tracked_obj = camera_state.tracked_objects.get(event_id)
+ if tracked_obj is not None:
+ thumbnail_bytes = tracked_obj.get_thumbnail(extension.value)
+ except Exception:
+ return JSONResponse(
+ content={"success": False, "message": "Event not found"},
+ status_code=404,
+ )
+
+ if thumbnail_bytes is None:
+ return JSONResponse(
+ content={"success": False, "message": "Event not found"},
+ status_code=404,
+ )
+
+ # android notifications prefer a 2:1 ratio
+ if format == "android":
+ img_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8)
+ img = cv2.imdecode(img_as_np, flags=1)
+ thumbnail = cv2.copyMakeBorder(
+ img,
+ 0,
+ 0,
+ int(img.shape[1] * 0.5),
+ int(img.shape[1] * 0.5),
+ cv2.BORDER_CONSTANT,
+            value=(0, 0, 0),
+ )
+
+ quality_params = None
+ if extension in (Extension.jpg, Extension.jpeg):
+ quality_params = [int(cv2.IMWRITE_JPEG_QUALITY), 70]
+ elif extension == Extension.webp:
+ quality_params = [int(cv2.IMWRITE_WEBP_QUALITY), 60]
+
+        # pass an empty params list when no quality settings apply (e.g. png)
+        _, img = cv2.imencode(f".{extension.value}", thumbnail, quality_params or [])
+ thumbnail_bytes = img.tobytes()
+
+ return Response(
+ thumbnail_bytes,
+ media_type=extension.get_mime_type(),
+ headers={
+ "Cache-Control": f"private, max-age={max_cache_age}"
+ if event_complete
+ else "no-store",
+ },
+ )
+
+
+@router.get("/{camera_name}/grid.jpg", dependencies=[Depends(require_camera_access)])
+def grid_snapshot(
+ request: Request, camera_name: str, color: str = "green", font_scale: float = 0.5
+):
+ if camera_name in request.app.frigate_config.cameras:
+ detect = request.app.frigate_config.cameras[camera_name].detect
+ frame_processor: TrackedObjectProcessor = request.app.detected_frames_processor
+ frame = frame_processor.get_current_frame(camera_name, {})
+ retry_interval = float(
+ request.app.frigate_config.cameras.get(camera_name).ffmpeg.retry_interval
+ or 10
+ )
+
+ if frame is None or datetime.now().timestamp() > (
+ frame_processor.get_current_frame_time(camera_name) + retry_interval
+ ):
+ return JSONResponse(
+ content={"success": False, "message": "Unable to get valid frame"},
+ status_code=500,
+ )
+
+ try:
+ grid = (
+ Regions.select(Regions.grid)
+ .where(Regions.camera == camera_name)
+ .get()
+ .grid
+ )
+ except DoesNotExist:
+ return JSONResponse(
+ content={"success": False, "message": "Unable to get region grid"},
+ status_code=500,
+ )
+
+ color_arg = color.lower()
+
+ if color_arg == "red":
+ draw_color = (0, 0, 255)
+ elif color_arg == "blue":
+ draw_color = (255, 0, 0)
+ elif color_arg == "black":
+ draw_color = (0, 0, 0)
+ elif color_arg == "white":
+ draw_color = (255, 255, 255)
+ else:
+ draw_color = (0, 255, 0) # green
+
+ grid_size = len(grid)
+ grid_coef = 1.0 / grid_size
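+        # grid cells hold normalized object-size stats, so std/mean are scaled
+        # by the frame width to display pixel values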
+ width = detect.width
+ height = detect.height
+ for x in range(grid_size):
+ for y in range(grid_size):
+ cell = grid[x][y]
+
+ if len(cell["sizes"]) == 0:
+ continue
+
+ std_dev = round(cell["std_dev"] * width, 2)
+ mean = round(cell["mean"] * width, 2)
+ cv2.rectangle(
+ frame,
+ (int(x * grid_coef * width), int(y * grid_coef * height)),
+ (
+ int((x + 1) * grid_coef * width),
+ int((y + 1) * grid_coef * height),
+ ),
+ draw_color,
+ 2,
+ )
+ cv2.putText(
+ frame,
+ f"#: {len(cell['sizes'])}",
+ (
+ int(x * grid_coef * width + 10),
+ int((y * grid_coef + 0.02) * height),
+ ),
+ cv2.FONT_HERSHEY_SIMPLEX,
+ fontScale=font_scale,
+ color=draw_color,
+ thickness=2,
+ )
+ cv2.putText(
+ frame,
+ f"std: {std_dev}",
+ (
+ int(x * grid_coef * width + 10),
+ int((y * grid_coef + 0.05) * height),
+ ),
+ cv2.FONT_HERSHEY_SIMPLEX,
+ fontScale=font_scale,
+ color=draw_color,
+ thickness=2,
+ )
+ cv2.putText(
+ frame,
+ f"avg: {mean}",
+ (
+ int(x * grid_coef * width + 10),
+ int((y * grid_coef + 0.08) * height),
+ ),
+ cv2.FONT_HERSHEY_SIMPLEX,
+ fontScale=font_scale,
+ color=draw_color,
+ thickness=2,
+ )
+
+ ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
+
+ return Response(
+ jpg.tobytes(),
+ media_type="image/jpeg",
+ headers={"Cache-Control": "no-store"},
+ )
+ else:
+ return JSONResponse(
+ content={"success": False, "message": "Camera not found"},
+ status_code=404,
+ )
+
+
+@router.delete(
+ "/{camera_name}/region_grid", dependencies=[Depends(require_role("admin"))]
+)
+def clear_region_grid(request: Request, camera_name: str):
+ """Clear the region grid for a camera."""
+ if camera_name not in request.app.frigate_config.cameras:
+ return JSONResponse(
+ content={"success": False, "message": "Camera not found"},
+ status_code=404,
+ )
+
+ Regions.delete().where(Regions.camera == camera_name).execute()
+ return JSONResponse(
+ content={"success": True, "message": "Region grid cleared"},
+ )
+
+
+@router.get(
+ "/events/{event_id}/snapshot-clean.webp",
+ dependencies=[Depends(require_camera_access)],
+)
+def event_snapshot_clean(request: Request, event_id: str, download: bool = False):
+ webp_bytes = None
+ try:
+ event = Event.get(Event.id == event_id)
+ snapshot_config = request.app.frigate_config.cameras[event.camera].snapshots
+        if not (snapshot_config.enabled and snapshot_config.clean_copy):
+ return JSONResponse(
+ content={
+ "success": False,
+ "message": "Snapshots and clean_copy must be enabled in the config",
+ },
+ status_code=404,
+ )
+ if event.end_time is None:
+ # see if the object is currently being tracked
+ try:
+ camera_states = (
+ request.app.detected_frames_processor.camera_states.values()
+ )
+ for camera_state in camera_states:
+ if event_id in camera_state.tracked_objects:
+ tracked_obj = camera_state.tracked_objects.get(event_id)
+ if tracked_obj is not None:
+ webp_bytes = tracked_obj.get_clean_webp()
+ break
+ except Exception:
+ return JSONResponse(
+ content={"success": False, "message": "Event not found"},
+ status_code=404,
+ )
+ elif not event.has_snapshot:
+ return JSONResponse(
+ content={"success": False, "message": "Snapshot not available"},
+ status_code=404,
+ )
+ except DoesNotExist:
+ return JSONResponse(
+ content={"success": False, "message": "Event not found"}, status_code=404
+ )
+ if webp_bytes is None:
+ try:
+ # webp
+ clean_snapshot_path_webp = os.path.join(
+ CLIPS_DIR, f"{event.camera}-{event.id}-clean.webp"
+ )
+ # png (legacy)
+ clean_snapshot_path_png = os.path.join(
+ CLIPS_DIR, f"{event.camera}-{event.id}-clean.png"
+ )
+
+ if os.path.exists(clean_snapshot_path_webp):
+ with open(clean_snapshot_path_webp, "rb") as image_file:
+ webp_bytes = image_file.read()
+ elif os.path.exists(clean_snapshot_path_png):
+ # convert png to webp and save for future use
+ png_image = cv2.imread(clean_snapshot_path_png, cv2.IMREAD_UNCHANGED)
+ if png_image is None:
+ return JSONResponse(
+ content={
+ "success": False,
+ "message": "Invalid png snapshot",
+ },
+ status_code=400,
+ )
+
+ ret, webp_data = cv2.imencode(
+ ".webp", png_image, [int(cv2.IMWRITE_WEBP_QUALITY), 60]
+ )
+ if not ret:
+ return JSONResponse(
+ content={
+ "success": False,
+ "message": "Unable to convert png to webp",
+ },
+ status_code=400,
+ )
+
+ webp_bytes = webp_data.tobytes()
+
+ # save the converted webp for future requests
+ try:
+ with open(clean_snapshot_path_webp, "wb") as f:
+ f.write(webp_bytes)
+ except Exception as e:
+ logger.warning(
+ f"Failed to save converted webp for event {event.id}: {e}"
+ )
+ # continue since we now have the data to return
+ else:
+ return JSONResponse(
+ content={
+ "success": False,
+ "message": "Clean snapshot not available",
+ },
+ status_code=404,
+ )
+ except Exception:
+ logger.error(f"Unable to load clean snapshot for event: {event.id}")
+ return JSONResponse(
+ content={
+ "success": False,
+ "message": "Unable to load clean snapshot for event",
+ },
+ status_code=400,
+ )
+
+ headers = {
+ "Content-Type": "image/webp",
+ "Cache-Control": "private, max-age=31536000",
+ }
+
+ if download:
+ headers["Content-Disposition"] = (
+ f"attachment; filename=snapshot-{event_id}-clean.webp"
+ )
+
+ return Response(
+ webp_bytes,
+ media_type="image/webp",
+ headers=headers,
+ )
+
+
+@router.get(
+ "/events/{event_id}/clip.mp4", dependencies=[Depends(require_camera_access)]
+)
+async def event_clip(
+ request: Request,
+ event_id: str,
+ padding: int = Query(0, description="Padding to apply to clip."),
+):
+ try:
+ event: Event = Event.get(Event.id == event_id)
+ except DoesNotExist:
+ return JSONResponse(
+ content={"success": False, "message": "Event not found"}, status_code=404
+ )
+
+ if not event.has_clip:
+ return JSONResponse(
+ content={"success": False, "message": "Clip not available"}, status_code=404
+ )
+
+ end_ts = (
+ datetime.now().timestamp()
+ if event.end_time is None
+ else event.end_time + padding
+ )
+ return await recording_clip(
+ request, event.camera, event.start_time - padding, end_ts
+ )
+
+
+@router.get(
+ "/events/{event_id}/preview.gif", dependencies=[Depends(require_camera_access)]
+)
+def event_preview(request: Request, event_id: str):
+ try:
+ event: Event = Event.get(Event.id == event_id)
+ except DoesNotExist:
+ return JSONResponse(
+ content={"success": False, "message": "Event not found"}, status_code=404
+ )
+
+ start_ts = event.start_time
+ end_ts = start_ts + (
+ min(event.end_time - event.start_time, 20) if event.end_time else 20
+ )
+ return preview_gif(request, event.camera, start_ts, end_ts)
+
+
+@router.get(
+ "/{camera_name}/start/{start_ts}/end/{end_ts}/preview.gif",
+ dependencies=[Depends(require_camera_access)],
+)
+def preview_gif(
+ request: Request,
+ camera_name: str,
+ start_ts: float,
+ end_ts: float,
+ max_cache_age: int = Query(
+ 2592000, description="Max cache age in seconds. Default 30 days in seconds."
+ ),
+):
+ if datetime.fromtimestamp(start_ts) < datetime.now().replace(minute=0, second=0):
+ # has preview mp4
+        try:
+            preview: Previews = (
+                Previews.select(
+                    Previews.camera,
+                    Previews.path,
+                    Previews.duration,
+                    Previews.start_time,
+                    Previews.end_time,
+                )
+                .where(
+                    Previews.start_time.between(start_ts, end_ts)
+                    | Previews.end_time.between(start_ts, end_ts)
+                    | (
+                        (start_ts > Previews.start_time)
+                        & (end_ts < Previews.end_time)
+                    )
+                )
+                .where(Previews.camera == camera_name)
+                .limit(1)
+                .get()
+            )
+        except DoesNotExist:
+            preview = None
+
+        if not preview:
+            return JSONResponse(
+                content={"success": False, "message": "Preview not found"},
+                status_code=404,
+            )
+
+ diff = start_ts - preview.start_time
+ minutes = int(diff / 60)
+ seconds = int(diff % 60)
+ config: FrigateConfig = request.app.frigate_config
+ ffmpeg_cmd = [
+ config.ffmpeg.ffmpeg_path,
+ "-hide_banner",
+ "-loglevel",
+ "warning",
+ "-ss",
+ f"00:{minutes}:{seconds}",
+ "-t",
+ f"{end_ts - start_ts}",
+ "-i",
+ preview.path,
+ "-r",
+ "8",
+ "-vf",
+ "setpts=0.12*PTS",
+ "-loop",
+ "0",
+ "-c:v",
+ "gif",
+ "-f",
+ "gif",
+ "-",
+ ]
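+        # -r 8 plus setpts=0.12*PTS resamples the preview to ~8 fps at roughly
+        # 8x speed, keeping the gif small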
+
+ process = sp.run(
+ ffmpeg_cmd,
+ capture_output=True,
+ )
+
+ if process.returncode != 0:
+ logger.error(process.stderr)
+ return JSONResponse(
+ content={"success": False, "message": "Unable to create preview gif"},
+ status_code=500,
+ )
+
+ gif_bytes = process.stdout
+ else:
+ # need to generate from existing images
+ preview_dir = os.path.join(CACHE_DIR, "preview_frames")
+
+ if not os.path.isdir(preview_dir):
+ return JSONResponse(
+ content={"success": False, "message": "Preview not found"},
+ status_code=404,
+ )
+
+ file_start = f"preview_{camera_name}"
+ start_file = f"{file_start}-{start_ts}.{PREVIEW_FRAME_TYPE}"
+ end_file = f"{file_start}-{end_ts}.{PREVIEW_FRAME_TYPE}"
+ selected_previews = []
+
+ for file in sorted(os.listdir(preview_dir)):
+ if not file.startswith(file_start):
+ continue
+
+ if file < start_file:
+ continue
+
+ if file > end_file:
+ break
+
+ selected_previews.append(f"file '{os.path.join(preview_dir, file)}'")
+ selected_previews.append("duration 0.12")
+
+ if not selected_previews:
+ return JSONResponse(
+ content={"success": False, "message": "Preview not found"},
+ status_code=404,
+ )
+
+ last_file = selected_previews[-2]
+ selected_previews.append(last_file)
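+        # the concat demuxer only honors the final "duration" directive when
+        # the last file entry is repeated, hence the duplicate append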
+ config: FrigateConfig = request.app.frigate_config
+
+ ffmpeg_cmd = [
+ config.ffmpeg.ffmpeg_path,
+ "-hide_banner",
+ "-loglevel",
+ "warning",
+ "-f",
+ "concat",
+ "-y",
+ "-protocol_whitelist",
+ "pipe,file",
+ "-safe",
+ "0",
+ "-i",
+ "/dev/stdin",
+ "-loop",
+ "0",
+ "-c:v",
+ "gif",
+ "-f",
+ "gif",
+ "-",
+ ]
+
+ process = sp.run(
+ ffmpeg_cmd,
+ input=str.encode("\n".join(selected_previews)),
+ capture_output=True,
+ )
+
+ if process.returncode != 0:
+ logger.error(process.stderr)
+ return JSONResponse(
+ content={"success": False, "message": "Unable to create preview gif"},
+ status_code=500,
+ )
+
+ gif_bytes = process.stdout
+
+ return Response(
+ gif_bytes,
+ media_type="image/gif",
+ headers={
+ "Cache-Control": f"private, max-age={max_cache_age}",
+ "Content-Type": "image/gif",
+ },
+ )
+
+
+@router.get(
+ "/{camera_name}/start/{start_ts}/end/{end_ts}/preview.mp4",
+ dependencies=[Depends(require_camera_access)],
+)
+def preview_mp4(
+ request: Request,
+ camera_name: str,
+ start_ts: float,
+ end_ts: float,
+ max_cache_age: int = Query(
+ 604800, description="Max cache age in seconds. Default 7 days in seconds."
+ ),
+):
+ file_name = sanitize_filename(f"preview_{camera_name}_{start_ts}-{end_ts}.mp4")
+
+ if len(file_name) > 1000:
+ return JSONResponse(
+ content=(
+ {
+ "success": False,
+ "message": "Filename exceeded max length of 1000 characters.",
+ }
+ ),
+ status_code=403,
+ )
+
+ path = os.path.join(CACHE_DIR, file_name)
+
+ if datetime.fromtimestamp(start_ts) < datetime.now().replace(minute=0, second=0):
+ # has preview mp4
+ try:
+ preview: Previews = (
+ Previews.select(
+ Previews.camera,
+ Previews.path,
+ Previews.duration,
+ Previews.start_time,
+ Previews.end_time,
+ )
+ .where(
+ Previews.start_time.between(start_ts, end_ts)
+ | Previews.end_time.between(start_ts, end_ts)
+ | ((start_ts > Previews.start_time) & (end_ts < Previews.end_time))
+ )
+ .where(Previews.camera == camera_name)
+ .limit(1)
+ .get()
+ )
+ except DoesNotExist:
+ preview = None
+
+ if not preview:
+ return JSONResponse(
+ content={"success": False, "message": "Preview not found"},
+ status_code=404,
+ )
+
+ diff = start_ts - preview.start_time
+ minutes = int(diff / 60)
+ seconds = int(diff % 60)
+ config: FrigateConfig = request.app.frigate_config
+ ffmpeg_cmd = [
+ config.ffmpeg.ffmpeg_path,
+ "-hide_banner",
+ "-loglevel",
+ "warning",
+ "-y",
+ "-ss",
+ f"00:{minutes}:{seconds}",
+ "-t",
+ f"{end_ts - start_ts}",
+ "-i",
+ preview.path,
+ "-r",
+ "8",
+ "-vf",
+ "setpts=0.12*PTS",
+ "-c:v",
+ "libx264",
+ "-movflags",
+ "+faststart",
+ path,
+ ]
+
+ process = sp.run(
+ ffmpeg_cmd,
+ capture_output=True,
+ )
+
+ if process.returncode != 0:
+ logger.error(process.stderr)
+ return JSONResponse(
+ content={"success": False, "message": "Unable to create preview gif"},
+ status_code=500,
+ )
+
+ else:
+ # need to generate from existing images
+ preview_dir = os.path.join(CACHE_DIR, "preview_frames")
+
+ if not os.path.isdir(preview_dir):
+ return JSONResponse(
+ content={"success": False, "message": "Preview not found"},
+ status_code=404,
+ )
+
+ file_start = f"preview_{camera_name}"
+ start_file = f"{file_start}-{start_ts}.{PREVIEW_FRAME_TYPE}"
+ end_file = f"{file_start}-{end_ts}.{PREVIEW_FRAME_TYPE}"
+ selected_previews = []
+
+ for file in sorted(os.listdir(preview_dir)):
+ if not file.startswith(file_start):
+ continue
+
+ if file < start_file:
+ continue
+
+ if file > end_file:
+ break
+
+ selected_previews.append(f"file '{os.path.join(preview_dir, file)}'")
+ selected_previews.append("duration 0.12")
+
+ if not selected_previews:
+ return JSONResponse(
+ content={"success": False, "message": "Preview not found"},
+ status_code=404,
+ )
+
+ last_file = selected_previews[-2]
+ selected_previews.append(last_file)
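+        # repeat the last file entry so the concat demuxer honors the final
+        # "duration" directive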
+ config: FrigateConfig = request.app.frigate_config
+
+ ffmpeg_cmd = [
+ config.ffmpeg.ffmpeg_path,
+ "-hide_banner",
+ "-loglevel",
+ "warning",
+ "-f",
+ "concat",
+ "-y",
+ "-protocol_whitelist",
+ "pipe,file",
+ "-safe",
+ "0",
+ "-i",
+ "/dev/stdin",
+ "-c:v",
+ "libx264",
+ "-movflags",
+ "+faststart",
+ path,
+ ]
+
+ process = sp.run(
+ ffmpeg_cmd,
+ input=str.encode("\n".join(selected_previews)),
+ capture_output=True,
+ )
+
+ if process.returncode != 0:
+ logger.error(process.stderr)
+ return JSONResponse(
+ content={"success": False, "message": "Unable to create preview gif"},
+ status_code=500,
+ )
+
+ headers = {
+ "Content-Description": "File Transfer",
+ "Cache-Control": f"private, max-age={max_cache_age}",
+ "Content-Type": "video/mp4",
+ "Content-Length": str(os.path.getsize(path)),
+ # nginx: https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers
+ "X-Accel-Redirect": f"/cache/{file_name}",
+ }
+
+ return FileResponse(
+ path,
+ media_type="video/mp4",
+ filename=file_name,
+ headers=headers,
+ )
+
+
+@router.get("/review/{event_id}/preview", dependencies=[Depends(require_camera_access)])
+def review_preview(
+ request: Request,
+ event_id: str,
+ format: str = Query(default="gif", enum=["gif", "mp4"]),
+):
+ try:
+ review: ReviewSegment = ReviewSegment.get(ReviewSegment.id == event_id)
+ except DoesNotExist:
+ return JSONResponse(
+ content=({"success": False, "message": "Review segment not found"}),
+ status_code=404,
+ )
+
+ padding = 8
+ start_ts = review.start_time - padding
+ end_ts = (
+ review.end_time + padding if review.end_time else datetime.now().timestamp()
+ )
+
+ if format == "gif":
+ return preview_gif(request, review.camera, start_ts, end_ts)
+ else:
+ return preview_mp4(request, review.camera, start_ts, end_ts)
+
+
+@router.get(
+ "/preview/{file_name}/thumbnail.jpg", dependencies=[Depends(require_camera_access)]
+)
+@router.get(
+ "/preview/{file_name}/thumbnail.webp", dependencies=[Depends(require_camera_access)]
+)
+def preview_thumbnail(file_name: str):
+ """Get a thumbnail from the cached preview frames."""
+ if len(file_name) > 1000:
+ return JSONResponse(
+ content=(
+ {"success": False, "message": "Filename exceeded max length of 1000"}
+ ),
+ status_code=403,
+ )
+
+ safe_file_name_current = sanitize_filename(file_name)
+ preview_dir = os.path.join(CACHE_DIR, "preview_frames")
+
+ try:
+ with open(
+ os.path.join(preview_dir, safe_file_name_current), "rb"
+ ) as image_file:
+            image_bytes = image_file.read()
+ except FileNotFoundError:
+ return JSONResponse(
+ content=({"success": False, "message": "Image file not found"}),
+ status_code=404,
+ )
+
+ return Response(
+        image_bytes,
+ media_type="image/webp",
+ headers={
+ "Content-Type": "image/webp",
+ "Cache-Control": "private, max-age=31536000",
+ },
+ )
+
+
+####################### dynamic routes ###########################
+
+
+@router.get(
+ "/{camera_name}/{label}/best.jpg", dependencies=[Depends(require_camera_access)]
+)
+@router.get(
+ "/{camera_name}/{label}/thumbnail.jpg",
+ dependencies=[Depends(require_camera_access)],
+)
+async def label_thumbnail(request: Request, camera_name: str, label: str):
+ label = unquote(label)
+ event_query = Event.select(fn.MAX(Event.id)).where(Event.camera == camera_name)
+ if label != "any":
+ event_query = event_query.where(Event.label == label)
+
+    # fn.MAX returns a NULL row rather than raising DoesNotExist when no events
+    # match, so check the scalar for None explicitly
+    event_id = event_query.scalar()
+
+    if event_id is not None:
+        return await event_thumbnail(request, event_id, Extension.jpg, 60)
+
+    frame = np.zeros((175, 175, 3), np.uint8)
+    _, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
+
+    return Response(
+        jpg.tobytes(),
+        media_type="image/jpeg",
+        headers={"Cache-Control": "no-store"},
+    )
+
+
+@router.get(
+ "/{camera_name}/{label}/clip.mp4", dependencies=[Depends(require_camera_access)]
+)
+async def label_clip(request: Request, camera_name: str, label: str):
+ label = unquote(label)
+ event_query = Event.select(fn.MAX(Event.id)).where(
+ Event.camera == camera_name, Event.has_clip == True
+ )
+ if label != "any":
+ event_query = event_query.where(Event.label == label)
+
+ try:
+ event = event_query.get()
+
+ return await event_clip(request, event.id, 0)
+ except DoesNotExist:
+ return JSONResponse(
+ content={"success": False, "message": "Event not found"}, status_code=404
+ )
+
+
+@router.get(
+ "/{camera_name}/{label}/snapshot.jpg", dependencies=[Depends(require_camera_access)]
+)
+async def label_snapshot(request: Request, camera_name: str, label: str):
+ """Returns the snapshot image from the latest event for the given camera and label combo"""
+ label = unquote(label)
+ if label == "any":
+ event_query = (
+ Event.select(Event.id)
+ .where(Event.camera == camera_name)
+ .where(Event.has_snapshot == True)
+ .order_by(Event.start_time.desc())
+ )
+ else:
+ event_query = (
+ Event.select(Event.id)
+ .where(Event.camera == camera_name)
+ .where(Event.label == label)
+ .where(Event.has_snapshot == True)
+ .order_by(Event.start_time.desc())
+ )
+
+ try:
+ event: Event = event_query.get()
+ return await event_snapshot(request, event.id, MediaEventsSnapshotQueryParams())
+ except DoesNotExist:
+ frame = np.zeros((720, 1280, 3), np.uint8)
+ _, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
+
+ return Response(
+ jpg.tobytes(),
+ media_type="image/jpeg",
+ )
diff --git a/frigate/api/record.py b/frigate/api/record.py
index 4ab4b0af1..6ca2a5542 100644
--- a/frigate/api/record.py
+++ b/frigate/api/record.py
@@ -1,458 +1,468 @@
-"""Recording APIs."""
-
-import datetime as dt
-import logging
-from datetime import datetime, timedelta
-from functools import reduce
-from pathlib import Path
-from typing import List
-from urllib.parse import unquote
-
-from fastapi import APIRouter, Depends, Request
-from fastapi import Path as PathParam
-from fastapi.responses import JSONResponse
-from peewee import fn, operator
-
-from frigate.api.auth import (
- allow_any_authenticated,
- get_allowed_cameras_for_filter,
- require_camera_access,
- require_role,
-)
-from frigate.api.defs.query.recordings_query_parameters import (
- MediaRecordingsAvailabilityQueryParams,
- MediaRecordingsSummaryQueryParams,
- RecordingsDeleteQueryParams,
-)
-from frigate.api.defs.response.generic_response import GenericResponse
-from frigate.api.defs.tags import Tags
-from frigate.const import RECORD_DIR
-from frigate.models import Event, Recordings
-from frigate.util.time import get_dst_transitions
-
-logger = logging.getLogger(__name__)
-
-router = APIRouter(tags=[Tags.recordings])
-
-
-@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
-def get_recordings_storage_usage(request: Request):
- recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
- "storage"
- ][RECORD_DIR]
-
- if not recording_stats:
- return JSONResponse({})
-
- total_mb = recording_stats["total"]
-
- camera_usages: dict[str, dict] = (
- request.app.storage_maintainer.calculate_camera_usages()
- )
-
- for camera_name in camera_usages.keys():
- if camera_usages.get(camera_name, {}).get("usage"):
- camera_usages[camera_name]["usage_percent"] = (
- camera_usages.get(camera_name, {}).get("usage", 0) / total_mb
- ) * 100
-
- return JSONResponse(content=camera_usages)
-
-
-@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())])
-def all_recordings_summary(
- request: Request,
- params: MediaRecordingsSummaryQueryParams = Depends(),
- allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
-):
- """Returns true/false by day indicating if recordings exist"""
-
- cameras = params.cameras
- if cameras != "all":
- requested = set(unquote(cameras).split(","))
- filtered = requested.intersection(allowed_cameras)
- if not filtered:
- return JSONResponse(content={})
- camera_list = list(filtered)
- else:
- camera_list = allowed_cameras
-
- time_range_query = (
- Recordings.select(
- fn.MIN(Recordings.start_time).alias("min_time"),
- fn.MAX(Recordings.start_time).alias("max_time"),
- )
- .where(Recordings.camera << camera_list)
- .dicts()
- .get()
- )
-
- min_time = time_range_query.get("min_time")
- max_time = time_range_query.get("max_time")
-
- if min_time is None or max_time is None:
- return JSONResponse(content={})
-
- dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
-
- days: dict[str, bool] = {}
-
- for period_start, period_end, period_offset in dst_periods:
- day_expr = ((Recordings.start_time + period_offset) / 86400).cast("int")
-
- period_query = (
- Recordings.select(day_expr.alias("day_idx"))
- .where(
- (Recordings.camera << camera_list)
- & (Recordings.end_time >= period_start)
- & (Recordings.start_time <= period_end)
- )
- .distinct()
- .namedtuples()
- )
-
- for g in period_query:
- day_str = (dt.date(1970, 1, 1) + dt.timedelta(days=g.day_idx)).isoformat()
- days[day_str] = True
-
- return JSONResponse(content=dict(sorted(days.items())))
-
-
-@router.get(
- "/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)]
-)
-async def recordings_summary(camera_name: str, timezone: str = "utc"):
- """Returns hourly summary for recordings of given camera"""
-
- time_range_query = (
- Recordings.select(
- fn.MIN(Recordings.start_time).alias("min_time"),
- fn.MAX(Recordings.start_time).alias("max_time"),
- )
- .where(Recordings.camera == camera_name)
- .dicts()
- .get()
- )
-
- min_time = time_range_query.get("min_time")
- max_time = time_range_query.get("max_time")
-
- days: dict[str, dict] = {}
-
- if min_time is None or max_time is None:
- return JSONResponse(content=list(days.values()))
-
- dst_periods = get_dst_transitions(timezone, min_time, max_time)
-
- for period_start, period_end, period_offset in dst_periods:
- hours_offset = int(period_offset / 60 / 60)
- minutes_offset = int(period_offset / 60 - hours_offset * 60)
- period_hour_modifier = f"{hours_offset} hour"
- period_minute_modifier = f"{minutes_offset} minute"
-
- recording_groups = (
- Recordings.select(
- fn.strftime(
- "%Y-%m-%d %H",
- fn.datetime(
- Recordings.start_time,
- "unixepoch",
- period_hour_modifier,
- period_minute_modifier,
- ),
- ).alias("hour"),
- fn.SUM(Recordings.duration).alias("duration"),
- fn.SUM(Recordings.motion).alias("motion"),
- fn.SUM(Recordings.objects).alias("objects"),
- )
- .where(
- (Recordings.camera == camera_name)
- & (Recordings.end_time >= period_start)
- & (Recordings.start_time <= period_end)
- )
- .group_by((Recordings.start_time + period_offset).cast("int") / 3600)
- .order_by(Recordings.start_time.desc())
- .namedtuples()
- )
-
- event_groups = (
- Event.select(
- fn.strftime(
- "%Y-%m-%d %H",
- fn.datetime(
- Event.start_time,
- "unixepoch",
- period_hour_modifier,
- period_minute_modifier,
- ),
- ).alias("hour"),
- fn.COUNT(Event.id).alias("count"),
- )
- .where(Event.camera == camera_name, Event.has_clip)
- .where(
- (Event.start_time >= period_start) & (Event.start_time <= period_end)
- )
- .group_by((Event.start_time + period_offset).cast("int") / 3600)
- .namedtuples()
- )
-
- event_map = {g.hour: g.count for g in event_groups}
-
- for recording_group in recording_groups:
- parts = recording_group.hour.split()
- hour = parts[1]
- day = parts[0]
- events_count = event_map.get(recording_group.hour, 0)
- hour_data = {
- "hour": hour,
- "events": events_count,
- "motion": recording_group.motion,
- "objects": recording_group.objects,
- "duration": round(recording_group.duration),
- }
- if day in days:
- # merge counts if already present (edge-case at DST boundary)
- days[day]["events"] += events_count or 0
- days[day]["hours"].append(hour_data)
- else:
- days[day] = {
- "events": events_count or 0,
- "hours": [hour_data],
- "day": day,
- }
-
- return JSONResponse(content=list(days.values()))
-
-
-@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)])
-async def recordings(
- camera_name: str,
- after: float = (datetime.now() - timedelta(hours=1)).timestamp(),
- before: float = datetime.now().timestamp(),
-):
- """Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used"""
- recordings = (
- Recordings.select(
- Recordings.id,
- Recordings.start_time,
- Recordings.end_time,
- Recordings.segment_size,
- Recordings.motion,
- Recordings.objects,
- Recordings.motion_heatmap,
- Recordings.duration,
- )
- .where(
- Recordings.camera == camera_name,
- Recordings.end_time >= after,
- Recordings.start_time <= before,
- )
- .order_by(Recordings.start_time)
- .dicts()
- .iterator()
- )
-
- return JSONResponse(content=list(recordings))
-
-
-@router.get(
- "/recordings/unavailable",
- response_model=list[dict],
- dependencies=[Depends(allow_any_authenticated())],
-)
-async def no_recordings(
- request: Request,
- params: MediaRecordingsAvailabilityQueryParams = Depends(),
- allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
-):
- """Get time ranges with no recordings."""
- cameras = params.cameras
- if cameras != "all":
- requested = set(unquote(cameras).split(","))
- filtered = requested.intersection(allowed_cameras)
- if not filtered:
- return JSONResponse(content=[])
- cameras = ",".join(filtered)
- else:
- cameras = allowed_cameras
-
- before = params.before or datetime.datetime.now().timestamp()
- after = (
- params.after
- or (datetime.datetime.now() - datetime.timedelta(hours=1)).timestamp()
- )
- scale = params.scale
-
- clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)]
- if cameras != "all":
- camera_list = cameras.split(",")
- clauses.append((Recordings.camera << camera_list))
- else:
- camera_list = allowed_cameras
-
- # Get recording start times
- data: list[Recordings] = (
- Recordings.select(Recordings.start_time, Recordings.end_time)
- .where(reduce(operator.and_, clauses))
- .order_by(Recordings.start_time.asc())
- .dicts()
- .iterator()
- )
-
- # Convert recordings to list of (start, end) tuples
- recordings = [(r["start_time"], r["end_time"]) for r in data]
-
- # Iterate through time segments and check if each has any recording
- no_recording_segments = []
- current = after
- current_gap_start = None
-
- while current < before:
- segment_end = min(current + scale, before)
-
- # Check if this segment overlaps with any recording
- has_recording = any(
- rec_start < segment_end and rec_end > current
- for rec_start, rec_end in recordings
- )
-
- if not has_recording:
- # This segment has no recordings
- if current_gap_start is None:
- current_gap_start = current # Start a new gap
- else:
- # This segment has recordings
- if current_gap_start is not None:
- # End the current gap and append it
- no_recording_segments.append(
- {"start_time": int(current_gap_start), "end_time": int(current)}
- )
- current_gap_start = None
-
- current = segment_end
-
- # Append the last gap if it exists
- if current_gap_start is not None:
- no_recording_segments.append(
- {"start_time": int(current_gap_start), "end_time": int(before)}
- )
-
- return JSONResponse(content=no_recording_segments)
-
-
-@router.delete(
- "/recordings/start/{start}/end/{end}",
- response_model=GenericResponse,
- dependencies=[Depends(require_role(["admin"]))],
- summary="Delete recordings",
- description="""Deletes recordings within the specified time range.
- Recordings can be filtered by cameras and kept based on motion, objects, or audio attributes.
- """,
-)
-async def delete_recordings(
- start: float = PathParam(..., description="Start timestamp (unix)"),
- end: float = PathParam(..., description="End timestamp (unix)"),
- params: RecordingsDeleteQueryParams = Depends(),
- allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
-):
- """Delete recordings in the specified time range."""
- if start >= end:
- return JSONResponse(
- content={
- "success": False,
- "message": "Start time must be less than end time.",
- },
- status_code=400,
- )
-
- cameras = params.cameras
-
- if cameras != "all":
- requested = set(cameras.split(","))
- filtered = requested.intersection(allowed_cameras)
-
- if not filtered:
- return JSONResponse(
- content={
- "success": False,
- "message": "No valid cameras found in the request.",
- },
- status_code=400,
- )
-
- camera_list = list(filtered)
- else:
- camera_list = allowed_cameras
-
- # Parse keep parameter
- keep_set = set()
-
- if params.keep:
- keep_set = set(params.keep.split(","))
-
- # Build query to find overlapping recordings
- clauses = [
- (
- Recordings.start_time.between(start, end)
- | Recordings.end_time.between(start, end)
- | ((start > Recordings.start_time) & (end < Recordings.end_time))
- ),
- (Recordings.camera << camera_list),
- ]
-
- keep_clauses = []
-
- if "motion" in keep_set:
- keep_clauses.append(Recordings.motion.is_null(False) & (Recordings.motion > 0))
-
- if "object" in keep_set:
- keep_clauses.append(
- Recordings.objects.is_null(False) & (Recordings.objects > 0)
- )
-
- if "audio" in keep_set:
- keep_clauses.append(Recordings.dBFS.is_null(False))
-
- if keep_clauses:
- keep_condition = reduce(operator.or_, keep_clauses)
- clauses.append(~keep_condition)
-
- recordings_to_delete = (
- Recordings.select(Recordings.id, Recordings.path)
- .where(reduce(operator.and_, clauses))
- .dicts()
- .iterator()
- )
-
- recording_ids = []
- deleted_count = 0
- error_count = 0
-
- for recording in recordings_to_delete:
- recording_ids.append(recording["id"])
-
- try:
- Path(recording["path"]).unlink(missing_ok=True)
- deleted_count += 1
- except Exception as e:
- logger.error(f"Failed to delete recording file {recording['path']}: {e}")
- error_count += 1
-
- if recording_ids:
- max_deletes = 100000
- recording_ids_list = list(recording_ids)
-
- for i in range(0, len(recording_ids_list), max_deletes):
- Recordings.delete().where(
- Recordings.id << recording_ids_list[i : i + max_deletes]
- ).execute()
-
- message = f"Successfully deleted {deleted_count} recording(s)."
-
- if error_count > 0:
- message += f" {error_count} file deletion error(s) occurred."
-
- return JSONResponse(
- content={"success": True, "message": message},
- status_code=200,
- )
+"""Recording APIs."""
+
+import datetime as dt
+import logging
+from datetime import datetime, timedelta
+from functools import reduce
+from pathlib import Path
+from typing import List, Optional
+from urllib.parse import unquote
+
+from fastapi import APIRouter, Depends, Request
+from fastapi import Path as PathParam
+from fastapi.responses import JSONResponse
+from peewee import fn, operator
+
+from frigate.api.auth import (
+ allow_any_authenticated,
+ get_allowed_cameras_for_filter,
+ require_camera_access,
+ require_role,
+)
+from frigate.api.defs.query.recordings_query_parameters import (
+ MediaRecordingsAvailabilityQueryParams,
+ MediaRecordingsSummaryQueryParams,
+ RecordingsDeleteQueryParams,
+)
+from frigate.api.defs.response.generic_response import GenericResponse
+from frigate.api.defs.tags import Tags
+from frigate.const import RECORD_DIR
+from frigate.models import Event, Recordings
+from frigate.util.time import get_dst_transitions
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter(tags=[Tags.recordings])
+
+
+@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
+def get_recordings_storage_usage(request: Request):
+ recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
+ "storage"
+ ][RECORD_DIR]
+
+ if not recording_stats:
+ return JSONResponse({})
+
+ total_mb = recording_stats["total"]
+
+ camera_usages: dict[str, dict] = (
+ request.app.storage_maintainer.calculate_camera_usages()
+ )
+
+    for camera_name, usage_info in camera_usages.items():
+        if usage_info.get("usage"):
+            camera_usages[camera_name]["usage_percent"] = (
+                usage_info["usage"] / total_mb
+            ) * 100
+
+ return JSONResponse(content=camera_usages)
+
+
+@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())])
+def all_recordings_summary(
+ request: Request,
+ params: MediaRecordingsSummaryQueryParams = Depends(),
+ allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
+):
+ """Returns true/false by day indicating if recordings exist"""
+
+ cameras = params.cameras
+ if cameras != "all":
+ requested = set(unquote(cameras).split(","))
+ filtered = requested.intersection(allowed_cameras)
+ if not filtered:
+ return JSONResponse(content={})
+ camera_list = list(filtered)
+ else:
+ camera_list = allowed_cameras
+
+ time_range_query = (
+ Recordings.select(
+ fn.MIN(Recordings.start_time).alias("min_time"),
+ fn.MAX(Recordings.start_time).alias("max_time"),
+ )
+ .where(Recordings.camera << camera_list)
+ .dicts()
+ .get()
+ )
+
+ min_time = time_range_query.get("min_time")
+ max_time = time_range_query.get("max_time")
+
+ if min_time is None or max_time is None:
+ return JSONResponse(content={})
+
+ dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
+
+ days: dict[str, bool] = {}
+
+ for period_start, period_end, period_offset in dst_periods:
+ day_expr = ((Recordings.start_time + period_offset) / 86400).cast("int")
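+        # shift epoch seconds by the period's UTC offset, then integer-divide
+        # by 86400 to bucket recordings into local calendar days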
+
+ period_query = (
+ Recordings.select(day_expr.alias("day_idx"))
+ .where(
+ (Recordings.camera << camera_list)
+ & (Recordings.end_time >= period_start)
+ & (Recordings.start_time <= period_end)
+ )
+ .distinct()
+ .namedtuples()
+ )
+
+ for g in period_query:
+ day_str = (dt.date(1970, 1, 1) + dt.timedelta(days=g.day_idx)).isoformat()
+ days[day_str] = True
+
+ return JSONResponse(content=dict(sorted(days.items())))
+
+
+@router.get(
+ "/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)]
+)
+async def recordings_summary(camera_name: str, timezone: str = "utc"):
+ """Returns hourly summary for recordings of given camera"""
+
+ time_range_query = (
+ Recordings.select(
+ fn.MIN(Recordings.start_time).alias("min_time"),
+ fn.MAX(Recordings.start_time).alias("max_time"),
+ )
+ .where(Recordings.camera == camera_name)
+ .dicts()
+ .get()
+ )
+
+ min_time = time_range_query.get("min_time")
+ max_time = time_range_query.get("max_time")
+
+ days: dict[str, dict] = {}
+
+ if min_time is None or max_time is None:
+ return JSONResponse(content=list(days.values()))
+
+ dst_periods = get_dst_transitions(timezone, min_time, max_time)
+
+ for period_start, period_end, period_offset in dst_periods:
+ hours_offset = int(period_offset / 60 / 60)
+ minutes_offset = int(period_offset / 60 - hours_offset * 60)
+ period_hour_modifier = f"{hours_offset} hour"
+ period_minute_modifier = f"{minutes_offset} minute"
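+        # the offset is expressed as SQLite datetime() modifiers (e.g. "-5 hour",
+        # "30 minute") so grouping happens in local time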
+
+ recording_groups = (
+ Recordings.select(
+ fn.strftime(
+ "%Y-%m-%d %H",
+ fn.datetime(
+ Recordings.start_time,
+ "unixepoch",
+ period_hour_modifier,
+ period_minute_modifier,
+ ),
+ ).alias("hour"),
+ fn.SUM(Recordings.duration).alias("duration"),
+ fn.SUM(Recordings.motion).alias("motion"),
+ fn.SUM(Recordings.objects).alias("objects"),
+ )
+ .where(
+ (Recordings.camera == camera_name)
+ & (Recordings.end_time >= period_start)
+ & (Recordings.start_time <= period_end)
+ )
+ .group_by((Recordings.start_time + period_offset).cast("int") / 3600)
+ .order_by(Recordings.start_time.desc())
+ .namedtuples()
+ )
+
+ event_groups = (
+ Event.select(
+ fn.strftime(
+ "%Y-%m-%d %H",
+ fn.datetime(
+ Event.start_time,
+ "unixepoch",
+ period_hour_modifier,
+ period_minute_modifier,
+ ),
+ ).alias("hour"),
+ fn.COUNT(Event.id).alias("count"),
+ )
+ .where(Event.camera == camera_name, Event.has_clip)
+ .where(
+ (Event.start_time >= period_start) & (Event.start_time <= period_end)
+ )
+ .group_by((Event.start_time + period_offset).cast("int") / 3600)
+ .namedtuples()
+ )
+
+ event_map = {g.hour: g.count for g in event_groups}
+
+ for recording_group in recording_groups:
+ parts = recording_group.hour.split()
+ hour = parts[1]
+ day = parts[0]
+ events_count = event_map.get(recording_group.hour, 0)
+ hour_data = {
+ "hour": hour,
+ "events": events_count,
+ "motion": recording_group.motion,
+ "objects": recording_group.objects,
+ "duration": round(recording_group.duration),
+ }
+ if day in days:
+ # merge counts if already present (edge-case at DST boundary)
+ days[day]["events"] += events_count or 0
+ days[day]["hours"].append(hour_data)
+ else:
+ days[day] = {
+ "events": events_count or 0,
+ "hours": [hour_data],
+ "day": day,
+ }
+
+ return JSONResponse(content=list(days.values()))
+
+
+@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)])
+async def recordings(
+    camera_name: str,
+    after: Optional[float] = None,
+    before: Optional[float] = None,
+    variant: str = "main",
+):
+    """Return specific camera recordings between the given 'after'/'before' times. If not provided the last hour is used."""
+    # compute the fallback window per-request; default argument expressions
+    # are evaluated once at import time, which would freeze the window
+    if before is None:
+        before = datetime.now().timestamp()
+    if after is None:
+        after = before - timedelta(hours=1).total_seconds()
+ query = (
+ Recordings.select(
+ Recordings.id,
+ Recordings.camera,
+ Recordings.start_time,
+ Recordings.end_time,
+ Recordings.path,
+ Recordings.variant,
+ Recordings.segment_size,
+ Recordings.motion,
+ Recordings.objects,
+ Recordings.motion_heatmap,
+ Recordings.duration,
+ Recordings.codec_name,
+ Recordings.width,
+ Recordings.height,
+ Recordings.bitrate,
+ )
+ .where(
+ Recordings.camera == camera_name,
+ Recordings.end_time >= after,
+ Recordings.start_time <= before,
+ )
+ )
+
+ if variant != "all":
+ query = query.where(Recordings.variant == variant)
+
+ recordings = query.order_by(Recordings.start_time).dicts().iterator()
+
+ return JSONResponse(content=list(recordings))
+
+
+@router.get(
+ "/recordings/unavailable",
+ response_model=list[dict],
+ dependencies=[Depends(allow_any_authenticated())],
+)
+async def no_recordings(
+ request: Request,
+ params: MediaRecordingsAvailabilityQueryParams = Depends(),
+ allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
+):
+ """Get time ranges with no recordings."""
+    cameras = params.cameras
+    if cameras != "all":
+        requested = set(unquote(cameras).split(","))
+        filtered = requested.intersection(allowed_cameras)
+        if not filtered:
+            return JSONResponse(content=[])
+        camera_list = list(filtered)
+    else:
+        camera_list = allowed_cameras
+
+    # "datetime" here is the class imported from the datetime module
+    before = params.before or datetime.now().timestamp()
+    after = params.after or (datetime.now() - timedelta(hours=1)).timestamp()
+    scale = params.scale
+
+    clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)]
+    clauses.append(Recordings.camera << camera_list)
+
+ # Get recording start times
+ data: list[Recordings] = (
+ Recordings.select(Recordings.start_time, Recordings.end_time)
+ .where(reduce(operator.and_, clauses))
+ .order_by(Recordings.start_time.asc())
+ .dicts()
+ .iterator()
+ )
+
+ # Convert recordings to list of (start, end) tuples
+ recordings = [(r["start_time"], r["end_time"]) for r in data]
+
+ # Iterate through time segments and check if each has any recording
+ no_recording_segments = []
+ current = after
+ current_gap_start = None
+
+ while current < before:
+ segment_end = min(current + scale, before)
+
+ # Check if this segment overlaps with any recording
+ has_recording = any(
+ rec_start < segment_end and rec_end > current
+ for rec_start, rec_end in recordings
+ )
+
+ if not has_recording:
+ # This segment has no recordings
+ if current_gap_start is None:
+ current_gap_start = current # Start a new gap
+ else:
+ # This segment has recordings
+ if current_gap_start is not None:
+ # End the current gap and append it
+ no_recording_segments.append(
+ {"start_time": int(current_gap_start), "end_time": int(current)}
+ )
+ current_gap_start = None
+
+ current = segment_end
+
+ # Append the last gap if it exists
+ if current_gap_start is not None:
+ no_recording_segments.append(
+ {"start_time": int(current_gap_start), "end_time": int(before)}
+ )
+
+ return JSONResponse(content=no_recording_segments)
+
+
+@router.delete(
+ "/recordings/start/{start}/end/{end}",
+ response_model=GenericResponse,
+ dependencies=[Depends(require_role(["admin"]))],
+ summary="Delete recordings",
+ description="""Deletes recordings within the specified time range.
+ Recordings can be filtered by cameras and kept based on motion, objects, or audio attributes.
+ """,
+)
+async def delete_recordings(
+ start: float = PathParam(..., description="Start timestamp (unix)"),
+ end: float = PathParam(..., description="End timestamp (unix)"),
+ params: RecordingsDeleteQueryParams = Depends(),
+ allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
+):
+ """Delete recordings in the specified time range."""
+ if start >= end:
+ return JSONResponse(
+ content={
+ "success": False,
+ "message": "Start time must be less than end time.",
+ },
+ status_code=400,
+ )
+
+ cameras = params.cameras
+
+ if cameras != "all":
+ requested = set(cameras.split(","))
+ filtered = requested.intersection(allowed_cameras)
+
+ if not filtered:
+ return JSONResponse(
+ content={
+ "success": False,
+ "message": "No valid cameras found in the request.",
+ },
+ status_code=400,
+ )
+
+ camera_list = list(filtered)
+ else:
+ camera_list = allowed_cameras
+
+ # Parse keep parameter
+ keep_set = set()
+
+ if params.keep:
+ keep_set = set(params.keep.split(","))
+
+ # Build query to find overlapping recordings
+ clauses = [
+ (
+ Recordings.start_time.between(start, end)
+ | Recordings.end_time.between(start, end)
+ | ((start > Recordings.start_time) & (end < Recordings.end_time))
+ ),
+ (Recordings.camera << camera_list),
+ ]
+
+ keep_clauses = []
+
+ if "motion" in keep_set:
+ keep_clauses.append(Recordings.motion.is_null(False) & (Recordings.motion > 0))
+
+ if "object" in keep_set:
+ keep_clauses.append(
+ Recordings.objects.is_null(False) & (Recordings.objects > 0)
+ )
+
+ if "audio" in keep_set:
+ keep_clauses.append(Recordings.dBFS.is_null(False))
+
+ if keep_clauses:
+ keep_condition = reduce(operator.or_, keep_clauses)
+ clauses.append(~keep_condition)
+
+ recordings_to_delete = (
+ Recordings.select(Recordings.id, Recordings.path)
+ .where(reduce(operator.and_, clauses))
+ .dicts()
+ .iterator()
+ )
+
+ recording_ids = []
+ deleted_count = 0
+ error_count = 0
+
+ for recording in recordings_to_delete:
+ recording_ids.append(recording["id"])
+
+ try:
+ Path(recording["path"]).unlink(missing_ok=True)
+ deleted_count += 1
+ except Exception as e:
+ logger.error(f"Failed to delete recording file {recording['path']}: {e}")
+ error_count += 1
+
+ if recording_ids:
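+        # delete database rows in chunks so a very large IN clause stays
+        # within SQLite's bound-parameter limits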
+        max_deletes = 100000
+
+        for i in range(0, len(recording_ids), max_deletes):
+            Recordings.delete().where(
+                Recordings.id << recording_ids[i : i + max_deletes]
+            ).execute()
+
+ message = f"Successfully deleted {deleted_count} recording(s)."
+
+ if error_count > 0:
+ message += f" {error_count} file deletion error(s) occurred."
+
+ return JSONResponse(
+ content={"success": True, "message": message},
+ status_code=200,
+ )
diff --git a/frigate/config/camera/camera.py b/frigate/config/camera/camera.py
index 9960abdce..05b307213 100644
--- a/frigate/config/camera/camera.py
+++ b/frigate/config/camera/camera.py
@@ -1,337 +1,346 @@
-import os
-from enum import Enum
-from typing import Optional
-
-from pydantic import Field, PrivateAttr, model_validator
-
-from frigate.const import CACHE_DIR, CACHE_SEGMENT_FORMAT, REGEX_CAMERA_NAME
-from frigate.ffmpeg_presets import (
- parse_preset_hardware_acceleration_decode,
- parse_preset_hardware_acceleration_scale,
- parse_preset_input,
- parse_preset_output_record,
-)
-from frigate.util.builtin import (
- escape_special_characters,
- generate_color_palette,
- get_ffmpeg_arg_list,
-)
-
-from ..base import FrigateBaseModel
-from ..classification import (
- CameraAudioTranscriptionConfig,
- CameraFaceRecognitionConfig,
- CameraLicensePlateRecognitionConfig,
- CameraSemanticSearchConfig,
-)
-from .audio import AudioConfig
-from .birdseye import BirdseyeCameraConfig
-from .detect import DetectConfig
-from .ffmpeg import CameraFfmpegConfig, CameraInput
-from .live import CameraLiveConfig
-from .motion import MotionConfig
-from .mqtt import CameraMqttConfig
-from .notification import NotificationConfig
-from .objects import ObjectConfig
-from .onvif import OnvifConfig
-from .record import RecordConfig
-from .review import ReviewConfig
-from .snapshots import SnapshotsConfig
-from .timestamp import TimestampStyleConfig
-from .ui import CameraUiConfig
-from .zone import ZoneConfig
-
-__all__ = ["CameraConfig"]
-
-
-class CameraTypeEnum(str, Enum):
- generic = "generic"
- lpr = "lpr"
-
-
-class CameraConfig(FrigateBaseModel):
- name: Optional[str] = Field(
- None,
- title="Camera name",
- description="Camera name is required",
- pattern=REGEX_CAMERA_NAME,
- )
-
- friendly_name: Optional[str] = Field(
- None,
- title="Friendly name",
- description="Camera friendly name used in the Frigate UI",
- )
-
- @model_validator(mode="before")
- @classmethod
- def handle_friendly_name(cls, values):
- if isinstance(values, dict) and "friendly_name" in values:
- pass
- return values
-
- enabled: bool = Field(default=True, title="Enabled", description="Enabled")
-
- # Options with global fallback
- audio: AudioConfig = Field(
- default_factory=AudioConfig,
- title="Audio events",
- description="Settings for audio-based event detection for this camera.",
- )
- audio_transcription: CameraAudioTranscriptionConfig = Field(
- default_factory=CameraAudioTranscriptionConfig,
- title="Audio transcription",
- description="Settings for live and speech audio transcription used for events and live captions.",
- )
- birdseye: BirdseyeCameraConfig = Field(
- default_factory=BirdseyeCameraConfig,
- title="Birdseye",
- description="Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.",
- )
- detect: DetectConfig = Field(
- default_factory=DetectConfig,
- title="Object Detection",
- description="Settings for the detection/detect role used to run object detection and initialize trackers.",
- )
- face_recognition: CameraFaceRecognitionConfig = Field(
- default_factory=CameraFaceRecognitionConfig,
- title="Face recognition",
- description="Settings for face detection and recognition for this camera.",
- )
- ffmpeg: CameraFfmpegConfig = Field(
- title="FFmpeg",
- description="FFmpeg settings including binary path, args, hwaccel options, and per-role output args.",
- )
- live: CameraLiveConfig = Field(
- default_factory=CameraLiveConfig,
- title="Live playback",
- description="Settings used by the Web UI to control live stream selection, resolution and quality.",
- )
- lpr: CameraLicensePlateRecognitionConfig = Field(
- default_factory=CameraLicensePlateRecognitionConfig,
- title="License Plate Recognition",
- description="License plate recognition settings including detection thresholds, formatting, and known plates.",
- )
- motion: MotionConfig = Field(
- None,
- title="Motion detection",
- description="Default motion detection settings for this camera.",
- )
- objects: ObjectConfig = Field(
- default_factory=ObjectConfig,
- title="Objects",
- description="Object tracking defaults including which labels to track and per-object filters.",
- )
- record: RecordConfig = Field(
- default_factory=RecordConfig,
- title="Recording",
- description="Recording and retention settings for this camera.",
- )
- review: ReviewConfig = Field(
- default_factory=ReviewConfig,
- title="Review",
- description="Settings that control alerts, detections, and GenAI review summaries used by the UI and storage for this camera.",
- )
- semantic_search: CameraSemanticSearchConfig = Field(
- default_factory=CameraSemanticSearchConfig,
- title="Semantic Search",
- description="Settings for semantic search which builds and queries object embeddings to find similar items.",
- )
- snapshots: SnapshotsConfig = Field(
- default_factory=SnapshotsConfig,
- title="Snapshots",
- description="Settings for saved JPEG snapshots of tracked objects for this camera.",
- )
- timestamp_style: TimestampStyleConfig = Field(
- default_factory=TimestampStyleConfig,
- title="Timestamp style",
- description="Styling options for in-feed timestamps applied to recordings and snapshots.",
- )
-
- # Options without global fallback
- best_image_timeout: int = Field(
- default=60,
- title="Best image timeout",
- description="How long to wait for the image with the highest confidence score.",
- )
- mqtt: CameraMqttConfig = Field(
- default_factory=CameraMqttConfig,
- title="MQTT",
- description="MQTT image publishing settings.",
- )
- notifications: NotificationConfig = Field(
- default_factory=NotificationConfig,
- title="Notifications",
- description="Settings to enable and control notifications for this camera.",
- )
- onvif: OnvifConfig = Field(
- default_factory=OnvifConfig,
- title="ONVIF",
- description="ONVIF connection and PTZ autotracking settings for this camera.",
- )
- type: CameraTypeEnum = Field(
- default=CameraTypeEnum.generic,
- title="Camera type",
- description="Camera Type",
- )
- ui: CameraUiConfig = Field(
- default_factory=CameraUiConfig,
- title="Camera UI",
- description="Display ordering and visibility for this camera in the UI. Ordering affects the default dashboard. For more granular control, use camera groups.",
- )
- webui_url: Optional[str] = Field(
- None,
- title="Camera URL",
- description="URL to visit the camera directly from system page",
- )
- zones: dict[str, ZoneConfig] = Field(
- default_factory=dict,
- title="Zones",
- description="Zones allow you to define a specific area of the frame so you can determine whether or not an object is within a particular area.",
- )
- enabled_in_config: Optional[bool] = Field(
- default=None,
- title="Original camera state",
- description="Keep track of original state of camera.",
- )
-
- _ffmpeg_cmds: list[dict[str, list[str]]] = PrivateAttr()
-
- def __init__(self, **config):
- # Set zone colors
- if "zones" in config:
- colors = generate_color_palette(len(config["zones"]))
-
- config["zones"] = {
- name: {**z, "color": color}
- for (name, z), color in zip(config["zones"].items(), colors)
- }
-
- # add roles to the input if there is only one
- if len(config["ffmpeg"]["inputs"]) == 1:
- has_audio = "audio" in config["ffmpeg"]["inputs"][0].get("roles", [])
-
- config["ffmpeg"]["inputs"][0]["roles"] = [
- "record",
- "detect",
- ]
-
- if has_audio:
- config["ffmpeg"]["inputs"][0]["roles"].append("audio")
-
- super().__init__(**config)
-
- @property
- def frame_shape(self) -> tuple[int, int]:
- return self.detect.height, self.detect.width
-
- @property
- def frame_shape_yuv(self) -> tuple[int, int]:
- return self.detect.height * 3 // 2, self.detect.width
-
- @property
- def ffmpeg_cmds(self) -> list[dict[str, list[str]]]:
- return self._ffmpeg_cmds
-
- def get_formatted_name(self) -> str:
- """Return the friendly name if set, otherwise return a formatted version of the camera name."""
- if self.friendly_name:
- return self.friendly_name
- return self.name.replace("_", " ").title() if self.name else ""
-
- def create_ffmpeg_cmds(self):
- if "_ffmpeg_cmds" in self:
- return
- self._build_ffmpeg_cmds()
-
- def recreate_ffmpeg_cmds(self):
- """Force regeneration of ffmpeg commands from current config."""
- self._build_ffmpeg_cmds()
-
- def _build_ffmpeg_cmds(self):
- """Build ffmpeg commands from the current ffmpeg config."""
- ffmpeg_cmds = []
- for ffmpeg_input in self.ffmpeg.inputs:
- ffmpeg_cmd = self._get_ffmpeg_cmd(ffmpeg_input)
- if ffmpeg_cmd is None:
- continue
-
- ffmpeg_cmds.append({"roles": ffmpeg_input.roles, "cmd": ffmpeg_cmd})
- self._ffmpeg_cmds = ffmpeg_cmds
-
- def _get_ffmpeg_cmd(self, ffmpeg_input: CameraInput):
- ffmpeg_output_args = []
- if "detect" in ffmpeg_input.roles:
- detect_args = get_ffmpeg_arg_list(self.ffmpeg.output_args.detect)
- scale_detect_args = parse_preset_hardware_acceleration_scale(
- ffmpeg_input.hwaccel_args or self.ffmpeg.hwaccel_args,
- detect_args,
- self.detect.fps,
- self.detect.width,
- self.detect.height,
- )
-
- ffmpeg_output_args = scale_detect_args + ffmpeg_output_args + ["pipe:"]
-
- if "record" in ffmpeg_input.roles and self.record.enabled:
- record_args = get_ffmpeg_arg_list(
- parse_preset_output_record(
- self.ffmpeg.output_args.record,
- self.ffmpeg.apple_compatibility,
- )
- or self.ffmpeg.output_args.record
- )
-
- ffmpeg_output_args = (
- record_args
- + [f"{os.path.join(CACHE_DIR, self.name)}@{CACHE_SEGMENT_FORMAT}.mp4"]
- + ffmpeg_output_args
- )
-
- # if there aren't any outputs enabled for this input
- if len(ffmpeg_output_args) == 0:
- return None
-
- global_args = get_ffmpeg_arg_list(
- ffmpeg_input.global_args or self.ffmpeg.global_args
- )
-
- camera_arg = (
- self.ffmpeg.hwaccel_args if self.ffmpeg.hwaccel_args != "auto" else None
- )
- hwaccel_args = get_ffmpeg_arg_list(
- parse_preset_hardware_acceleration_decode(
- ffmpeg_input.hwaccel_args,
- self.detect.fps,
- self.detect.width,
- self.detect.height,
- self.ffmpeg.gpu,
- )
- or ffmpeg_input.hwaccel_args
- or parse_preset_hardware_acceleration_decode(
- camera_arg,
- self.detect.fps,
- self.detect.width,
- self.detect.height,
- self.ffmpeg.gpu,
- )
- or camera_arg
- or []
- )
- input_args = get_ffmpeg_arg_list(
- parse_preset_input(ffmpeg_input.input_args, self.detect.fps)
- or ffmpeg_input.input_args
- or parse_preset_input(self.ffmpeg.input_args, self.detect.fps)
- or self.ffmpeg.input_args
- )
-
- cmd = (
- [self.ffmpeg.ffmpeg_path]
- + global_args
- + (hwaccel_args if "detect" in ffmpeg_input.roles else [])
- + input_args
- + ["-i", escape_special_characters(ffmpeg_input.path)]
- + ffmpeg_output_args
- )
-
- return [part for part in cmd if part != ""]
+import os
+from enum import Enum
+from typing import Optional
+
+from pydantic import Field, PrivateAttr, model_validator
+
+from frigate.const import CACHE_DIR, CACHE_SEGMENT_FORMAT, REGEX_CAMERA_NAME
+from frigate.ffmpeg_presets import (
+ parse_preset_hardware_acceleration_decode,
+ parse_preset_hardware_acceleration_scale,
+ parse_preset_input,
+ parse_preset_output_record,
+)
+from frigate.util.builtin import (
+ escape_special_characters,
+ generate_color_palette,
+ get_ffmpeg_arg_list,
+)
+
+from ..base import FrigateBaseModel
+from ..classification import (
+ CameraAudioTranscriptionConfig,
+ CameraFaceRecognitionConfig,
+ CameraLicensePlateRecognitionConfig,
+ CameraSemanticSearchConfig,
+)
+from .audio import AudioConfig
+from .birdseye import BirdseyeCameraConfig
+from .detect import DetectConfig
+from .ffmpeg import CameraFfmpegConfig, CameraInput
+from .live import CameraLiveConfig
+from .motion import MotionConfig
+from .mqtt import CameraMqttConfig
+from .notification import NotificationConfig
+from .objects import ObjectConfig
+from .onvif import OnvifConfig
+from .record import RecordConfig
+from .review import ReviewConfig
+from .snapshots import SnapshotsConfig
+from .timestamp import TimestampStyleConfig
+from .ui import CameraUiConfig
+from .zone import ZoneConfig
+
+__all__ = ["CameraConfig"]
+
+
+class CameraTypeEnum(str, Enum):
+ generic = "generic"
+ lpr = "lpr"
+
+
+class CameraConfig(FrigateBaseModel):
+ name: Optional[str] = Field(
+ None,
+ title="Camera name",
+ description="Camera name is required",
+ pattern=REGEX_CAMERA_NAME,
+ )
+
+ friendly_name: Optional[str] = Field(
+ None,
+ title="Friendly name",
+ description="Camera friendly name used in the Frigate UI",
+ )
+
+ @model_validator(mode="before")
+ @classmethod
+ def handle_friendly_name(cls, values):
+ if isinstance(values, dict) and "friendly_name" in values:
+ pass
+ return values
+
+ enabled: bool = Field(default=True, title="Enabled", description="Enabled")
+
+ # Options with global fallback
+ audio: AudioConfig = Field(
+ default_factory=AudioConfig,
+ title="Audio events",
+ description="Settings for audio-based event detection for this camera.",
+ )
+ audio_transcription: CameraAudioTranscriptionConfig = Field(
+ default_factory=CameraAudioTranscriptionConfig,
+ title="Audio transcription",
+ description="Settings for live and speech audio transcription used for events and live captions.",
+ )
+ birdseye: BirdseyeCameraConfig = Field(
+ default_factory=BirdseyeCameraConfig,
+ title="Birdseye",
+ description="Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.",
+ )
+ detect: DetectConfig = Field(
+ default_factory=DetectConfig,
+ title="Object Detection",
+ description="Settings for the detection/detect role used to run object detection and initialize trackers.",
+ )
+ face_recognition: CameraFaceRecognitionConfig = Field(
+ default_factory=CameraFaceRecognitionConfig,
+ title="Face recognition",
+ description="Settings for face detection and recognition for this camera.",
+ )
+ ffmpeg: CameraFfmpegConfig = Field(
+ title="FFmpeg",
+ description="FFmpeg settings including binary path, args, hwaccel options, and per-role output args.",
+ )
+ live: CameraLiveConfig = Field(
+ default_factory=CameraLiveConfig,
+ title="Live playback",
+ description="Settings used by the Web UI to control live stream selection, resolution and quality.",
+ )
+ lpr: CameraLicensePlateRecognitionConfig = Field(
+ default_factory=CameraLicensePlateRecognitionConfig,
+ title="License Plate Recognition",
+ description="License plate recognition settings including detection thresholds, formatting, and known plates.",
+ )
+    motion: Optional[MotionConfig] = Field(
+        None,
+        title="Motion detection",
+        description="Default motion detection settings for this camera.",
+    )
+ objects: ObjectConfig = Field(
+ default_factory=ObjectConfig,
+ title="Objects",
+ description="Object tracking defaults including which labels to track and per-object filters.",
+ )
+ record: RecordConfig = Field(
+ default_factory=RecordConfig,
+ title="Recording",
+ description="Recording and retention settings for this camera.",
+ )
+ review: ReviewConfig = Field(
+ default_factory=ReviewConfig,
+ title="Review",
+ description="Settings that control alerts, detections, and GenAI review summaries used by the UI and storage for this camera.",
+ )
+ semantic_search: CameraSemanticSearchConfig = Field(
+ default_factory=CameraSemanticSearchConfig,
+ title="Semantic Search",
+ description="Settings for semantic search which builds and queries object embeddings to find similar items.",
+ )
+ snapshots: SnapshotsConfig = Field(
+ default_factory=SnapshotsConfig,
+ title="Snapshots",
+ description="Settings for saved JPEG snapshots of tracked objects for this camera.",
+ )
+ timestamp_style: TimestampStyleConfig = Field(
+ default_factory=TimestampStyleConfig,
+ title="Timestamp style",
+ description="Styling options for in-feed timestamps applied to recordings and snapshots.",
+ )
+
+ # Options without global fallback
+ best_image_timeout: int = Field(
+ default=60,
+ title="Best image timeout",
+ description="How long to wait for the image with the highest confidence score.",
+ )
+ mqtt: CameraMqttConfig = Field(
+ default_factory=CameraMqttConfig,
+ title="MQTT",
+ description="MQTT image publishing settings.",
+ )
+ notifications: NotificationConfig = Field(
+ default_factory=NotificationConfig,
+ title="Notifications",
+ description="Settings to enable and control notifications for this camera.",
+ )
+ onvif: OnvifConfig = Field(
+ default_factory=OnvifConfig,
+ title="ONVIF",
+ description="ONVIF connection and PTZ autotracking settings for this camera.",
+ )
+ type: CameraTypeEnum = Field(
+ default=CameraTypeEnum.generic,
+ title="Camera type",
+ description="Camera Type",
+ )
+ ui: CameraUiConfig = Field(
+ default_factory=CameraUiConfig,
+ title="Camera UI",
+ description="Display ordering and visibility for this camera in the UI. Ordering affects the default dashboard. For more granular control, use camera groups.",
+ )
+ webui_url: Optional[str] = Field(
+ None,
+ title="Camera URL",
+ description="URL to visit the camera directly from system page",
+ )
+ zones: dict[str, ZoneConfig] = Field(
+ default_factory=dict,
+ title="Zones",
+ description="Zones allow you to define a specific area of the frame so you can determine whether or not an object is within a particular area.",
+ )
+ enabled_in_config: Optional[bool] = Field(
+ default=None,
+ title="Original camera state",
+ description="Keep track of original state of camera.",
+ )
+
+ _ffmpeg_cmds: list[dict[str, list[str]]] = PrivateAttr()
+
+ def __init__(self, **config):
+ # Set zone colors
+ if "zones" in config:
+ colors = generate_color_palette(len(config["zones"]))
+
+ config["zones"] = {
+ name: {**z, "color": color}
+ for (name, z), color in zip(config["zones"].items(), colors)
+ }
+
+ # add roles to the input if there is only one
+ if len(config["ffmpeg"]["inputs"]) == 1:
+ has_audio = "audio" in config["ffmpeg"]["inputs"][0].get("roles", [])
+
+ config["ffmpeg"]["inputs"][0]["roles"] = [
+ "record",
+ "detect",
+ ]
+
+ if has_audio:
+ config["ffmpeg"]["inputs"][0]["roles"].append("audio")
+
+ super().__init__(**config)
+
+ @property
+ def frame_shape(self) -> tuple[int, int]:
+ return self.detect.height, self.detect.width
+
+ @property
+ def frame_shape_yuv(self) -> tuple[int, int]:
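+        # 4:2:0 chroma planes are stored below the luma plane at quarter
+        # resolution, so the packed frame height is 1.5x the luma height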
+ return self.detect.height * 3 // 2, self.detect.width
+
+ @property
+ def ffmpeg_cmds(self) -> list[dict[str, list[str]]]:
+ return self._ffmpeg_cmds
+
+ def get_formatted_name(self) -> str:
+ """Return the friendly name if set, otherwise return a formatted version of the camera name."""
+ if self.friendly_name:
+ return self.friendly_name
+ return self.name.replace("_", " ").title() if self.name else ""
+
+ def create_ffmpeg_cmds(self):
+ if "_ffmpeg_cmds" in self:
+ return
+ self._build_ffmpeg_cmds()
+
+ def recreate_ffmpeg_cmds(self):
+ """Force regeneration of ffmpeg commands from current config."""
+ self._build_ffmpeg_cmds()
+
+ def _build_ffmpeg_cmds(self):
+ """Build ffmpeg commands from the current ffmpeg config."""
+ ffmpeg_cmds = []
+ for ffmpeg_input in self.ffmpeg.inputs:
+ ffmpeg_cmd = self._get_ffmpeg_cmd(ffmpeg_input)
+ if ffmpeg_cmd is None:
+ continue
+
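+            # carry the record variant alongside each command so consumers can
+            # tell per-variant segments apart (mirrors the cache filename scheme)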
+ ffmpeg_cmds.append(
+ {
+ "roles": ffmpeg_input.roles,
+ "cmd": ffmpeg_cmd,
+ "record_variant": ffmpeg_input.record_variant,
+ }
+ )
+ self._ffmpeg_cmds = ffmpeg_cmds
+
+ def _get_ffmpeg_cmd(self, ffmpeg_input: CameraInput):
+ ffmpeg_output_args = []
+ if "detect" in ffmpeg_input.roles:
+ detect_args = get_ffmpeg_arg_list(self.ffmpeg.output_args.detect)
+ scale_detect_args = parse_preset_hardware_acceleration_scale(
+ ffmpeg_input.hwaccel_args or self.ffmpeg.hwaccel_args,
+ detect_args,
+ self.detect.fps,
+ self.detect.width,
+ self.detect.height,
+ )
+
+ ffmpeg_output_args = scale_detect_args + ffmpeg_output_args + ["pipe:"]
+
+ if "record" in ffmpeg_input.roles and self.record.enabled:
+ record_args = get_ffmpeg_arg_list(
+ parse_preset_output_record(
+ self.ffmpeg.output_args.record,
+ self.ffmpeg.apple_compatibility,
+ )
+ or self.ffmpeg.output_args.record
+ )
+ record_variant = ffmpeg_input.record_variant or "main"
+ cache_prefix = os.path.join(CACHE_DIR, self.name)
+ cache_path = f"{cache_prefix}@{record_variant}@{CACHE_SEGMENT_FORMAT}.mp4"
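+            # e.g. a camera named "front" with the default variant writes
+            # segments like <CACHE_DIR>/front@main@<CACHE_SEGMENT_FORMAT>.mp4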
+
+ ffmpeg_output_args = (
+ record_args
+ + [cache_path]
+ + ffmpeg_output_args
+ )
+
+ # if there aren't any outputs enabled for this input
+ if len(ffmpeg_output_args) == 0:
+ return None
+
+ global_args = get_ffmpeg_arg_list(
+ ffmpeg_input.global_args or self.ffmpeg.global_args
+ )
+
+ camera_arg = (
+ self.ffmpeg.hwaccel_args if self.ffmpeg.hwaccel_args != "auto" else None
+ )
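+        # decode precedence: input-level preset, raw input args, camera-level
+        # preset, raw camera args, then no hardware acceleration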
+ hwaccel_args = get_ffmpeg_arg_list(
+ parse_preset_hardware_acceleration_decode(
+ ffmpeg_input.hwaccel_args,
+ self.detect.fps,
+ self.detect.width,
+ self.detect.height,
+ self.ffmpeg.gpu,
+ )
+ or ffmpeg_input.hwaccel_args
+ or parse_preset_hardware_acceleration_decode(
+ camera_arg,
+ self.detect.fps,
+ self.detect.width,
+ self.detect.height,
+ self.ffmpeg.gpu,
+ )
+ or camera_arg
+ or []
+ )
+ input_args = get_ffmpeg_arg_list(
+ parse_preset_input(ffmpeg_input.input_args, self.detect.fps)
+ or ffmpeg_input.input_args
+ or parse_preset_input(self.ffmpeg.input_args, self.detect.fps)
+ or self.ffmpeg.input_args
+ )
+
+ cmd = (
+ [self.ffmpeg.ffmpeg_path]
+ + global_args
+ + (hwaccel_args if "detect" in ffmpeg_input.roles else [])
+ + input_args
+ + ["-i", escape_special_characters(ffmpeg_input.path)]
+ + ffmpeg_output_args
+ )
+
+ return [part for part in cmd if part != ""]
diff --git a/frigate/config/camera/ffmpeg.py b/frigate/config/camera/ffmpeg.py
index 05769dc66..80787d951 100644
--- a/frigate/config/camera/ffmpeg.py
+++ b/frigate/config/camera/ffmpeg.py
@@ -1,159 +1,192 @@
-from enum import Enum
-from typing import Union
-
-from pydantic import Field, field_validator
-
-from frigate.const import DEFAULT_FFMPEG_VERSION, INCLUDED_FFMPEG_VERSIONS
-
-from ..base import FrigateBaseModel
-from ..env import EnvString
-
-__all__ = [
- "CameraFfmpegConfig",
- "CameraInput",
- "CameraRoleEnum",
- "FfmpegConfig",
- "FfmpegOutputArgsConfig",
-]
-
-# Note: Setting threads to less than 2 caused several issues with recording segments
-# https://github.com/blakeblackshear/frigate/issues/5659
-FFMPEG_GLOBAL_ARGS_DEFAULT = ["-hide_banner", "-loglevel", "warning", "-threads", "2"]
-FFMPEG_INPUT_ARGS_DEFAULT = "preset-rtsp-generic"
-
-RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = "preset-record-generic-audio-aac"
-DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = [
- "-threads",
- "2",
- "-f",
- "rawvideo",
- "-pix_fmt",
- "yuv420p",
-]
-
-
-class FfmpegOutputArgsConfig(FrigateBaseModel):
- detect: Union[str, list[str]] = Field(
- default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT,
- title="Detect output arguments",
- description="Default output arguments for detect role streams.",
- )
- record: Union[str, list[str]] = Field(
- default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT,
- title="Record output arguments",
- description="Default output arguments for record role streams.",
- )
-
-
-class FfmpegConfig(FrigateBaseModel):
- path: str = Field(
- default="default",
- title="FFmpeg path",
- description='Path to the FFmpeg binary to use or a version alias ("5.0" or "7.0").',
- )
- global_args: Union[str, list[str]] = Field(
- default=FFMPEG_GLOBAL_ARGS_DEFAULT,
- title="FFmpeg global arguments",
- description="Global arguments passed to FFmpeg processes.",
- )
- hwaccel_args: Union[str, list[str]] = Field(
- default="auto",
- title="Hardware acceleration arguments",
- description="Hardware acceleration arguments for FFmpeg. Provider-specific presets are recommended.",
- )
- input_args: Union[str, list[str]] = Field(
- default=FFMPEG_INPUT_ARGS_DEFAULT,
- title="Input arguments",
- description="Input arguments applied to FFmpeg input streams.",
- )
- output_args: FfmpegOutputArgsConfig = Field(
- default_factory=FfmpegOutputArgsConfig,
- title="Output arguments",
- description="Default output arguments used for different FFmpeg roles such as detect and record.",
- )
- retry_interval: float = Field(
- default=10.0,
- title="FFmpeg retry time",
- description="Seconds to wait before attempting to reconnect a camera stream after failure. Default is 10.",
- gt=0.0,
- )
- apple_compatibility: bool = Field(
- default=False,
- title="Apple compatibility",
- description="Enable HEVC tagging for better Apple player compatibility when recording H.265.",
- )
- gpu: int = Field(
- default=0,
- title="GPU index",
- description="Default GPU index used for hardware acceleration if available.",
- )
-
- @property
- def ffmpeg_path(self) -> str:
- if self.path == "default":
- return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg"
- elif self.path in INCLUDED_FFMPEG_VERSIONS:
- return f"/usr/lib/ffmpeg/{self.path}/bin/ffmpeg"
- else:
- return f"{self.path}/bin/ffmpeg"
-
- @property
- def ffprobe_path(self) -> str:
- if self.path == "default":
- return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffprobe"
- elif self.path in INCLUDED_FFMPEG_VERSIONS:
- return f"/usr/lib/ffmpeg/{self.path}/bin/ffprobe"
- else:
- return f"{self.path}/bin/ffprobe"
-
-
-class CameraRoleEnum(str, Enum):
- audio = "audio"
- record = "record"
- detect = "detect"
-
-
-class CameraInput(FrigateBaseModel):
- path: EnvString = Field(
- title="Input path",
- description="Camera input stream URL or path.",
- )
- roles: list[CameraRoleEnum] = Field(
- title="Input roles",
- description="Roles for this input stream.",
- )
- global_args: Union[str, list[str]] = Field(
- default_factory=list,
- title="FFmpeg global arguments",
- description="FFmpeg global arguments for this input stream.",
- )
- hwaccel_args: Union[str, list[str]] = Field(
- default_factory=list,
- title="Hardware acceleration arguments",
- description="Hardware acceleration arguments for this input stream.",
- )
- input_args: Union[str, list[str]] = Field(
- default_factory=list,
- title="Input arguments",
- description="Input arguments specific to this stream.",
- )
-
-
-class CameraFfmpegConfig(FfmpegConfig):
- inputs: list[CameraInput] = Field(
- title="Camera inputs",
- description="List of input stream definitions (paths and roles) for this camera.",
- )
-
- @field_validator("inputs")
- @classmethod
- def validate_roles(cls, v):
- roles = [role for input in v for role in input.roles]
-
- if len(roles) != len(set(roles)):
- raise ValueError("Each input role may only be used once.")
-
- if "detect" not in roles:
- raise ValueError("The detect role is required.")
-
- return v
+from enum import Enum
+from typing import Optional, Union
+
+from pydantic import Field, field_validator, model_validator
+
+from frigate.const import (
+    DEFAULT_FFMPEG_VERSION,
+    INCLUDED_FFMPEG_VERSIONS,
+    REGEX_CAMERA_NAME,
+)
+
+from ..base import FrigateBaseModel
+from ..env import EnvString
+
+__all__ = [
+ "CameraFfmpegConfig",
+ "CameraInput",
+ "CameraRoleEnum",
+ "FfmpegConfig",
+ "FfmpegOutputArgsConfig",
+]
+
+# Note: Setting threads to less than 2 caused several issues with recording segments
+# https://github.com/blakeblackshear/frigate/issues/5659
+FFMPEG_GLOBAL_ARGS_DEFAULT = ["-hide_banner", "-loglevel", "warning", "-threads", "2"]
+FFMPEG_INPUT_ARGS_DEFAULT = "preset-rtsp-generic"
+
+RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = "preset-record-generic-audio-aac"
+DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = [
+ "-threads",
+ "2",
+ "-f",
+ "rawvideo",
+ "-pix_fmt",
+ "yuv420p",
+]
+
+
+class FfmpegOutputArgsConfig(FrigateBaseModel):
+ detect: Union[str, list[str]] = Field(
+ default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT,
+ title="Detect output arguments",
+ description="Default output arguments for detect role streams.",
+ )
+ record: Union[str, list[str]] = Field(
+ default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT,
+ title="Record output arguments",
+ description="Default output arguments for record role streams.",
+ )
+
+
+class FfmpegConfig(FrigateBaseModel):
+ path: str = Field(
+ default="default",
+ title="FFmpeg path",
+ description='Path to the FFmpeg binary to use or a version alias ("5.0" or "7.0").',
+ )
+ global_args: Union[str, list[str]] = Field(
+ default=FFMPEG_GLOBAL_ARGS_DEFAULT,
+ title="FFmpeg global arguments",
+ description="Global arguments passed to FFmpeg processes.",
+ )
+ hwaccel_args: Union[str, list[str]] = Field(
+ default="auto",
+ title="Hardware acceleration arguments",
+ description="Hardware acceleration arguments for FFmpeg. Provider-specific presets are recommended.",
+ )
+ input_args: Union[str, list[str]] = Field(
+ default=FFMPEG_INPUT_ARGS_DEFAULT,
+ title="Input arguments",
+ description="Input arguments applied to FFmpeg input streams.",
+ )
+ output_args: FfmpegOutputArgsConfig = Field(
+ default_factory=FfmpegOutputArgsConfig,
+ title="Output arguments",
+ description="Default output arguments used for different FFmpeg roles such as detect and record.",
+ )
+ retry_interval: float = Field(
+ default=10.0,
+ title="FFmpeg retry time",
+ description="Seconds to wait before attempting to reconnect a camera stream after failure. Default is 10.",
+ gt=0.0,
+ )
+ apple_compatibility: bool = Field(
+ default=False,
+ title="Apple compatibility",
+ description="Enable HEVC tagging for better Apple player compatibility when recording H.265.",
+ )
+ gpu: int = Field(
+ default=0,
+ title="GPU index",
+ description="Default GPU index used for hardware acceleration if available.",
+ )
+
+ @property
+ def ffmpeg_path(self) -> str:
+ if self.path == "default":
+ return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg"
+ elif self.path in INCLUDED_FFMPEG_VERSIONS:
+ return f"/usr/lib/ffmpeg/{self.path}/bin/ffmpeg"
+ else:
+ return f"{self.path}/bin/ffmpeg"
+
+ @property
+ def ffprobe_path(self) -> str:
+ if self.path == "default":
+ return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffprobe"
+ elif self.path in INCLUDED_FFMPEG_VERSIONS:
+ return f"/usr/lib/ffmpeg/{self.path}/bin/ffprobe"
+ else:
+ return f"{self.path}/bin/ffprobe"
+
+
+class CameraRoleEnum(str, Enum):
+ audio = "audio"
+ record = "record"
+ detect = "detect"
+
+
+class CameraInput(FrigateBaseModel):
+ path: EnvString = Field(
+ title="Input path",
+ description="Camera input stream URL or path.",
+ )
+ roles: list[CameraRoleEnum] = Field(
+ title="Input roles",
+ description="Roles for this input stream.",
+ )
+ global_args: Union[str, list[str]] = Field(
+ default_factory=list,
+ title="FFmpeg global arguments",
+ description="FFmpeg global arguments for this input stream.",
+ )
+ hwaccel_args: Union[str, list[str]] = Field(
+ default_factory=list,
+ title="Hardware acceleration arguments",
+ description="Hardware acceleration arguments for this input stream.",
+ )
+ input_args: Union[str, list[str]] = Field(
+ default_factory=list,
+ title="Input arguments",
+ description="Input arguments specific to this stream.",
+ )
+    record_variant: Optional[str] = Field(
+ default=None,
+ title="Recording variant",
+ description="Optional recording variant label for record role inputs such as main or sub.",
+ pattern=REGEX_CAMERA_NAME,
+ )
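+    # Hypothetical example of two record inputs with distinct variants
+    # (paths and identifiers are illustrative):
+    #
+    #   inputs:
+    #     - path: rtsp://cam/main
+    #       roles: [record]
+    #       record_variant: main
+    #     - path: rtsp://cam/sub
+    #       roles: [detect, record]
+    #       record_variant: sub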
+
+ @model_validator(mode="after")
+ def validate_record_variant(self):
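+        # record inputs default to the "main" variant; the label is cleared on
+        # inputs without the record role so it cannot be set inadvertently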
+ if CameraRoleEnum.record in self.roles:
+ if not self.record_variant:
+ self.record_variant = "main"
+ else:
+ self.record_variant = None
+
+ return self
+
+
+class CameraFfmpegConfig(FfmpegConfig):
+ inputs: list[CameraInput] = Field(
+ title="Camera inputs",
+ description="List of input stream definitions (paths and roles) for this camera.",
+ )
+
+ @field_validator("inputs")
+ @classmethod
+ def validate_roles(cls, v):
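+        # invariants: exactly one detect input, at most one audio input, and a
+        # unique variant per record input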
+ detect_inputs = 0
+ audio_inputs = 0
+ record_variants: set[str] = set()
+
+ for camera_input in v:
+ if CameraRoleEnum.detect in camera_input.roles:
+ detect_inputs += 1
+
+ if CameraRoleEnum.audio in camera_input.roles:
+ audio_inputs += 1
+
+ if CameraRoleEnum.record in camera_input.roles:
+ record_variant = camera_input.record_variant or "main"
+ if record_variant in record_variants:
+ raise ValueError(
+ f"Record variant '{record_variant}' may only be used once."
+ )
+ record_variants.add(record_variant)
+
+        if detect_inputs != 1:
+            raise ValueError(
+                "The detect role must be assigned to exactly one input."
+            )
+
+        if audio_inputs > 1:
+            raise ValueError("The audio role may only be assigned to one input.")
+
+ return v
diff --git a/frigate/config/config.py b/frigate/config/config.py
index 339d675dc..138555a43 100644
--- a/frigate/config/config.py
+++ b/frigate/config/config.py
@@ -1,1043 +1,1049 @@
-from __future__ import annotations
-
-import json
-import logging
-import os
-from typing import Any, Dict, Optional
-
-import numpy as np
-from pydantic import (
- BaseModel,
- ConfigDict,
- Field,
- TypeAdapter,
- ValidationInfo,
- field_serializer,
- field_validator,
- model_validator,
-)
-from ruamel.yaml import YAML
-from typing_extensions import Self
-
-from frigate.const import REGEX_JSON
-from frigate.detectors import DetectorConfig, ModelConfig
-from frigate.detectors.detector_config import BaseDetectorConfig
-from frigate.plus import PlusApi
-from frigate.util.builtin import (
- deep_merge,
- get_ffmpeg_arg_list,
-)
-from frigate.util.config import (
- CURRENT_CONFIG_VERSION,
- StreamInfoRetriever,
- convert_area_to_pixels,
- find_config_file,
- get_relative_coordinates,
- migrate_frigate_config,
-)
-from frigate.util.image import create_mask
-from frigate.util.services import auto_detect_hwaccel
-
-from .auth import AuthConfig
-from .base import FrigateBaseModel
-from .camera import CameraConfig, CameraLiveConfig
-from .camera.audio import AudioConfig
-from .camera.birdseye import BirdseyeConfig
-from .camera.detect import DetectConfig
-from .camera.ffmpeg import FfmpegConfig
-from .camera.genai import GenAIConfig, GenAIRoleEnum
-from .camera.mask import ObjectMaskConfig
-from .camera.motion import MotionConfig
-from .camera.notification import NotificationConfig
-from .camera.objects import FilterConfig, ObjectConfig
-from .camera.record import RecordConfig
-from .camera.review import ReviewConfig
-from .camera.snapshots import SnapshotsConfig
-from .camera.timestamp import TimestampStyleConfig
-from .camera_group import CameraGroupConfig
-from .classification import (
- AudioTranscriptionConfig,
- ClassificationConfig,
- FaceRecognitionConfig,
- LicensePlateRecognitionConfig,
- SemanticSearchConfig,
- SemanticSearchModelEnum,
-)
-from .database import DatabaseConfig
-from .env import EnvVars
-from .logger import LoggerConfig
-from .mqtt import MqttConfig
-from .network import NetworkingConfig
-from .proxy import ProxyConfig
-from .telemetry import TelemetryConfig
-from .tls import TlsConfig
-from .ui import UIConfig
-
-__all__ = ["FrigateConfig"]
-
-logger = logging.getLogger(__name__)
-
-yaml = YAML()
-
-DEFAULT_CONFIG = f"""
-mqtt:
- enabled: False
-
-cameras: {{}} # No cameras defined, UI wizard should be used
-version: {CURRENT_CONFIG_VERSION}
-"""
-
-DEFAULT_DETECTORS = {"cpu": {"type": "cpu"}}
-DEFAULT_DETECT_DIMENSIONS = {"width": 1280, "height": 720}
-
-# stream info handler
-stream_info_retriever = StreamInfoRetriever()
-
-
-class RuntimeMotionConfig(MotionConfig):
- """Runtime version of MotionConfig with rasterized masks."""
-
- # The rasterized numpy mask (combination of all enabled masks)
- rasterized_mask: np.ndarray = None
-
- def __init__(self, **config):
- frame_shape = config.get("frame_shape", (1, 1))
-
- # Store original mask dict for serialization
- original_mask = config.get("mask", {})
- if isinstance(original_mask, dict):
- # Process the new dict format - update raw_coordinates for each mask
- processed_mask = {}
- for mask_id, mask_config in original_mask.items():
- if isinstance(mask_config, dict):
- coords = mask_config.get("coordinates", "")
- relative_coords = get_relative_coordinates(coords, frame_shape)
- mask_config_copy = mask_config.copy()
- mask_config_copy["raw_coordinates"] = (
- relative_coords if relative_coords else coords
- )
- mask_config_copy["coordinates"] = (
- relative_coords if relative_coords else coords
- )
- processed_mask[mask_id] = mask_config_copy
- else:
- processed_mask[mask_id] = mask_config
- config["mask"] = processed_mask
- config["raw_mask"] = processed_mask
-
- super().__init__(**config)
-
- # Rasterize only enabled masks
- enabled_coords = []
- for mask_config in self.mask.values():
- if mask_config.enabled and mask_config.coordinates:
- coords = mask_config.coordinates
- if isinstance(coords, list):
- enabled_coords.extend(coords)
- else:
- enabled_coords.append(coords)
-
- if enabled_coords:
- self.rasterized_mask = create_mask(frame_shape, enabled_coords)
- else:
- empty_mask = np.zeros(frame_shape, np.uint8)
- empty_mask[:] = 255
- self.rasterized_mask = empty_mask
-
- def dict(self, **kwargs):
- ret = super().model_dump(**kwargs)
- if "rasterized_mask" in ret:
- ret.pop("rasterized_mask")
- return ret
-
- @field_serializer("rasterized_mask", when_used="json")
- def serialize_rasterized_mask(self, value: Any, info):
- return None
-
- model_config = ConfigDict(arbitrary_types_allowed=True, extra="ignore")
-
-
-class RuntimeFilterConfig(FilterConfig):
- """Runtime version of FilterConfig with rasterized masks."""
-
- # The rasterized numpy mask (combination of all enabled masks)
- rasterized_mask: Optional[np.ndarray] = None
-
- def __init__(self, **config):
- frame_shape = config.get("frame_shape", (1, 1))
-
- # Store original mask dict for serialization
- original_mask = config.get("mask", {})
- if isinstance(original_mask, dict):
- # Process the new dict format - update raw_coordinates for each mask
- processed_mask = {}
- for mask_id, mask_config in original_mask.items():
- # Handle both dict and ObjectMaskConfig formats
- if hasattr(mask_config, "model_dump"):
- # It's an ObjectMaskConfig object
- mask_dict = mask_config.model_dump()
- coords = mask_dict.get("coordinates", "")
- relative_coords = get_relative_coordinates(coords, frame_shape)
- mask_dict["raw_coordinates"] = (
- relative_coords if relative_coords else coords
- )
- mask_dict["coordinates"] = (
- relative_coords if relative_coords else coords
- )
- processed_mask[mask_id] = mask_dict
- elif isinstance(mask_config, dict):
- coords = mask_config.get("coordinates", "")
- relative_coords = get_relative_coordinates(coords, frame_shape)
- mask_config_copy = mask_config.copy()
- mask_config_copy["raw_coordinates"] = (
- relative_coords if relative_coords else coords
- )
- mask_config_copy["coordinates"] = (
- relative_coords if relative_coords else coords
- )
- processed_mask[mask_id] = mask_config_copy
- else:
- processed_mask[mask_id] = mask_config
- config["mask"] = processed_mask
- config["raw_mask"] = processed_mask
-
- # Convert min_area and max_area to pixels if they're percentages
- if "min_area" in config:
- config["min_area"] = convert_area_to_pixels(config["min_area"], frame_shape)
-
- if "max_area" in config:
- config["max_area"] = convert_area_to_pixels(config["max_area"], frame_shape)
-
- super().__init__(**config)
-
- # Rasterize only enabled masks
- enabled_coords = []
- for mask_config in self.mask.values():
- if mask_config.enabled and mask_config.coordinates:
- coords = mask_config.coordinates
- if isinstance(coords, list):
- enabled_coords.extend(coords)
- else:
- enabled_coords.append(coords)
-
- if enabled_coords:
- self.rasterized_mask = create_mask(frame_shape, enabled_coords)
- else:
- self.rasterized_mask = None
-
- def dict(self, **kwargs):
- ret = super().model_dump(**kwargs)
- if "rasterized_mask" in ret:
- ret.pop("rasterized_mask")
- return ret
-
- @field_serializer("rasterized_mask", when_used="json")
- def serialize_rasterized_mask(self, value: Any, info):
- return None
-
- model_config = ConfigDict(arbitrary_types_allowed=True, extra="ignore")
-
-
-class RestreamConfig(BaseModel):
- model_config = ConfigDict(extra="allow")
-
-
-def verify_config_roles(camera_config: CameraConfig) -> None:
- """Verify that roles are setup in the config correctly."""
- assigned_roles = list(
- set([r for i in camera_config.ffmpeg.inputs for r in i.roles])
- )
-
- if camera_config.record.enabled and "record" not in assigned_roles:
- raise ValueError(
- f"Camera {camera_config.name} has record enabled, but record is not assigned to an input."
- )
-
- if camera_config.audio.enabled and "audio" not in assigned_roles:
- raise ValueError(
- f"Camera {camera_config.name} has audio events enabled, but audio is not assigned to an input."
- )
-
-
-def verify_valid_live_stream_names(
- frigate_config: FrigateConfig, camera_config: CameraConfig
-) -> ValueError | None:
- """Verify that a restream exists to use for live view."""
- for _, stream_name in camera_config.live.streams.items():
- if (
- stream_name
- not in frigate_config.go2rtc.model_dump().get("streams", {}).keys()
- ):
- return ValueError(
- f"No restream with name {stream_name} exists for camera {camera_config.name}."
- )
-
-
-def verify_recording_segments_setup_with_reasonable_time(
- camera_config: CameraConfig,
-) -> None:
- """Verify that recording segments are setup and segment time is not greater than 60."""
- record_args: list[str] = get_ffmpeg_arg_list(
- camera_config.ffmpeg.output_args.record
- )
-
- if record_args[0].startswith("preset"):
- return
-
- try:
- seg_arg_index = record_args.index("-segment_time")
- except ValueError:
- raise ValueError(
- f"Camera {camera_config.name} has no segment_time in \
- recording output args, segment args are required for record."
- )
-
- if int(record_args[seg_arg_index + 1]) > 60:
- raise ValueError(
- f"Camera {camera_config.name} has invalid segment_time output arg, \
- segment_time must be 60 or less."
- )
-
-
-def verify_zone_objects_are_tracked(camera_config: CameraConfig) -> None:
- """Verify that user has not entered zone objects that are not in the tracking config."""
- for zone_name, zone in camera_config.zones.items():
- for obj in zone.objects:
- if obj not in camera_config.objects.track:
- raise ValueError(
- f"Zone {zone_name} is configured to track {obj} but that object type is not added to objects -> track."
- )
-
-
-def verify_required_zones_exist(camera_config: CameraConfig) -> None:
- for det_zone in camera_config.review.detections.required_zones:
- if det_zone not in camera_config.zones.keys():
- raise ValueError(
- f"Camera {camera_config.name} has a required zone for detections {det_zone} that is not defined."
- )
-
- for det_zone in camera_config.review.alerts.required_zones:
- if det_zone not in camera_config.zones.keys():
- raise ValueError(
- f"Camera {camera_config.name} has a required zone for alerts {det_zone} that is not defined."
- )
-
-
-def verify_autotrack_zones(camera_config: CameraConfig) -> ValueError | None:
- """Verify that required_zones are specified when autotracking is enabled."""
- if (
- camera_config.onvif.autotracking.enabled
- and not camera_config.onvif.autotracking.required_zones
- ):
- raise ValueError(
- f"Camera {camera_config.name} has autotracking enabled, required_zones must be set to at least one of the camera's zones."
- )
-
-
-def verify_motion_and_detect(camera_config: CameraConfig) -> ValueError | None:
- """Verify that motion detection is not disabled and object detection is enabled."""
- if camera_config.detect.enabled and not camera_config.motion.enabled:
- raise ValueError(
- f"Camera {camera_config.name} has motion detection disabled and object detection enabled but object detection requires motion detection."
- )
-
-
-def verify_objects_track(
- camera_config: CameraConfig, enabled_objects: list[str]
-) -> None:
- """Verify that a user has not specified an object to track that is not in the labelmap."""
- valid_objects = [
- obj for obj in camera_config.objects.track if obj in enabled_objects
- ]
-
- if len(valid_objects) != len(camera_config.objects.track):
- invalid_objects = set(camera_config.objects.track) - set(valid_objects)
- logger.warning(
- f"{camera_config.name} is configured to track {list(invalid_objects)} objects, which are not supported by the current model."
- )
- camera_config.objects.track = valid_objects
-
-
-def verify_lpr_and_face(
- frigate_config: FrigateConfig, camera_config: CameraConfig
-) -> ValueError | None:
- """Verify that lpr and face are enabled at the global level if enabled at the camera level."""
- if camera_config.lpr.enabled and not frigate_config.lpr.enabled:
- raise ValueError(
- f"Camera {camera_config.name} has lpr enabled but lpr is disabled at the global level of the config. You must enable lpr at the global level."
- )
- if (
- camera_config.face_recognition.enabled
- and not frigate_config.face_recognition.enabled
- ):
- raise ValueError(
- f"Camera {camera_config.name} has face_recognition enabled but face_recognition is disabled at the global level of the config. You must enable face_recognition at the global level."
- )
-
-
-class FrigateConfig(FrigateBaseModel):
- version: Optional[str] = Field(
- default=None,
- title="Current config version",
- description="Numeric or string version of the active configuration to help detect migrations or format changes.",
- )
- safe_mode: bool = Field(
- default=False,
- title="Safe mode",
- description="When enabled, start Frigate in safe mode with reduced features for troubleshooting.",
- )
-
- # Fields that install global state should be defined first, so that their validators run first.
- environment_vars: EnvVars = Field(
- default_factory=dict,
- title="Environment variables",
- description="Key/value pairs of environment variables to set for the Frigate process in Home Assistant OS. Non-HAOS users must use Docker environment variable configuration instead.",
- )
- logger: LoggerConfig = Field(
- default_factory=LoggerConfig,
- title="Logging",
- description="Controls default log verbosity and per-component log level overrides.",
- validate_default=True,
- )
-
- # Global config
- auth: AuthConfig = Field(
- default_factory=AuthConfig,
- title="Authentication",
- description="Authentication and session-related settings including cookie and rate limit options.",
- )
- database: DatabaseConfig = Field(
- default_factory=DatabaseConfig,
- title="Database",
- description="Settings for the SQLite database used by Frigate to store tracked object and recording metadata.",
- )
- go2rtc: RestreamConfig = Field(
- default_factory=RestreamConfig,
- title="go2rtc",
- description="Settings for the integrated go2rtc restreaming service used for live stream relaying and translation.",
- )
- mqtt: MqttConfig = Field(
- title="MQTT",
- description="Settings for connecting and publishing telemetry, snapshots, and event details to an MQTT broker.",
- )
- notifications: NotificationConfig = Field(
- default_factory=NotificationConfig,
- title="Notifications",
- description="Settings to enable and control notifications for all cameras; can be overridden per-camera.",
- )
- networking: NetworkingConfig = Field(
- default_factory=NetworkingConfig,
- title="Networking",
- description="Network-related settings such as IPv6 enablement for Frigate endpoints.",
- )
- proxy: ProxyConfig = Field(
- default_factory=ProxyConfig,
- title="Proxy",
- description="Settings for integrating Frigate behind a reverse proxy that passes authenticated user headers.",
- )
- telemetry: TelemetryConfig = Field(
- default_factory=TelemetryConfig,
- title="Telemetry",
- description="System telemetry and stats options including GPU and network bandwidth monitoring.",
- )
- tls: TlsConfig = Field(
- default_factory=TlsConfig,
- title="TLS",
- description="TLS settings for Frigate's web endpoints (port 8971).",
- )
- ui: UIConfig = Field(
- default_factory=UIConfig,
- title="UI",
- description="User interface preferences such as timezone, time/date formatting, and units.",
- )
-
- # Detector config
- detectors: Dict[str, BaseDetectorConfig] = Field(
- default=DEFAULT_DETECTORS,
- title="Detector hardware",
- description="Configuration for object detectors (CPU, GPU, ONNX backends) and any detector-specific model settings.",
- )
- model: ModelConfig = Field(
- default_factory=ModelConfig,
- title="Detection model",
- description="Settings to configure a custom object detection model and its input shape.",
- )
-
- # GenAI config (named provider configs: name -> GenAIConfig)
- genai: Dict[str, GenAIConfig] = Field(
- default_factory=dict,
- title="Generative AI configuration (named providers).",
- description="Settings for integrated generative AI providers used to generate object descriptions and review summaries.",
- )
-
- # Camera config
- cameras: Dict[str, CameraConfig] = Field(title="Cameras", description="Cameras")
- audio: AudioConfig = Field(
- default_factory=AudioConfig,
- title="Audio events",
- description="Settings for audio-based event detection for all cameras; can be overridden per-camera.",
- )
- birdseye: BirdseyeConfig = Field(
- default_factory=BirdseyeConfig,
- title="Birdseye",
- description="Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.",
- )
- detect: DetectConfig = Field(
- default_factory=DetectConfig,
- title="Object Detection",
- description="Settings for the detection/detect role used to run object detection and initialize trackers.",
- )
- ffmpeg: FfmpegConfig = Field(
- default_factory=FfmpegConfig,
- title="FFmpeg",
- description="FFmpeg settings including binary path, args, hwaccel options, and per-role output args.",
- )
- live: CameraLiveConfig = Field(
- default_factory=CameraLiveConfig,
- title="Live playback",
- description="Settings used by the Web UI to control live stream resolution and quality.",
- )
- motion: Optional[MotionConfig] = Field(
- default=None,
- title="Motion detection",
- description="Default motion detection settings applied to cameras unless overridden per-camera.",
- )
- objects: ObjectConfig = Field(
- default_factory=ObjectConfig,
- title="Objects",
- description="Object tracking defaults including which labels to track and per-object filters.",
- )
- record: RecordConfig = Field(
- default_factory=RecordConfig,
- title="Recording",
- description="Recording and retention settings applied to cameras unless overridden per-camera.",
- )
- review: ReviewConfig = Field(
- default_factory=ReviewConfig,
- title="Review",
- description="Settings that control alerts, detections, and GenAI review summaries used by the UI and storage.",
- )
- snapshots: SnapshotsConfig = Field(
- default_factory=SnapshotsConfig,
- title="Snapshots",
- description="Settings for saved JPEG snapshots of tracked objects for all cameras; can be overridden per-camera.",
- )
- timestamp_style: TimestampStyleConfig = Field(
- default_factory=TimestampStyleConfig,
- title="Timestamp style",
- description="Styling options for in-feed timestamps applied to debug view and snapshots.",
- )
-
- # Classification Config
- audio_transcription: AudioTranscriptionConfig = Field(
- default_factory=AudioTranscriptionConfig,
- title="Audio transcription",
- description="Settings for live and speech audio transcription used for events and live captions.",
- )
- classification: ClassificationConfig = Field(
- default_factory=ClassificationConfig,
- title="Object classification",
- description="Settings for classification models used to refine object labels or state classification.",
- )
- semantic_search: SemanticSearchConfig = Field(
- default_factory=SemanticSearchConfig,
- title="Semantic Search",
- description="Settings for Semantic Search which builds and queries object embeddings to find similar items.",
- )
- face_recognition: FaceRecognitionConfig = Field(
- default_factory=FaceRecognitionConfig,
- title="Face recognition",
- description="Settings for face detection and recognition for all cameras; can be overridden per-camera.",
- )
- lpr: LicensePlateRecognitionConfig = Field(
- default_factory=LicensePlateRecognitionConfig,
- title="License Plate Recognition",
- description="License plate recognition settings including detection thresholds, formatting, and known plates.",
- )
-
- camera_groups: Dict[str, CameraGroupConfig] = Field(
- default_factory=dict,
- title="Camera groups",
- description="Configuration for named camera groups used to organize cameras in the UI.",
- )
-
- _plus_api: PlusApi
-
- @property
- def plus_api(self) -> PlusApi:
- return self._plus_api
-
- @model_validator(mode="after")
- def post_validation(self, info: ValidationInfo) -> Self:
- # Load plus api from context, if possible.
- self._plus_api = None
- if isinstance(info.context, dict):
- self._plus_api = info.context.get("plus_api")
-
- # Ensure self._plus_api is set, if no explicit value is provided.
- if self._plus_api is None:
- self._plus_api = PlusApi()
-
- # set notifications state
- self.notifications.enabled_in_config = self.notifications.enabled
-
- # validate genai: each role (tools, vision, embeddings) at most once
- role_to_name: dict[GenAIRoleEnum, str] = {}
- for name, genai_cfg in self.genai.items():
- for role in genai_cfg.roles:
- if role in role_to_name:
- raise ValueError(
- f"GenAI role '{role.value}' is assigned to both "
- f"'{role_to_name[role]}' and '{name}'; each role must have "
- "exactly one provider."
- )
- role_to_name[role] = name
-
- # validate semantic_search.model when it is a GenAI provider name
- if (
- self.semantic_search.enabled
- and isinstance(self.semantic_search.model, str)
- and not isinstance(self.semantic_search.model, SemanticSearchModelEnum)
- ):
- if self.semantic_search.model not in self.genai:
- raise ValueError(
- f"semantic_search.model '{self.semantic_search.model}' is not a "
- "valid GenAI config key. Must match a key in genai config."
- )
- genai_cfg = self.genai[self.semantic_search.model]
- if GenAIRoleEnum.embeddings not in genai_cfg.roles:
- raise ValueError(
- f"GenAI provider '{self.semantic_search.model}' must have "
- "'embeddings' in its roles for semantic search."
- )
-
- # set default min_score for object attributes
- for attribute in self.model.all_attributes:
- if not self.objects.filters.get(attribute):
- self.objects.filters[attribute] = FilterConfig(min_score=0.7)
- elif self.objects.filters[attribute].min_score == 0.5:
- self.objects.filters[attribute].min_score = 0.7
-
- # auto detect hwaccel args
- if self.ffmpeg.hwaccel_args == "auto":
- self.ffmpeg.hwaccel_args = auto_detect_hwaccel()
-
- # Global config to propagate down to camera level
- global_config = self.model_dump(
- include={
- "audio": ...,
- "audio_transcription": ...,
- "birdseye": ...,
- "face_recognition": ...,
- "lpr": ...,
- "record": ...,
- "snapshots": ...,
- "live": ...,
- "objects": ...,
- "review": ...,
- "motion": ...,
- "notifications": ...,
- "detect": ...,
- "ffmpeg": ...,
- "timestamp_style": ...,
- },
- exclude_unset=True,
- )
-
- for key, detector in self.detectors.items():
- adapter = TypeAdapter(DetectorConfig)
- model_dict = (
- detector
- if isinstance(detector, dict)
- else detector.model_dump(warnings="none")
- )
- detector_config: BaseDetectorConfig = adapter.validate_python(model_dict)
-
- # users should not set model themselves
- if detector_config.model:
- logger.warning(
- "The model key should be specified at the root level of the config, not under detectors. The nested model key will be ignored."
- )
- detector_config.model = None
-
- model_config = self.model.model_dump(exclude_unset=True, warnings="none")
-
- if detector_config.model_path:
- model_config["path"] = detector_config.model_path
-
- if "path" not in model_config:
- if detector_config.type == "cpu" or detector_config.type.endswith(
- "_tfl"
- ):
- model_config["path"] = "/cpu_model.tflite"
- elif detector_config.type == "edgetpu":
- model_config["path"] = "/edgetpu_model.tflite"
-
- model = ModelConfig.model_validate(model_config)
- model.check_and_load_plus_model(self.plus_api, detector_config.type)
- model.compute_model_hash()
- labelmap_objects = model.merged_labelmap.values()
- detector_config.model = model
- self.detectors[key] = detector_config
-
- for name, camera in self.cameras.items():
- modified_global_config = global_config.copy()
-
- # only populate some fields down to the camera level for specific keys
- allowed_fields_map = {
- "face_recognition": ["enabled", "min_area"],
- "lpr": ["enabled", "expire_time", "min_area", "enhancement"],
- "audio_transcription": ["enabled", "live_enabled"],
- }
-
- for section in allowed_fields_map:
- if section in modified_global_config:
- modified_global_config[section] = {
- k: v
- for k, v in modified_global_config[section].items()
- if k in allowed_fields_map[section]
- }
-
- merged_config = deep_merge(
- camera.model_dump(exclude_unset=True), modified_global_config
- )
- camera_config: CameraConfig = CameraConfig.model_validate(
- {"name": name, **merged_config}
- )
-
- if camera_config.ffmpeg.hwaccel_args == "auto":
- camera_config.ffmpeg.hwaccel_args = self.ffmpeg.hwaccel_args
-
- # Resolve export hwaccel_args: camera export -> camera ffmpeg -> global ffmpeg
- # This allows per-camera override for exports (e.g., when camera resolution
- # exceeds hardware encoder limits)
- if camera_config.record.export.hwaccel_args == "auto":
- camera_config.record.export.hwaccel_args = (
- camera_config.ffmpeg.hwaccel_args
- )
-
- for input in camera_config.ffmpeg.inputs:
- need_detect_dimensions = "detect" in input.roles and (
- camera_config.detect.height is None
- or camera_config.detect.width is None
- )
-
- if need_detect_dimensions:
- stream_info = {"width": 0, "height": 0, "fourcc": None}
- try:
- stream_info = stream_info_retriever.get_stream_info(
- self.ffmpeg, input.path
- )
- except Exception:
- logger.warning(
- f"Error detecting stream parameters automatically for {input.path} Applying default values."
- )
- stream_info = {"width": 0, "height": 0, "fourcc": None}
-
- if need_detect_dimensions:
- camera_config.detect.width = (
- stream_info["width"]
- if stream_info.get("width")
- else DEFAULT_DETECT_DIMENSIONS["width"]
- )
- camera_config.detect.height = (
- stream_info["height"]
- if stream_info.get("height")
- else DEFAULT_DETECT_DIMENSIONS["height"]
- )
-
- # Warn if detect fps > 10
- if camera_config.detect.fps > 10 and camera_config.type != "lpr":
- logger.warning(
- f"{camera_config.name} detect fps is set to {camera_config.detect.fps}. This does NOT need to match your camera's frame rate. High values could lead to reduced performance. Recommended value is 5."
- )
- if camera_config.detect.fps > 15 and camera_config.type == "lpr":
- logger.warning(
- f"{camera_config.name} detect fps is set to {camera_config.detect.fps}. This does NOT need to match your camera's frame rate. High values could lead to reduced performance. Recommended value for LPR cameras are between 5-15."
- )
-
- # Default min_initialized configuration
- min_initialized = int(camera_config.detect.fps / 2)
- if camera_config.detect.min_initialized is None:
- camera_config.detect.min_initialized = min_initialized
-
- # Default max_disappeared configuration
- max_disappeared = camera_config.detect.fps * 5
- if camera_config.detect.max_disappeared is None:
- camera_config.detect.max_disappeared = max_disappeared
-
- # Default stationary_threshold configuration
- stationary_threshold = camera_config.detect.fps * 10
- if camera_config.detect.stationary.threshold is None:
- camera_config.detect.stationary.threshold = stationary_threshold
- # default to the stationary_threshold if not defined
- if camera_config.detect.stationary.interval is None:
- camera_config.detect.stationary.interval = stationary_threshold
-
- # set config pre-value
- camera_config.enabled_in_config = camera_config.enabled
- camera_config.audio.enabled_in_config = camera_config.audio.enabled
- camera_config.audio_transcription.enabled_in_config = (
- camera_config.audio_transcription.enabled
- )
- camera_config.record.enabled_in_config = camera_config.record.enabled
- camera_config.notifications.enabled_in_config = (
- camera_config.notifications.enabled
- )
- camera_config.onvif.autotracking.enabled_in_config = (
- camera_config.onvif.autotracking.enabled
- )
- camera_config.review.alerts.enabled_in_config = (
- camera_config.review.alerts.enabled
- )
- camera_config.review.detections.enabled_in_config = (
- camera_config.review.detections.enabled
- )
- camera_config.objects.genai.enabled_in_config = (
- camera_config.objects.genai.enabled
- )
- camera_config.review.genai.enabled_in_config = (
- camera_config.review.genai.enabled
- )
-
- # Add default filters
- object_keys = camera_config.objects.track
- if camera_config.objects.filters is None:
- camera_config.objects.filters = {}
- object_keys = object_keys - camera_config.objects.filters.keys()
- for key in object_keys:
- camera_config.objects.filters[key] = FilterConfig()
-
- # Process global object masks to set raw_coordinates
- if camera_config.objects.mask:
- processed_global_masks = {}
- for mask_id, mask_config in camera_config.objects.mask.items():
- if mask_config:
- coords = mask_config.coordinates
- relative_coords = get_relative_coordinates(
- coords, camera_config.frame_shape
- )
- # Create a new ObjectMaskConfig with raw_coordinates set
- processed_global_masks[mask_id] = ObjectMaskConfig(
- friendly_name=mask_config.friendly_name,
- enabled=mask_config.enabled,
- coordinates=relative_coords if relative_coords else coords,
- raw_coordinates=relative_coords
- if relative_coords
- else coords,
- enabled_in_config=mask_config.enabled,
- )
- else:
- processed_global_masks[mask_id] = mask_config
- camera_config.objects.mask = processed_global_masks
- camera_config.objects.raw_mask = processed_global_masks
-
- # Apply global object masks and convert masks to numpy array
- for object, filter in camera_config.objects.filters.items():
- # Set enabled_in_config for per-object masks before processing
- for mask_config in filter.mask.values():
- if mask_config:
- mask_config.enabled_in_config = mask_config.enabled
-
- # Merge global object masks with per-object filter masks
- merged_mask = dict(filter.mask) # Copy filter-specific masks
-
- # Add global object masks if they exist
- if camera_config.objects.mask:
- for mask_id, mask_config in camera_config.objects.mask.items():
- # Use a global prefix to avoid key collisions
- global_mask_id = f"global_{mask_id}"
- merged_mask[global_mask_id] = mask_config
-
- # Set runtime filter to create masks
- camera_config.objects.filters[object] = RuntimeFilterConfig(
- frame_shape=camera_config.frame_shape,
- mask=merged_mask,
- **filter.model_dump(
- exclude_unset=True, exclude={"mask", "raw_mask"}
- ),
- )
-
- # Set enabled_in_config for motion masks to match config file state BEFORE creating RuntimeMotionConfig
- if camera_config.motion:
- camera_config.motion.enabled_in_config = camera_config.motion.enabled
- for mask_config in camera_config.motion.mask.values():
- if mask_config:
- mask_config.enabled_in_config = mask_config.enabled
-
- # Convert motion configuration
- if camera_config.motion is None:
- camera_config.motion = RuntimeMotionConfig(
- frame_shape=camera_config.frame_shape
- )
- else:
- camera_config.motion = RuntimeMotionConfig(
- frame_shape=camera_config.frame_shape,
- **camera_config.motion.model_dump(exclude_unset=True),
- )
-
- # generate zone contours
- if len(camera_config.zones) > 0:
- for zone in camera_config.zones.values():
- if zone.filters:
- for object_name, filter_config in zone.filters.items():
- zone.filters[object_name] = RuntimeFilterConfig(
- frame_shape=camera_config.frame_shape,
- **filter_config.model_dump(exclude_unset=True),
- )
-
- zone.generate_contour(camera_config.frame_shape)
-
- # Set enabled_in_config for zones to match config file state
- for zone in camera_config.zones.values():
- zone.enabled_in_config = zone.enabled
-
- # Set live view stream if none is set
- if not camera_config.live.streams:
- camera_config.live.streams = {name: name}
-
- # generate the ffmpeg commands
- camera_config.create_ffmpeg_cmds()
- self.cameras[name] = camera_config
-
- verify_config_roles(camera_config)
- verify_valid_live_stream_names(self, camera_config)
- verify_recording_segments_setup_with_reasonable_time(camera_config)
- verify_zone_objects_are_tracked(camera_config)
- verify_required_zones_exist(camera_config)
- verify_autotrack_zones(camera_config)
- verify_motion_and_detect(camera_config)
- verify_objects_track(camera_config, labelmap_objects)
- verify_lpr_and_face(self, camera_config)
-
- # set names on classification configs
- for name, config in self.classification.custom.items():
- config.name = name
-
- self.objects.parse_all_objects(self.cameras)
- self.model.create_colormap(sorted(self.objects.all_objects))
- self.model.check_and_load_plus_model(self.plus_api)
-
- # Check audio transcription and audio detection requirements
- if self.audio_transcription.enabled:
- # If audio transcription is enabled globally, at least one camera must have audio detection enabled
- if not any(camera.audio.enabled for camera in self.cameras.values()):
- raise ValueError(
- "Audio transcription is enabled globally, but no cameras have audio detection enabled. At least one camera must have audio detection enabled."
- )
- else:
- # If audio transcription is disabled globally, check each camera with audio_transcription enabled
- for camera in self.cameras.values():
- if camera.audio_transcription.enabled and not camera.audio.enabled:
- raise ValueError(
- f"Camera {camera.name} has audio transcription enabled, but audio detection is not enabled for this camera. Audio detection must be enabled for cameras with audio transcription when it is disabled globally."
- )
-
- if self.plus_api and not self.snapshots.clean_copy:
- logger.warning(
- "Frigate+ is configured but clean snapshots are not enabled, submissions to Frigate+ will not be possible./"
- )
-
- # Validate auth roles against cameras
- camera_names = set(self.cameras.keys())
-
- for role, allowed_cameras in self.auth.roles.items():
- invalid_cameras = [
- cam for cam in allowed_cameras if cam not in camera_names
- ]
- if invalid_cameras:
- logger.warning(
- f"Role '{role}' references non-existent cameras: {invalid_cameras}. "
- )
-
- return self
-
- @field_validator("cameras")
- @classmethod
- def ensure_zones_and_cameras_have_different_names(cls, v: Dict[str, CameraConfig]):
- zones = [zone for camera in v.values() for zone in camera.zones.keys()]
- for zone in zones:
- if zone in v.keys():
- raise ValueError("Zones cannot share names with cameras")
- return v
-
- @classmethod
- def load(cls, **kwargs):
- """Loads the Frigate config file, runs migrations, and creates the config object."""
- config_path = find_config_file()
-
- # No configuration file found, create one.
- new_config = False
- if not os.path.isfile(config_path):
- logger.info("No config file found, saving default config")
- config_path = config_path
- new_config = True
- else:
- # Check if the config file needs to be migrated.
- migrate_frigate_config(config_path)
-
- # Finally, load the resulting configuration file.
- with open(config_path, "a+" if new_config else "r") as f:
- # Only write the default config if the opened file is non-empty. This can happen as
- # a race condition. It's extremely unlikely, but eh. Might as well check it.
- if new_config and f.tell() == 0:
- f.write(DEFAULT_CONFIG)
- logger.info(
- "Created default config file, see the getting started docs for configuration: https://docs.frigate.video/guides/getting_started"
- )
-
- f.seek(0)
- return FrigateConfig.parse(f, **kwargs)
-
- @classmethod
- def parse(cls, config, *, is_json=None, safe_load=False, **context):
- # If config is a file, read its contents.
- if hasattr(config, "read"):
- fname = getattr(config, "name", None)
- config = config.read()
-
- # Try to guess the value of is_json from the file extension.
- if is_json is None and fname:
- _, ext = os.path.splitext(fname)
- if ext in (".yaml", ".yml"):
- is_json = False
- elif ext == ".json":
- is_json = True
-
- # At this point, try to sniff the config string, to guess if it is json or not.
- if is_json is None:
- is_json = REGEX_JSON.match(config) is not None
-
- # Parse the config into a dictionary.
- if is_json:
- config = json.load(config)
- else:
- config = yaml.load(config)
-
- # load minimal Frigate config after the full config did not validate
- if safe_load:
- safe_config = {"safe_mode": True, "cameras": {}, "mqtt": {"enabled": False}}
-
- # copy over auth and proxy config in case auth needs to be enforced
- safe_config["auth"] = config.get("auth", {})
- safe_config["proxy"] = config.get("proxy", {})
-
- # copy over database config for auth and so a new db is not created
- safe_config["database"] = config.get("database", {})
-
- return cls.parse_object(safe_config, **context)
-
- # Validate and return the config dict.
- return cls.parse_object(config, **context)
-
- @classmethod
- def parse_yaml(cls, config_yaml, **context):
- return cls.parse(config_yaml, is_json=False, **context)
-
- @classmethod
- def parse_object(
- cls, obj: Any, *, plus_api: Optional[PlusApi] = None, install: bool = False
- ):
- return cls.model_validate(
- obj, context={"plus_api": plus_api, "install": install}
- )
+from __future__ import annotations
+
+import json
+import logging
+import os
+from typing import Any, Dict, Optional
+
+import numpy as np
+from pydantic import (
+ BaseModel,
+ ConfigDict,
+ Field,
+ TypeAdapter,
+ ValidationInfo,
+ field_serializer,
+ field_validator,
+ model_validator,
+)
+from ruamel.yaml import YAML
+from typing_extensions import Self
+
+from frigate.const import REGEX_JSON
+from frigate.detectors import DetectorConfig, ModelConfig
+from frigate.detectors.detector_config import BaseDetectorConfig
+from frigate.plus import PlusApi
+from frigate.util.builtin import (
+ deep_merge,
+ get_ffmpeg_arg_list,
+)
+from frigate.util.config import (
+ CURRENT_CONFIG_VERSION,
+ StreamInfoRetriever,
+ convert_area_to_pixels,
+ find_config_file,
+ get_relative_coordinates,
+ migrate_frigate_config,
+)
+from frigate.util.image import create_mask
+from frigate.util.services import auto_detect_hwaccel
+
+from .auth import AuthConfig
+from .base import FrigateBaseModel
+from .camera import CameraConfig, CameraLiveConfig
+from .camera.audio import AudioConfig
+from .camera.birdseye import BirdseyeConfig
+from .camera.detect import DetectConfig
+from .camera.ffmpeg import FfmpegConfig
+from .camera.genai import GenAIConfig, GenAIRoleEnum
+from .camera.mask import ObjectMaskConfig
+from .camera.motion import MotionConfig
+from .camera.notification import NotificationConfig
+from .camera.objects import FilterConfig, ObjectConfig
+from .camera.record import RecordConfig
+from .camera.review import ReviewConfig
+from .camera.snapshots import SnapshotsConfig
+from .camera.timestamp import TimestampStyleConfig
+from .camera_group import CameraGroupConfig
+from .classification import (
+ AudioTranscriptionConfig,
+ ClassificationConfig,
+ FaceRecognitionConfig,
+ LicensePlateRecognitionConfig,
+ SemanticSearchConfig,
+ SemanticSearchModelEnum,
+)
+from .database import DatabaseConfig
+from .env import EnvVars
+from .logger import LoggerConfig
+from .mqtt import MqttConfig
+from .network import NetworkingConfig
+from .proxy import ProxyConfig
+from .telemetry import TelemetryConfig
+from .tls import TlsConfig
+from .transcode_proxy import TranscodeProxyConfig
+from .ui import UIConfig
+
+__all__ = ["FrigateConfig"]
+
+logger = logging.getLogger(__name__)
+
+yaml = YAML()
+
+DEFAULT_CONFIG = f"""
+mqtt:
+ enabled: False
+
+cameras: {{}} # No cameras defined, UI wizard should be used
+version: {CURRENT_CONFIG_VERSION}
+"""
+
+DEFAULT_DETECTORS = {"cpu": {"type": "cpu"}}
+DEFAULT_DETECT_DIMENSIONS = {"width": 1280, "height": 720}
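+# Fallback resolution applied when stream probing cannot determine width/height.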
+
+# shared helper used to probe streams for resolution when detect dimensions are unset
+stream_info_retriever = StreamInfoRetriever()
+
+
+class RuntimeMotionConfig(MotionConfig):
+ """Runtime version of MotionConfig with rasterized masks."""
+
+ # The rasterized numpy mask (combination of all enabled masks)
+    rasterized_mask: Optional[np.ndarray] = None
+
+ def __init__(self, **config):
+ frame_shape = config.get("frame_shape", (1, 1))
+
+ # Store original mask dict for serialization
+ original_mask = config.get("mask", {})
+ if isinstance(original_mask, dict):
+ # Process the new dict format - update raw_coordinates for each mask
+ processed_mask = {}
+ for mask_id, mask_config in original_mask.items():
+ if isinstance(mask_config, dict):
+ coords = mask_config.get("coordinates", "")
+ relative_coords = get_relative_coordinates(coords, frame_shape)
+ mask_config_copy = mask_config.copy()
+ mask_config_copy["raw_coordinates"] = (
+ relative_coords if relative_coords else coords
+ )
+ mask_config_copy["coordinates"] = (
+ relative_coords if relative_coords else coords
+ )
+ processed_mask[mask_id] = mask_config_copy
+ else:
+ processed_mask[mask_id] = mask_config
+ config["mask"] = processed_mask
+ config["raw_mask"] = processed_mask
+
+ super().__init__(**config)
+
+ # Rasterize only enabled masks
+ enabled_coords = []
+ for mask_config in self.mask.values():
+ if mask_config.enabled and mask_config.coordinates:
+ coords = mask_config.coordinates
+ if isinstance(coords, list):
+ enabled_coords.extend(coords)
+ else:
+ enabled_coords.append(coords)
+
+ if enabled_coords:
+ self.rasterized_mask = create_mask(frame_shape, enabled_coords)
+ else:
+ empty_mask = np.zeros(frame_shape, np.uint8)
+ empty_mask[:] = 255
+ self.rasterized_mask = empty_mask
+
+ def dict(self, **kwargs):
+ ret = super().model_dump(**kwargs)
+ if "rasterized_mask" in ret:
+ ret.pop("rasterized_mask")
+ return ret
+
+ @field_serializer("rasterized_mask", when_used="json")
+ def serialize_rasterized_mask(self, value: Any, info):
+ return None
+
+ model_config = ConfigDict(arbitrary_types_allowed=True, extra="ignore")
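+    # Illustrative construction (hypothetical values; the mask string is a
+    # comma-separated polygon as used elsewhere in the config):
+    #   RuntimeMotionConfig(
+    #       frame_shape=(720, 1280),
+    #       mask={"front": {"coordinates": "0.0,0.0,0.5,0.0,0.5,0.5", "enabled": True}},
+    #   )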
+
+
+class RuntimeFilterConfig(FilterConfig):
+ """Runtime version of FilterConfig with rasterized masks."""
+
+ # The rasterized numpy mask (combination of all enabled masks)
+ rasterized_mask: Optional[np.ndarray] = None
+
+ def __init__(self, **config):
+ frame_shape = config.get("frame_shape", (1, 1))
+
+ # Store original mask dict for serialization
+ original_mask = config.get("mask", {})
+ if isinstance(original_mask, dict):
+ # Process the new dict format - update raw_coordinates for each mask
+ processed_mask = {}
+ for mask_id, mask_config in original_mask.items():
+ # Handle both dict and ObjectMaskConfig formats
+ if hasattr(mask_config, "model_dump"):
+ # It's an ObjectMaskConfig object
+ mask_dict = mask_config.model_dump()
+ coords = mask_dict.get("coordinates", "")
+ relative_coords = get_relative_coordinates(coords, frame_shape)
+ mask_dict["raw_coordinates"] = (
+ relative_coords if relative_coords else coords
+ )
+ mask_dict["coordinates"] = (
+ relative_coords if relative_coords else coords
+ )
+ processed_mask[mask_id] = mask_dict
+ elif isinstance(mask_config, dict):
+ coords = mask_config.get("coordinates", "")
+ relative_coords = get_relative_coordinates(coords, frame_shape)
+ mask_config_copy = mask_config.copy()
+ mask_config_copy["raw_coordinates"] = (
+ relative_coords if relative_coords else coords
+ )
+ mask_config_copy["coordinates"] = (
+ relative_coords if relative_coords else coords
+ )
+ processed_mask[mask_id] = mask_config_copy
+ else:
+ processed_mask[mask_id] = mask_config
+ config["mask"] = processed_mask
+ config["raw_mask"] = processed_mask
+
+ # Convert min_area and max_area to pixels if they're percentages
+ if "min_area" in config:
+ config["min_area"] = convert_area_to_pixels(config["min_area"], frame_shape)
+
+ if "max_area" in config:
+ config["max_area"] = convert_area_to_pixels(config["max_area"], frame_shape)
+
+ super().__init__(**config)
+
+ # Rasterize only enabled masks
+ enabled_coords = []
+ for mask_config in self.mask.values():
+ if mask_config.enabled and mask_config.coordinates:
+ coords = mask_config.coordinates
+ if isinstance(coords, list):
+ enabled_coords.extend(coords)
+ else:
+ enabled_coords.append(coords)
+
+ if enabled_coords:
+ self.rasterized_mask = create_mask(frame_shape, enabled_coords)
+ else:
+ self.rasterized_mask = None
+
+ def dict(self, **kwargs):
+ ret = super().model_dump(**kwargs)
+ if "rasterized_mask" in ret:
+ ret.pop("rasterized_mask")
+ return ret
+
+ @field_serializer("rasterized_mask", when_used="json")
+ def serialize_rasterized_mask(self, value: Any, info):
+ return None
+
+ model_config = ConfigDict(arbitrary_types_allowed=True, extra="ignore")
+
+
+class RestreamConfig(BaseModel):
+ model_config = ConfigDict(extra="allow")
+
+
+def verify_config_roles(camera_config: CameraConfig) -> None:
+ """Verify that roles are setup in the config correctly."""
+ assigned_roles = list(
+ set([r for i in camera_config.ffmpeg.inputs for r in i.roles])
+ )
+
+ if camera_config.record.enabled and "record" not in assigned_roles:
+ raise ValueError(
+ f"Camera {camera_config.name} has record enabled, but record is not assigned to an input."
+ )
+
+ if camera_config.audio.enabled and "audio" not in assigned_roles:
+ raise ValueError(
+ f"Camera {camera_config.name} has audio events enabled, but audio is not assigned to an input."
+ )
+
+
+def verify_valid_live_stream_names(
+ frigate_config: FrigateConfig, camera_config: CameraConfig
+) -> None:
+ """Verify that a restream exists to use for live view."""
+ for _, stream_name in camera_config.live.streams.items():
+ if (
+ stream_name
+ not in frigate_config.go2rtc.model_dump().get("streams", {}).keys()
+ ):
+            raise ValueError(
+ f"No restream with name {stream_name} exists for camera {camera_config.name}."
+ )
+
+
+def verify_recording_segments_setup_with_reasonable_time(
+ camera_config: CameraConfig,
+) -> None:
+ """Verify that recording segments are setup and segment time is not greater than 60."""
+ record_args: list[str] = get_ffmpeg_arg_list(
+ camera_config.ffmpeg.output_args.record
+ )
+
+ if record_args[0].startswith("preset"):
+ return
+
+ try:
+ seg_arg_index = record_args.index("-segment_time")
+ except ValueError:
+ raise ValueError(
+ f"Camera {camera_config.name} has no segment_time in \
+ recording output args, segment args are required for record."
+ )
+
+ if int(record_args[seg_arg_index + 1]) > 60:
+ raise ValueError(
+ f"Camera {camera_config.name} has invalid segment_time output arg, \
+ segment_time must be 60 or less."
+ )
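+# e.g. (illustrative): record output args containing "-segment_time 10" pass this
+# check, "-segment_time 90" raises, and preset-based output args skip it entirely.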
+
+
+def verify_zone_objects_are_tracked(camera_config: CameraConfig) -> None:
+ """Verify that user has not entered zone objects that are not in the tracking config."""
+ for zone_name, zone in camera_config.zones.items():
+ for obj in zone.objects:
+ if obj not in camera_config.objects.track:
+ raise ValueError(
+ f"Zone {zone_name} is configured to track {obj} but that object type is not added to objects -> track."
+ )
+
+
+def verify_required_zones_exist(camera_config: CameraConfig) -> None:
+ for det_zone in camera_config.review.detections.required_zones:
+ if det_zone not in camera_config.zones.keys():
+ raise ValueError(
+ f"Camera {camera_config.name} has a required zone for detections {det_zone} that is not defined."
+ )
+
+ for det_zone in camera_config.review.alerts.required_zones:
+ if det_zone not in camera_config.zones.keys():
+ raise ValueError(
+ f"Camera {camera_config.name} has a required zone for alerts {det_zone} that is not defined."
+ )
+
+
+def verify_autotrack_zones(camera_config: CameraConfig) -> None:
+ """Verify that required_zones are specified when autotracking is enabled."""
+ if (
+ camera_config.onvif.autotracking.enabled
+ and not camera_config.onvif.autotracking.required_zones
+ ):
+ raise ValueError(
+ f"Camera {camera_config.name} has autotracking enabled, required_zones must be set to at least one of the camera's zones."
+ )
+
+
+def verify_motion_and_detect(camera_config: CameraConfig) -> None:
+    """Verify that motion detection is enabled whenever object detection is enabled."""
+ if camera_config.detect.enabled and not camera_config.motion.enabled:
+ raise ValueError(
+ f"Camera {camera_config.name} has motion detection disabled and object detection enabled but object detection requires motion detection."
+ )
+
+
+def verify_objects_track(
+ camera_config: CameraConfig, enabled_objects: list[str]
+) -> None:
+ """Verify that a user has not specified an object to track that is not in the labelmap."""
+ valid_objects = [
+ obj for obj in camera_config.objects.track if obj in enabled_objects
+ ]
+
+ if len(valid_objects) != len(camera_config.objects.track):
+ invalid_objects = set(camera_config.objects.track) - set(valid_objects)
+ logger.warning(
+ f"{camera_config.name} is configured to track {list(invalid_objects)} objects, which are not supported by the current model."
+ )
+ camera_config.objects.track = valid_objects
+
+
+def verify_lpr_and_face(
+ frigate_config: FrigateConfig, camera_config: CameraConfig
+) -> None:
+ """Verify that lpr and face are enabled at the global level if enabled at the camera level."""
+ if camera_config.lpr.enabled and not frigate_config.lpr.enabled:
+ raise ValueError(
+ f"Camera {camera_config.name} has lpr enabled but lpr is disabled at the global level of the config. You must enable lpr at the global level."
+ )
+ if (
+ camera_config.face_recognition.enabled
+ and not frigate_config.face_recognition.enabled
+ ):
+ raise ValueError(
+ f"Camera {camera_config.name} has face_recognition enabled but face_recognition is disabled at the global level of the config. You must enable face_recognition at the global level."
+ )
+
+
+class FrigateConfig(FrigateBaseModel):
+ version: Optional[str] = Field(
+ default=None,
+ title="Current config version",
+ description="Numeric or string version of the active configuration to help detect migrations or format changes.",
+ )
+ safe_mode: bool = Field(
+ default=False,
+ title="Safe mode",
+ description="When enabled, start Frigate in safe mode with reduced features for troubleshooting.",
+ )
+
+ # Fields that install global state should be defined first, so that their validators run first.
+ environment_vars: EnvVars = Field(
+ default_factory=dict,
+ title="Environment variables",
+ description="Key/value pairs of environment variables to set for the Frigate process in Home Assistant OS. Non-HAOS users must use Docker environment variable configuration instead.",
+ )
+ logger: LoggerConfig = Field(
+ default_factory=LoggerConfig,
+ title="Logging",
+ description="Controls default log verbosity and per-component log level overrides.",
+ validate_default=True,
+ )
+
+ # Global config
+ auth: AuthConfig = Field(
+ default_factory=AuthConfig,
+ title="Authentication",
+ description="Authentication and session-related settings including cookie and rate limit options.",
+ )
+ database: DatabaseConfig = Field(
+ default_factory=DatabaseConfig,
+ title="Database",
+ description="Settings for the SQLite database used by Frigate to store tracked object and recording metadata.",
+ )
+ go2rtc: RestreamConfig = Field(
+ default_factory=RestreamConfig,
+ title="go2rtc",
+ description="Settings for the integrated go2rtc restreaming service used for live stream relaying and translation.",
+ )
+ mqtt: MqttConfig = Field(
+ title="MQTT",
+ description="Settings for connecting and publishing telemetry, snapshots, and event details to an MQTT broker.",
+ )
+ notifications: NotificationConfig = Field(
+ default_factory=NotificationConfig,
+ title="Notifications",
+ description="Settings to enable and control notifications for all cameras; can be overridden per-camera.",
+ )
+ networking: NetworkingConfig = Field(
+ default_factory=NetworkingConfig,
+ title="Networking",
+ description="Network-related settings such as IPv6 enablement for Frigate endpoints.",
+ )
+ proxy: ProxyConfig = Field(
+ default_factory=ProxyConfig,
+ title="Proxy",
+ description="Settings for integrating Frigate behind a reverse proxy that passes authenticated user headers.",
+ )
+ telemetry: TelemetryConfig = Field(
+ default_factory=TelemetryConfig,
+ title="Telemetry",
+ description="System telemetry and stats options including GPU and network bandwidth monitoring.",
+ )
+ tls: TlsConfig = Field(
+ default_factory=TlsConfig,
+ title="TLS",
+ description="TLS settings for Frigate's web endpoints (port 8971).",
+ )
+ ui: UIConfig = Field(
+ default_factory=UIConfig,
+ title="UI",
+ description="User interface preferences such as timezone, time/date formatting, and units.",
+ )
+ transcode_proxy: TranscodeProxyConfig = Field(
+ default_factory=TranscodeProxyConfig,
+ title="Transcode proxy",
+ description="Optional proxy for transcoding VOD playback to H.264 on the fly (e.g. for HEVC compatibility).",
+ )
+
+ # Detector config
+ detectors: Dict[str, BaseDetectorConfig] = Field(
+ default=DEFAULT_DETECTORS,
+ title="Detector hardware",
+ description="Configuration for object detectors (CPU, GPU, ONNX backends) and any detector-specific model settings.",
+ )
+ model: ModelConfig = Field(
+ default_factory=ModelConfig,
+ title="Detection model",
+ description="Settings to configure a custom object detection model and its input shape.",
+ )
+
+ # GenAI config (named provider configs: name -> GenAIConfig)
+ genai: Dict[str, GenAIConfig] = Field(
+ default_factory=dict,
+ title="Generative AI configuration (named providers).",
+ description="Settings for integrated generative AI providers used to generate object descriptions and review summaries.",
+ )
+
+ # Camera config
+ cameras: Dict[str, CameraConfig] = Field(title="Cameras", description="Cameras")
+ audio: AudioConfig = Field(
+ default_factory=AudioConfig,
+ title="Audio events",
+ description="Settings for audio-based event detection for all cameras; can be overridden per-camera.",
+ )
+ birdseye: BirdseyeConfig = Field(
+ default_factory=BirdseyeConfig,
+ title="Birdseye",
+ description="Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.",
+ )
+ detect: DetectConfig = Field(
+ default_factory=DetectConfig,
+ title="Object Detection",
+ description="Settings for the detection/detect role used to run object detection and initialize trackers.",
+ )
+ ffmpeg: FfmpegConfig = Field(
+ default_factory=FfmpegConfig,
+ title="FFmpeg",
+ description="FFmpeg settings including binary path, args, hwaccel options, and per-role output args.",
+ )
+ live: CameraLiveConfig = Field(
+ default_factory=CameraLiveConfig,
+ title="Live playback",
+ description="Settings used by the Web UI to control live stream resolution and quality.",
+ )
+ motion: Optional[MotionConfig] = Field(
+ default=None,
+ title="Motion detection",
+ description="Default motion detection settings applied to cameras unless overridden per-camera.",
+ )
+ objects: ObjectConfig = Field(
+ default_factory=ObjectConfig,
+ title="Objects",
+ description="Object tracking defaults including which labels to track and per-object filters.",
+ )
+ record: RecordConfig = Field(
+ default_factory=RecordConfig,
+ title="Recording",
+ description="Recording and retention settings applied to cameras unless overridden per-camera.",
+ )
+ review: ReviewConfig = Field(
+ default_factory=ReviewConfig,
+ title="Review",
+ description="Settings that control alerts, detections, and GenAI review summaries used by the UI and storage.",
+ )
+ snapshots: SnapshotsConfig = Field(
+ default_factory=SnapshotsConfig,
+ title="Snapshots",
+ description="Settings for saved JPEG snapshots of tracked objects for all cameras; can be overridden per-camera.",
+ )
+ timestamp_style: TimestampStyleConfig = Field(
+ default_factory=TimestampStyleConfig,
+ title="Timestamp style",
+ description="Styling options for in-feed timestamps applied to debug view and snapshots.",
+ )
+
+ # Classification Config
+ audio_transcription: AudioTranscriptionConfig = Field(
+ default_factory=AudioTranscriptionConfig,
+ title="Audio transcription",
+ description="Settings for live and speech audio transcription used for events and live captions.",
+ )
+ classification: ClassificationConfig = Field(
+ default_factory=ClassificationConfig,
+ title="Object classification",
+ description="Settings for classification models used to refine object labels or state classification.",
+ )
+ semantic_search: SemanticSearchConfig = Field(
+ default_factory=SemanticSearchConfig,
+ title="Semantic Search",
+ description="Settings for Semantic Search which builds and queries object embeddings to find similar items.",
+ )
+ face_recognition: FaceRecognitionConfig = Field(
+ default_factory=FaceRecognitionConfig,
+ title="Face recognition",
+ description="Settings for face detection and recognition for all cameras; can be overridden per-camera.",
+ )
+ lpr: LicensePlateRecognitionConfig = Field(
+ default_factory=LicensePlateRecognitionConfig,
+ title="License Plate Recognition",
+ description="License plate recognition settings including detection thresholds, formatting, and known plates.",
+ )
+
+ camera_groups: Dict[str, CameraGroupConfig] = Field(
+ default_factory=dict,
+ title="Camera groups",
+ description="Configuration for named camera groups used to organize cameras in the UI.",
+ )
+
+ _plus_api: PlusApi
+
+ @property
+ def plus_api(self) -> PlusApi:
+ return self._plus_api
+
+ @model_validator(mode="after")
+ def post_validation(self, info: ValidationInfo) -> Self:
+ # Load plus api from context, if possible.
+ self._plus_api = None
+ if isinstance(info.context, dict):
+ self._plus_api = info.context.get("plus_api")
+
+ # Ensure self._plus_api is set, if no explicit value is provided.
+ if self._plus_api is None:
+ self._plus_api = PlusApi()
+
+ # set notifications state
+ self.notifications.enabled_in_config = self.notifications.enabled
+
+ # validate genai: each role (tools, vision, embeddings) at most once
+ role_to_name: dict[GenAIRoleEnum, str] = {}
+ for name, genai_cfg in self.genai.items():
+ for role in genai_cfg.roles:
+ if role in role_to_name:
+ raise ValueError(
+ f"GenAI role '{role.value}' is assigned to both "
+ f"'{role_to_name[role]}' and '{name}'; each role must have "
+ "exactly one provider."
+ )
+ role_to_name[role] = name
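+        # e.g. (illustrative) {"main": roles [vision, tools], "embed": roles [embeddings]}
+        # is accepted, while assigning "vision" under two provider names raises above.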
+
+ # validate semantic_search.model when it is a GenAI provider name
+ if (
+ self.semantic_search.enabled
+ and isinstance(self.semantic_search.model, str)
+ and not isinstance(self.semantic_search.model, SemanticSearchModelEnum)
+ ):
+ if self.semantic_search.model not in self.genai:
+ raise ValueError(
+ f"semantic_search.model '{self.semantic_search.model}' is not a "
+ "valid GenAI config key. Must match a key in genai config."
+ )
+ genai_cfg = self.genai[self.semantic_search.model]
+ if GenAIRoleEnum.embeddings not in genai_cfg.roles:
+ raise ValueError(
+ f"GenAI provider '{self.semantic_search.model}' must have "
+ "'embeddings' in its roles for semantic search."
+ )
+
+ # set default min_score for object attributes
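+        # (0.5 is the FilterConfig default, so filters still at 0.5 are raised to the
+        # stricter attribute default of 0.7)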
+ for attribute in self.model.all_attributes:
+ if not self.objects.filters.get(attribute):
+ self.objects.filters[attribute] = FilterConfig(min_score=0.7)
+ elif self.objects.filters[attribute].min_score == 0.5:
+ self.objects.filters[attribute].min_score = 0.7
+
+ # auto detect hwaccel args
+ if self.ffmpeg.hwaccel_args == "auto":
+ self.ffmpeg.hwaccel_args = auto_detect_hwaccel()
+
+ # Global config to propagate down to camera level
+ global_config = self.model_dump(
+ include={
+ "audio": ...,
+ "audio_transcription": ...,
+ "birdseye": ...,
+ "face_recognition": ...,
+ "lpr": ...,
+ "record": ...,
+ "snapshots": ...,
+ "live": ...,
+ "objects": ...,
+ "review": ...,
+ "motion": ...,
+ "notifications": ...,
+ "detect": ...,
+ "ffmpeg": ...,
+ "timestamp_style": ...,
+ },
+ exclude_unset=True,
+ )
+
+ for key, detector in self.detectors.items():
+ adapter = TypeAdapter(DetectorConfig)
+ model_dict = (
+ detector
+ if isinstance(detector, dict)
+ else detector.model_dump(warnings="none")
+ )
+ detector_config: BaseDetectorConfig = adapter.validate_python(model_dict)
+
+ # users should not set model themselves
+ if detector_config.model:
+ logger.warning(
+ "The model key should be specified at the root level of the config, not under detectors. The nested model key will be ignored."
+ )
+ detector_config.model = None
+
+ model_config = self.model.model_dump(exclude_unset=True, warnings="none")
+
+ if detector_config.model_path:
+ model_config["path"] = detector_config.model_path
+
+ if "path" not in model_config:
+ if detector_config.type == "cpu" or detector_config.type.endswith(
+ "_tfl"
+ ):
+ model_config["path"] = "/cpu_model.tflite"
+ elif detector_config.type == "edgetpu":
+ model_config["path"] = "/edgetpu_model.tflite"
+
+ model = ModelConfig.model_validate(model_config)
+ model.check_and_load_plus_model(self.plus_api, detector_config.type)
+ model.compute_model_hash()
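+            # labelmap_objects is carried out of this loop and reused below by
+            # verify_objects_track() (the last validated detector's labelmap wins).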
+ labelmap_objects = model.merged_labelmap.values()
+ detector_config.model = model
+ self.detectors[key] = detector_config
+
+ for name, camera in self.cameras.items():
+ modified_global_config = global_config.copy()
+
+ # only populate some fields down to the camera level for specific keys
+ allowed_fields_map = {
+ "face_recognition": ["enabled", "min_area"],
+ "lpr": ["enabled", "expire_time", "min_area", "enhancement"],
+ "audio_transcription": ["enabled", "live_enabled"],
+ }
+
+ for section in allowed_fields_map:
+ if section in modified_global_config:
+ modified_global_config[section] = {
+ k: v
+ for k, v in modified_global_config[section].items()
+ if k in allowed_fields_map[section]
+ }
+
+ merged_config = deep_merge(
+ camera.model_dump(exclude_unset=True), modified_global_config
+ )
+ camera_config: CameraConfig = CameraConfig.model_validate(
+ {"name": name, **merged_config}
+ )
+
+ if camera_config.ffmpeg.hwaccel_args == "auto":
+ camera_config.ffmpeg.hwaccel_args = self.ffmpeg.hwaccel_args
+
+ # Resolve export hwaccel_args: camera export -> camera ffmpeg -> global ffmpeg
+ # This allows per-camera override for exports (e.g., when camera resolution
+ # exceeds hardware encoder limits)
+ if camera_config.record.export.hwaccel_args == "auto":
+ camera_config.record.export.hwaccel_args = (
+ camera_config.ffmpeg.hwaccel_args
+ )
+
+ for input in camera_config.ffmpeg.inputs:
+ need_detect_dimensions = "detect" in input.roles and (
+ camera_config.detect.height is None
+ or camera_config.detect.width is None
+ )
+
+ if need_detect_dimensions:
+ stream_info = {"width": 0, "height": 0, "fourcc": None}
+ try:
+ stream_info = stream_info_retriever.get_stream_info(
+ self.ffmpeg, input.path
+ )
+ except Exception:
+ logger.warning(
+ f"Error detecting stream parameters automatically for {input.path} Applying default values."
+ )
+ stream_info = {"width": 0, "height": 0, "fourcc": None}
+
+ if need_detect_dimensions:
+ camera_config.detect.width = (
+ stream_info["width"]
+ if stream_info.get("width")
+ else DEFAULT_DETECT_DIMENSIONS["width"]
+ )
+ camera_config.detect.height = (
+ stream_info["height"]
+ if stream_info.get("height")
+ else DEFAULT_DETECT_DIMENSIONS["height"]
+ )
+
+ # Warn if detect fps > 10
+ if camera_config.detect.fps > 10 and camera_config.type != "lpr":
+ logger.warning(
+ f"{camera_config.name} detect fps is set to {camera_config.detect.fps}. This does NOT need to match your camera's frame rate. High values could lead to reduced performance. Recommended value is 5."
+ )
+ if camera_config.detect.fps > 15 and camera_config.type == "lpr":
+ logger.warning(
+ f"{camera_config.name} detect fps is set to {camera_config.detect.fps}. This does NOT need to match your camera's frame rate. High values could lead to reduced performance. Recommended value for LPR cameras are between 5-15."
+ )
+
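+            # The tracker defaults below scale with detect fps: roughly half a second
+            # of frames to confirm an object, 5s before it is considered gone, and 10s
+            # without movement to mark it stationary.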
+ # Default min_initialized configuration
+ min_initialized = int(camera_config.detect.fps / 2)
+ if camera_config.detect.min_initialized is None:
+ camera_config.detect.min_initialized = min_initialized
+
+ # Default max_disappeared configuration
+ max_disappeared = camera_config.detect.fps * 5
+ if camera_config.detect.max_disappeared is None:
+ camera_config.detect.max_disappeared = max_disappeared
+
+ # Default stationary_threshold configuration
+ stationary_threshold = camera_config.detect.fps * 10
+ if camera_config.detect.stationary.threshold is None:
+ camera_config.detect.stationary.threshold = stationary_threshold
+ # default to the stationary_threshold if not defined
+ if camera_config.detect.stationary.interval is None:
+ camera_config.detect.stationary.interval = stationary_threshold
+
+ # set config pre-value
+ camera_config.enabled_in_config = camera_config.enabled
+ camera_config.audio.enabled_in_config = camera_config.audio.enabled
+ camera_config.audio_transcription.enabled_in_config = (
+ camera_config.audio_transcription.enabled
+ )
+ camera_config.record.enabled_in_config = camera_config.record.enabled
+ camera_config.notifications.enabled_in_config = (
+ camera_config.notifications.enabled
+ )
+ camera_config.onvif.autotracking.enabled_in_config = (
+ camera_config.onvif.autotracking.enabled
+ )
+ camera_config.review.alerts.enabled_in_config = (
+ camera_config.review.alerts.enabled
+ )
+ camera_config.review.detections.enabled_in_config = (
+ camera_config.review.detections.enabled
+ )
+ camera_config.objects.genai.enabled_in_config = (
+ camera_config.objects.genai.enabled
+ )
+ camera_config.review.genai.enabled_in_config = (
+ camera_config.review.genai.enabled
+ )
+
+ # Add default filters
+ object_keys = camera_config.objects.track
+ if camera_config.objects.filters is None:
+ camera_config.objects.filters = {}
+ object_keys = object_keys - camera_config.objects.filters.keys()
+ for key in object_keys:
+ camera_config.objects.filters[key] = FilterConfig()
+
+ # Process global object masks to set raw_coordinates
+ if camera_config.objects.mask:
+ processed_global_masks = {}
+ for mask_id, mask_config in camera_config.objects.mask.items():
+ if mask_config:
+ coords = mask_config.coordinates
+ relative_coords = get_relative_coordinates(
+ coords, camera_config.frame_shape
+ )
+ # Create a new ObjectMaskConfig with raw_coordinates set
+ processed_global_masks[mask_id] = ObjectMaskConfig(
+ friendly_name=mask_config.friendly_name,
+ enabled=mask_config.enabled,
+ coordinates=relative_coords if relative_coords else coords,
+ raw_coordinates=relative_coords
+ if relative_coords
+ else coords,
+ enabled_in_config=mask_config.enabled,
+ )
+ else:
+ processed_global_masks[mask_id] = mask_config
+ camera_config.objects.mask = processed_global_masks
+ camera_config.objects.raw_mask = processed_global_masks
+
+ # Apply global object masks and convert masks to numpy array
+ for object, filter in camera_config.objects.filters.items():
+ # Set enabled_in_config for per-object masks before processing
+ for mask_config in filter.mask.values():
+ if mask_config:
+ mask_config.enabled_in_config = mask_config.enabled
+
+ # Merge global object masks with per-object filter masks
+ merged_mask = dict(filter.mask) # Copy filter-specific masks
+
+ # Add global object masks if they exist
+ if camera_config.objects.mask:
+ for mask_id, mask_config in camera_config.objects.mask.items():
+ # Use a global prefix to avoid key collisions
+ global_mask_id = f"global_{mask_id}"
+ merged_mask[global_mask_id] = mask_config
+
+ # Set runtime filter to create masks
+ camera_config.objects.filters[object] = RuntimeFilterConfig(
+ frame_shape=camera_config.frame_shape,
+ mask=merged_mask,
+ **filter.model_dump(
+ exclude_unset=True, exclude={"mask", "raw_mask"}
+ ),
+ )
+
+ # Set enabled_in_config for motion masks to match config file state BEFORE creating RuntimeMotionConfig
+ if camera_config.motion:
+ camera_config.motion.enabled_in_config = camera_config.motion.enabled
+ for mask_config in camera_config.motion.mask.values():
+ if mask_config:
+ mask_config.enabled_in_config = mask_config.enabled
+
+ # Convert motion configuration
+ if camera_config.motion is None:
+ camera_config.motion = RuntimeMotionConfig(
+ frame_shape=camera_config.frame_shape
+ )
+ else:
+ camera_config.motion = RuntimeMotionConfig(
+ frame_shape=camera_config.frame_shape,
+ **camera_config.motion.model_dump(exclude_unset=True),
+ )
+
+ # generate zone contours
+ if len(camera_config.zones) > 0:
+ for zone in camera_config.zones.values():
+ if zone.filters:
+ for object_name, filter_config in zone.filters.items():
+ zone.filters[object_name] = RuntimeFilterConfig(
+ frame_shape=camera_config.frame_shape,
+ **filter_config.model_dump(exclude_unset=True),
+ )
+
+ zone.generate_contour(camera_config.frame_shape)
+
+ # Set enabled_in_config for zones to match config file state
+ for zone in camera_config.zones.values():
+ zone.enabled_in_config = zone.enabled
+
+ # Set live view stream if none is set
+ if not camera_config.live.streams:
+ camera_config.live.streams = {name: name}
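+                # (defaults the live view to a go2rtc stream named after the camera)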
+
+ # generate the ffmpeg commands
+ camera_config.create_ffmpeg_cmds()
+ self.cameras[name] = camera_config
+
+ verify_config_roles(camera_config)
+ verify_valid_live_stream_names(self, camera_config)
+ verify_recording_segments_setup_with_reasonable_time(camera_config)
+ verify_zone_objects_are_tracked(camera_config)
+ verify_required_zones_exist(camera_config)
+ verify_autotrack_zones(camera_config)
+ verify_motion_and_detect(camera_config)
+ verify_objects_track(camera_config, labelmap_objects)
+ verify_lpr_and_face(self, camera_config)
+
+ # set names on classification configs
+ for name, config in self.classification.custom.items():
+ config.name = name
+
+ self.objects.parse_all_objects(self.cameras)
+ self.model.create_colormap(sorted(self.objects.all_objects))
+ self.model.check_and_load_plus_model(self.plus_api)
+
+ # Check audio transcription and audio detection requirements
+ if self.audio_transcription.enabled:
+ # If audio transcription is enabled globally, at least one camera must have audio detection enabled
+ if not any(camera.audio.enabled for camera in self.cameras.values()):
+ raise ValueError(
+ "Audio transcription is enabled globally, but no cameras have audio detection enabled. At least one camera must have audio detection enabled."
+ )
+ else:
+ # If audio transcription is disabled globally, check each camera with audio_transcription enabled
+ for camera in self.cameras.values():
+ if camera.audio_transcription.enabled and not camera.audio.enabled:
+ raise ValueError(
+ f"Camera {camera.name} has audio transcription enabled, but audio detection is not enabled for this camera. Audio detection must be enabled for cameras with audio transcription when it is disabled globally."
+ )
+
+ if self.plus_api and not self.snapshots.clean_copy:
+ logger.warning(
+ "Frigate+ is configured but clean snapshots are not enabled, submissions to Frigate+ will not be possible./"
+ )
+
+ # Validate auth roles against cameras
+ camera_names = set(self.cameras.keys())
+
+ for role, allowed_cameras in self.auth.roles.items():
+ invalid_cameras = [
+ cam for cam in allowed_cameras if cam not in camera_names
+ ]
+ if invalid_cameras:
+ logger.warning(
+ f"Role '{role}' references non-existent cameras: {invalid_cameras}. "
+ )
+
+ return self
+
+ @field_validator("cameras")
+ @classmethod
+ def ensure_zones_and_cameras_have_different_names(cls, v: Dict[str, CameraConfig]):
+ zones = [zone for camera in v.values() for zone in camera.zones.keys()]
+ for zone in zones:
+ if zone in v.keys():
+ raise ValueError("Zones cannot share names with cameras")
+ return v
+
+ @classmethod
+ def load(cls, **kwargs):
+ """Loads the Frigate config file, runs migrations, and creates the config object."""
+ config_path = find_config_file()
+
+ # No configuration file found, create one.
+ new_config = False
+ if not os.path.isfile(config_path):
+ logger.info("No config file found, saving default config")
+ new_config = True
+ else:
+ # Check if the config file needs to be migrated.
+ migrate_frigate_config(config_path)
+
+ # Finally, load the resulting configuration file.
+ with open(config_path, "a+" if new_config else "r") as f:
+            # Only write the default config if the opened file is empty. Another process
+            # may have written it first (a race); extremely unlikely, but worth checking.
+ if new_config and f.tell() == 0:
+ f.write(DEFAULT_CONFIG)
+ logger.info(
+ "Created default config file, see the getting started docs for configuration: https://docs.frigate.video/guides/getting_started"
+ )
+
+ f.seek(0)
+ return FrigateConfig.parse(f, **kwargs)
+
+ @classmethod
+ def parse(cls, config, *, is_json=None, safe_load=False, **context):
+ # If config is a file, read its contents.
+ if hasattr(config, "read"):
+ fname = getattr(config, "name", None)
+ config = config.read()
+
+ # Try to guess the value of is_json from the file extension.
+ if is_json is None and fname:
+ _, ext = os.path.splitext(fname)
+ if ext in (".yaml", ".yml"):
+ is_json = False
+ elif ext == ".json":
+ is_json = True
+
+        # Otherwise, sniff the config string to guess whether it is JSON.
+ if is_json is None:
+ is_json = REGEX_JSON.match(config) is not None
+
+ # Parse the config into a dictionary.
+ if is_json:
+            config = json.loads(config)
+ else:
+ config = yaml.load(config)
+
+ # load minimal Frigate config after the full config did not validate
+ if safe_load:
+ safe_config = {"safe_mode": True, "cameras": {}, "mqtt": {"enabled": False}}
+
+ # copy over auth and proxy config in case auth needs to be enforced
+ safe_config["auth"] = config.get("auth", {})
+ safe_config["proxy"] = config.get("proxy", {})
+
+ # copy over database config for auth and so a new db is not created
+ safe_config["database"] = config.get("database", {})
+
+ return cls.parse_object(safe_config, **context)
+
+ # Validate and return the config dict.
+ return cls.parse_object(config, **context)
+
+ @classmethod
+ def parse_yaml(cls, config_yaml, **context):
+ return cls.parse(config_yaml, is_json=False, **context)
+
+ @classmethod
+ def parse_object(
+ cls, obj: Any, *, plus_api: Optional[PlusApi] = None, install: bool = False
+ ):
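+        """Validate a plain config dict, passing plus_api/install via validator context.
+
+        Illustrative call (hypothetical values):
+            FrigateConfig.parse_object({"mqtt": {"enabled": False}, "cameras": {}})
+        """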
+ return cls.model_validate(
+ obj, context={"plus_api": plus_api, "install": install}
+ )
diff --git a/frigate/config/transcode_proxy.py b/frigate/config/transcode_proxy.py
new file mode 100644
index 000000000..14f340d7a
--- /dev/null
+++ b/frigate/config/transcode_proxy.py
@@ -0,0 +1,21 @@
+"""Configuration for the VOD transcode proxy (optional playback transcoding)."""
+from pydantic import Field
+
+from .base import FrigateBaseModel
+
+__all__ = ["TranscodeProxyConfig"]
+
+
+class TranscodeProxyConfig(FrigateBaseModel):
+ """Settings for the optional transcode proxy used for recording playback."""
+
+ enabled: bool = Field(
+ default=False,
+ title="Transcode proxy enabled",
+ description="When enabled, the UI uses the transcode proxy URL for VOD playback so recordings are transcoded to H.264 on the fly (e.g. for HEVC compatibility or lower bitrate).",
+ )
+ vod_proxy_url: str = Field(
+ default="",
+ title="VOD proxy base URL",
+ description="Base URL for the transcode proxy (e.g. http://host:5010). When enabled, recording playback requests go to this URL + /vod/... Leave empty if the proxy is mounted at the same host (e.g. /vod-transcoded/ under the same origin).",
+ )
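+
+
+# Illustrative YAML (hypothetical values, mirroring the field descriptions above):
+#   transcode_proxy:
+#     enabled: true
+#     vod_proxy_url: http://host:5010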
diff --git a/frigate/models.py b/frigate/models.py
index d927a12c8..92152a649 100644
--- a/frigate/models.py
+++ b/frigate/models.py
@@ -1,179 +1,184 @@
-from peewee import (
- BlobField,
- BooleanField,
- CharField,
- CompositeKey,
- DateTimeField,
- FloatField,
- ForeignKeyField,
- IntegerField,
- Model,
- TextField,
-)
-from playhouse.sqlite_ext import JSONField
-
-
-class Event(Model):
- id = CharField(null=False, primary_key=True, max_length=30)
- label = CharField(index=True, max_length=20)
- sub_label = CharField(max_length=100, null=True)
- camera = CharField(index=True, max_length=20)
- start_time = DateTimeField()
- end_time = DateTimeField()
- top_score = (
- FloatField()
- ) # TODO remove when columns can be dropped without rebuilding table
- score = (
- FloatField()
- ) # TODO remove when columns can be dropped without rebuilding table
- false_positive = BooleanField()
- zones = JSONField()
- thumbnail = TextField()
- has_clip = BooleanField(default=True)
- has_snapshot = BooleanField(default=True)
- region = (
- JSONField()
- ) # TODO remove when columns can be dropped without rebuilding table
- box = (
- JSONField()
- ) # TODO remove when columns can be dropped without rebuilding table
- area = (
- IntegerField()
- ) # TODO remove when columns can be dropped without rebuilding table
- retain_indefinitely = BooleanField(default=False)
- ratio = FloatField(
- default=1.0
- ) # TODO remove when columns can be dropped without rebuilding table
- plus_id = CharField(max_length=30)
- model_hash = CharField(max_length=32)
- detector_type = CharField(max_length=32)
- model_type = CharField(max_length=32)
- data = JSONField() # ex: tracked object box, region, etc.
-
-
-class Timeline(Model):
- timestamp = DateTimeField()
- camera = CharField(index=True, max_length=20)
- source = CharField(index=True, max_length=20) # ex: tracked object, audio, external
- source_id = CharField(index=True, max_length=30)
- class_type = CharField(max_length=50) # ex: entered_zone, audio_heard
- data = JSONField() # ex: tracked object id, region, box, etc.
-
-
-class Regions(Model):
- camera = CharField(null=False, primary_key=True, max_length=20)
- grid = JSONField() # json blob of grid
- last_update = DateTimeField()
-
-
-class Recordings(Model):
- id = CharField(null=False, primary_key=True, max_length=30)
- camera = CharField(index=True, max_length=20)
- path = CharField(unique=True)
- start_time = DateTimeField()
- end_time = DateTimeField()
- duration = FloatField()
- motion = IntegerField(null=True)
- objects = IntegerField(null=True)
- dBFS = IntegerField(null=True)
- segment_size = FloatField(default=0) # this should be stored as MB
- regions = IntegerField(null=True)
- motion_heatmap = JSONField(null=True) # 16x16 grid, 256 values (0-255)
-
-
-class ExportCase(Model):
- id = CharField(null=False, primary_key=True, max_length=30)
- name = CharField(index=True, max_length=100)
- description = TextField(null=True)
- created_at = DateTimeField()
- updated_at = DateTimeField()
-
-
-class Export(Model):
- id = CharField(null=False, primary_key=True, max_length=30)
- camera = CharField(index=True, max_length=20)
- name = CharField(index=True, max_length=100)
- date = DateTimeField()
- video_path = CharField(unique=True)
- thumb_path = CharField(unique=True)
- in_progress = BooleanField()
- export_case = ForeignKeyField(
- ExportCase,
- null=True,
- backref="exports",
- column_name="export_case_id",
- )
-
-
-class ReviewSegment(Model):
- id = CharField(null=False, primary_key=True, max_length=30)
- camera = CharField(index=True, max_length=20)
- start_time = DateTimeField()
- end_time = DateTimeField()
- severity = CharField(max_length=30) # alert, detection
- thumb_path = CharField(unique=True)
- data = JSONField() # additional data about detection like list of labels, zone, areas of significant motion
-
-
-class UserReviewStatus(Model):
- user_id = CharField(max_length=30)
- review_segment = ForeignKeyField(ReviewSegment, backref="user_reviews")
- has_been_reviewed = BooleanField(default=False)
-
- class Meta:
- indexes = ((("user_id", "review_segment"), True),)
-
-
-class Previews(Model):
- id = CharField(null=False, primary_key=True, max_length=30)
- camera = CharField(index=True, max_length=20)
- path = CharField(unique=True)
- start_time = DateTimeField()
- end_time = DateTimeField()
- duration = FloatField()
-
-
-# Used for temporary table in record/cleanup.py
-class RecordingsToDelete(Model):
- id = CharField(null=False, primary_key=False, max_length=30)
-
- class Meta:
- temporary = True
-
-
-class User(Model):
- username = CharField(null=False, primary_key=True, max_length=30)
- role = CharField(
- max_length=20,
- default="admin",
- )
- password_hash = CharField(null=False, max_length=120)
- password_changed_at = DateTimeField(null=True)
- notification_tokens = JSONField()
-
- @classmethod
- def get_allowed_cameras(
- cls, role: str, roles_dict: dict[str, list[str]], all_camera_names: set[str]
- ) -> list[str]:
- if role not in roles_dict:
- return [] # Invalid role grants no access
- allowed = roles_dict[role]
- if not allowed: # Empty list means all cameras
- return list(all_camera_names)
-
- return [cam for cam in allowed if cam in all_camera_names]
-
-
-class Trigger(Model):
- camera = CharField(max_length=20)
- name = CharField()
- type = CharField(max_length=10)
- data = TextField()
- threshold = FloatField()
- model = CharField(max_length=30)
- embedding = BlobField()
- triggering_event_id = CharField(max_length=30)
- last_triggered = DateTimeField()
-
- class Meta:
- primary_key = CompositeKey("camera", "name")
+from peewee import (
+ BlobField,
+ BooleanField,
+ CharField,
+ CompositeKey,
+ DateTimeField,
+ FloatField,
+ ForeignKeyField,
+ IntegerField,
+ Model,
+ TextField,
+)
+from playhouse.sqlite_ext import JSONField
+
+
+class Event(Model):
+ id = CharField(null=False, primary_key=True, max_length=30)
+ label = CharField(index=True, max_length=20)
+ sub_label = CharField(max_length=100, null=True)
+ camera = CharField(index=True, max_length=20)
+ start_time = DateTimeField()
+ end_time = DateTimeField()
+ top_score = (
+ FloatField()
+ ) # TODO remove when columns can be dropped without rebuilding table
+ score = (
+ FloatField()
+ ) # TODO remove when columns can be dropped without rebuilding table
+ false_positive = BooleanField()
+ zones = JSONField()
+ thumbnail = TextField()
+ has_clip = BooleanField(default=True)
+ has_snapshot = BooleanField(default=True)
+ region = (
+ JSONField()
+ ) # TODO remove when columns can be dropped without rebuilding table
+ box = (
+ JSONField()
+ ) # TODO remove when columns can be dropped without rebuilding table
+ area = (
+ IntegerField()
+ ) # TODO remove when columns can be dropped without rebuilding table
+ retain_indefinitely = BooleanField(default=False)
+ ratio = FloatField(
+ default=1.0
+ ) # TODO remove when columns can be dropped without rebuilding table
+ plus_id = CharField(max_length=30)
+ model_hash = CharField(max_length=32)
+ detector_type = CharField(max_length=32)
+ model_type = CharField(max_length=32)
+ data = JSONField() # ex: tracked object box, region, etc.
+
+
+class Timeline(Model):
+ timestamp = DateTimeField()
+ camera = CharField(index=True, max_length=20)
+ source = CharField(index=True, max_length=20) # ex: tracked object, audio, external
+ source_id = CharField(index=True, max_length=30)
+ class_type = CharField(max_length=50) # ex: entered_zone, audio_heard
+ data = JSONField() # ex: tracked object id, region, box, etc.
+
+
+class Regions(Model):
+ camera = CharField(null=False, primary_key=True, max_length=20)
+ grid = JSONField() # json blob of grid
+ last_update = DateTimeField()
+
+
+class Recordings(Model):
+ id = CharField(null=False, primary_key=True, max_length=30)
+ camera = CharField(index=True, max_length=20)
+ path = CharField(unique=True)
+ variant = CharField(default="main", index=True, max_length=20)
+ start_time = DateTimeField()
+ end_time = DateTimeField()
+ duration = FloatField()
+ motion = IntegerField(null=True)
+ objects = IntegerField(null=True)
+ dBFS = IntegerField(null=True)
+ segment_size = FloatField(default=0) # this should be stored as MB
+ codec_name = CharField(null=True, max_length=32)
+ width = IntegerField(null=True)
+ height = IntegerField(null=True)
+ bitrate = IntegerField(null=True)
+ regions = IntegerField(null=True)
+ motion_heatmap = JSONField(null=True) # 16x16 grid, 256 values (0-255)
+
+
+class ExportCase(Model):
+ id = CharField(null=False, primary_key=True, max_length=30)
+ name = CharField(index=True, max_length=100)
+ description = TextField(null=True)
+ created_at = DateTimeField()
+ updated_at = DateTimeField()
+
+
+class Export(Model):
+ id = CharField(null=False, primary_key=True, max_length=30)
+ camera = CharField(index=True, max_length=20)
+ name = CharField(index=True, max_length=100)
+ date = DateTimeField()
+ video_path = CharField(unique=True)
+ thumb_path = CharField(unique=True)
+ in_progress = BooleanField()
+ export_case = ForeignKeyField(
+ ExportCase,
+ null=True,
+ backref="exports",
+ column_name="export_case_id",
+ )
+
+
+class ReviewSegment(Model):
+ id = CharField(null=False, primary_key=True, max_length=30)
+ camera = CharField(index=True, max_length=20)
+ start_time = DateTimeField()
+ end_time = DateTimeField()
+ severity = CharField(max_length=30) # alert, detection
+ thumb_path = CharField(unique=True)
+ data = JSONField() # additional data about detection like list of labels, zone, areas of significant motion
+
+
+class UserReviewStatus(Model):
+ user_id = CharField(max_length=30)
+ review_segment = ForeignKeyField(ReviewSegment, backref="user_reviews")
+ has_been_reviewed = BooleanField(default=False)
+
+ class Meta:
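+ # peewee index tuples are (fields, unique); True makes the index unique,
+ # so each user has at most one review status row per segment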
+ indexes = ((("user_id", "review_segment"), True),)
+
+
+class Previews(Model):
+ id = CharField(null=False, primary_key=True, max_length=30)
+ camera = CharField(index=True, max_length=20)
+ path = CharField(unique=True)
+ start_time = DateTimeField()
+ end_time = DateTimeField()
+ duration = FloatField()
+
+
+# Used for temporary table in record/cleanup.py
+class RecordingsToDelete(Model):
+ id = CharField(null=False, primary_key=False, max_length=30)
+
+ class Meta:
+ temporary = True
+
+
+class User(Model):
+ username = CharField(null=False, primary_key=True, max_length=30)
+ role = CharField(
+ max_length=20,
+ default="admin",
+ )
+ password_hash = CharField(null=False, max_length=120)
+ password_changed_at = DateTimeField(null=True)
+ notification_tokens = JSONField()
+
+ @classmethod
+ def get_allowed_cameras(
+ cls, role: str, roles_dict: dict[str, list[str]], all_camera_names: set[str]
+ ) -> list[str]:
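+ # e.g. with roles_dict={"admin": [], "viewer": ["front_door"]} and
+ # all_camera_names={"front_door", "garage"}: "admin" gets both cameras
+ # (an empty list means unrestricted), "viewer" gets ["front_door"], and
+ # an unknown role gets []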
+ if role not in roles_dict:
+ return [] # Invalid role grants no access
+ allowed = roles_dict[role]
+ if not allowed: # Empty list means all cameras
+ return list(all_camera_names)
+
+ return [cam for cam in allowed if cam in all_camera_names]
+
+
+class Trigger(Model):
+ camera = CharField(max_length=20)
+ name = CharField()
+ type = CharField(max_length=10)
+ data = TextField()
+ threshold = FloatField()
+ model = CharField(max_length=30)
+ embedding = BlobField()
+ triggering_event_id = CharField(max_length=30)
+ last_triggered = DateTimeField()
+
+ class Meta:
+ primary_key = CompositeKey("camera", "name")
diff --git a/frigate/record/maintainer.py b/frigate/record/maintainer.py
index 68040476a..463d815f3 100644
--- a/frigate/record/maintainer.py
+++ b/frigate/record/maintainer.py
@@ -1,754 +1,788 @@
-"""Maintain recording segments in cache."""
-
-import asyncio
-import datetime
-import logging
-import os
-import random
-import string
-import threading
-import time
-from collections import defaultdict
-from multiprocessing.synchronize import Event as MpEvent
-from pathlib import Path
-from typing import Any, Optional, Tuple
-
-import numpy as np
-import psutil
-
-from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum
-from frigate.comms.inter_process import InterProcessRequestor
-from frigate.comms.recordings_updater import (
- RecordingsDataPublisher,
- RecordingsDataTypeEnum,
-)
-from frigate.config import FrigateConfig, RetainModeEnum
-from frigate.config.camera.updater import (
- CameraConfigUpdateEnum,
- CameraConfigUpdateSubscriber,
-)
-from frigate.const import (
- CACHE_DIR,
- CACHE_SEGMENT_FORMAT,
- FAST_QUEUE_TIMEOUT,
- INSERT_MANY_RECORDINGS,
- MAX_SEGMENT_DURATION,
- MAX_SEGMENTS_IN_CACHE,
- RECORD_DIR,
-)
-from frigate.models import Recordings, ReviewSegment
-from frigate.review.types import SeverityEnum
-from frigate.util.services import get_video_properties
-
-logger = logging.getLogger(__name__)
-
-
-class SegmentInfo:
- def __init__(
- self,
- motion_count: int,
- active_object_count: int,
- region_count: int,
- average_dBFS: int,
- motion_heatmap: dict[str, int] | None = None,
- ) -> None:
- self.motion_count = motion_count
- self.active_object_count = active_object_count
- self.region_count = region_count
- self.average_dBFS = average_dBFS
- self.motion_heatmap = motion_heatmap
-
- def should_discard_segment(self, retain_mode: RetainModeEnum) -> bool:
- keep = False
-
- # all mode should never discard
- if retain_mode == RetainModeEnum.all:
- keep = True
-
- # motion mode should keep if motion or audio is detected
- if (
- not keep
- and retain_mode == RetainModeEnum.motion
- and (self.motion_count > 0 or self.average_dBFS != 0)
- ):
- keep = True
-
- # active objects mode should keep if any active objects are detected
- if not keep and self.active_object_count > 0:
- keep = True
-
- return not keep
-
-
-class RecordingMaintainer(threading.Thread):
- def __init__(self, config: FrigateConfig, stop_event: MpEvent):
- super().__init__(name="recording_maintainer")
- self.config = config
-
- # create communication for retained recordings
- self.requestor = InterProcessRequestor()
- self.config_subscriber = CameraConfigUpdateSubscriber(
- self.config,
- self.config.cameras,
- [CameraConfigUpdateEnum.add, CameraConfigUpdateEnum.record],
- )
- self.detection_subscriber = DetectionSubscriber(DetectionTypeEnum.all.value)
- self.recordings_publisher = RecordingsDataPublisher()
-
- self.stop_event = stop_event
- self.object_recordings_info: dict[str, list] = defaultdict(list)
- self.audio_recordings_info: dict[str, list] = defaultdict(list)
- self.end_time_cache: dict[str, Tuple[datetime.datetime, float]] = {}
- self.unexpected_cache_files_logged: bool = False
-
- async def move_files(self) -> None:
- cache_files = [
- d
- for d in os.listdir(CACHE_DIR)
- if os.path.isfile(os.path.join(CACHE_DIR, d))
- and d.endswith(".mp4")
- and not d.startswith("preview_")
- ]
-
- # publish newest cached segment per camera (including in use files)
- newest_cache_segments: dict[str, dict[str, Any]] = {}
- for cache in cache_files:
- cache_path = os.path.join(CACHE_DIR, cache)
- basename = os.path.splitext(cache)[0]
- try:
- camera, date = basename.rsplit("@", maxsplit=1)
- except ValueError:
- if not self.unexpected_cache_files_logged:
- logger.warning("Skipping unexpected files in cache")
- self.unexpected_cache_files_logged = True
- continue
-
- start_time = datetime.datetime.strptime(
- date, CACHE_SEGMENT_FORMAT
- ).astimezone(datetime.timezone.utc)
- if (
- camera not in newest_cache_segments
- or start_time > newest_cache_segments[camera]["start_time"]
- ):
- newest_cache_segments[camera] = {
- "start_time": start_time,
- "cache_path": cache_path,
- }
-
- for camera, newest in newest_cache_segments.items():
- self.recordings_publisher.publish(
- (
- camera,
- newest["start_time"].timestamp(),
- newest["cache_path"],
- ),
- RecordingsDataTypeEnum.latest.value,
- )
- # publish None for cameras with no cache files (but only if we know the camera exists)
- for camera_name in self.config.cameras:
- if camera_name not in newest_cache_segments:
- self.recordings_publisher.publish(
- (camera_name, None, None),
- RecordingsDataTypeEnum.latest.value,
- )
-
- files_in_use = []
- for process in psutil.process_iter():
- try:
- if process.name() != "ffmpeg":
- continue
- file_list = process.open_files()
- if file_list:
- for nt in file_list:
- if nt.path.startswith(CACHE_DIR):
- files_in_use.append(nt.path.split("/")[-1])
- except psutil.Error:
- continue
-
- # group recordings by camera (skip in-use for validation/moving)
- grouped_recordings: defaultdict[str, list[dict[str, Any]]] = defaultdict(list)
- for cache in cache_files:
- # Skip files currently in use
- if cache in files_in_use:
- continue
-
- cache_path = os.path.join(CACHE_DIR, cache)
- basename = os.path.splitext(cache)[0]
- try:
- camera, date = basename.rsplit("@", maxsplit=1)
- except ValueError:
- if not self.unexpected_cache_files_logged:
- logger.warning("Skipping unexpected files in cache")
- self.unexpected_cache_files_logged = True
- continue
-
- # important that start_time is utc because recordings are stored and compared in utc
- start_time = datetime.datetime.strptime(
- date, CACHE_SEGMENT_FORMAT
- ).astimezone(datetime.timezone.utc)
-
- grouped_recordings[camera].append(
- {
- "cache_path": cache_path,
- "start_time": start_time,
- }
- )
-
- # delete all cached files past the most recent MAX_SEGMENTS_IN_CACHE
- keep_count = MAX_SEGMENTS_IN_CACHE
- for camera in grouped_recordings.keys():
- # sort based on start time
- grouped_recordings[camera] = sorted(
- grouped_recordings[camera], key=lambda s: s["start_time"]
- )
-
- camera_info = self.object_recordings_info[camera]
- most_recently_processed_frame_time = (
- camera_info[-1][0] if len(camera_info) > 0 else 0
- )
-
- processed_segment_count = len(
- list(
- filter(
- lambda r: (
- r["start_time"].timestamp()
- < most_recently_processed_frame_time
- ),
- grouped_recordings[camera],
- )
- )
- )
-
- # see if the recording mover is too slow and segments need to be deleted
- if processed_segment_count > keep_count:
- logger.warning(
- f"Unable to keep up with recording segments in cache for {camera}. Keeping the {keep_count} most recent segments out of {processed_segment_count} and discarding the rest..."
- )
- to_remove = grouped_recordings[camera][:-keep_count]
- for rec in to_remove:
- cache_path = rec["cache_path"]
- Path(cache_path).unlink(missing_ok=True)
- self.end_time_cache.pop(cache_path, None)
- grouped_recordings[camera] = grouped_recordings[camera][-keep_count:]
-
- # see if detection has failed and unprocessed segments need to be deleted
- unprocessed_segment_count = (
- len(grouped_recordings[camera]) - processed_segment_count
- )
- if unprocessed_segment_count > keep_count:
- logger.warning(
- f"Too many unprocessed recording segments in cache for {camera}. This likely indicates an issue with the detect stream, keeping the {keep_count} most recent segments out of {unprocessed_segment_count} and discarding the rest..."
- )
- to_remove = grouped_recordings[camera][:-keep_count]
- for rec in to_remove:
- cache_path = rec["cache_path"]
- Path(cache_path).unlink(missing_ok=True)
- self.end_time_cache.pop(cache_path, None)
- grouped_recordings[camera] = grouped_recordings[camera][-keep_count:]
-
- tasks = []
- for camera, recordings in grouped_recordings.items():
- # clear out all the object recording info for old frames
- while (
- len(self.object_recordings_info[camera]) > 0
- and self.object_recordings_info[camera][0][0]
- < recordings[0]["start_time"].timestamp()
- ):
- self.object_recordings_info[camera].pop(0)
-
- # clear out all the audio recording info for old frames
- while (
- len(self.audio_recordings_info[camera]) > 0
- and self.audio_recordings_info[camera][0][0]
- < recordings[0]["start_time"].timestamp()
- ):
- self.audio_recordings_info[camera].pop(0)
-
- # get all reviews with the end time after the start of the oldest cache file
- # or with end_time None
- reviews: ReviewSegment = (
- ReviewSegment.select(
- ReviewSegment.start_time,
- ReviewSegment.end_time,
- ReviewSegment.severity,
- ReviewSegment.data,
- )
- .where(
- ReviewSegment.camera == camera,
- (ReviewSegment.end_time == None)
- | (
- ReviewSegment.end_time
- >= recordings[0]["start_time"].timestamp()
- ),
- )
- .order_by(ReviewSegment.start_time)
- )
-
- tasks.extend(
- [self.validate_and_move_segment(camera, reviews, r) for r in recordings]
- )
-
- # publish most recently available recording time and None if disabled
- camera_cfg = self.config.cameras.get(camera)
- self.recordings_publisher.publish(
- (
- camera,
- recordings[0]["start_time"].timestamp()
- if camera_cfg and camera_cfg.record.enabled
- else None,
- None,
- ),
- RecordingsDataTypeEnum.saved.value,
- )
-
- recordings_to_insert: list[Optional[Recordings]] = await asyncio.gather(*tasks)
-
- # fire and forget recordings entries
- self.requestor.send_data(
- INSERT_MANY_RECORDINGS,
- [r for r in recordings_to_insert if r is not None],
- )
-
- def drop_segment(self, cache_path: str) -> None:
- Path(cache_path).unlink(missing_ok=True)
- self.end_time_cache.pop(cache_path, None)
-
- async def validate_and_move_segment(
- self, camera: str, reviews: list[ReviewSegment], recording: dict[str, Any]
- ) -> Optional[Recordings]:
- cache_path: str = recording["cache_path"]
- start_time: datetime.datetime = recording["start_time"]
-
- # Just delete files if camera removed or recordings are turned off
- if (
- camera not in self.config.cameras
- or not self.config.cameras[camera].record.enabled
- ):
- self.drop_segment(cache_path)
- return None
-
- if cache_path in self.end_time_cache:
- end_time, duration = self.end_time_cache[cache_path]
- else:
- segment_info = await get_video_properties(
- self.config.ffmpeg, cache_path, get_duration=True
- )
-
- if not segment_info.get("has_valid_video", False):
- logger.warning(
- f"Invalid or missing video stream in segment {cache_path}. Discarding."
- )
- self.recordings_publisher.publish(
- (camera, start_time.timestamp(), cache_path),
- RecordingsDataTypeEnum.invalid.value,
- )
- self.drop_segment(cache_path)
- return None
-
- duration = float(segment_info.get("duration", -1))
-
- # ensure duration is within expected length
- if 0 < duration < MAX_SEGMENT_DURATION:
- end_time = start_time + datetime.timedelta(seconds=duration)
- self.end_time_cache[cache_path] = (end_time, duration)
- else:
- if duration == -1:
- logger.warning(f"Failed to probe corrupt segment {cache_path}")
-
- logger.warning(f"Discarding a corrupt recording segment: {cache_path}")
- self.recordings_publisher.publish(
- (camera, start_time.timestamp(), cache_path),
- RecordingsDataTypeEnum.invalid.value,
- )
- self.drop_segment(cache_path)
- return None
-
- # this segment has a valid duration and has video data, so publish an update
- self.recordings_publisher.publish(
- (camera, start_time.timestamp(), cache_path),
- RecordingsDataTypeEnum.valid.value,
- )
-
- record_config = self.config.cameras[camera].record
- highest = None
-
- if record_config.continuous.days > 0:
- highest = "continuous"
- elif record_config.motion.days > 0:
- highest = "motion"
-
- # if we have continuous or motion recording enabled
- # we should first just check if this segment matches that
- # and avoid any DB calls
- if highest is not None:
- # assume that empty means the relevant recording info has not been received yet
- camera_info = self.object_recordings_info[camera]
- most_recently_processed_frame_time = (
- camera_info[-1][0] if len(camera_info) > 0 else 0
- )
-
- # ensure delayed segment info does not lead to lost segments
- if (
- datetime.datetime.fromtimestamp(
- most_recently_processed_frame_time
- ).astimezone(datetime.timezone.utc)
- >= end_time
- ):
- record_mode = (
- RetainModeEnum.all
- if highest == "continuous"
- else RetainModeEnum.motion
- )
- return await self.move_segment(
- camera, start_time, end_time, duration, cache_path, record_mode
- )
-
- # we fell through the continuous / motion check, so we need to check the review items
- # if the cached segment overlaps with the review items:
- overlaps = False
- for review in reviews:
- severity = SeverityEnum[review.severity]
-
- # if the review item starts in the future, stop checking review items
- # and remove this segment
- if (
- review.start_time - record_config.get_review_pre_capture(severity)
- ) > end_time.timestamp():
- overlaps = False
- break
-
- # if the review item is in progress or ends after the recording starts, keep it
- # and stop looking at review items
- if (
- review.end_time is None
- or (review.end_time + record_config.get_review_post_capture(severity))
- >= start_time.timestamp()
- ):
- overlaps = True
- break
-
- if overlaps:
- record_mode = (
- record_config.alerts.retain.mode
- if review.severity == "alert"
- else record_config.detections.retain.mode
- )
- # move from cache to recordings immediately
- return await self.move_segment(
- camera,
- start_time,
- end_time,
- duration,
- cache_path,
- record_mode,
- )
- # if it doesn't overlap with an review item, go ahead and drop the segment
- # if it ends more than the configured pre_capture for the camera
- # BUT only if continuous/motion is NOT enabled (otherwise wait for processing)
- elif highest is None:
- camera_info = self.object_recordings_info[camera]
- most_recently_processed_frame_time = (
- camera_info[-1][0] if len(camera_info) > 0 else 0
- )
- retain_cutoff = datetime.datetime.fromtimestamp(
- most_recently_processed_frame_time - record_config.event_pre_capture
- ).astimezone(datetime.timezone.utc)
- if end_time < retain_cutoff:
- self.drop_segment(cache_path)
-
- def _compute_motion_heatmap(
- self, camera: str, motion_boxes: list[tuple[int, int, int, int]]
- ) -> dict[str, int] | None:
- """Compute a 16x16 motion intensity heatmap from motion boxes.
-
- Returns a sparse dict mapping cell index (as string) to intensity (1-255).
- Only cells with motion are included.
-
- Args:
- camera: Camera name to get detect dimensions from.
- motion_boxes: List of (x1, y1, x2, y2) pixel coordinates.
-
- Returns:
- Sparse dict like {"45": 3, "46": 5}, or None if no boxes.
- """
- if not motion_boxes:
- return None
-
- camera_config = self.config.cameras.get(camera)
- if not camera_config:
- return None
-
- frame_width = camera_config.detect.width
- frame_height = camera_config.detect.height
-
- if frame_width <= 0 or frame_height <= 0:
- return None
-
- GRID_SIZE = 16
- counts: dict[int, int] = {}
-
- for box in motion_boxes:
- if len(box) < 4:
- continue
- x1, y1, x2, y2 = box
-
- # Convert pixel coordinates to grid cells
- grid_x1 = max(0, int((x1 / frame_width) * GRID_SIZE))
- grid_y1 = max(0, int((y1 / frame_height) * GRID_SIZE))
- grid_x2 = min(GRID_SIZE - 1, int((x2 / frame_width) * GRID_SIZE))
- grid_y2 = min(GRID_SIZE - 1, int((y2 / frame_height) * GRID_SIZE))
-
- for y in range(grid_y1, grid_y2 + 1):
- for x in range(grid_x1, grid_x2 + 1):
- idx = y * GRID_SIZE + x
- counts[idx] = min(255, counts.get(idx, 0) + 1)
-
- if not counts:
- return None
-
- # Convert to string keys for JSON storage
- return {str(k): v for k, v in counts.items()}
-
- def segment_stats(
- self, camera: str, start_time: datetime.datetime, end_time: datetime.datetime
- ) -> SegmentInfo:
- video_frame_count = 0
- active_count = 0
- region_count = 0
- motion_count = 0
- all_motion_boxes: list[tuple[int, int, int, int]] = []
-
- for frame in self.object_recordings_info[camera]:
- # frame is after end time of segment
- if frame[0] > end_time.timestamp():
- break
- # frame is before start time of segment
- if frame[0] < start_time.timestamp():
- continue
-
- video_frame_count += 1
- active_count += len(
- [
- o
- for o in frame[1]
- if not o["false_positive"] and o["motionless_count"] == 0
- ]
- )
- motion_count += len(frame[2])
- region_count += len(frame[3])
- # Collect motion boxes for heatmap computation
- all_motion_boxes.extend(frame[2])
-
- audio_values = []
- for frame in self.audio_recordings_info[camera]:
- # frame is after end time of segment
- if frame[0] > end_time.timestamp():
- break
-
- # frame is before start time of segment
- if frame[0] < start_time.timestamp():
- continue
-
- # add active audio label count to count of active objects
- active_count += len(frame[2])
-
- # add sound level to audio values
- audio_values.append(frame[1])
-
- average_dBFS = 0 if not audio_values else np.average(audio_values)
-
- motion_heatmap = self._compute_motion_heatmap(camera, all_motion_boxes)
-
- return SegmentInfo(
- motion_count,
- active_count,
- region_count,
- round(average_dBFS),
- motion_heatmap,
- )
-
- async def move_segment(
- self,
- camera: str,
- start_time: datetime.datetime,
- end_time: datetime.datetime,
- duration: float,
- cache_path: str,
- store_mode: RetainModeEnum,
- ) -> Optional[Recordings]:
- segment_info = self.segment_stats(camera, start_time, end_time)
-
- # check if the segment shouldn't be stored
- if segment_info.should_discard_segment(store_mode):
- self.drop_segment(cache_path)
- return
-
- # directory will be in utc due to start_time being in utc
- directory = os.path.join(
- RECORD_DIR,
- start_time.strftime("%Y-%m-%d/%H"),
- camera,
- )
-
- if not os.path.exists(directory):
- os.makedirs(directory)
-
- # file will be in utc due to start_time being in utc
- file_name = f"{start_time.strftime('%M.%S.mp4')}"
- file_path = os.path.join(directory, file_name)
-
- try:
- if not os.path.exists(file_path):
- start_frame = datetime.datetime.now().timestamp()
-
- # add faststart to kept segments to improve metadata reading
- p = await asyncio.create_subprocess_exec(
- self.config.ffmpeg.ffmpeg_path,
- "-hide_banner",
- "-y",
- "-i",
- cache_path,
- "-c",
- "copy",
- "-movflags",
- "+faststart",
- file_path,
- stderr=asyncio.subprocess.PIPE,
- stdout=asyncio.subprocess.DEVNULL,
- )
- await p.wait()
-
- if p.returncode != 0:
- logger.error(f"Unable to convert {cache_path} to {file_path}")
- logger.error((await p.stderr.read()).decode("ascii"))
- return None
- else:
- logger.debug(
- f"Copied {file_path} in {datetime.datetime.now().timestamp() - start_frame} seconds."
- )
-
- try:
- # get the segment size of the cache file
- # file without faststart is same size
- segment_size = round(
- float(os.path.getsize(cache_path)) / pow(2, 20), 2
- )
- except OSError:
- segment_size = 0
-
- os.remove(cache_path)
-
- rand_id = "".join(
- random.choices(string.ascii_lowercase + string.digits, k=6)
- )
-
- return {
- Recordings.id.name: f"{start_time.timestamp()}-{rand_id}",
- Recordings.camera.name: camera,
- Recordings.path.name: file_path,
- Recordings.start_time.name: start_time.timestamp(),
- Recordings.end_time.name: end_time.timestamp(),
- Recordings.duration.name: duration,
- Recordings.motion.name: segment_info.motion_count,
- # TODO: update this to store list of active objects at some point
- Recordings.objects.name: segment_info.active_object_count,
- Recordings.regions.name: segment_info.region_count,
- Recordings.dBFS.name: segment_info.average_dBFS,
- Recordings.segment_size.name: segment_size,
- Recordings.motion_heatmap.name: segment_info.motion_heatmap,
- }
- except Exception as e:
- logger.error(f"Unable to store recording segment {cache_path}")
- Path(cache_path).unlink(missing_ok=True)
- logger.error(e)
-
- # clear end_time cache
- self.end_time_cache.pop(cache_path, None)
- return None
-
- def run(self) -> None:
- # Check for new files every 5 seconds
- wait_time = 0.0
- while not self.stop_event.is_set():
- time.sleep(wait_time)
-
- if self.stop_event.is_set():
- break
-
- run_start = datetime.datetime.now().timestamp()
-
- # check if there is an updated config
- self.config_subscriber.check_for_updates()
-
- stale_frame_count = 0
- stale_frame_count_threshold = 10
- # empty the object recordings info queue
- while True:
- (topic, data) = self.detection_subscriber.check_for_update(
- timeout=FAST_QUEUE_TIMEOUT
- )
-
- if not topic:
- break
-
- if topic == DetectionTypeEnum.video.value:
- (
- camera,
- _,
- frame_time,
- current_tracked_objects,
- motion_boxes,
- regions,
- ) = data
-
- if self.config.cameras[camera].record.enabled:
- self.object_recordings_info[camera].append(
- (
- frame_time,
- current_tracked_objects,
- motion_boxes,
- regions,
- )
- )
- elif topic == DetectionTypeEnum.audio.value:
- (
- camera,
- frame_time,
- dBFS,
- audio_detections,
- ) = data
-
- if self.config.cameras[camera].record.enabled:
- self.audio_recordings_info[camera].append(
- (
- frame_time,
- dBFS,
- audio_detections,
- )
- )
- elif (
- topic == DetectionTypeEnum.api.value or DetectionTypeEnum.lpr.value
- ):
- continue
-
- if frame_time < run_start - stale_frame_count_threshold:
- stale_frame_count += 1
-
- if stale_frame_count > 0:
- logger.debug(f"Found {stale_frame_count} old frames.")
-
- try:
- asyncio.run(self.move_files())
- except Exception as e:
- logger.error(
- "Error occurred when attempting to maintain recording cache"
- )
- logger.error(e)
- duration = datetime.datetime.now().timestamp() - run_start
- wait_time = max(0, 5 - duration)
-
- self.requestor.stop()
- self.config_subscriber.stop()
- self.detection_subscriber.stop()
- self.recordings_publisher.stop()
- logger.info("Exiting recording maintenance...")
+"""Maintain recording segments in cache."""
+
+import asyncio
+import datetime
+import logging
+import os
+import random
+import string
+import threading
+import time
+from collections import defaultdict
+from multiprocessing.synchronize import Event as MpEvent
+from pathlib import Path
+from typing import Any, Optional, Tuple
+
+import numpy as np
+import psutil
+
+from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum
+from frigate.comms.inter_process import InterProcessRequestor
+from frigate.comms.recordings_updater import (
+ RecordingsDataPublisher,
+ RecordingsDataTypeEnum,
+)
+from frigate.config import FrigateConfig, RetainModeEnum
+from frigate.config.camera.updater import (
+ CameraConfigUpdateEnum,
+ CameraConfigUpdateSubscriber,
+)
+from frigate.const import (
+ CACHE_DIR,
+ CACHE_SEGMENT_FORMAT,
+ FAST_QUEUE_TIMEOUT,
+ INSERT_MANY_RECORDINGS,
+ MAX_SEGMENT_DURATION,
+ MAX_SEGMENTS_IN_CACHE,
+ RECORD_DIR,
+)
+from frigate.models import Recordings, ReviewSegment
+from frigate.review.types import SeverityEnum
+from frigate.util.services import get_video_properties
+
+logger = logging.getLogger(__name__)
+
+
+class SegmentInfo:
+ def __init__(
+ self,
+ motion_count: int,
+ active_object_count: int,
+ region_count: int,
+ average_dBFS: int,
+ motion_heatmap: dict[str, int] | None = None,
+ ) -> None:
+ self.motion_count = motion_count
+ self.active_object_count = active_object_count
+ self.region_count = region_count
+ self.average_dBFS = average_dBFS
+ self.motion_heatmap = motion_heatmap
+
+ def should_discard_segment(self, retain_mode: RetainModeEnum) -> bool:
+ keep = False
+
+ # all mode should never discard
+ if retain_mode == RetainModeEnum.all:
+ keep = True
+
+ # motion mode should keep if motion or audio is detected
+ if (
+ not keep
+ and retain_mode == RetainModeEnum.motion
+ and (self.motion_count > 0 or self.average_dBFS != 0)
+ ):
+ keep = True
+
+ # active objects mode should keep if any active objects are detected
+ if not keep and self.active_object_count > 0:
+ keep = True
+
+ return not keep
+
+
+class RecordingMaintainer(threading.Thread):
+ def __init__(self, config: FrigateConfig, stop_event: MpEvent):
+ super().__init__(name="recording_maintainer")
+ self.config = config
+
+ # create communication for retained recordings
+ self.requestor = InterProcessRequestor()
+ self.config_subscriber = CameraConfigUpdateSubscriber(
+ self.config,
+ self.config.cameras,
+ [CameraConfigUpdateEnum.add, CameraConfigUpdateEnum.record],
+ )
+ self.detection_subscriber = DetectionSubscriber(DetectionTypeEnum.all.value)
+ self.recordings_publisher = RecordingsDataPublisher()
+
+ self.stop_event = stop_event
+ self.object_recordings_info: dict[str, list] = defaultdict(list)
+ self.audio_recordings_info: dict[str, list] = defaultdict(list)
+ self.end_time_cache: dict[str, Tuple[datetime.datetime, float]] = {}
+ self.unexpected_cache_files_logged: bool = False
+
+ def _parse_cache_segment(self, cache_name: str) -> Optional[dict[str, Any]]:
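+ # cache segments are named "<camera>@<timestamp>.mp4" for the main stream
+ # or "<camera>@<variant>@<timestamp>.mp4" for a named stream variant,
+ # where <timestamp> follows CACHE_SEGMENT_FORMAT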
+ basename = os.path.splitext(cache_name)[0]
+ parts = basename.rsplit("@", maxsplit=2)
+
+ if len(parts) == 2:
+ camera, date = parts
+ variant = "main"
+ elif len(parts) == 3:
+ camera, variant, date = parts
+ else:
+ return None
+
+ try:
+ start_time = datetime.datetime.strptime(
+ date, CACHE_SEGMENT_FORMAT
+ ).astimezone(datetime.timezone.utc)
+ except ValueError:
+ # a timestamp that doesn't match CACHE_SEGMENT_FORMAT is treated the
+ # same as any other unexpected cache file
+ return None
+
+ return {
+ "camera": camera,
+ "variant": variant,
+ "start_time": start_time,
+ "cache_path": os.path.join(CACHE_DIR, cache_name),
+ }
+
+ async def move_files(self) -> None:
+ cache_files = [
+ d
+ for d in os.listdir(CACHE_DIR)
+ if os.path.isfile(os.path.join(CACHE_DIR, d))
+ and d.endswith(".mp4")
+ and not d.startswith("preview_")
+ ]
+
+ # publish newest cached segment per camera (including in use files)
+ newest_cache_segments: dict[str, dict[str, Any]] = {}
+ for cache in cache_files:
+ parsed = self._parse_cache_segment(cache)
+ if parsed is None:
+ if not self.unexpected_cache_files_logged:
+ logger.warning("Skipping unexpected files in cache")
+ self.unexpected_cache_files_logged = True
+ continue
+
+ camera = parsed["camera"]
+ start_time = parsed["start_time"]
+ if (
+ camera not in newest_cache_segments
+ or start_time > newest_cache_segments[camera]["start_time"]
+ ):
+ newest_cache_segments[camera] = {
+ "start_time": start_time,
+ "cache_path": parsed["cache_path"],
+ }
+
+ for camera, newest in newest_cache_segments.items():
+ self.recordings_publisher.publish(
+ (
+ camera,
+ newest["start_time"].timestamp(),
+ newest["cache_path"],
+ ),
+ RecordingsDataTypeEnum.latest.value,
+ )
+ # publish None for cameras with no cache files (but only if we know the camera exists)
+ for camera_name in self.config.cameras:
+ if camera_name not in newest_cache_segments:
+ self.recordings_publisher.publish(
+ (camera_name, None, None),
+ RecordingsDataTypeEnum.latest.value,
+ )
+
+ files_in_use = []
+ for process in psutil.process_iter():
+ try:
+ if process.name() != "ffmpeg":
+ continue
+ file_list = process.open_files()
+ if file_list:
+ for nt in file_list:
+ if nt.path.startswith(CACHE_DIR):
+ files_in_use.append(nt.path.split("/")[-1])
+ except psutil.Error:
+ continue
+
+ # group recordings by camera (skip in-use for validation/moving)
+ grouped_recordings: defaultdict[str, list[dict[str, Any]]] = defaultdict(list)
+ for cache in cache_files:
+ # Skip files currently in use
+ if cache in files_in_use:
+ continue
+
+ parsed = self._parse_cache_segment(cache)
+ if parsed is None:
+ if not self.unexpected_cache_files_logged:
+ logger.warning("Skipping unexpected files in cache")
+ self.unexpected_cache_files_logged = True
+ continue
+
+ grouped_recordings[parsed["camera"]].append(parsed)
+
+ # delete all cached files past the most recent MAX_SEGMENTS_IN_CACHE
+ keep_count = MAX_SEGMENTS_IN_CACHE
+ for camera in grouped_recordings.keys():
+ # sort based on start time
+ grouped_recordings[camera] = sorted(
+ grouped_recordings[camera], key=lambda s: s["start_time"]
+ )
+
+ camera_info = self.object_recordings_info[camera]
+ most_recently_processed_frame_time = (
+ camera_info[-1][0] if len(camera_info) > 0 else 0
+ )
+
+ processed_segment_count = len(
+ list(
+ filter(
+ lambda r: (
+ r["start_time"].timestamp()
+ < most_recently_processed_frame_time
+ ),
+ grouped_recordings[camera],
+ )
+ )
+ )
+
+ # see if the recording mover is too slow and segments need to be deleted
+ if processed_segment_count > keep_count:
+ logger.warning(
+ f"Unable to keep up with recording segments in cache for {camera}. Keeping the {keep_count} most recent segments out of {processed_segment_count} and discarding the rest..."
+ )
+ to_remove = grouped_recordings[camera][:-keep_count]
+ for rec in to_remove:
+ cache_path = rec["cache_path"]
+ Path(cache_path).unlink(missing_ok=True)
+ self.end_time_cache.pop(cache_path, None)
+ grouped_recordings[camera] = grouped_recordings[camera][-keep_count:]
+
+ # see if detection has failed and unprocessed segments need to be deleted
+ unprocessed_segment_count = (
+ len(grouped_recordings[camera]) - processed_segment_count
+ )
+ if unprocessed_segment_count > keep_count:
+ logger.warning(
+ f"Too many unprocessed recording segments in cache for {camera}. This likely indicates an issue with the detect stream, keeping the {keep_count} most recent segments out of {unprocessed_segment_count} and discarding the rest..."
+ )
+ to_remove = grouped_recordings[camera][:-keep_count]
+ for rec in to_remove:
+ cache_path = rec["cache_path"]
+ Path(cache_path).unlink(missing_ok=True)
+ self.end_time_cache.pop(cache_path, None)
+ grouped_recordings[camera] = grouped_recordings[camera][-keep_count:]
+
+ tasks = []
+ for camera, recordings in grouped_recordings.items():
+ # clear out all the object recording info for old frames
+ while (
+ len(self.object_recordings_info[camera]) > 0
+ and self.object_recordings_info[camera][0][0]
+ < recordings[0]["start_time"].timestamp()
+ ):
+ self.object_recordings_info[camera].pop(0)
+
+ # clear out all the audio recording info for old frames
+ while (
+ len(self.audio_recordings_info[camera]) > 0
+ and self.audio_recordings_info[camera][0][0]
+ < recordings[0]["start_time"].timestamp()
+ ):
+ self.audio_recordings_info[camera].pop(0)
+
+ # get all reviews with the end time after the start of the oldest cache file
+ # or with end_time None
+ reviews = (
+ ReviewSegment.select(
+ ReviewSegment.start_time,
+ ReviewSegment.end_time,
+ ReviewSegment.severity,
+ ReviewSegment.data,
+ )
+ .where(
+ ReviewSegment.camera == camera,
+ (ReviewSegment.end_time == None)
+ | (
+ ReviewSegment.end_time
+ >= recordings[0]["start_time"].timestamp()
+ ),
+ )
+ .order_by(ReviewSegment.start_time)
+ )
+
+ tasks.extend(
+ [self.validate_and_move_segment(camera, reviews, r) for r in recordings]
+ )
+
+ # publish most recently available recording time and None if disabled
+ camera_cfg = self.config.cameras.get(camera)
+ self.recordings_publisher.publish(
+ (
+ camera,
+ recordings[0]["start_time"].timestamp()
+ if camera_cfg and camera_cfg.record.enabled
+ else None,
+ None,
+ ),
+ RecordingsDataTypeEnum.saved.value,
+ )
+
+ recordings_to_insert: list[Optional[Recordings]] = await asyncio.gather(*tasks)
+
+ # fire and forget recordings entries
+ self.requestor.send_data(
+ INSERT_MANY_RECORDINGS,
+ [r for r in recordings_to_insert if r is not None],
+ )
+
+ def drop_segment(self, cache_path: str) -> None:
+ Path(cache_path).unlink(missing_ok=True)
+ self.end_time_cache.pop(cache_path, None)
+
+ async def validate_and_move_segment(
+ self, camera: str, reviews: list[ReviewSegment], recording: dict[str, Any]
+ ) -> Optional[Recordings]:
+ cache_path: str = recording["cache_path"]
+ start_time: datetime.datetime = recording["start_time"]
+ variant: str = recording.get("variant", "main")
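+ # cache names without a variant component parse as the "main" variant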
+
+ # Just delete files if camera removed or recordings are turned off
+ if (
+ camera not in self.config.cameras
+ or not self.config.cameras[camera].record.enabled
+ ):
+ self.drop_segment(cache_path)
+ return None
+
+ segment_info: dict[str, Any]
+ if cache_path in self.end_time_cache:
+ end_time, duration = self.end_time_cache[cache_path]
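+ # end_time/duration are already cached from a previous pass, so probe the
+ # file only for the stream metadata (codec, resolution) used in the DB row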
+ segment_info = await get_video_properties(
+ self.config.ffmpeg, cache_path, get_duration=False
+ )
+ else:
+ segment_info = await get_video_properties(
+ self.config.ffmpeg, cache_path, get_duration=True
+ )
+
+ if not segment_info.get("has_valid_video", False):
+ logger.warning(
+ f"Invalid or missing video stream in segment {cache_path}. Discarding."
+ )
+ self.recordings_publisher.publish(
+ (camera, start_time.timestamp(), cache_path),
+ RecordingsDataTypeEnum.invalid.value,
+ )
+ self.drop_segment(cache_path)
+ return None
+
+ duration = float(segment_info.get("duration", -1))
+
+ # ensure duration is within expected length
+ if 0 < duration < MAX_SEGMENT_DURATION:
+ end_time = start_time + datetime.timedelta(seconds=duration)
+ self.end_time_cache[cache_path] = (end_time, duration)
+ else:
+ if duration == -1:
+ logger.warning(f"Failed to probe corrupt segment {cache_path}")
+
+ logger.warning(f"Discarding a corrupt recording segment: {cache_path}")
+ self.recordings_publisher.publish(
+ (camera, start_time.timestamp(), cache_path),
+ RecordingsDataTypeEnum.invalid.value,
+ )
+ self.drop_segment(cache_path)
+ return None
+
+ # this segment has a valid duration and has video data, so publish an update
+ self.recordings_publisher.publish(
+ (camera, start_time.timestamp(), cache_path),
+ RecordingsDataTypeEnum.valid.value,
+ )
+
+ record_config = self.config.cameras[camera].record
+ highest = None
+
+ if record_config.continuous.days > 0:
+ highest = "continuous"
+ elif record_config.motion.days > 0:
+ highest = "motion"
+
+ # if we have continuous or motion recording enabled
+ # we should first just check if this segment matches that
+ # and avoid any DB calls
+ if highest is not None:
+ # assume that empty means the relevant recording info has not been received yet
+ camera_info = self.object_recordings_info[camera]
+ most_recently_processed_frame_time = (
+ camera_info[-1][0] if len(camera_info) > 0 else 0
+ )
+
+ # ensure delayed segment info does not lead to lost segments
+ if (
+ datetime.datetime.fromtimestamp(
+ most_recently_processed_frame_time
+ ).astimezone(datetime.timezone.utc)
+ >= end_time
+ ):
+ record_mode = (
+ RetainModeEnum.all
+ if highest == "continuous"
+ else RetainModeEnum.motion
+ )
+ return await self.move_segment(
+ camera,
+ variant,
+ start_time,
+ end_time,
+ duration,
+ cache_path,
+ record_mode,
+ segment_info,
+ )
+
+ # we fell through the continuous / motion check, so we need to check the review items
+ # if the cached segment overlaps with the review items:
+ overlaps = False
+ for review in reviews:
+ severity = SeverityEnum[review.severity]
+
+ # if the review item starts in the future, stop checking review items
+ # and remove this segment
+ if (
+ review.start_time - record_config.get_review_pre_capture(severity)
+ ) > end_time.timestamp():
+ overlaps = False
+ break
+
+ # if the review item is in progress or ends after the recording starts, keep it
+ # and stop looking at review items
+ if (
+ review.end_time is None
+ or (review.end_time + record_config.get_review_post_capture(severity))
+ >= start_time.timestamp()
+ ):
+ overlaps = True
+ break
+
+ if overlaps:
+ record_mode = (
+ record_config.alerts.retain.mode
+ if review.severity == "alert"
+ else record_config.detections.retain.mode
+ )
+ # move from cache to recordings immediately
+ return await self.move_segment(
+ camera,
+ variant,
+ start_time,
+ end_time,
+ duration,
+ cache_path,
+ record_mode,
+ segment_info,
+ )
+ # if it doesn't overlap with a review item, drop the segment once it ends
+ # more than the configured pre_capture before the most recently processed
+ # frame, BUT only if continuous/motion is NOT enabled (otherwise wait for processing)
+ elif highest is None:
+ camera_info = self.object_recordings_info[camera]
+ most_recently_processed_frame_time = (
+ camera_info[-1][0] if len(camera_info) > 0 else 0
+ )
+ retain_cutoff = datetime.datetime.fromtimestamp(
+ most_recently_processed_frame_time - record_config.event_pre_capture
+ ).astimezone(datetime.timezone.utc)
+ if end_time < retain_cutoff:
+ self.drop_segment(cache_path)
+
+ def _compute_motion_heatmap(
+ self, camera: str, motion_boxes: list[tuple[int, int, int, int]]
+ ) -> dict[str, int] | None:
+ """Compute a 16x16 motion intensity heatmap from motion boxes.
+
+ Returns a sparse dict mapping cell index (as string) to intensity (1-255).
+ Only cells with motion are included.
+
+ Args:
+ camera: Camera name to get detect dimensions from.
+ motion_boxes: List of (x1, y1, x2, y2) pixel coordinates.
+
+ Returns:
+ Sparse dict like {"45": 3, "46": 5}, or None if no boxes.
+ """
+ if not motion_boxes:
+ return None
+
+ camera_config = self.config.cameras.get(camera)
+ if not camera_config:
+ return None
+
+ frame_width = camera_config.detect.width
+ frame_height = camera_config.detect.height
+
+ if frame_width <= 0 or frame_height <= 0:
+ return None
+
+ GRID_SIZE = 16
+ counts: dict[int, int] = {}
+
+ for box in motion_boxes:
+ if len(box) < 4:
+ continue
+ x1, y1, x2, y2 = box
+
+ # Convert pixel coordinates to grid cells
+ grid_x1 = max(0, int((x1 / frame_width) * GRID_SIZE))
+ grid_y1 = max(0, int((y1 / frame_height) * GRID_SIZE))
+ grid_x2 = min(GRID_SIZE - 1, int((x2 / frame_width) * GRID_SIZE))
+ grid_y2 = min(GRID_SIZE - 1, int((y2 / frame_height) * GRID_SIZE))
+
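+ # e.g. with a 640x480 detect resolution, box (320, 240, 400, 300) maps to
+ # grid cells x=8..10, y=8..10, incrementing idx = y * 16 + x for each cell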
+ for y in range(grid_y1, grid_y2 + 1):
+ for x in range(grid_x1, grid_x2 + 1):
+ idx = y * GRID_SIZE + x
+ counts[idx] = min(255, counts.get(idx, 0) + 1)
+
+ if not counts:
+ return None
+
+ # Convert to string keys for JSON storage
+ return {str(k): v for k, v in counts.items()}
+
+ def segment_stats(
+ self, camera: str, start_time: datetime.datetime, end_time: datetime.datetime
+ ) -> SegmentInfo:
+ video_frame_count = 0
+ active_count = 0
+ region_count = 0
+ motion_count = 0
+ all_motion_boxes: list[tuple[int, int, int, int]] = []
+
+ for frame in self.object_recordings_info[camera]:
+ # frame is after end time of segment
+ if frame[0] > end_time.timestamp():
+ break
+ # frame is before start time of segment
+ if frame[0] < start_time.timestamp():
+ continue
+
+ video_frame_count += 1
+ active_count += len(
+ [
+ o
+ for o in frame[1]
+ if not o["false_positive"] and o["motionless_count"] == 0
+ ]
+ )
+ motion_count += len(frame[2])
+ region_count += len(frame[3])
+ # Collect motion boxes for heatmap computation
+ all_motion_boxes.extend(frame[2])
+
+ audio_values = []
+ for frame in self.audio_recordings_info[camera]:
+ # frame is after end time of segment
+ if frame[0] > end_time.timestamp():
+ break
+
+ # frame is before start time of segment
+ if frame[0] < start_time.timestamp():
+ continue
+
+ # add active audio label count to count of active objects
+ active_count += len(frame[2])
+
+ # add sound level to audio values
+ audio_values.append(frame[1])
+
+ average_dBFS = 0 if not audio_values else np.average(audio_values)
+
+ motion_heatmap = self._compute_motion_heatmap(camera, all_motion_boxes)
+
+ return SegmentInfo(
+ motion_count,
+ active_count,
+ region_count,
+ round(average_dBFS),
+ motion_heatmap,
+ )
+
+ async def move_segment(
+ self,
+ camera: str,
+ variant: str,
+ start_time: datetime.datetime,
+ end_time: datetime.datetime,
+ duration: float,
+ cache_path: str,
+ store_mode: RetainModeEnum,
+ media_info: Optional[dict[str, Any]] = None,
+ ) -> Optional[Recordings]:
+ segment_info = self.segment_stats(camera, start_time, end_time)
+
+ # check if the segment shouldn't be stored
+ if segment_info.should_discard_segment(store_mode):
+ self.drop_segment(cache_path)
+ return None
+
+ # directory will be in utc due to start_time being in utc
+ directory = os.path.join(
+ RECORD_DIR,
+ start_time.strftime("%Y-%m-%d/%H"),
+ camera,
+ variant,
+ )
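+ # i.e. segments land under RECORD_DIR/<YYYY-MM-DD>/<HH>/<camera>/<variant>/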
+
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+
+ # file will be in utc due to start_time being in utc
+ file_name = f"{start_time.strftime('%M.%S.mp4')}"
+ file_path = os.path.join(directory, file_name)
+
+ try:
+ if not os.path.exists(file_path):
+ start_frame = datetime.datetime.now().timestamp()
+
+ # add faststart to kept segments to improve metadata reading
+ p = await asyncio.create_subprocess_exec(
+ self.config.ffmpeg.ffmpeg_path,
+ "-hide_banner",
+ "-y",
+ "-i",
+ cache_path,
+ "-c",
+ "copy",
+ "-movflags",
+ "+faststart",
+ file_path,
+ stderr=asyncio.subprocess.PIPE,
+ stdout=asyncio.subprocess.DEVNULL,
+ )
+ await p.wait()
+
+ if p.returncode != 0:
+ logger.error(f"Unable to convert {cache_path} to {file_path}")
+ logger.error((await p.stderr.read()).decode("ascii"))
+ return None
+ else:
+ logger.debug(
+ f"Copied {file_path} in {datetime.datetime.now().timestamp() - start_frame} seconds."
+ )
+
+ try:
+ # get the segment size of the cache file
+ # file without faststart is same size
+ segment_size = round(
+ float(os.path.getsize(cache_path)) / pow(2, 20), 2
+ )
+ except OSError:
+ segment_size = 0
+
+ os.remove(cache_path)
+
+ rand_id = "".join(
+ random.choices(string.ascii_lowercase + string.digits, k=6)
+ )
+
+ return {
+ Recordings.id.name: f"{start_time.timestamp()}-{rand_id}",
+ Recordings.camera.name: camera,
+ Recordings.path.name: file_path,
+ Recordings.variant.name: variant,
+ Recordings.start_time.name: start_time.timestamp(),
+ Recordings.end_time.name: end_time.timestamp(),
+ Recordings.duration.name: duration,
+ Recordings.motion.name: segment_info.motion_count,
+ # TODO: update this to store list of active objects at some point
+ Recordings.objects.name: segment_info.active_object_count,
+ Recordings.regions.name: segment_info.region_count,
+ Recordings.dBFS.name: segment_info.average_dBFS,
+ Recordings.segment_size.name: segment_size,
+ Recordings.codec_name.name: (
+ media_info.get("codec_name") if media_info else None
+ ),
+ Recordings.width.name: media_info.get("width") if media_info else None,
+ Recordings.height.name: media_info.get("height") if media_info else None,
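+ # segment_size is stored in MB, so convert back to bits
+ # (MB * 2^20 * 8) and divide by duration for an average bitrate in bits/sec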
+ Recordings.bitrate.name: (
+ int((segment_size * pow(2, 20) * 8) / duration)
+ if duration > 0 and segment_size > 0
+ else None
+ ),
+ Recordings.motion_heatmap.name: segment_info.motion_heatmap,
+ }
+ except Exception as e:
+ logger.error(f"Unable to store recording segment {cache_path}")
+ Path(cache_path).unlink(missing_ok=True)
+ logger.error(e)
+
+ # clear end_time cache
+ self.end_time_cache.pop(cache_path, None)
+ return None
+
+ def run(self) -> None:
+ # Check for new files every 5 seconds
+ wait_time = 0.0
+ while not self.stop_event.is_set():
+ time.sleep(wait_time)
+
+ if self.stop_event.is_set():
+ break
+
+ run_start = datetime.datetime.now().timestamp()
+
+ # check if there is an updated config
+ self.config_subscriber.check_for_updates()
+
+ stale_frame_count = 0
+ stale_frame_count_threshold = 10
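+ # frames more than 10s older than this run's start are counted as stale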
+ # empty the object recordings info queue
+ while True:
+ (topic, data) = self.detection_subscriber.check_for_update(
+ timeout=FAST_QUEUE_TIMEOUT
+ )
+
+ if not topic:
+ break
+
+ if topic == DetectionTypeEnum.video.value:
+ (
+ camera,
+ _,
+ frame_time,
+ current_tracked_objects,
+ motion_boxes,
+ regions,
+ ) = data
+
+ if self.config.cameras[camera].record.enabled:
+ self.object_recordings_info[camera].append(
+ (
+ frame_time,
+ current_tracked_objects,
+ motion_boxes,
+ regions,
+ )
+ )
+ elif topic == DetectionTypeEnum.audio.value:
+ (
+ camera,
+ frame_time,
+ dBFS,
+ audio_detections,
+ ) = data
+
+ if self.config.cameras[camera].record.enabled:
+ self.audio_recordings_info[camera].append(
+ (
+ frame_time,
+ dBFS,
+ audio_detections,
+ )
+ )
+ else:
+ # api and lpr detection updates are not relevant to recordings
+ continue
+
+ if frame_time < run_start - stale_frame_count_threshold:
+ stale_frame_count += 1
+
+ if stale_frame_count > 0:
+ logger.debug(f"Found {stale_frame_count} old frames.")
+
+ try:
+ asyncio.run(self.move_files())
+ except Exception as e:
+ logger.error(
+ "Error occurred when attempting to maintain recording cache"
+ )
+ logger.error(e)
+ duration = datetime.datetime.now().timestamp() - run_start
+ wait_time = max(0, 5 - duration)
+
+ self.requestor.stop()
+ self.config_subscriber.stop()
+ self.detection_subscriber.stop()
+ self.recordings_publisher.stop()
+ logger.info("Exiting recording maintenance...")
diff --git a/frigate/test/http_api/test_http_media.py b/frigate/test/http_api/test_http_media.py
index 6af3dd972..6f0adc562 100644
--- a/frigate/test/http_api/test_http_media.py
+++ b/frigate/test/http_api/test_http_media.py
@@ -1,405 +1,458 @@
-"""Unit tests for recordings/media API endpoints."""
-
-from datetime import datetime, timezone
-
-import pytz
-from fastapi import Request
-
-from frigate.api.auth import get_allowed_cameras_for_filter, get_current_user
-from frigate.models import Recordings
-from frigate.test.http_api.base_http_test import AuthTestClient, BaseTestHttp
-
-
-class TestHttpMedia(BaseTestHttp):
- """Test media API endpoints, particularly recordings with DST handling."""
-
- def setUp(self):
- """Set up test fixtures."""
- super().setUp([Recordings])
- self.app = super().create_app()
-
- # Mock get_current_user for all tests
- async def mock_get_current_user(request: Request):
- username = request.headers.get("remote-user")
- role = request.headers.get("remote-role")
- if not username or not role:
- from fastapi.responses import JSONResponse
-
- return JSONResponse(
- content={"message": "No authorization headers."}, status_code=401
- )
- return {"username": username, "role": role}
-
- self.app.dependency_overrides[get_current_user] = mock_get_current_user
-
- async def mock_get_allowed_cameras_for_filter(request: Request):
- return ["front_door"]
-
- self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
- mock_get_allowed_cameras_for_filter
- )
-
- def tearDown(self):
- """Clean up after tests."""
- self.app.dependency_overrides.clear()
- super().tearDown()
-
- def test_recordings_summary_across_dst_spring_forward(self):
- """
- Test recordings summary across spring DST transition (spring forward).
-
- In 2024, DST in America/New_York transitions on March 10, 2024 at 2:00 AM
- Clocks spring forward from 2:00 AM to 3:00 AM (EST to EDT)
- """
- tz = pytz.timezone("America/New_York")
-
- # March 9, 2024 at 12:00 PM EST (before DST)
- march_9_noon = tz.localize(datetime(2024, 3, 9, 12, 0, 0)).timestamp()
-
- # March 10, 2024 at 12:00 PM EDT (after DST transition)
- march_10_noon = tz.localize(datetime(2024, 3, 10, 12, 0, 0)).timestamp()
-
- # March 11, 2024 at 12:00 PM EDT (after DST)
- march_11_noon = tz.localize(datetime(2024, 3, 11, 12, 0, 0)).timestamp()
-
- with AuthTestClient(self.app) as client:
- # Insert recordings for each day
- Recordings.insert(
- id="recording_march_9",
- path="/media/recordings/march_9.mp4",
- camera="front_door",
- start_time=march_9_noon,
- end_time=march_9_noon + 3600, # 1 hour recording
- duration=3600,
- motion=100,
- objects=5,
- ).execute()
-
- Recordings.insert(
- id="recording_march_10",
- path="/media/recordings/march_10.mp4",
- camera="front_door",
- start_time=march_10_noon,
- end_time=march_10_noon + 3600,
- duration=3600,
- motion=150,
- objects=8,
- ).execute()
-
- Recordings.insert(
- id="recording_march_11",
- path="/media/recordings/march_11.mp4",
- camera="front_door",
- start_time=march_11_noon,
- end_time=march_11_noon + 3600,
- duration=3600,
- motion=200,
- objects=10,
- ).execute()
-
- # Test recordings summary with America/New_York timezone
- response = client.get(
- "/recordings/summary",
- params={"timezone": "America/New_York", "cameras": "all"},
- )
-
- assert response.status_code == 200
- summary = response.json()
-
- # Verify we get exactly 3 days
- assert len(summary) == 3, f"Expected 3 days, got {len(summary)}"
-
- # Verify the correct dates are returned (API returns dict with True values)
- assert "2024-03-09" in summary, f"Expected 2024-03-09 in {summary}"
- assert "2024-03-10" in summary, f"Expected 2024-03-10 in {summary}"
- assert "2024-03-11" in summary, f"Expected 2024-03-11 in {summary}"
- assert summary["2024-03-09"] is True
- assert summary["2024-03-10"] is True
- assert summary["2024-03-11"] is True
-
- def test_recordings_summary_across_dst_fall_back(self):
- """
- Test recordings summary across fall DST transition (fall back).
-
- In 2024, DST in America/New_York transitions on November 3, 2024 at 2:00 AM
- Clocks fall back from 2:00 AM to 1:00 AM (EDT to EST)
- """
- tz = pytz.timezone("America/New_York")
-
- # November 2, 2024 at 12:00 PM EDT (before DST transition)
- nov_2_noon = tz.localize(datetime(2024, 11, 2, 12, 0, 0)).timestamp()
-
- # November 3, 2024 at 12:00 PM EST (after DST transition)
- # Need to specify is_dst=False to get the time after fall back
- nov_3_noon = tz.localize(
- datetime(2024, 11, 3, 12, 0, 0), is_dst=False
- ).timestamp()
-
- # November 4, 2024 at 12:00 PM EST (after DST)
- nov_4_noon = tz.localize(datetime(2024, 11, 4, 12, 0, 0)).timestamp()
-
- with AuthTestClient(self.app) as client:
- # Insert recordings for each day
- Recordings.insert(
- id="recording_nov_2",
- path="/media/recordings/nov_2.mp4",
- camera="front_door",
- start_time=nov_2_noon,
- end_time=nov_2_noon + 3600,
- duration=3600,
- motion=100,
- objects=5,
- ).execute()
-
- Recordings.insert(
- id="recording_nov_3",
- path="/media/recordings/nov_3.mp4",
- camera="front_door",
- start_time=nov_3_noon,
- end_time=nov_3_noon + 3600,
- duration=3600,
- motion=150,
- objects=8,
- ).execute()
-
- Recordings.insert(
- id="recording_nov_4",
- path="/media/recordings/nov_4.mp4",
- camera="front_door",
- start_time=nov_4_noon,
- end_time=nov_4_noon + 3600,
- duration=3600,
- motion=200,
- objects=10,
- ).execute()
-
- # Test recordings summary with America/New_York timezone
- response = client.get(
- "/recordings/summary",
- params={"timezone": "America/New_York", "cameras": "all"},
- )
-
- assert response.status_code == 200
- summary = response.json()
-
- # Verify we get exactly 3 days
- assert len(summary) == 3, f"Expected 3 days, got {len(summary)}"
-
- # Verify the correct dates are returned (API returns dict with True values)
- assert "2024-11-02" in summary, f"Expected 2024-11-02 in {summary}"
- assert "2024-11-03" in summary, f"Expected 2024-11-03 in {summary}"
- assert "2024-11-04" in summary, f"Expected 2024-11-04 in {summary}"
- assert summary["2024-11-02"] is True
- assert summary["2024-11-03"] is True
- assert summary["2024-11-04"] is True
-
- def test_recordings_summary_multiple_cameras_across_dst(self):
- """
- Test recordings summary with multiple cameras across DST boundary.
- """
- tz = pytz.timezone("America/New_York")
-
- # March 9, 2024 at 10:00 AM EST (before DST)
- march_9_morning = tz.localize(datetime(2024, 3, 9, 10, 0, 0)).timestamp()
-
- # March 10, 2024 at 3:00 PM EDT (after DST transition)
- march_10_afternoon = tz.localize(datetime(2024, 3, 10, 15, 0, 0)).timestamp()
-
- with AuthTestClient(self.app) as client:
- # Override allowed cameras for this test to include both
- async def mock_get_allowed_cameras_for_filter(_request: Request):
- return ["front_door", "back_door"]
-
- self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
- mock_get_allowed_cameras_for_filter
- )
-
- # Insert recordings for front_door on March 9
- Recordings.insert(
- id="front_march_9",
- path="/media/recordings/front_march_9.mp4",
- camera="front_door",
- start_time=march_9_morning,
- end_time=march_9_morning + 3600,
- duration=3600,
- motion=100,
- objects=5,
- ).execute()
-
- # Insert recordings for back_door on March 10
- Recordings.insert(
- id="back_march_10",
- path="/media/recordings/back_march_10.mp4",
- camera="back_door",
- start_time=march_10_afternoon,
- end_time=march_10_afternoon + 3600,
- duration=3600,
- motion=150,
- objects=8,
- ).execute()
-
- # Test with all cameras
- response = client.get(
- "/recordings/summary",
- params={"timezone": "America/New_York", "cameras": "all"},
- )
-
- assert response.status_code == 200
- summary = response.json()
-
- # Verify we get both days
- assert len(summary) == 2, f"Expected 2 days, got {len(summary)}"
- assert "2024-03-09" in summary
- assert "2024-03-10" in summary
- assert summary["2024-03-09"] is True
- assert summary["2024-03-10"] is True
-
- # Reset dependency override back to default single camera for other tests
- async def reset_allowed_cameras(_request: Request):
- return ["front_door"]
-
- self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
- reset_allowed_cameras
- )
-
- def test_recordings_summary_at_dst_transition_time(self):
- """
- Test recordings that span the exact DST transition time.
- """
- tz = pytz.timezone("America/New_York")
-
- # March 10, 2024 at 1:00 AM EST (1 hour before DST transition)
- # At 2:00 AM, clocks jump to 3:00 AM
- before_transition = tz.localize(datetime(2024, 3, 10, 1, 0, 0)).timestamp()
-
- # Recording that spans the transition (1:00 AM to 3:30 AM EDT)
- # This is 1.5 hours of actual time but spans the "missing" hour
- after_transition = tz.localize(datetime(2024, 3, 10, 3, 30, 0)).timestamp()
-
- with AuthTestClient(self.app) as client:
- Recordings.insert(
- id="recording_during_transition",
- path="/media/recordings/transition.mp4",
- camera="front_door",
- start_time=before_transition,
- end_time=after_transition,
- duration=after_transition - before_transition,
- motion=100,
- objects=5,
- ).execute()
-
- response = client.get(
- "/recordings/summary",
- params={"timezone": "America/New_York", "cameras": "all"},
- )
-
- assert response.status_code == 200
- summary = response.json()
-
- # The recording should appear on March 10
- assert len(summary) == 1
- assert "2024-03-10" in summary
- assert summary["2024-03-10"] is True
-
- def test_recordings_summary_utc_timezone(self):
- """
- Test recordings summary with UTC timezone (no DST).
- """
- # Use UTC timestamps directly
- march_9_utc = datetime(2024, 3, 9, 17, 0, 0, tzinfo=timezone.utc).timestamp()
- march_10_utc = datetime(2024, 3, 10, 17, 0, 0, tzinfo=timezone.utc).timestamp()
-
- with AuthTestClient(self.app) as client:
- Recordings.insert(
- id="recording_march_9_utc",
- path="/media/recordings/march_9_utc.mp4",
- camera="front_door",
- start_time=march_9_utc,
- end_time=march_9_utc + 3600,
- duration=3600,
- motion=100,
- objects=5,
- ).execute()
-
- Recordings.insert(
- id="recording_march_10_utc",
- path="/media/recordings/march_10_utc.mp4",
- camera="front_door",
- start_time=march_10_utc,
- end_time=march_10_utc + 3600,
- duration=3600,
- motion=150,
- objects=8,
- ).execute()
-
- # Test with UTC timezone
- response = client.get(
- "/recordings/summary", params={"timezone": "utc", "cameras": "all"}
- )
-
- assert response.status_code == 200
- summary = response.json()
-
- # Verify we get both days
- assert len(summary) == 2
- assert "2024-03-09" in summary
- assert "2024-03-10" in summary
- assert summary["2024-03-09"] is True
- assert summary["2024-03-10"] is True
-
- def test_recordings_summary_no_recordings(self):
- """
- Test recordings summary when no recordings exist.
- """
- with AuthTestClient(self.app) as client:
- response = client.get(
- "/recordings/summary",
- params={"timezone": "America/New_York", "cameras": "all"},
- )
-
- assert response.status_code == 200
- summary = response.json()
- assert len(summary) == 0
-
- def test_recordings_summary_single_camera_filter(self):
- """
- Test recordings summary filtered to a single camera.
- """
- tz = pytz.timezone("America/New_York")
- march_10_noon = tz.localize(datetime(2024, 3, 10, 12, 0, 0)).timestamp()
-
- with AuthTestClient(self.app) as client:
- # Insert recordings for both cameras
- Recordings.insert(
- id="front_recording",
- path="/media/recordings/front.mp4",
- camera="front_door",
- start_time=march_10_noon,
- end_time=march_10_noon + 3600,
- duration=3600,
- motion=100,
- objects=5,
- ).execute()
-
- Recordings.insert(
- id="back_recording",
- path="/media/recordings/back.mp4",
- camera="back_door",
- start_time=march_10_noon,
- end_time=march_10_noon + 3600,
- duration=3600,
- motion=150,
- objects=8,
- ).execute()
-
- # Test with only front_door camera
- response = client.get(
- "/recordings/summary",
- params={"timezone": "America/New_York", "cameras": "front_door"},
- )
-
- assert response.status_code == 200
- summary = response.json()
- assert len(summary) == 1
- assert "2024-03-10" in summary
- assert summary["2024-03-10"] is True
+"""Unit tests for recordings/media API endpoints."""
+
+from datetime import datetime, timezone
+
+import pytz
+from fastapi import Request
+
+from frigate.api.auth import get_allowed_cameras_for_filter, get_current_user
+from frigate.models import Recordings
+from frigate.test.http_api.base_http_test import AuthTestClient, BaseTestHttp
+
+
+class TestHttpMedia(BaseTestHttp):
+ """Test media API endpoints, particularly recordings with DST handling."""
+
+ def setUp(self):
+ """Set up test fixtures."""
+ super().setUp([Recordings])
+ self.app = super().create_app()
+
+ # Mock get_current_user for all tests
+ async def mock_get_current_user(request: Request):
+ username = request.headers.get("remote-user")
+ role = request.headers.get("remote-role")
+ if not username or not role:
+ from fastapi.responses import JSONResponse
+
+ return JSONResponse(
+ content={"message": "No authorization headers."}, status_code=401
+ )
+ return {"username": username, "role": role}
+
+ self.app.dependency_overrides[get_current_user] = mock_get_current_user
+
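+        # Restrict the camera filter to front_door by default; tests that need
+        # additional cameras override this dependency themselves.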
+ async def mock_get_allowed_cameras_for_filter(request: Request):
+ return ["front_door"]
+
+ self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
+ mock_get_allowed_cameras_for_filter
+ )
+
+ def tearDown(self):
+ """Clean up after tests."""
+ self.app.dependency_overrides.clear()
+ super().tearDown()
+
+ def test_camera_recordings_variant_filter(self):
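+        """
+        Test that the recordings endpoint returns only the main variant by
+        default and both variants when variant=all is requested.
+        """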
+ start_ts = datetime(2024, 3, 9, 12, 0, 0, tzinfo=timezone.utc).timestamp()
+ end_ts = start_ts + 3600
+
+ with AuthTestClient(self.app) as client:
+ Recordings.insert(
+ id="recording_main",
+ path="/media/recordings/front/main.mp4",
+ camera="front_door",
+ variant="main",
+ start_time=start_ts,
+ end_time=end_ts,
+ duration=3600,
+ motion=100,
+ objects=5,
+ codec_name="h264",
+ width=1920,
+ height=1080,
+ bitrate=4_000_000,
+ ).execute()
+ Recordings.insert(
+ id="recording_sub",
+ path="/media/recordings/front/sub.mp4",
+ camera="front_door",
+ variant="sub",
+ start_time=start_ts,
+ end_time=end_ts,
+ duration=3600,
+ motion=100,
+ objects=5,
+ codec_name="h264",
+ width=640,
+ height=360,
+ bitrate=512_000,
+ ).execute()
+
+ default_response = client.get(
+ "/front_door/recordings",
+ params={"after": start_ts, "before": end_ts},
+ )
+ assert default_response.status_code == 200
+ default_recordings = default_response.json()
+ assert len(default_recordings) == 1
+ assert default_recordings[0]["variant"] == "main"
+
+ all_response = client.get(
+ "/front_door/recordings",
+ params={"after": start_ts, "before": end_ts, "variant": "all"},
+ )
+ assert all_response.status_code == 200
+ variants = {recording["variant"] for recording in all_response.json()}
+ assert variants == {"main", "sub"}
+
+ def test_recordings_summary_across_dst_spring_forward(self):
+ """
+ Test recordings summary across spring DST transition (spring forward).
+
+        In 2024, DST in America/New_York begins on March 10 at 2:00 AM, when
+        clocks spring forward from 2:00 AM to 3:00 AM (EST to EDT).
+ """
+ tz = pytz.timezone("America/New_York")
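+        # Note: pytz zones must be attached with localize(); passing tzinfo=tz
+        # directly to datetime() would silently use the zone's raw LMT offset.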
+
+ # March 9, 2024 at 12:00 PM EST (before DST)
+ march_9_noon = tz.localize(datetime(2024, 3, 9, 12, 0, 0)).timestamp()
+
+ # March 10, 2024 at 12:00 PM EDT (after DST transition)
+ march_10_noon = tz.localize(datetime(2024, 3, 10, 12, 0, 0)).timestamp()
+
+ # March 11, 2024 at 12:00 PM EDT (after DST)
+ march_11_noon = tz.localize(datetime(2024, 3, 11, 12, 0, 0)).timestamp()
+
+ with AuthTestClient(self.app) as client:
+ # Insert recordings for each day
+ Recordings.insert(
+ id="recording_march_9",
+ path="/media/recordings/march_9.mp4",
+ camera="front_door",
+ start_time=march_9_noon,
+ end_time=march_9_noon + 3600, # 1 hour recording
+ duration=3600,
+ motion=100,
+ objects=5,
+ ).execute()
+
+ Recordings.insert(
+ id="recording_march_10",
+ path="/media/recordings/march_10.mp4",
+ camera="front_door",
+ start_time=march_10_noon,
+ end_time=march_10_noon + 3600,
+ duration=3600,
+ motion=150,
+ objects=8,
+ ).execute()
+
+ Recordings.insert(
+ id="recording_march_11",
+ path="/media/recordings/march_11.mp4",
+ camera="front_door",
+ start_time=march_11_noon,
+ end_time=march_11_noon + 3600,
+ duration=3600,
+ motion=200,
+ objects=10,
+ ).execute()
+
+ # Test recordings summary with America/New_York timezone
+ response = client.get(
+ "/recordings/summary",
+ params={"timezone": "America/New_York", "cameras": "all"},
+ )
+
+ assert response.status_code == 200
+ summary = response.json()
+
+ # Verify we get exactly 3 days
+ assert len(summary) == 3, f"Expected 3 days, got {len(summary)}"
+
+ # Verify the correct dates are returned (API returns dict with True values)
+ assert "2024-03-09" in summary, f"Expected 2024-03-09 in {summary}"
+ assert "2024-03-10" in summary, f"Expected 2024-03-10 in {summary}"
+ assert "2024-03-11" in summary, f"Expected 2024-03-11 in {summary}"
+ assert summary["2024-03-09"] is True
+ assert summary["2024-03-10"] is True
+ assert summary["2024-03-11"] is True
+
+ def test_recordings_summary_across_dst_fall_back(self):
+ """
+ Test recordings summary across fall DST transition (fall back).
+
+        In 2024, DST in America/New_York ends on November 3 at 2:00 AM, when
+        clocks fall back from 2:00 AM to 1:00 AM (EDT to EST).
+ """
+ tz = pytz.timezone("America/New_York")
+
+ # November 2, 2024 at 12:00 PM EDT (before DST transition)
+ nov_2_noon = tz.localize(datetime(2024, 11, 2, 12, 0, 0)).timestamp()
+
+        # November 3, 2024 at 12:00 PM EST (after DST transition)
+        # Noon is outside the ambiguous 1:00-2:00 AM window, but is_dst=False
+        # makes the choice of standard time after the fall back explicit
+ nov_3_noon = tz.localize(
+ datetime(2024, 11, 3, 12, 0, 0), is_dst=False
+ ).timestamp()
+
+ # November 4, 2024 at 12:00 PM EST (after DST)
+ nov_4_noon = tz.localize(datetime(2024, 11, 4, 12, 0, 0)).timestamp()
+
+ with AuthTestClient(self.app) as client:
+ # Insert recordings for each day
+ Recordings.insert(
+ id="recording_nov_2",
+ path="/media/recordings/nov_2.mp4",
+ camera="front_door",
+ start_time=nov_2_noon,
+ end_time=nov_2_noon + 3600,
+ duration=3600,
+ motion=100,
+ objects=5,
+ ).execute()
+
+ Recordings.insert(
+ id="recording_nov_3",
+ path="/media/recordings/nov_3.mp4",
+ camera="front_door",
+ start_time=nov_3_noon,
+ end_time=nov_3_noon + 3600,
+ duration=3600,
+ motion=150,
+ objects=8,
+ ).execute()
+
+ Recordings.insert(
+ id="recording_nov_4",
+ path="/media/recordings/nov_4.mp4",
+ camera="front_door",
+ start_time=nov_4_noon,
+ end_time=nov_4_noon + 3600,
+ duration=3600,
+ motion=200,
+ objects=10,
+ ).execute()
+
+ # Test recordings summary with America/New_York timezone
+ response = client.get(
+ "/recordings/summary",
+ params={"timezone": "America/New_York", "cameras": "all"},
+ )
+
+ assert response.status_code == 200
+ summary = response.json()
+
+ # Verify we get exactly 3 days
+ assert len(summary) == 3, f"Expected 3 days, got {len(summary)}"
+
+ # Verify the correct dates are returned (API returns dict with True values)
+ assert "2024-11-02" in summary, f"Expected 2024-11-02 in {summary}"
+ assert "2024-11-03" in summary, f"Expected 2024-11-03 in {summary}"
+ assert "2024-11-04" in summary, f"Expected 2024-11-04 in {summary}"
+ assert summary["2024-11-02"] is True
+ assert summary["2024-11-03"] is True
+ assert summary["2024-11-04"] is True
+
+ def test_recordings_summary_multiple_cameras_across_dst(self):
+ """
+ Test recordings summary with multiple cameras across DST boundary.
+ """
+ tz = pytz.timezone("America/New_York")
+
+ # March 9, 2024 at 10:00 AM EST (before DST)
+ march_9_morning = tz.localize(datetime(2024, 3, 9, 10, 0, 0)).timestamp()
+
+ # March 10, 2024 at 3:00 PM EDT (after DST transition)
+ march_10_afternoon = tz.localize(datetime(2024, 3, 10, 15, 0, 0)).timestamp()
+
+ with AuthTestClient(self.app) as client:
+ # Override allowed cameras for this test to include both
+ async def mock_get_allowed_cameras_for_filter(_request: Request):
+ return ["front_door", "back_door"]
+
+ self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
+ mock_get_allowed_cameras_for_filter
+ )
+
+ # Insert recordings for front_door on March 9
+ Recordings.insert(
+ id="front_march_9",
+ path="/media/recordings/front_march_9.mp4",
+ camera="front_door",
+ start_time=march_9_morning,
+ end_time=march_9_morning + 3600,
+ duration=3600,
+ motion=100,
+ objects=5,
+ ).execute()
+
+ # Insert recordings for back_door on March 10
+ Recordings.insert(
+ id="back_march_10",
+ path="/media/recordings/back_march_10.mp4",
+ camera="back_door",
+ start_time=march_10_afternoon,
+ end_time=march_10_afternoon + 3600,
+ duration=3600,
+ motion=150,
+ objects=8,
+ ).execute()
+
+ # Test with all cameras
+ response = client.get(
+ "/recordings/summary",
+ params={"timezone": "America/New_York", "cameras": "all"},
+ )
+
+ assert response.status_code == 200
+ summary = response.json()
+
+ # Verify we get both days
+ assert len(summary) == 2, f"Expected 2 days, got {len(summary)}"
+ assert "2024-03-09" in summary
+ assert "2024-03-10" in summary
+ assert summary["2024-03-09"] is True
+ assert summary["2024-03-10"] is True
+
+ # Reset dependency override back to default single camera for other tests
+ async def reset_allowed_cameras(_request: Request):
+ return ["front_door"]
+
+ self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
+ reset_allowed_cameras
+ )
+
+ def test_recordings_summary_at_dst_transition_time(self):
+ """
+ Test recordings that span the exact DST transition time.
+ """
+ tz = pytz.timezone("America/New_York")
+
+ # March 10, 2024 at 1:00 AM EST (1 hour before DST transition)
+ # At 2:00 AM, clocks jump to 3:00 AM
+ before_transition = tz.localize(datetime(2024, 3, 10, 1, 0, 0)).timestamp()
+
+ # Recording that spans the transition (1:00 AM to 3:30 AM EDT)
+ # This is 1.5 hours of actual time but spans the "missing" hour
+ after_transition = tz.localize(datetime(2024, 3, 10, 3, 30, 0)).timestamp()
+
+ with AuthTestClient(self.app) as client:
+ Recordings.insert(
+ id="recording_during_transition",
+ path="/media/recordings/transition.mp4",
+ camera="front_door",
+ start_time=before_transition,
+ end_time=after_transition,
+ duration=after_transition - before_transition,
+ motion=100,
+ objects=5,
+ ).execute()
+
+ response = client.get(
+ "/recordings/summary",
+ params={"timezone": "America/New_York", "cameras": "all"},
+ )
+
+ assert response.status_code == 200
+ summary = response.json()
+
+ # The recording should appear on March 10
+ assert len(summary) == 1
+ assert "2024-03-10" in summary
+ assert summary["2024-03-10"] is True
+
+ def test_recordings_summary_utc_timezone(self):
+ """
+ Test recordings summary with UTC timezone (no DST).
+ """
+ # Use UTC timestamps directly
+ march_9_utc = datetime(2024, 3, 9, 17, 0, 0, tzinfo=timezone.utc).timestamp()
+ march_10_utc = datetime(2024, 3, 10, 17, 0, 0, tzinfo=timezone.utc).timestamp()
+
+ with AuthTestClient(self.app) as client:
+ Recordings.insert(
+ id="recording_march_9_utc",
+ path="/media/recordings/march_9_utc.mp4",
+ camera="front_door",
+ start_time=march_9_utc,
+ end_time=march_9_utc + 3600,
+ duration=3600,
+ motion=100,
+ objects=5,
+ ).execute()
+
+ Recordings.insert(
+ id="recording_march_10_utc",
+ path="/media/recordings/march_10_utc.mp4",
+ camera="front_door",
+ start_time=march_10_utc,
+ end_time=march_10_utc + 3600,
+ duration=3600,
+ motion=150,
+ objects=8,
+ ).execute()
+
+ # Test with UTC timezone
+ response = client.get(
+ "/recordings/summary", params={"timezone": "utc", "cameras": "all"}
+ )
+
+ assert response.status_code == 200
+ summary = response.json()
+
+ # Verify we get both days
+ assert len(summary) == 2
+ assert "2024-03-09" in summary
+ assert "2024-03-10" in summary
+ assert summary["2024-03-09"] is True
+ assert summary["2024-03-10"] is True
+
+ def test_recordings_summary_no_recordings(self):
+ """
+ Test recordings summary when no recordings exist.
+ """
+ with AuthTestClient(self.app) as client:
+ response = client.get(
+ "/recordings/summary",
+ params={"timezone": "America/New_York", "cameras": "all"},
+ )
+
+ assert response.status_code == 200
+ summary = response.json()
+ assert len(summary) == 0
+
+ def test_recordings_summary_single_camera_filter(self):
+ """
+ Test recordings summary filtered to a single camera.
+ """
+ tz = pytz.timezone("America/New_York")
+ march_10_noon = tz.localize(datetime(2024, 3, 10, 12, 0, 0)).timestamp()
+
+ with AuthTestClient(self.app) as client:
+ # Insert recordings for both cameras
+ Recordings.insert(
+ id="front_recording",
+ path="/media/recordings/front.mp4",
+ camera="front_door",
+ start_time=march_10_noon,
+ end_time=march_10_noon + 3600,
+ duration=3600,
+ motion=100,
+ objects=5,
+ ).execute()
+
+ Recordings.insert(
+ id="back_recording",
+ path="/media/recordings/back.mp4",
+ camera="back_door",
+ start_time=march_10_noon,
+ end_time=march_10_noon + 3600,
+ duration=3600,
+ motion=150,
+ objects=8,
+ ).execute()
+
+ # Test with only front_door camera
+ response = client.get(
+ "/recordings/summary",
+ params={"timezone": "America/New_York", "cameras": "front_door"},
+ )
+
+ assert response.status_code == 200
+ summary = response.json()
+ assert len(summary) == 1
+ assert "2024-03-10" in summary
+ assert summary["2024-03-10"] is True
diff --git a/frigate/test/test_maintainer.py b/frigate/test/test_maintainer.py
index d978cfd9f..562d42ba0 100644
--- a/frigate/test/test_maintainer.py
+++ b/frigate/test/test_maintainer.py
@@ -1,66 +1,78 @@
-import sys
-import unittest
-from unittest.mock import MagicMock, patch
-
-# Mock complex imports before importing maintainer
-sys.modules["frigate.comms.inter_process"] = MagicMock()
-sys.modules["frigate.comms.detections_updater"] = MagicMock()
-sys.modules["frigate.comms.recordings_updater"] = MagicMock()
-sys.modules["frigate.config.camera.updater"] = MagicMock()
-
-# Now import the class under test
-from frigate.config import FrigateConfig # noqa: E402
-from frigate.record.maintainer import RecordingMaintainer # noqa: E402
-
-
-class TestMaintainer(unittest.IsolatedAsyncioTestCase):
- async def test_move_files_survives_bad_filename(self):
- config = MagicMock(spec=FrigateConfig)
- config.cameras = {}
- stop_event = MagicMock()
-
- maintainer = RecordingMaintainer(config, stop_event)
-
- # We need to mock end_time_cache to avoid key errors if logic proceeds
- maintainer.end_time_cache = {}
-
- # Mock filesystem
- # One bad file, one good file
- files = ["bad_filename.mp4", "camera@20210101000000+0000.mp4"]
-
- with patch("os.listdir", return_value=files):
- with patch("os.path.isfile", return_value=True):
- with patch(
- "frigate.record.maintainer.psutil.process_iter", return_value=[]
- ):
- with patch("frigate.record.maintainer.logger.warning") as warn:
- # Mock validate_and_move_segment to avoid further logic
- maintainer.validate_and_move_segment = MagicMock()
-
- try:
- await maintainer.move_files()
- except ValueError as e:
- if "not enough values to unpack" in str(e):
- self.fail("move_files() crashed on bad filename!")
- raise e
- except Exception:
- # Ignore other errors (like DB connection) as we only care about the unpack crash
- pass
-
- # The bad filename is encountered in multiple loops, but should only warn once.
- matching = [
- c
- for c in warn.call_args_list
- if c.args
- and isinstance(c.args[0], str)
- and "Skipping unexpected files in cache" in c.args[0]
- ]
- self.assertEqual(
- 1,
- len(matching),
- f"Expected a single warning for unexpected files, got {len(matching)}",
- )
-
-
-if __name__ == "__main__":
- unittest.main()
+import sys
+import unittest
+from unittest.mock import MagicMock, patch
+
+# Mock complex imports before importing maintainer
+sys.modules["frigate.comms.inter_process"] = MagicMock()
+sys.modules["frigate.comms.detections_updater"] = MagicMock()
+sys.modules["frigate.comms.recordings_updater"] = MagicMock()
+sys.modules["frigate.config.camera.updater"] = MagicMock()
+
+# Now import the class under test
+from frigate.config import FrigateConfig # noqa: E402
+from frigate.record.maintainer import RecordingMaintainer # noqa: E402
+
+
+class TestMaintainer(unittest.IsolatedAsyncioTestCase):
+ async def test_parse_cache_segment_supports_variant(self):
+ config = MagicMock(spec=FrigateConfig)
+ config.cameras = {}
+ stop_event = MagicMock()
+
+ maintainer = RecordingMaintainer(config, stop_event)
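+        # Cache segment names follow "<camera>[@<variant>]@<timestamp>.mp4",
+        # as exercised by the filenames in these tests.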
+ parsed = maintainer._parse_cache_segment("front@sub@20210101000000+0000.mp4")
+
+ self.assertIsNotNone(parsed)
+ self.assertEqual("front", parsed["camera"])
+ self.assertEqual("sub", parsed["variant"])
+
+ async def test_move_files_survives_bad_filename(self):
+ config = MagicMock(spec=FrigateConfig)
+ config.cameras = {}
+ stop_event = MagicMock()
+
+ maintainer = RecordingMaintainer(config, stop_event)
+
+ # We need to mock end_time_cache to avoid key errors if logic proceeds
+ maintainer.end_time_cache = {}
+
+ # Mock filesystem
+ # One bad file, one good file
+ files = ["bad_filename.mp4", "camera@20210101000000+0000.mp4"]
+
+ with patch("os.listdir", return_value=files):
+ with patch("os.path.isfile", return_value=True):
+ with patch(
+ "frigate.record.maintainer.psutil.process_iter", return_value=[]
+ ):
+ with patch("frigate.record.maintainer.logger.warning") as warn:
+ # Mock validate_and_move_segment to avoid further logic
+ maintainer.validate_and_move_segment = MagicMock()
+
+ try:
+ await maintainer.move_files()
+ except ValueError as e:
+ if "not enough values to unpack" in str(e):
+ self.fail("move_files() crashed on bad filename!")
+                            raise  # re-raise with the original traceback
+ except Exception:
+ # Ignore other errors (like DB connection) as we only care about the unpack crash
+ pass
+
+ # The bad filename is encountered in multiple loops, but should only warn once.
+ matching = [
+ c
+ for c in warn.call_args_list
+ if c.args
+ and isinstance(c.args[0], str)
+ and "Skipping unexpected files in cache" in c.args[0]
+ ]
+ self.assertEqual(
+ 1,
+ len(matching),
+ f"Expected a single warning for unexpected files, got {len(matching)}",
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/frigate/util/services.py b/frigate/util/services.py
index db6e37540..7bd8b978f 100644
--- a/frigate/util/services.py
+++ b/frigate/util/services.py
@@ -1,972 +1,973 @@
-"""Utilities for services."""
-
-import asyncio
-import json
-import logging
-import os
-import re
-import resource
-import shutil
-import signal
-import subprocess as sp
-import time
-import traceback
-from datetime import datetime
-from typing import Any, List, Optional, Tuple
-
-import cv2
-import psutil
-import py3nvml.py3nvml as nvml
-import requests
-
-from frigate.const import (
- DRIVER_AMD,
- DRIVER_ENV_VAR,
- FFMPEG_HWACCEL_NVIDIA,
- FFMPEG_HWACCEL_VAAPI,
- SHM_FRAMES_VAR,
-)
-from frigate.util.builtin import clean_camera_user_pass, escape_special_characters
-
-logger = logging.getLogger(__name__)
-
-
-def restart_frigate():
- proc = psutil.Process(1)
- # if this is running via s6, sigterm pid 1
- if proc.name() == "s6-svscan":
- proc.terminate()
- # otherwise, just try and exit frigate
- else:
- os.kill(os.getpid(), signal.SIGINT)
-
-
-def print_stack(sig, frame):
- traceback.print_stack(frame)
-
-
-def listen():
- signal.signal(signal.SIGUSR1, print_stack)
-
-
-def get_cgroups_version() -> str:
- """Determine what version of cgroups is enabled."""
-
- cgroup_path = "/sys/fs/cgroup"
-
- if not os.path.ismount(cgroup_path):
- logger.debug(f"{cgroup_path} is not a mount point.")
- return "unknown"
-
- try:
- with open("/proc/mounts", "r") as f:
- mounts = f.readlines()
-
- for mount in mounts:
- mount_info = mount.split()
- if mount_info[1] == cgroup_path:
- fs_type = mount_info[2]
- if fs_type == "cgroup2fs" or fs_type == "cgroup2":
- return "cgroup2"
- elif fs_type == "tmpfs":
- return "cgroup"
- else:
- logger.debug(
- f"Could not determine cgroups version: unhandled filesystem {fs_type}"
- )
- break
- except Exception as e:
- logger.debug(f"Could not determine cgroups version: {e}")
-
- return "unknown"
-
-
-def get_docker_memlimit_bytes() -> int:
- """Get mem limit in bytes set in docker if present. Returns -1 if no limit detected."""
-
- # check running a supported cgroups version
- if get_cgroups_version() == "cgroup2":
- memlimit_path = "/sys/fs/cgroup/memory.max"
-
- try:
- with open(memlimit_path, "r") as f:
- value = f.read().strip()
-
- if value.isnumeric():
- return int(value)
- elif value.lower() == "max":
- return -1
- except Exception as e:
- logger.debug(f"Unable to get docker memlimit: {e}")
-
- return -1
-
-
-def get_cpu_stats() -> dict[str, dict]:
- """Get cpu usages for each process id"""
- usages = {}
- docker_memlimit = get_docker_memlimit_bytes() / 1024
- total_mem = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES") / 1024
-
- system_cpu = psutil.cpu_percent(
- interval=None
- ) # no interval as we don't want to be blocking
- system_mem = psutil.virtual_memory()
- usages["frigate.full_system"] = {
- "cpu": str(system_cpu),
- "mem": str(system_mem.percent),
- }
-
- for process in psutil.process_iter(["pid", "name", "cpu_percent", "cmdline"]):
- pid = str(process.info["pid"])
- try:
- cpu_percent = process.info["cpu_percent"]
- cmdline = " ".join(process.info["cmdline"]).rstrip()
-
- with open(f"/proc/{pid}/stat", "r") as f:
- stats = f.readline().split()
- utime = int(stats[13])
- stime = int(stats[14])
- start_time = int(stats[21])
-
- with open("/proc/uptime") as f:
- system_uptime_sec = int(float(f.read().split()[0]))
-
- clk_tck = os.sysconf(os.sysconf_names["SC_CLK_TCK"])
-
- process_utime_sec = utime // clk_tck
- process_stime_sec = stime // clk_tck
- process_start_time_sec = start_time // clk_tck
-
- process_elapsed_sec = system_uptime_sec - process_start_time_sec
- process_usage_sec = process_utime_sec + process_stime_sec
- cpu_average_usage = process_usage_sec * 100 // process_elapsed_sec
-
- with open(f"/proc/{pid}/statm", "r") as f:
- mem_stats = f.readline().split()
- mem_res = int(mem_stats[1]) * os.sysconf("SC_PAGE_SIZE") / 1024
-
- if docker_memlimit > 0:
- mem_pct = round((mem_res / docker_memlimit) * 100, 1)
- else:
- mem_pct = round((mem_res / total_mem) * 100, 1)
-
- usages[pid] = {
- "cpu": str(cpu_percent),
- "cpu_average": str(round(cpu_average_usage, 2)),
- "mem": f"{mem_pct}",
- "cmdline": clean_camera_user_pass(cmdline),
- }
- except Exception:
- continue
-
- return usages
-
-
-def get_physical_interfaces(interfaces) -> list:
- if not interfaces:
- return []
-
- with open("/proc/net/dev", "r") as file:
- lines = file.readlines()
-
- physical_interfaces = []
- for line in lines:
- if ":" in line:
- interface = line.split(":")[0].strip()
- for int in interfaces:
- if interface.startswith(int):
- physical_interfaces.append(interface)
-
- return physical_interfaces
-
-
-def get_bandwidth_stats(config) -> dict[str, dict]:
- """Get bandwidth usages for each ffmpeg process id"""
- usages = {}
- top_command = ["nethogs", "-t", "-v0", "-c5", "-d1"] + get_physical_interfaces(
- config.telemetry.network_interfaces
- )
-
- p = sp.run(
- top_command,
- encoding="ascii",
- capture_output=True,
- )
-
- if p.returncode != 0:
- logger.error(f"Error getting network stats :: {p.stderr}")
- return usages
- else:
- lines = p.stdout.split("\n")
- for line in lines:
- stats = list(filter(lambda a: a != "", line.strip().split("\t")))
- try:
- if re.search(
- r"(^ffmpeg|\/go2rtc|frigate\.detector\.[a-z]+)/([0-9]+)/", stats[0]
- ):
- process = stats[0].split("/")
- usages[process[len(process) - 2]] = {
- "bandwidth": round(float(stats[1]) + float(stats[2]), 1),
- }
- except (IndexError, ValueError):
- continue
-
- return usages
-
-
-def is_vaapi_amd_driver() -> bool:
- # Use the explicitly configured driver, if available
- driver = os.environ.get(DRIVER_ENV_VAR)
- if driver:
- return driver == DRIVER_AMD
-
- # Otherwise, ask vainfo what is has autodetected
- p = vainfo_hwaccel()
-
- if p.returncode != 0:
- logger.error(f"Unable to poll vainfo: {p.stderr}")
- return False
- else:
- output = p.stdout.decode("unicode_escape").split("\n")
-
- # VA Info will print out the friendly name of the driver
- return any("AMD Radeon Graphics" in line for line in output)
-
-
-def get_amd_gpu_stats() -> Optional[dict[str, str]]:
- """Get stats using radeontop."""
- radeontop_command = ["radeontop", "-d", "-", "-l", "1"]
-
- p = sp.run(
- radeontop_command,
- encoding="ascii",
- capture_output=True,
- )
-
- if p.returncode != 0:
- logger.error(f"Unable to poll radeon GPU stats: {p.stderr}")
- return None
- else:
- usages = p.stdout.split(",")
- results: dict[str, str] = {}
-
- for hw in usages:
- if "gpu" in hw:
- results["gpu"] = f"{hw.strip().split(' ')[1].replace('%', '')}%"
- elif "vram" in hw:
- results["mem"] = f"{hw.strip().split(' ')[1].replace('%', '')}%"
-
- return results
-
-
-def get_intel_gpu_stats(intel_gpu_device: Optional[str]) -> Optional[dict[str, str]]:
- """Get stats using intel_gpu_top."""
-
- def get_stats_manually(output: str) -> dict[str, str]:
- """Find global stats via regex when json fails to parse."""
- reading = "".join(output)
- results: dict[str, str] = {}
-
- # render is used for qsv
- render = []
- for result in re.findall(r'"Render/3D/0":{[a-z":\d.,%]+}', reading):
- packet = json.loads(result[14:])
- single = packet.get("busy", 0.0)
- render.append(float(single))
-
- if render:
- render_avg = sum(render) / len(render)
- else:
- render_avg = 1
-
- # video is used for vaapi
- video = []
- for result in re.findall(r'"Video/\d":{[a-z":\d.,%]+}', reading):
- packet = json.loads(result[10:])
- single = packet.get("busy", 0.0)
- video.append(float(single))
-
- if video:
- video_avg = sum(video) / len(video)
- else:
- video_avg = 1
-
- results["gpu"] = f"{round((video_avg + render_avg) / 2, 2)}%"
- results["mem"] = "-%"
- return results
-
- intel_gpu_top_command = [
- "timeout",
- "0.5s",
- "intel_gpu_top",
- "-J",
- "-o",
- "-",
- "-s",
- "1000", # Intel changed this from seconds to milliseconds in 2024+ versions
- ]
-
- if intel_gpu_device:
- intel_gpu_top_command += ["-d", intel_gpu_device]
-
- try:
- p = sp.run(
- intel_gpu_top_command,
- encoding="ascii",
- capture_output=True,
- )
- except UnicodeDecodeError:
- return None
-
- # timeout has a non-zero returncode when timeout is reached
- if p.returncode != 124:
- logger.error(f"Unable to poll intel GPU stats: {p.stderr}")
- return None
- else:
- output = "".join(p.stdout.split())
-
- try:
- data = json.loads(f"[{output}]")
- except json.JSONDecodeError:
- return get_stats_manually(output)
-
- results: dict[str, str] = {}
- render = {"global": []}
- video = {"global": []}
-
- for block in data:
- global_engine = block.get("engines")
-
- if global_engine:
- render_frame = global_engine.get("Render/3D/0", {}).get("busy")
- video_frame = global_engine.get("Video/0", {}).get("busy")
-
- if render_frame is not None:
- render["global"].append(float(render_frame))
-
- if video_frame is not None:
- video["global"].append(float(video_frame))
-
- clients = block.get("clients", {})
-
- if clients and len(clients):
- for client_block in clients.values():
- key = client_block["pid"]
-
- if render.get(key) is None:
- render[key] = []
- video[key] = []
-
- client_engine = client_block.get("engine-classes", {})
-
- render_frame = client_engine.get("Render/3D", {}).get("busy")
- video_frame = client_engine.get("Video", {}).get("busy")
-
- if render_frame is not None:
- render[key].append(float(render_frame))
-
- if video_frame is not None:
- video[key].append(float(video_frame))
-
- if render["global"] and video["global"]:
- results["gpu"] = (
- f"{round(((sum(render['global']) / len(render['global'])) + (sum(video['global']) / len(video['global']))) / 2, 2)}%"
- )
- results["mem"] = "-%"
-
- if len(render.keys()) > 1:
- results["clients"] = {}
-
- for key in render.keys():
- if key == "global" or not render[key] or not video[key]:
- continue
-
- results["clients"][key] = (
- f"{round(((sum(render[key]) / len(render[key])) + (sum(video[key]) / len(video[key]))) / 2, 2)}%"
- )
-
- return results
-
-
-def get_openvino_npu_stats() -> Optional[dict[str, str]]:
- """Get NPU stats using openvino."""
- NPU_RUNTIME_PATH = "/sys/devices/pci0000:00/0000:00:0b.0/power/runtime_active_time"
-
- try:
- with open(NPU_RUNTIME_PATH, "r") as f:
- initial_runtime = float(f.read().strip())
-
- initial_time = time.time()
-
- # Sleep for 1 second to get an accurate reading
- time.sleep(1.0)
-
- # Read runtime value again
- with open(NPU_RUNTIME_PATH, "r") as f:
- current_runtime = float(f.read().strip())
-
- current_time = time.time()
-
- # Calculate usage percentage
- runtime_diff = current_runtime - initial_runtime
- time_diff = (current_time - initial_time) * 1000.0 # Convert to milliseconds
-
- if time_diff > 0:
- usage = min(100.0, max(0.0, (runtime_diff / time_diff * 100.0)))
- else:
- usage = 0.0
-
- return {"npu": f"{round(usage, 2)}", "mem": "-%"}
- except (FileNotFoundError, PermissionError, ValueError):
- return None
-
-
-def get_rockchip_gpu_stats() -> Optional[dict[str, str | float]]:
- """Get GPU stats using rk."""
- try:
- with open("/sys/kernel/debug/rkrga/load", "r") as f:
- content = f.read()
- except FileNotFoundError:
- return None
-
- load_values = []
- for line in content.splitlines():
- match = re.search(r"load = (\d+)%", line)
- if match:
- load_values.append(int(match.group(1)))
-
- if not load_values:
- return None
-
- average_load = f"{round(sum(load_values) / len(load_values), 2)}%"
- stats: dict[str, str | float] = {"gpu": average_load, "mem": "-%"}
-
- try:
- with open("/sys/class/thermal/thermal_zone5/temp", "r") as f:
- line = f.readline().strip()
- stats["temp"] = round(int(line) / 1000, 1)
- except (FileNotFoundError, OSError, ValueError):
- pass
-
- return stats
-
-
-def get_rockchip_npu_stats() -> Optional[dict[str, float | str]]:
- """Get NPU stats using rk."""
- try:
- with open("/sys/kernel/debug/rknpu/load", "r") as f:
- npu_output = f.read()
-
- if "Core0:" in npu_output:
- # multi core NPU
- core_loads = re.findall(r"Core\d+:\s*(\d+)%", npu_output)
- else:
- # single core NPU
- core_loads = re.findall(r"NPU load:\s+(\d+)%", npu_output)
- except FileNotFoundError:
- core_loads = None
-
- if not core_loads:
- return None
-
- percentages = [int(load) for load in core_loads]
- mean = round(sum(percentages) / len(percentages), 2)
- stats: dict[str, float | str] = {"npu": mean, "mem": "-%"}
-
- try:
- with open("/sys/class/thermal/thermal_zone6/temp", "r") as f:
- line = f.readline().strip()
- stats["temp"] = round(int(line) / 1000, 1)
- except (FileNotFoundError, OSError, ValueError):
- pass
-
- return stats
-
-
-def try_get_info(f, h, default="N/A", sensor=None):
- try:
- if h:
- if sensor is not None:
- v = f(h, sensor)
- else:
- v = f(h)
- else:
- v = f()
- except nvml.NVMLError_NotSupported:
- v = default
- return v
-
-
-def get_nvidia_gpu_stats() -> dict[int, dict]:
- names: dict[str, int] = {}
- results = {}
- try:
- nvml.nvmlInit()
- deviceCount = nvml.nvmlDeviceGetCount()
- for i in range(deviceCount):
- handle = nvml.nvmlDeviceGetHandleByIndex(i)
- gpu_name = nvml.nvmlDeviceGetName(handle)
-
- # handle case where user has multiple of same GPU
- if gpu_name in names:
- names[gpu_name] += 1
- gpu_name += f" ({names.get(gpu_name)})"
- else:
- names[gpu_name] = 1
-
- meminfo = try_get_info(nvml.nvmlDeviceGetMemoryInfo, handle)
- util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle)
- enc = try_get_info(nvml.nvmlDeviceGetEncoderUtilization, handle)
- dec = try_get_info(nvml.nvmlDeviceGetDecoderUtilization, handle)
- temp = try_get_info(
- nvml.nvmlDeviceGetTemperature, handle, default=None, sensor=0
- )
- pstate = try_get_info(nvml.nvmlDeviceGetPowerState, handle, default=None)
-
- if util != "N/A":
- gpu_util = util.gpu
- else:
- gpu_util = 0
-
- if meminfo != "N/A":
- gpu_mem_util = meminfo.used / meminfo.total * 100
- else:
- gpu_mem_util = -1
-
- if temp != "N/A" and temp is not None:
- temp = float(temp)
- else:
- temp = None
-
- if enc != "N/A":
- enc_util = enc[0]
- else:
- enc_util = -1
-
- if dec != "N/A":
- dec_util = dec[0]
- else:
- dec_util = -1
-
- results[i] = {
- "name": gpu_name,
- "gpu": gpu_util,
- "mem": gpu_mem_util,
- "enc": enc_util,
- "dec": dec_util,
- "pstate": pstate or "unknown",
- "temp": temp,
- }
- except Exception:
- pass
- finally:
- return results
-
-
-def get_jetson_stats() -> Optional[dict[int, dict]]:
- results = {}
-
- try:
- results["mem"] = "-" # no discrete gpu memory
-
- if os.path.exists("/sys/devices/gpu.0/load"):
- with open("/sys/devices/gpu.0/load", "r") as f:
- gpuload = float(f.readline()) / 10
- results["gpu"] = f"{gpuload}%"
- elif os.path.exists("/sys/devices/platform/gpu.0/load"):
- with open("/sys/devices/platform/gpu.0/load", "r") as f:
- gpuload = float(f.readline()) / 10
- results["gpu"] = f"{gpuload}%"
- else:
- results["gpu"] = "-"
- except Exception:
- return None
-
- return results
-
-
-def get_hailo_temps() -> dict[str, float]:
- """Get temperatures for Hailo devices."""
- try:
- from hailo_platform import Device
- except ModuleNotFoundError:
- return {}
-
- temps = {}
-
- try:
- device_ids = Device.scan()
- for i, device_id in enumerate(device_ids):
- try:
- with Device(device_id) as device:
- temp_info = device.control.get_chip_temperature()
-
- # Get board name and normalise it
- identity = device.control.identify()
- board_name = None
- for line in str(identity).split("\n"):
- if line.startswith("Board Name:"):
- board_name = (
- line.split(":", 1)[1].strip().lower().replace("-", "")
- )
- break
-
- if not board_name:
- board_name = f"hailo{i}"
-
- # Use indexed name if multiple devices, otherwise just the board name
- device_name = (
- f"{board_name}-{i}" if len(device_ids) > 1 else board_name
- )
-
- # ts1_temperature is also available, but appeared to be the same as ts0 in testing.
- temps[device_name] = round(temp_info.ts0_temperature, 1)
- except Exception as e:
- logger.debug(
- f"Failed to get temperature for Hailo device {device_id}: {e}"
- )
- continue
- except Exception as e:
- logger.debug(f"Failed to scan for Hailo devices: {e}")
-
- return temps
-
-
-def ffprobe_stream(ffmpeg, path: str, detailed: bool = False) -> sp.CompletedProcess:
- """Run ffprobe on stream."""
- clean_path = escape_special_characters(path)
-
- # Base entries that are always included
- stream_entries = "codec_long_name,width,height,bit_rate,duration,display_aspect_ratio,avg_frame_rate"
-
- # Additional detailed entries
- if detailed:
- stream_entries += ",codec_name,profile,level,pix_fmt,channels,sample_rate,channel_layout,r_frame_rate"
- format_entries = "format_name,size,bit_rate,duration"
- else:
- format_entries = None
-
- ffprobe_cmd = [
- ffmpeg.ffprobe_path,
- "-timeout",
- "1000000",
- "-print_format",
- "json",
- "-show_entries",
- f"stream={stream_entries}",
- ]
-
- # Add format entries for detailed mode
- if detailed and format_entries:
- ffprobe_cmd.extend(["-show_entries", f"format={format_entries}"])
-
- ffprobe_cmd.extend(["-loglevel", "error", clean_path])
-
- return sp.run(ffprobe_cmd, capture_output=True)
-
-
-def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess:
- """Run vainfo."""
- if not device_name:
- cmd = ["vainfo"]
- else:
- if os.path.isabs(device_name) and device_name.startswith("/dev/dri/"):
- device_path = device_name
- else:
- device_path = f"/dev/dri/{device_name}"
-
- cmd = ["vainfo", "--display", "drm", "--device", device_path]
-
- return sp.run(cmd, capture_output=True)
-
-
-def get_nvidia_driver_info() -> dict[str, Any]:
- """Get general hardware info for nvidia GPU."""
- results = {}
- try:
- nvml.nvmlInit()
- deviceCount = nvml.nvmlDeviceGetCount()
- for i in range(deviceCount):
- handle = nvml.nvmlDeviceGetHandleByIndex(i)
- driver = try_get_info(nvml.nvmlSystemGetDriverVersion, None, default=None)
- cuda_compute = try_get_info(
- nvml.nvmlDeviceGetCudaComputeCapability, handle, default=None
- )
- vbios = try_get_info(nvml.nvmlDeviceGetVbiosVersion, handle, default=None)
- results[i] = {
- "name": nvml.nvmlDeviceGetName(handle),
- "driver": driver or "unknown",
- "cuda_compute": cuda_compute or "unknown",
- "vbios": vbios or "unknown",
- }
- except Exception:
- pass
- finally:
- return results
-
-
-def auto_detect_hwaccel() -> str:
- """Detect hwaccel args by default."""
- try:
- cuda = False
- vaapi = False
- resp = requests.get("http://127.0.0.1:1984/api/ffmpeg/hardware", timeout=3)
-
- if resp.status_code == 200:
- data: dict[str, list[dict[str, str]]] = resp.json()
- for source in data.get("sources", []):
- if "cuda" in source.get("url", "") and source.get("name") == "OK":
- cuda = True
-
- if "vaapi" in source.get("url", "") and source.get("name") == "OK":
- vaapi = True
- except requests.RequestException:
- pass
-
- if cuda:
- logger.info("Automatically detected nvidia hwaccel for video decoding")
- return FFMPEG_HWACCEL_NVIDIA
-
- if vaapi:
- logger.info("Automatically detected vaapi hwaccel for video decoding")
- return FFMPEG_HWACCEL_VAAPI
-
- logger.warning(
- "Did not detect hwaccel, using a GPU for accelerated video decoding is highly recommended"
- )
- return ""
-
-
-async def get_video_properties(
- ffmpeg, url: str, get_duration: bool = False
-) -> dict[str, Any]:
- async def probe_with_ffprobe(
- url: str,
- ) -> tuple[bool, int, int, Optional[str], float]:
- """Fallback using ffprobe: returns (valid, width, height, codec, duration)."""
- cmd = [
- ffmpeg.ffprobe_path,
- "-v",
- "quiet",
- "-print_format",
- "json",
- "-show_format",
- "-show_streams",
- url,
- ]
- try:
- proc = await asyncio.create_subprocess_exec(
- *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
- )
- stdout, _ = await proc.communicate()
- if proc.returncode != 0:
- return False, 0, 0, None, -1
-
- data = json.loads(stdout.decode())
- video_streams = [
- s for s in data.get("streams", []) if s.get("codec_type") == "video"
- ]
- if not video_streams:
- return False, 0, 0, None, -1
-
- v = video_streams[0]
- width = int(v.get("width", 0))
- height = int(v.get("height", 0))
- codec = v.get("codec_name")
-
- duration_str = data.get("format", {}).get("duration")
- duration = float(duration_str) if duration_str else -1.0
-
- return True, width, height, codec, duration
- except (json.JSONDecodeError, ValueError, KeyError, sp.SubprocessError):
- return False, 0, 0, None, -1
-
- def probe_with_cv2(url: str) -> tuple[bool, int, int, Optional[str], float]:
- """Primary attempt using cv2: returns (valid, width, height, fourcc, duration)."""
- cap = cv2.VideoCapture(url)
- if not cap.isOpened():
- cap.release()
- return False, 0, 0, None, -1
-
- width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
- height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
- valid = width > 0 and height > 0
- fourcc = None
- duration = -1.0
-
- if valid:
- fourcc_int = int(cap.get(cv2.CAP_PROP_FOURCC))
- fourcc = fourcc_int.to_bytes(4, "little").decode("latin-1").strip()
-
- if get_duration:
- fps = cap.get(cv2.CAP_PROP_FPS)
- total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
- if fps > 0 and total_frames > 0:
- duration = total_frames / fps
-
- cap.release()
- return valid, width, height, fourcc, duration
-
- # try cv2 first
- has_video, width, height, fourcc, duration = probe_with_cv2(url)
-
- # fallback to ffprobe if needed
- if not has_video or (get_duration and duration < 0):
- has_video, width, height, fourcc, duration = await probe_with_ffprobe(url)
-
- result: dict[str, Any] = {"has_valid_video": has_video}
- if has_video:
- result.update({"width": width, "height": height})
- if fourcc:
- result["fourcc"] = fourcc
- if get_duration:
- result["duration"] = duration
-
- return result
-
-
-def process_logs(
- contents: str,
- service: Optional[str] = None,
- start: Optional[int] = None,
- end: Optional[int] = None,
-) -> Tuple[int, List[str]]:
- log_lines = []
- last_message = None
- last_timestamp = None
- repeat_count = 0
-
- for raw_line in contents.splitlines():
- clean_line = raw_line.strip()
-
- if len(clean_line) < 10:
- continue
-
- # Handle cases where S6 does not include date in log line
- if " " not in clean_line:
- clean_line = f"{datetime.now()} {clean_line}"
-
- try:
- # Find the position of the first double space to extract timestamp and message
- date_end = clean_line.index(" ")
- timestamp = clean_line[:date_end]
- full_message = clean_line[date_end:].strip()
-
- # For frigate, remove the date part from message comparison
- if service == "frigate":
- # Skip the date at the start of the message if it exists
- date_parts = full_message.split("]", 1)
- if len(date_parts) > 1:
- message_part = date_parts[1].strip()
- else:
- message_part = full_message
- else:
- message_part = full_message
-
- if message_part == last_message:
- repeat_count += 1
- continue
- else:
- if repeat_count > 0:
- # Insert a deduplication message formatted the same way as logs
- dedup_message = f"{last_timestamp} [LOGGING] Last message repeated {repeat_count} times"
- log_lines.append(dedup_message)
- repeat_count = 0
-
- log_lines.append(clean_line)
- last_timestamp = timestamp
-
- last_message = message_part
-
- except ValueError:
- # If we can't parse the line properly, just add it as is
- log_lines.append(clean_line)
- continue
-
- # If there were repeated messages at the end, log the count
- if repeat_count > 0:
- dedup_message = (
- f"{last_timestamp} [LOGGING] Last message repeated {repeat_count} times"
- )
- log_lines.append(dedup_message)
-
- return len(log_lines), log_lines[start:end]
-
-
-def set_file_limit() -> None:
- # Newer versions of containerd 2.X+ impose a very low soft file limit of 1024
- # This applies to OSs like HA OS (see https://github.com/home-assistant/operating-system/issues/4110)
- # Attempt to increase this limit
- soft_limit = int(os.getenv("SOFT_FILE_LIMIT", "65536") or "65536")
-
- current_soft, current_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
- logger.debug(f"Current file limits - Soft: {current_soft}, Hard: {current_hard}")
-
- new_soft = min(soft_limit, current_hard)
- resource.setrlimit(resource.RLIMIT_NOFILE, (new_soft, current_hard))
- logger.debug(
- f"File limit set. New soft limit: {new_soft}, Hard limit remains: {current_hard}"
- )
-
-
-def get_fs_type(path: str) -> str:
- bestMatch = ""
- fsType = ""
- for part in psutil.disk_partitions(all=True):
- if path.startswith(part.mountpoint) and len(bestMatch) < len(part.mountpoint):
- fsType = part.fstype
- bestMatch = part.mountpoint
- return fsType
-
-
-def calculate_shm_requirements(config) -> dict:
- try:
- storage_stats = shutil.disk_usage("/dev/shm")
- except (FileNotFoundError, OSError):
- return {}
-
- total_mb = round(storage_stats.total / pow(2, 20), 1)
- used_mb = round(storage_stats.used / pow(2, 20), 1)
- free_mb = round(storage_stats.free / pow(2, 20), 1)
-
- # required for log files + nginx cache
- min_req_shm = 40 + 10
-
- if config.birdseye.restream:
- min_req_shm += 8
-
- available_shm = total_mb - min_req_shm
- cam_total_frame_size = 0.0
-
- for camera in config.cameras.values():
- if camera.enabled_in_config and camera.detect.width and camera.detect.height:
- cam_total_frame_size += round(
- (camera.detect.width * camera.detect.height * 1.5 + 270480) / 1048576,
- 1,
- )
-
- # leave room for 2 cameras that are added dynamically, if a user wants to add more cameras they may need to increase the SHM size and restart after adding them.
- cam_total_frame_size += 2 * round(
- (1280 * 720 * 1.5 + 270480) / 1048576,
- 1,
- )
-
- shm_frame_count = min(
- int(os.environ.get(SHM_FRAMES_VAR, "50")),
- int(available_shm / cam_total_frame_size),
- )
-
- # minimum required shm recommendation
- min_shm = round(min_req_shm + cam_total_frame_size * 20)
-
- return {
- "total": total_mb,
- "used": used_mb,
- "free": free_mb,
- "mount_type": get_fs_type("/dev/shm"),
- "available": round(available_shm, 1),
- "camera_frame_size": cam_total_frame_size,
- "shm_frame_count": shm_frame_count,
- "min_shm": min_shm,
- }
+"""Utilities for services."""
+
+import asyncio
+import json
+import logging
+import os
+import re
+import resource
+import shutil
+import signal
+import subprocess as sp
+import time
+import traceback
+from datetime import datetime
+from typing import Any, List, Optional, Tuple
+
+import cv2
+import psutil
+import py3nvml.py3nvml as nvml
+import requests
+
+from frigate.const import (
+ DRIVER_AMD,
+ DRIVER_ENV_VAR,
+ FFMPEG_HWACCEL_NVIDIA,
+ FFMPEG_HWACCEL_VAAPI,
+ SHM_FRAMES_VAR,
+)
+from frigate.util.builtin import clean_camera_user_pass, escape_special_characters
+
+logger = logging.getLogger(__name__)
+
+
+def restart_frigate():
+ proc = psutil.Process(1)
+ # if this is running via s6, sigterm pid 1
+ if proc.name() == "s6-svscan":
+ proc.terminate()
+ # otherwise, just try and exit frigate
+ else:
+ os.kill(os.getpid(), signal.SIGINT)
+
+
+def print_stack(sig, frame):
+ traceback.print_stack(frame)
+
+
+def listen():
+ signal.signal(signal.SIGUSR1, print_stack)
+
+
+def get_cgroups_version() -> str:
+ """Determine what version of cgroups is enabled."""
+
+ cgroup_path = "/sys/fs/cgroup"
+
+ if not os.path.ismount(cgroup_path):
+ logger.debug(f"{cgroup_path} is not a mount point.")
+ return "unknown"
+
+ try:
+ with open("/proc/mounts", "r") as f:
+ mounts = f.readlines()
+
+ for mount in mounts:
+ mount_info = mount.split()
+ if mount_info[1] == cgroup_path:
+ fs_type = mount_info[2]
+ if fs_type == "cgroup2fs" or fs_type == "cgroup2":
+ return "cgroup2"
+ elif fs_type == "tmpfs":
+ return "cgroup"
+ else:
+ logger.debug(
+ f"Could not determine cgroups version: unhandled filesystem {fs_type}"
+ )
+ break
+ except Exception as e:
+ logger.debug(f"Could not determine cgroups version: {e}")
+
+ return "unknown"
+
+
+def get_docker_memlimit_bytes() -> int:
+ """Get mem limit in bytes set in docker if present. Returns -1 if no limit detected."""
+
+ # check running a supported cgroups version
+ if get_cgroups_version() == "cgroup2":
+ memlimit_path = "/sys/fs/cgroup/memory.max"
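+        # cgroup v2 reports the limit as a byte count, or the literal string
+        # "max" when no limit is set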
+
+ try:
+ with open(memlimit_path, "r") as f:
+ value = f.read().strip()
+
+ if value.isnumeric():
+ return int(value)
+ elif value.lower() == "max":
+ return -1
+ except Exception as e:
+ logger.debug(f"Unable to get docker memlimit: {e}")
+
+ return -1
+
+
+def get_cpu_stats() -> dict[str, dict]:
+ """Get cpu usages for each process id"""
+ usages = {}
+ docker_memlimit = get_docker_memlimit_bytes() / 1024
+ total_mem = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES") / 1024
+
+ system_cpu = psutil.cpu_percent(
+ interval=None
+    )  # interval=None is non-blocking and reports usage since the previous call
+ system_mem = psutil.virtual_memory()
+ usages["frigate.full_system"] = {
+ "cpu": str(system_cpu),
+ "mem": str(system_mem.percent),
+ }
+
+ for process in psutil.process_iter(["pid", "name", "cpu_percent", "cmdline"]):
+ pid = str(process.info["pid"])
+ try:
+ cpu_percent = process.info["cpu_percent"]
+ cmdline = " ".join(process.info["cmdline"]).rstrip()
+
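+            # /proc/<pid>/stat fields (see proc(5)): utime (14), stime (15) and
+            # starttime (22) are in clock ticks; indexes below are the 0-based
+            # positions after split()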
+ with open(f"/proc/{pid}/stat", "r") as f:
+ stats = f.readline().split()
+ utime = int(stats[13])
+ stime = int(stats[14])
+ start_time = int(stats[21])
+
+ with open("/proc/uptime") as f:
+ system_uptime_sec = int(float(f.read().split()[0]))
+
+ clk_tck = os.sysconf(os.sysconf_names["SC_CLK_TCK"])
+
+ process_utime_sec = utime // clk_tck
+ process_stime_sec = stime // clk_tck
+ process_start_time_sec = start_time // clk_tck
+
+ process_elapsed_sec = system_uptime_sec - process_start_time_sec
+ process_usage_sec = process_utime_sec + process_stime_sec
+ cpu_average_usage = process_usage_sec * 100 // process_elapsed_sec
+
+ with open(f"/proc/{pid}/statm", "r") as f:
+ mem_stats = f.readline().split()
+ mem_res = int(mem_stats[1]) * os.sysconf("SC_PAGE_SIZE") / 1024
+
+ if docker_memlimit > 0:
+ mem_pct = round((mem_res / docker_memlimit) * 100, 1)
+ else:
+ mem_pct = round((mem_res / total_mem) * 100, 1)
+
+ usages[pid] = {
+ "cpu": str(cpu_percent),
+ "cpu_average": str(round(cpu_average_usage, 2)),
+ "mem": f"{mem_pct}",
+ "cmdline": clean_camera_user_pass(cmdline),
+ }
+ except Exception:
+ continue
+
+ return usages
+
+
+def get_physical_interfaces(interfaces) -> list:
+ if not interfaces:
+ return []
+
+ with open("/proc/net/dev", "r") as file:
+ lines = file.readlines()
+
+ physical_interfaces = []
+ for line in lines:
+ if ":" in line:
+ interface = line.split(":")[0].strip()
+            for prefix in interfaces:
+                if interface.startswith(prefix):
+ physical_interfaces.append(interface)
+
+ return physical_interfaces
+
+
+def get_bandwidth_stats(config) -> dict[str, dict]:
+ """Get bandwidth usages for each ffmpeg process id"""
+ usages = {}
+ top_command = ["nethogs", "-t", "-v0", "-c5", "-d1"] + get_physical_interfaces(
+ config.telemetry.network_interfaces
+ )
+
+ p = sp.run(
+ top_command,
+ encoding="ascii",
+ capture_output=True,
+ )
+
+ if p.returncode != 0:
+ logger.error(f"Error getting network stats :: {p.stderr}")
+ return usages
+ else:
+ lines = p.stdout.split("\n")
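+        # nethogs trace mode prints "<program>/<pid>/<uid>\t<sent>\t<received>"
+        # per process; the pid is the second-to-last "/"-separated segment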
+ for line in lines:
+ stats = list(filter(lambda a: a != "", line.strip().split("\t")))
+ try:
+ if re.search(
+ r"(^ffmpeg|\/go2rtc|frigate\.detector\.[a-z]+)/([0-9]+)/", stats[0]
+ ):
+ process = stats[0].split("/")
+ usages[process[len(process) - 2]] = {
+ "bandwidth": round(float(stats[1]) + float(stats[2]), 1),
+ }
+ except (IndexError, ValueError):
+ continue
+
+ return usages
+
+
+def is_vaapi_amd_driver() -> bool:
+ # Use the explicitly configured driver, if available
+ driver = os.environ.get(DRIVER_ENV_VAR)
+ if driver:
+ return driver == DRIVER_AMD
+
+    # Otherwise, ask vainfo what it has autodetected
+ p = vainfo_hwaccel()
+
+ if p.returncode != 0:
+ logger.error(f"Unable to poll vainfo: {p.stderr}")
+ return False
+ else:
+ output = p.stdout.decode("unicode_escape").split("\n")
+
+ # VA Info will print out the friendly name of the driver
+ return any("AMD Radeon Graphics" in line for line in output)
+
+
+def get_amd_gpu_stats() -> Optional[dict[str, str]]:
+ """Get stats using radeontop."""
+ radeontop_command = ["radeontop", "-d", "-", "-l", "1"]
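+    # "-d -" dumps samples to stdout and "-l 1" stops after a single sample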
+
+ p = sp.run(
+ radeontop_command,
+ encoding="ascii",
+ capture_output=True,
+ )
+
+ if p.returncode != 0:
+ logger.error(f"Unable to poll radeon GPU stats: {p.stderr}")
+ return None
+ else:
+ usages = p.stdout.split(",")
+ results: dict[str, str] = {}
+
+ for hw in usages:
+ if "gpu" in hw:
+ results["gpu"] = f"{hw.strip().split(' ')[1].replace('%', '')}%"
+ elif "vram" in hw:
+ results["mem"] = f"{hw.strip().split(' ')[1].replace('%', '')}%"
+
+ return results
+
+
+def get_intel_gpu_stats(intel_gpu_device: Optional[str]) -> Optional[dict[str, str]]:
+ """Get stats using intel_gpu_top."""
+
+ def get_stats_manually(output: str) -> dict[str, str]:
+ """Find global stats via regex when json fails to parse."""
+ reading = "".join(output)
+ results: dict[str, str] = {}
+
+ # render is used for qsv
+ render = []
+ for result in re.findall(r'"Render/3D/0":{[a-z":\d.,%]+}', reading):
+ packet = json.loads(result[14:])
+ single = packet.get("busy", 0.0)
+ render.append(float(single))
+
+ if render:
+ render_avg = sum(render) / len(render)
+ else:
+ render_avg = 1
+
+ # video is used for vaapi
+ video = []
+ for result in re.findall(r'"Video/\d":{[a-z":\d.,%]+}', reading):
+ packet = json.loads(result[10:])
+ single = packet.get("busy", 0.0)
+ video.append(float(single))
+
+ if video:
+ video_avg = sum(video) / len(video)
+ else:
+ video_avg = 1
+
+ results["gpu"] = f"{round((video_avg + render_avg) / 2, 2)}%"
+ results["mem"] = "-%"
+ return results
+
+ intel_gpu_top_command = [
+ "timeout",
+ "0.5s",
+ "intel_gpu_top",
+ "-J",
+ "-o",
+ "-",
+ "-s",
+ "1000", # Intel changed this from seconds to milliseconds in 2024+ versions
+ ]
+
+ if intel_gpu_device:
+ intel_gpu_top_command += ["-d", intel_gpu_device]
+
+ try:
+ p = sp.run(
+ intel_gpu_top_command,
+ encoding="ascii",
+ capture_output=True,
+ )
+ except UnicodeDecodeError:
+ return None
+
+    # GNU timeout exits with 124 when the duration elapses, which is the
+    # expected path here; any other returncode means intel_gpu_top failed
+ if p.returncode != 124:
+ logger.error(f"Unable to poll intel GPU stats: {p.stderr}")
+ return None
+ else:
+ output = "".join(p.stdout.split())
+
+ try:
+ data = json.loads(f"[{output}]")
+ except json.JSONDecodeError:
+ return get_stats_manually(output)
+
+ results: dict[str, str] = {}
+ render = {"global": []}
+ video = {"global": []}
+
+ for block in data:
+ global_engine = block.get("engines")
+
+ if global_engine:
+ render_frame = global_engine.get("Render/3D/0", {}).get("busy")
+ video_frame = global_engine.get("Video/0", {}).get("busy")
+
+ if render_frame is not None:
+ render["global"].append(float(render_frame))
+
+ if video_frame is not None:
+ video["global"].append(float(video_frame))
+
+ clients = block.get("clients", {})
+
+ if clients:
+ for client_block in clients.values():
+ key = client_block["pid"]
+
+ if render.get(key) is None:
+ render[key] = []
+ video[key] = []
+
+ client_engine = client_block.get("engine-classes", {})
+
+ render_frame = client_engine.get("Render/3D", {}).get("busy")
+ video_frame = client_engine.get("Video", {}).get("busy")
+
+ if render_frame is not None:
+ render[key].append(float(render_frame))
+
+ if video_frame is not None:
+ video[key].append(float(video_frame))
+
+ if render["global"] and video["global"]:
+ results["gpu"] = (
+ f"{round(((sum(render['global']) / len(render['global'])) + (sum(video['global']) / len(video['global']))) / 2, 2)}%"
+ )
+ results["mem"] = "-%"
+
+ if len(render.keys()) > 1:
+ results["clients"] = {}
+
+ for key in render.keys():
+ if key == "global" or not render[key] or not video[key]:
+ continue
+
+ results["clients"][key] = (
+ f"{round(((sum(render[key]) / len(render[key])) + (sum(video[key]) / len(video[key]))) / 2, 2)}%"
+ )
+
+ return results
+
+
+def get_openvino_npu_stats() -> Optional[dict[str, str]]:
+ """Get NPU stats using openvino."""
+ NPU_RUNTIME_PATH = "/sys/devices/pci0000:00/0000:00:0b.0/power/runtime_active_time"
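+ # Assumes the Intel NPU enumerates at PCI address 0000:00:0b.0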
+
+ try:
+ with open(NPU_RUNTIME_PATH, "r") as f:
+ initial_runtime = float(f.read().strip())
+
+ initial_time = time.time()
+
+ # Sleep for 1 second to get an accurate reading
+ time.sleep(1.0)
+
+ # Read runtime value again
+ with open(NPU_RUNTIME_PATH, "r") as f:
+ current_runtime = float(f.read().strip())
+
+ current_time = time.time()
+
+ # Calculate usage percentage
+ runtime_diff = current_runtime - initial_runtime
+ time_diff = (current_time - initial_time) * 1000.0 # Convert to milliseconds
+
+ if time_diff > 0:
+ usage = min(100.0, max(0.0, (runtime_diff / time_diff * 100.0)))
+ else:
+ usage = 0.0
+
+ return {"npu": f"{round(usage, 2)}", "mem": "-%"}
+ except (FileNotFoundError, PermissionError, ValueError):
+ return None
+
+
+def get_rockchip_gpu_stats() -> Optional[dict[str, str | float]]:
+ """Get GPU stats using rk."""
+ try:
+ with open("/sys/kernel/debug/rkrga/load", "r") as f:
+ content = f.read()
+ except FileNotFoundError:
+ return None
+
+ load_values = []
+ for line in content.splitlines():
+ match = re.search(r"load = (\d+)%", line)
+ if match:
+ load_values.append(int(match.group(1)))
+
+ if not load_values:
+ return None
+
+ average_load = f"{round(sum(load_values) / len(load_values), 2)}%"
+ stats: dict[str, str | float] = {"gpu": average_load, "mem": "-%"}
+
+ try:
+ with open("/sys/class/thermal/thermal_zone5/temp", "r") as f:
+ line = f.readline().strip()
+ stats["temp"] = round(int(line) / 1000, 1)
+ except (FileNotFoundError, OSError, ValueError):
+ pass
+
+ return stats
+
+
+def get_rockchip_npu_stats() -> Optional[dict[str, float | str]]:
+ """Get NPU stats using rk."""
+ try:
+ with open("/sys/kernel/debug/rknpu/load", "r") as f:
+ npu_output = f.read()
+
+ if "Core0:" in npu_output:
+ # multi core NPU
+ core_loads = re.findall(r"Core\d+:\s*(\d+)%", npu_output)
+ else:
+ # single core NPU
+ core_loads = re.findall(r"NPU load:\s+(\d+)%", npu_output)
+ except FileNotFoundError:
+ core_loads = None
+
+ if not core_loads:
+ return None
+
+ percentages = [int(load) for load in core_loads]
+ mean = round(sum(percentages) / len(percentages), 2)
+ stats: dict[str, float | str] = {"npu": mean, "mem": "-%"}
+
+ try:
+ with open("/sys/class/thermal/thermal_zone6/temp", "r") as f:
+ line = f.readline().strip()
+ stats["temp"] = round(int(line) / 1000, 1)
+ except (FileNotFoundError, OSError, ValueError):
+ pass
+
+ return stats
+
+
+def try_get_info(f, h, default="N/A", sensor=None):
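+ """Call an NVML getter, returning default when the query is unsupported."""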
+ try:
+ if h:
+ if sensor is not None:
+ v = f(h, sensor)
+ else:
+ v = f(h)
+ else:
+ v = f()
+ except nvml.NVMLError_NotSupported:
+ v = default
+ return v
+
+
+def get_nvidia_gpu_stats() -> dict[int, dict]:
+ names: dict[str, int] = {}
+ results = {}
+ try:
+ nvml.nvmlInit()
+ deviceCount = nvml.nvmlDeviceGetCount()
+ for i in range(deviceCount):
+ handle = nvml.nvmlDeviceGetHandleByIndex(i)
+ gpu_name = nvml.nvmlDeviceGetName(handle)
+
+ # handle case where user has multiple of same GPU
+ if gpu_name in names:
+ names[gpu_name] += 1
+ gpu_name += f" ({names.get(gpu_name)})"
+ else:
+ names[gpu_name] = 1
+
+ meminfo = try_get_info(nvml.nvmlDeviceGetMemoryInfo, handle)
+ util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle)
+ enc = try_get_info(nvml.nvmlDeviceGetEncoderUtilization, handle)
+ dec = try_get_info(nvml.nvmlDeviceGetDecoderUtilization, handle)
+ temp = try_get_info(
+ nvml.nvmlDeviceGetTemperature, handle, default=None, sensor=0
+ )
+ pstate = try_get_info(nvml.nvmlDeviceGetPowerState, handle, default=None)
+
+ if util != "N/A":
+ gpu_util = util.gpu
+ else:
+ gpu_util = 0
+
+ if meminfo != "N/A":
+ gpu_mem_util = meminfo.used / meminfo.total * 100
+ else:
+ gpu_mem_util = -1
+
+ if temp != "N/A" and temp is not None:
+ temp = float(temp)
+ else:
+ temp = None
+
+ if enc != "N/A":
+ enc_util = enc[0]
+ else:
+ enc_util = -1
+
+ if dec != "N/A":
+ dec_util = dec[0]
+ else:
+ dec_util = -1
+
+ results[i] = {
+ "name": gpu_name,
+ "gpu": gpu_util,
+ "mem": gpu_mem_util,
+ "enc": enc_util,
+ "dec": dec_util,
+ "pstate": pstate or "unknown",
+ "temp": temp,
+ }
+ except Exception:
+ pass
+ finally:
+ return results
+
+
+def get_jetson_stats() -> Optional[dict[int, dict]]:
+ results = {}
+
+ try:
+ results["mem"] = "-" # no discrete gpu memory
+
+ if os.path.exists("/sys/devices/gpu.0/load"):
+ with open("/sys/devices/gpu.0/load", "r") as f:
+ gpuload = float(f.readline()) / 10
+ results["gpu"] = f"{gpuload}%"
+ elif os.path.exists("/sys/devices/platform/gpu.0/load"):
+ with open("/sys/devices/platform/gpu.0/load", "r") as f:
+ gpuload = float(f.readline()) / 10
+ results["gpu"] = f"{gpuload}%"
+ else:
+ results["gpu"] = "-"
+ except Exception:
+ return None
+
+ return results
+
+
+def get_hailo_temps() -> dict[str, float]:
+ """Get temperatures for Hailo devices."""
+ try:
+ from hailo_platform import Device
+ except ModuleNotFoundError:
+ return {}
+
+ temps = {}
+
+ try:
+ device_ids = Device.scan()
+ for i, device_id in enumerate(device_ids):
+ try:
+ with Device(device_id) as device:
+ temp_info = device.control.get_chip_temperature()
+
+ # Get board name and normalise it
+ identity = device.control.identify()
+ board_name = None
+ for line in str(identity).split("\n"):
+ if line.startswith("Board Name:"):
+ board_name = (
+ line.split(":", 1)[1].strip().lower().replace("-", "")
+ )
+ break
+
+ if not board_name:
+ board_name = f"hailo{i}"
+
+ # Use indexed name if multiple devices, otherwise just the board name
+ device_name = (
+ f"{board_name}-{i}" if len(device_ids) > 1 else board_name
+ )
+
+ # ts1_temperature is also available, but appeared to be the same as ts0 in testing.
+ temps[device_name] = round(temp_info.ts0_temperature, 1)
+ except Exception as e:
+ logger.debug(
+ f"Failed to get temperature for Hailo device {device_id}: {e}"
+ )
+ continue
+ except Exception as e:
+ logger.debug(f"Failed to scan for Hailo devices: {e}")
+
+ return temps
+
+
+def ffprobe_stream(ffmpeg, path: str, detailed: bool = False) -> sp.CompletedProcess:
+ """Run ffprobe on stream."""
+ clean_path = escape_special_characters(path)
+
+ # Base entries that are always included
+ stream_entries = "codec_long_name,width,height,bit_rate,duration,display_aspect_ratio,avg_frame_rate"
+
+ # Additional detailed entries
+ if detailed:
+ stream_entries += ",codec_name,profile,level,pix_fmt,channels,sample_rate,channel_layout,r_frame_rate"
+ format_entries = "format_name,size,bit_rate,duration"
+ else:
+ format_entries = None
+
+ ffprobe_cmd = [
+ ffmpeg.ffprobe_path,
+ "-timeout",
+ "1000000",
+ "-print_format",
+ "json",
+ "-show_entries",
+ f"stream={stream_entries}",
+ ]
+
+ # Add format entries for detailed mode
+ if detailed and format_entries:
+ ffprobe_cmd.extend(["-show_entries", f"format={format_entries}"])
+
+ ffprobe_cmd.extend(["-loglevel", "error", clean_path])
+
+ return sp.run(ffprobe_cmd, capture_output=True)
+
+
+def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess:
+ """Run vainfo."""
+ if not device_name:
+ cmd = ["vainfo"]
+ else:
+ if os.path.isabs(device_name) and device_name.startswith("/dev/dri/"):
+ device_path = device_name
+ else:
+ device_path = f"/dev/dri/{device_name}"
+
+ cmd = ["vainfo", "--display", "drm", "--device", device_path]
+
+ return sp.run(cmd, capture_output=True)
+
+
+def get_nvidia_driver_info() -> dict[str, Any]:
+ """Get general hardware info for nvidia GPU."""
+ results = {}
+ try:
+ nvml.nvmlInit()
+ deviceCount = nvml.nvmlDeviceGetCount()
+ for i in range(deviceCount):
+ handle = nvml.nvmlDeviceGetHandleByIndex(i)
+ driver = try_get_info(nvml.nvmlSystemGetDriverVersion, None, default=None)
+ cuda_compute = try_get_info(
+ nvml.nvmlDeviceGetCudaComputeCapability, handle, default=None
+ )
+ vbios = try_get_info(nvml.nvmlDeviceGetVbiosVersion, handle, default=None)
+ results[i] = {
+ "name": nvml.nvmlDeviceGetName(handle),
+ "driver": driver or "unknown",
+ "cuda_compute": cuda_compute or "unknown",
+ "vbios": vbios or "unknown",
+ }
+ except Exception:
+ pass
+ finally:
+ return results
+
+
+def auto_detect_hwaccel() -> str:
+ """Detect hwaccel args by default."""
+ try:
+ cuda = False
+ vaapi = False
+ resp = requests.get("http://127.0.0.1:1984/api/ffmpeg/hardware", timeout=3)
+
+ if resp.status_code == 200:
+ data: dict[str, list[dict[str, str]]] = resp.json()
+ for source in data.get("sources", []):
+ if "cuda" in source.get("url", "") and source.get("name") == "OK":
+ cuda = True
+
+ if "vaapi" in source.get("url", "") and source.get("name") == "OK":
+ vaapi = True
+ except requests.RequestException:
+ pass
+
+ if cuda:
+ logger.info("Automatically detected nvidia hwaccel for video decoding")
+ return FFMPEG_HWACCEL_NVIDIA
+
+ if vaapi:
+ logger.info("Automatically detected vaapi hwaccel for video decoding")
+ return FFMPEG_HWACCEL_VAAPI
+
+ logger.warning(
+ "No hwaccel detected; using a GPU for accelerated video decoding is highly recommended"
+ )
+ return ""
+
+
+async def get_video_properties(
+ ffmpeg, url: str, get_duration: bool = False
+) -> dict[str, Any]:
+ async def probe_with_ffprobe(
+ url: str,
+ ) -> tuple[bool, int, int, Optional[str], float]:
+ """Fallback using ffprobe: returns (valid, width, height, codec, duration)."""
+ cmd = [
+ ffmpeg.ffprobe_path,
+ "-v",
+ "quiet",
+ "-print_format",
+ "json",
+ "-show_format",
+ "-show_streams",
+ url,
+ ]
+ try:
+ proc = await asyncio.create_subprocess_exec(
+ *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
+ )
+ stdout, _ = await proc.communicate()
+ if proc.returncode != 0:
+ return False, 0, 0, None, -1
+
+ data = json.loads(stdout.decode())
+ video_streams = [
+ s for s in data.get("streams", []) if s.get("codec_type") == "video"
+ ]
+ if not video_streams:
+ return False, 0, 0, None, -1
+
+ v = video_streams[0]
+ width = int(v.get("width", 0))
+ height = int(v.get("height", 0))
+ codec = v.get("codec_name")
+
+ duration_str = data.get("format", {}).get("duration")
+ duration = float(duration_str) if duration_str else -1.0
+
+ return True, width, height, codec, duration
+ except (json.JSONDecodeError, ValueError, KeyError, sp.SubprocessError):
+ return False, 0, 0, None, -1
+
+ def probe_with_cv2(url: str) -> tuple[bool, int, int, Optional[str], float]:
+ """Primary attempt using cv2: returns (valid, width, height, fourcc, duration)."""
+ cap = cv2.VideoCapture(url)
+ if not cap.isOpened():
+ cap.release()
+ return False, 0, 0, None, -1
+
+ width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+ height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+ valid = width > 0 and height > 0
+ fourcc = None
+ duration = -1.0
+
+ if valid:
+ fourcc_int = int(cap.get(cv2.CAP_PROP_FOURCC))
+ fourcc = fourcc_int.to_bytes(4, "little").decode("latin-1").strip()
+
+ if get_duration:
+ fps = cap.get(cv2.CAP_PROP_FPS)
+ total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+ if fps > 0 and total_frames > 0:
+ duration = total_frames / fps
+
+ cap.release()
+ return valid, width, height, fourcc, duration
+
+ # try cv2 first
+ has_video, width, height, fourcc, duration = probe_with_cv2(url)
+
+ # fallback to ffprobe if needed
+ if not has_video or (get_duration and duration < 0):
+ has_video, width, height, fourcc, duration = await probe_with_ffprobe(url)
+
+ result: dict[str, Any] = {"has_valid_video": has_video}
+ if has_video:
+ result.update({"width": width, "height": height})
+ if fourcc:
+ result["fourcc"] = fourcc
+ result["codec_name"] = fourcc
+ if get_duration:
+ result["duration"] = duration
+
+ return result
+
+
+def process_logs(
+ contents: str,
+ service: Optional[str] = None,
+ start: Optional[int] = None,
+ end: Optional[int] = None,
+) -> Tuple[int, List[str]]:
+ log_lines = []
+ last_message = None
+ last_timestamp = None
+ repeat_count = 0
+
+ for raw_line in contents.splitlines():
+ clean_line = raw_line.strip()
+
+ if len(clean_line) < 10:
+ continue
+
+ # Handle cases where S6 does not include date in log line
+ if " " not in clean_line:
+ clean_line = f"{datetime.now()} {clean_line}"
+
+ try:
+ # Find the position of the first double space to extract timestamp and message
+ date_end = clean_line.index(" ")
+ timestamp = clean_line[:date_end]
+ full_message = clean_line[date_end:].strip()
+
+ # For frigate, remove the date part from message comparison
+ if service == "frigate":
+ # Skip the date at the start of the message if it exists
+ date_parts = full_message.split("]", 1)
+ if len(date_parts) > 1:
+ message_part = date_parts[1].strip()
+ else:
+ message_part = full_message
+ else:
+ message_part = full_message
+
+ if message_part == last_message:
+ repeat_count += 1
+ continue
+ else:
+ if repeat_count > 0:
+ # Insert a deduplication message formatted the same way as logs
+ dedup_message = f"{last_timestamp} [LOGGING] Last message repeated {repeat_count} times"
+ log_lines.append(dedup_message)
+ repeat_count = 0
+
+ log_lines.append(clean_line)
+ last_timestamp = timestamp
+
+ last_message = message_part
+
+ except ValueError:
+ # If we can't parse the line properly, just add it as is
+ log_lines.append(clean_line)
+ continue
+
+ # If there were repeated messages at the end, log the count
+ if repeat_count > 0:
+ dedup_message = (
+ f"{last_timestamp} [LOGGING] Last message repeated {repeat_count} times"
+ )
+ log_lines.append(dedup_message)
+
+ return len(log_lines), log_lines[start:end]
+
+
+def set_file_limit() -> None:
+ # Newer versions of containerd 2.X+ impose a very low soft file limit of 1024
+ # This applies to OSs like HA OS (see https://github.com/home-assistant/operating-system/issues/4110)
+ # Attempt to increase this limit
+ soft_limit = int(os.getenv("SOFT_FILE_LIMIT", "65536") or "65536")
+
+ current_soft, current_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
+ logger.debug(f"Current file limits - Soft: {current_soft}, Hard: {current_hard}")
+
+ new_soft = min(soft_limit, current_hard)
+ resource.setrlimit(resource.RLIMIT_NOFILE, (new_soft, current_hard))
+ logger.debug(
+ f"File limit set. New soft limit: {new_soft}, Hard limit remains: {current_hard}"
+ )
+
+
+def get_fs_type(path: str) -> str:
+ best_match = ""
+ fs_type = ""
+ for part in psutil.disk_partitions(all=True):
+ if path.startswith(part.mountpoint) and len(best_match) < len(part.mountpoint):
+ fs_type = part.fstype
+ best_match = part.mountpoint
+ return fs_type
+
+
+def calculate_shm_requirements(config) -> dict:
+ try:
+ storage_stats = shutil.disk_usage("/dev/shm")
+ except (FileNotFoundError, OSError):
+ return {}
+
+ total_mb = round(storage_stats.total / pow(2, 20), 1)
+ used_mb = round(storage_stats.used / pow(2, 20), 1)
+ free_mb = round(storage_stats.free / pow(2, 20), 1)
+
+ # required for log files + nginx cache
+ min_req_shm = 40 + 10
+
+ if config.birdseye.restream:
+ min_req_shm += 8
+
+ available_shm = total_mb - min_req_shm
+ cam_total_frame_size = 0.0
+
+ for camera in config.cameras.values():
+ if camera.enabled_in_config and camera.detect.width and camera.detect.height:
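+ # yuv420 frames take 1.5 bytes per pixel plus a fixed per-frame overhead; divide by 1048576 to convert bytes to MiB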
+ cam_total_frame_size += round(
+ (camera.detect.width * camera.detect.height * 1.5 + 270480) / 1048576,
+ 1,
+ )
+
+ # Leave room for 2 cameras that are added dynamically. If a user wants to add more
+ # cameras, they may need to increase the SHM size and restart after adding them.
+ cam_total_frame_size += 2 * round(
+ (1280 * 720 * 1.5 + 270480) / 1048576,
+ 1,
+ )
+
+ shm_frame_count = min(
+ int(os.environ.get(SHM_FRAMES_VAR, "50")),
+ int(available_shm / cam_total_frame_size),
+ )
+
+ # minimum required shm recommendation
+ min_shm = round(min_req_shm + cam_total_frame_size * 20)
+
+ return {
+ "total": total_mb,
+ "used": used_mb,
+ "free": free_mb,
+ "mount_type": get_fs_type("/dev/shm"),
+ "available": round(available_shm, 1),
+ "camera_frame_size": cam_total_frame_size,
+ "shm_frame_count": shm_frame_count,
+ "min_shm": min_shm,
+ }
diff --git a/migrations/036_add_recording_variants.py b/migrations/036_add_recording_variants.py
new file mode 100644
index 000000000..e314fb2aa
--- /dev/null
+++ b/migrations/036_add_recording_variants.py
@@ -0,0 +1,38 @@
+"""Peewee migrations -- 036_add_recording_variants.py."""
+
+import peewee as pw
+
+from frigate.models import Recordings
+
+SQL = pw.SQL
+
+
+def migrate(migrator, database, fake=False, **kwargs):
+ existing_columns = {
+ row[1] for row in database.execute_sql('PRAGMA table_info("recordings")').fetchall()
+ }
+
+ fields_to_add = {}
+ if "variant" not in existing_columns:
+ fields_to_add["variant"] = pw.CharField(default="main", max_length=20)
+ if "codec_name" not in existing_columns:
+ fields_to_add["codec_name"] = pw.CharField(null=True, max_length=32)
+ if "width" not in existing_columns:
+ fields_to_add["width"] = pw.IntegerField(null=True)
+ if "height" not in existing_columns:
+ fields_to_add["height"] = pw.IntegerField(null=True)
+ if "bitrate" not in existing_columns:
+ fields_to_add["bitrate"] = pw.IntegerField(null=True)
+
+ if fields_to_add:
+ migrator.add_fields(Recordings, **fields_to_add)
+
+ migrator.sql(
+ 'CREATE INDEX IF NOT EXISTS "recordings_camera_variant_start_time_end_time" ON "recordings" ("camera", "variant", "start_time" DESC, "end_time" DESC)'
+ )
+
+
+def rollback(migrator, database, fake=False, **kwargs):
+ migrator.remove_fields(
+ Recordings, ["variant", "codec_name", "width", "height", "bitrate"]
+ )
diff --git a/scripts/README.md b/scripts/README.md
new file mode 100644
index 000000000..2fa6379a0
--- /dev/null
+++ b/scripts/README.md
@@ -0,0 +1,83 @@
+# Scripts
+
+## Transcode benchmarks
+
+Proof-of-concept benchmarks for **real-time VOD transcoding**: transcode a video file with FFmpeg (optionally with hardware acceleration) and measure time and throughput. Used to de-risk the real-time VOD transcoding feature (segment-level transcode + cache): we need ~10s segments to transcode in well under 10s (ideally <2s) so timeline scrubbing stays responsive.
+
+### Python (recommended)
+
+From the repo root:
+
+```bash
+# Full file, CPU
+python scripts/transcode_benchmark.py path/to/recording.mp4
+
+# First 10 seconds only (simulates one HLS segment)
+python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10
+
+# 10s segment with NVIDIA HW accel
+python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10 --hwaccel nvidia
+
+# Simulate scrubbing: start 60s in, transcode 10s (VAAPI)
+python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10 --seek 60 --hwaccel vaapi
+
+# Intel QSV H.265 (preset-intel-qsv-h265)
+python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10 --hwaccel qsv-h265
+
+# Custom FFmpeg binary (e.g. Frigate container)
+python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10 --ffmpeg /usr/lib/ffmpeg/7/bin/ffmpeg
+```
+
+Options:
+
+- `--duration SEC` – Transcode only this many seconds (default: full file). Use 10 to simulate one HLS segment.
+- `--seek SEC` – Start at this position (fast seek before `-i`). Simulates scrubbing into the file.
+- `--hwaccel cpu|nvidia|vaapi|qsv-h265` – Matches Frigate presets: libx264, h264_nvenc, h264_vaapi, preset-intel-qsv-h265 (hevc_qsv).
+- `--vaapi-device` – VAAPI device (default: `/dev/dri/renderD128`).
+- `--qsv-device` – Intel QSV device: on Linux defaults to `/dev/dri/renderD129` if present (else `renderD128`, else `0`). With two GPUs, the second node is often the Intel iGPU. Override if you get “No VA display found” (e.g. try the other node).
+- `--output PATH` – Write output here (default: temp file, deleted).
+- `--keep-output` – Keep the temp output file.
+
+Output: real time, speed (× realtime), output size. The script suggests whether the speed is good for ~10s segment transcode.
+
+### Shell
+
+Quick one-liners without Python:
+
+```bash
+chmod +x scripts/transcode_benchmark.sh
+
+./scripts/transcode_benchmark.sh path/to/recording.mp4
+./scripts/transcode_benchmark.sh path/to/recording.mp4 10
+./scripts/transcode_benchmark.sh path/to/recording.mp4 10 nvidia
+```
+
+Arguments: `INPUT [DURATION_SEC] [cpu|nvidia|vaapi|qsv-h265]`. Optional env: `FFMPEG`, `FFPROBE`, `VAAPI_DEVICE`, `QSV_DEVICE`.
+
+### Interpreting results
+
+- **Speed ≥ 5× realtime** – A 10s segment transcodes in ~2s or less; good for on-demand segment transcode with cache.
+- **Speed 1–5×** – Marginal; segment may take several seconds; transcode-ahead or caching helps.
+- **Speed < 1×** – Too slow for real-time; consider stronger HW or lower resolution/bitrate.
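+
+For example, if a run with `--duration 10` finishes in 1.8s of real time, speed = 10 / 1.8 ≈ 5.6× realtime (good); at 12s of real time, speed = 10 / 12 ≈ 0.83×, too slow for on-demand segment transcoding.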
+
+Run with a real Frigate recording (or any H.264/HEVC MP4) and try both `--duration 10` and full file to see segment vs full transcode cost.
+
+### Troubleshooting `qsv-h265` (“No VA display found”)
+
+Intel QSV (`qsv-h265`) only works on **Intel GPUs** with a working **Intel VA-API** stack. If both `/dev/dri/renderD128` and `renderD129` fail with “No VA display found” or “Device creation failed: -22”, then:
+
+1. **Check which GPUs you have** – With two cards, both may be non-Intel (e.g. NVIDIA + AMD). QSV is Intel-only. Use `lspci -k | grep -A3 VGA` to see adapters and drivers.
+2. **Check VA-API** – Run `vainfo` or `vainfo --display drm --device /dev/dri/renderD128` (then `renderD129`). If it errors or shows no Intel driver, QSV won’t work. On Intel you typically need `intel-media-driver` (newer) or `intel-vaapi-driver` (i965, older).
+3. **Permissions** – Ensure your user is in the `render` (and often `video`) group: `groups`; add with `sudo usermod -aG render $USER` and log in again.
+4. **Use another HW accel** – If you have an **AMD** GPU, use `vaapi` (H.264). If you have **NVIDIA**, use `nvidia`. Otherwise use `cpu`.
+
+5. **Frigate Docker uses QSV but host benchmark fails** – The container has the Intel VA/QSV stack and device access; the host may not. Run the benchmark **inside the same environment** (e.g. inside the Frigate container):
+
+ ```bash
+ # Copy script and a sample recording into the container (adjust container name)
+ docker cp scripts/transcode_benchmark.sh frigate:/tmp/
+ docker cp /path/to/59.24.mp4 frigate:/tmp/
+ docker exec -it frigate bash -c 'chmod +x /tmp/transcode_benchmark.sh && /tmp/transcode_benchmark.sh /tmp/59.24.mp4 10 qsv-h265'
+ ```
+
+ The script auto-detects FFmpeg under `/usr/lib/ffmpeg/*/bin` when `ffmpeg` isn’t on PATH (Frigate container). If it doesn’t, set `FFMPEG` and `FFPROBE` explicitly, e.g. `docker exec ... env FFMPEG=/usr/lib/ffmpeg/7.0/bin/ffmpeg FFPROBE=/usr/lib/ffmpeg/7.0/bin/ffprobe /tmp/transcode_benchmark.sh ...`.
diff --git a/scripts/transcode_benchmark.py b/scripts/transcode_benchmark.py
new file mode 100644
index 000000000..87e6adffc
--- /dev/null
+++ b/scripts/transcode_benchmark.py
@@ -0,0 +1,289 @@
+#!/usr/bin/env python3
+"""
+Proof-of-concept benchmark: transcode a video file with FFmpeg (optionally with
+hardware acceleration) and report timing and throughput.
+
+Used to de-risk real-time VOD transcoding: we need ~10s segments to transcode
+in well under 10s (ideally <2s) so scrubbing stays responsive.
+
+Usage:
+ python scripts/transcode_benchmark.py path/to/video.mp4
+ python scripts/transcode_benchmark.py path/to/video.mp4 --duration 10 --hwaccel nvidia
+ python scripts/transcode_benchmark.py path/to/video.mp4 --duration 10 --seek 60 --hwaccel vaapi
+
+Output: real time, speed (x realtime), output size. Aligns with Frigate export/timelapse
+HW presets (preset-nvidia, preset-vaapi, libx264 default).
+"""
+
+import argparse
+import os
+import subprocess
+import sys
+import tempfile
+import time
+from pathlib import Path
+from typing import Optional
+
+
+def get_ffmpeg_command(
+ ffmpeg_path: str,
+ input_path: str,
+ output_path: str,
+ *,
+ duration_sec: Optional[float] = None,
+ seek_sec: float = 0,
+ hwaccel: str = "cpu",
+ gpu_device: str = "/dev/dri/renderD128",
+ qsv_device: str = "0",
+) -> list[str]:
+ """Build argv for FFmpeg transcode (H.264 or HEVC, no audio). Matches Frigate timelapse-style encode."""
+ cmd = [ffmpeg_path, "-hide_banner", "-y", "-loglevel", "warning", "-stats"]
+
+ # Optional seek: -ss before -i for fast seek (keyframe then decode)
+ if seek_sec > 0:
+ cmd.extend(["-ss", str(seek_sec)])
+
+ if hwaccel == "nvidia":
+ cmd.extend(
+ [
+ "-hwaccel",
+ "cuda",
+ "-hwaccel_output_format",
+ "cuda",
+ "-extra_hw_frames",
+ "8",
+ ]
+ )
+ elif hwaccel == "vaapi":
+ cmd.extend(
+ [
+ "-hwaccel",
+ "vaapi",
+ "-hwaccel_device",
+ gpu_device,
+ "-hwaccel_output_format",
+ "vaapi",
+ ]
+ )
+ elif hwaccel == "qsv-h265":
+ # preset-intel-qsv-h265: load_plugin for HEVC decode, QSV device for decode+encode
+ cmd.extend(
+ [
+ "-load_plugin",
+ "hevc_hw",
+ "-hwaccel",
+ "qsv",
+ "-qsv_device",
+ qsv_device,
+ "-hwaccel_output_format",
+ "qsv",
+ ]
+ )
+
+ cmd.extend(["-i", input_path])
+
+ if duration_sec is not None and duration_sec > 0:
+ cmd.extend(["-t", str(duration_sec)])
+
+ cmd.extend(["-an"])
+
+ if hwaccel == "nvidia":
+ cmd.extend(["-c:v", "h264_nvenc"])
+ elif hwaccel == "vaapi":
+ # VAAPI encode needs frames in vaapi format; decoder outputs vaapi when hwaccel_output_format vaapi
+ cmd.extend(["-c:v", "h264_vaapi"])
+ elif hwaccel == "qsv-h265":
+ # Use CQP explicitly; profile/level can be unsupported on some QSV runtimes
+ cmd.extend(["-c:v", "hevc_qsv", "-global_quality", "23"])
+ else:
+ cmd.extend(
+ ["-c:v", "libx264", "-preset:v", "ultrafast", "-tune:v", "zerolatency"]
+ )
+
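+ # +faststart moves the moov atom to the front so playback can start before the file fully downloads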
+ cmd.extend(["-f", "mp4", "-movflags", "+faststart", output_path])
+ return cmd
+
+
+def get_video_duration_sec(ffprobe_path: str, input_path: str) -> Optional[float]:
+ """Return duration in seconds or None on failure."""
+ try:
+ out = subprocess.run(
+ [
+ ffprobe_path,
+ "-v",
+ "error",
+ "-show_entries",
+ "format=duration",
+ "-of",
+ "default=noprint_wrappers=1:nokey=1",
+ input_path,
+ ],
+ capture_output=True,
+ text=True,
+ timeout=10,
+ )
+ if out.returncode == 0 and out.stdout.strip():
+ return float(out.stdout.strip())
+ except (subprocess.TimeoutExpired, ValueError, FileNotFoundError):
+ pass
+ return None
+
+
+def main() -> int:
+ parser = argparse.ArgumentParser(
+ description="Benchmark FFmpeg transcode (H.264) with optional HW accel."
+ )
+ parser.add_argument(
+ "input",
+ type=Path,
+ help="Input video file (e.g. recording segment)",
+ )
+ parser.add_argument(
+ "--duration",
+ type=float,
+ default=None,
+ metavar="SEC",
+ help="Transcode only this many seconds (default: full file). Simulates segment length.",
+ )
+ parser.add_argument(
+ "--seek",
+ type=float,
+ default=0,
+ metavar="SEC",
+ help="Start at this position (before -i for fast seek). Simulates scrubbing into file.",
+ )
+ parser.add_argument(
+ "--hwaccel",
+ choices=("cpu", "nvidia", "vaapi", "qsv-h265"),
+ default="cpu",
+ help="HW accel: cpu (libx264), nvidia (h264_nvenc), vaapi (h264_vaapi), qsv-h265 (preset-intel-qsv-h265, hevc_qsv).",
+ )
+ parser.add_argument(
+ "--vaapi-device",
+ default="/dev/dri/renderD128",
+ help="VAAPI device (default: /dev/dri/renderD128).",
+ )
+ parser.add_argument(
+ "--qsv-device",
+ default=(
+ "/dev/dri/renderD129"
+ if os.path.exists("/dev/dri/renderD129")
+ else "/dev/dri/renderD128"
+ if os.path.exists("/dev/dri/renderD128")
+ else "0"
+ ),
+ help="Intel QSV device: path (e.g. /dev/dri/renderD129 or renderD128 on Linux) or 0 (Windows). With two GPUs, try renderD129 if renderD128 fails. Used for --hwaccel qsv-h265.",
+ )
+ parser.add_argument(
+ "--ffmpeg",
+ default="ffmpeg",
+ metavar="PATH",
+ help="FFmpeg binary (default: ffmpeg in PATH).",
+ )
+ parser.add_argument(
+ "--ffprobe",
+ default="ffprobe",
+ metavar="PATH",
+ help="FFprobe binary (default: ffprobe in PATH).",
+ )
+ parser.add_argument(
+ "--output",
+ type=Path,
+ default=None,
+ help="Output file (default: temp file, deleted after).",
+ )
+ parser.add_argument(
+ "--keep-output",
+ action="store_true",
+ help="Keep output file when using default temp path.",
+ )
+ args = parser.parse_args()
+
+ input_path = args.input.resolve()
+ if not input_path.is_file():
+ print(f"Error: input file not found: {input_path}", file=sys.stderr)
+ return 1
+
+ effective_duration = args.duration
+ if effective_duration is None:
+ duration_from_probe = get_video_duration_sec(str(args.ffprobe), str(input_path))
+ if duration_from_probe is not None:
+ effective_duration = duration_from_probe - args.seek
+ if effective_duration <= 0:
+ print("Error: seek >= file duration", file=sys.stderr)
+ return 1
+ else:
+ print("Warning: could not probe duration; reporting real time only.", file=sys.stderr)
+
+ use_temp = args.output is None
+ if use_temp:
+ fd, out_path = tempfile.mkstemp(suffix=".mp4")
+ os.close(fd)
+ output_path = Path(out_path)
+ else:
+ output_path = args.output.resolve()
+
+ cmd = get_ffmpeg_command(
+ args.ffmpeg,
+ str(input_path),
+ str(output_path),
+ duration_sec=args.duration,
+ seek_sec=args.seek,
+ hwaccel=args.hwaccel,
+ gpu_device=args.vaapi_device,
+ qsv_device=args.qsv_device,
+ )
+
+ print(f"Input: {input_path}")
+ print(f"Output: {output_path}")
+ print(f"HW: {args.hwaccel}")
+ if args.duration is not None:
+ print(f"Limit: {args.duration}s")
+ if args.seek > 0:
+ print(f"Seek: {args.seek}s")
+ print(f"Run: {' '.join(cmd)}")
+ print()
+
+ start = time.perf_counter()
+ try:
+ subprocess.run(cmd, check=True, timeout=3600)
+ except subprocess.CalledProcessError as e:
+ print(f"FFmpeg failed: {e}", file=sys.stderr)
+ if use_temp and output_path.exists():
+ output_path.unlink()
+ return 1
+ except subprocess.TimeoutExpired:
+ print("FFmpeg timed out.", file=sys.stderr)
+ if use_temp and output_path.exists():
+ output_path.unlink()
+ return 1
+ elapsed = time.perf_counter() - start
+
+ size_bytes = output_path.stat().st_size if output_path.exists() else 0
+
+ print("--- Results ---")
+ print(f"Real time: {elapsed:.2f}s")
+ if effective_duration is not None and effective_duration > 0:
+ speed = effective_duration / elapsed
+ print(f"Video duration: {effective_duration:.2f}s")
+ print(f"Speed: {speed:.2f}x realtime")
+ if args.duration and args.duration <= 15:
+ if speed >= 5:
+ print("(Good for ~10s segment transcode: well under 2s.)")
+ elif speed >= 1:
+ print("(Marginal: segment may take several seconds.)")
+ else:
+ print("(Slow: segment transcode would exceed segment length.)")
+ print(f"Output size: {size_bytes / (1024*1024):.2f} MiB")
+
+ if use_temp:
+ if args.keep_output:
+ print(f"(Output kept: {output_path})")
+ else:
+ output_path.unlink(missing_ok=True)
+
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/scripts/transcode_benchmark.sh b/scripts/transcode_benchmark.sh
new file mode 100644
index 000000000..f42349d65
--- /dev/null
+++ b/scripts/transcode_benchmark.sh
@@ -0,0 +1,101 @@
+#!/usr/bin/env bash
+# Proof-of-concept: run FFmpeg transcode and report real time.
+# Usage:
+# ./scripts/transcode_benchmark.sh path/to/video.mp4
+# ./scripts/transcode_benchmark.sh path/to/video.mp4 10 # first 10 seconds only
+# ./scripts/transcode_benchmark.sh path/to/video.mp4 10 nvidia
+#
+# Optional: DURATION (seconds), HWACCEL (cpu|nvidia|vaapi|qsv-h265). Default: full file, cpu.
+# Requires: ffmpeg, ffprobe. Output: temp file, then deleted. Reports real time and speed.
+
+set -e
+INPUT="${1:?Usage: $0 [duration_sec] [cpu|nvidia|vaapi|qsv-h265]}"
+DURATION="${2:-}"
+HWACCEL="${3:-cpu}"
+# On Linux, QSV needs a DRM render node. With two GPUs, renderD128 is often non-Intel and renderD129 the Intel iGPU; prefer 129 when both exist so QSV finds VA.
+if [[ -z "${QSV_DEVICE:-}" ]]; then
+ if [[ -e /dev/dri/renderD129 ]]; then
+ QSV_DEVICE="/dev/dri/renderD129"
+ elif [[ -e /dev/dri/renderD128 ]]; then
+ QSV_DEVICE="/dev/dri/renderD128"
+ else
+ QSV_DEVICE="0"
+ fi
+fi
+# Frigate container has ffmpeg under /usr/lib/ffmpeg/*/bin, not on PATH
+if [[ -z "${FFMPEG:-}" ]]; then
+ if command -v ffmpeg &>/dev/null; then
+ FFMPEG="ffmpeg"
+ elif [[ -d /usr/lib/ffmpeg ]] && FFMPEG_CANDIDATE=$(find /usr/lib/ffmpeg -path '*/bin/ffmpeg' -type f 2>/dev/null | head -1); [[ -n "${FFMPEG_CANDIDATE:-}" ]]; then
+ FFMPEG="$FFMPEG_CANDIDATE"
+ else
+ FFMPEG="ffmpeg"
+ fi
+fi
+FFPROBE="${FFPROBE:-$(dirname "$FFMPEG")/ffprobe}"
+if [[ ! -x "$FFPROBE" ]]; then
+ FFPROBE="ffprobe"
+fi
+OUTPUT=$(mktemp -u).mp4
+
+cleanup() { rm -f "$OUTPUT"; }
+trap cleanup EXIT
+
+# Build base decode/input args
+INPUT_ARGS=(-hide_banner -y -loglevel warning -stats -i "$INPUT")
+if [[ -n "$DURATION" && "$DURATION" =~ ^[0-9]+\.?[0-9]*$ ]]; then
+ INPUT_ARGS+=(-t "$DURATION")
+fi
+
+case "$HWACCEL" in
+ nvidia)
+ PRE=( -hwaccel cuda -hwaccel_output_format cuda -extra_hw_frames 8 )
+ ENC=(-c:v h264_nvenc)
+ ;;
+ vaapi)
+ PRE=( -hwaccel vaapi -hwaccel_device "${VAAPI_DEVICE:-/dev/dri/renderD128}" -hwaccel_output_format vaapi )
+ ENC=(-c:v h264_vaapi)
+ ;;
+ qsv-h265)
+ PRE=( -load_plugin hevc_hw -hwaccel qsv -qsv_device "$QSV_DEVICE" -hwaccel_output_format qsv )
+ # Use CQP explicitly; -profile:v/-level can be unsupported on some QSV runtimes
+ ENC=(-c:v hevc_qsv -global_quality 23)
+ ;;
+ *)
+ PRE=()
+ ENC=(-c:v libx264 -preset:v ultrafast -tune:v zerolatency)
+ ;;
+esac
+
+echo "Input: $INPUT"
+echo "Output: $OUTPUT (temp)"
+echo "HW: $HWACCEL"
+[[ -n "$DURATION" ]] && echo "Limit: ${DURATION}s"
+# QSV is Intel-only and needs a working Intel VA-API stack; if you see 'No VA display found', see scripts/README.md troubleshooting.
+[[ "$HWACCEL" = "qsv-h265" ]] && echo "QSV device: $QSV_DEVICE"
+echo ""
+
+# Get duration for speed calculation (if not limiting, use full file length)
+if [[ -n "$DURATION" ]]; then
+ DUR_SEC="$DURATION"
+else
+ DUR_SEC=$("${FFPROBE:-ffprobe}" -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 "$INPUT" 2>/dev/null || true)
+fi
+
+# Use $SECONDS (bash) so we don't rely on date %N or bc in minimal containers
+START=$SECONDS
+"$FFMPEG" "${PRE[@]}" "${INPUT_ARGS[@]}" -an "${ENC[@]}" -f mp4 -movflags +faststart "$OUTPUT"
+ELAPSED=$((SECONDS - START))
+[[ "$ELAPSED" -eq 0 ]] && ELAPSED=1
+
+SIZE=$(stat -c%s "$OUTPUT" 2>/dev/null || stat -f%z "$OUTPUT" 2>/dev/null || echo 0)
+SIZE_MB=$(awk "BEGIN {printf \"%.2f\", $SIZE/1048576}" 2>/dev/null || echo "$((SIZE / 1048576))")
+
+echo "--- Results ---"
+echo "Real time: ${ELAPSED}s"
+if [[ -n "$DUR_SEC" && "$DUR_SEC" =~ ^[0-9]+\.?[0-9]*$ ]]; then
+ SPEED=$(awk "BEGIN {printf \"%.2f\", $DUR_SEC/$ELAPSED}" 2>/dev/null || echo "?")
+ echo "Duration: ${DUR_SEC}s"
+ echo "Speed: ${SPEED}x realtime"
+fi
+echo "Output size: ${SIZE_MB} MiB"
diff --git a/transcode_proxy/DEV_WORKFLOW.md b/transcode_proxy/DEV_WORKFLOW.md
new file mode 100644
index 000000000..222c2e058
--- /dev/null
+++ b/transcode_proxy/DEV_WORKFLOW.md
@@ -0,0 +1,69 @@
+# Dev workflow: frigate-dev (single image with transcode proxy)
+
+Use **frigate-dev** so your working Docker setup keeps using the stable image. You switch between stable and dev by changing the image in compose and restarting. The transcode proxy runs **inside** the Frigate container; there is no separate proxy image.
+
+## Image names
+
+- **frigate-dev** – Frigate image built from this repo (includes transcode proxy, config + UI for transcode_proxy).
+- Your normal setup keeps using **ghcr.io/blakeblackshear/frigate:stable-tensorrt** (or whatever you use today).
+
+## Start / stop (switch between stable and dev)
+
+You can’t run both stacks at once (same ports). Use one compose file and swap the image.
+
+**Stop everything:**
+```bash
+cd ~/docker-compose # or wherever your compose file is
+docker compose down
+```
+
+**Run dev stack (Frigate with in-container transcode proxy):**
+- In `docker-compose.yml`, set the frigate service to `image: frigate-dev` and publish port 5010 if you use transcode_proxy.
+```bash
+docker compose up -d
+```
+
+**Switch back to stable:**
+- Stop: `docker compose down`
+- In `docker-compose.yml`, set frigate back to `image: ghcr.io/blakeblackshear/frigate:stable-tensorrt`.
+```bash
+docker compose up -d
+```
+
+**Useful commands:**
+- `docker compose down` – stop and remove containers.
+- `docker compose up -d` – start in the background.
+- `docker compose ps` – see what’s running.
+- `docker compose logs -f frigate` – follow Frigate logs.
+
+## Building (Ubuntu server recommended)
+
+Frigate’s image **is not** “just Python” – it has a **compile phase** (nginx, sqlite-vec, etc.). Building is done with Docker and can take a while.
+
+**Where to build:** On the **Ubuntu server** where you run Frigate. That way you get the right architecture and avoid Windows/Linux cross-build issues. Sync the repo from your Windows machine via git (push from Windows to a remote and pull on the server), or copy the repo onto the server directly.
+
+**On the Ubuntu server:**
+
+1. Clone (or pull) the Frigate repo with this code.
+2. **Build Frigate (TensorRT variant, same as stable-tensorrt):**
+ ```bash
+ cd /path/to/frigate
+ make version
+ make local-trt
+ docker tag frigate:latest-tensorrt frigate-dev
+ ```
+ (`make local-trt` uses buildx; first time may be slow.) The resulting image includes the transcode proxy; no separate proxy image is built.
+
+**If you prefer to build on Windows:** You can use Docker buildx to build for `linux/amd64` and push to a registry, then pull `frigate-dev` on the Ubuntu server. The Frigate build is heavy and may be slower or more fragile on Windows; building on the server is simpler.
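+
+A minimal sketch of such a cross-build (registry, tag, and build target are assumptions; `make local-trt` remains the supported path):
+
+```bash
+# Build for linux/amd64 and push to a registry (placeholder names; the
+# --target stage name is an assumption -- check docker/main/Dockerfile)
+docker buildx build --platform linux/amd64 \
+  -f docker/main/Dockerfile --target frigate \
+  -t registry.example.com/frigate-dev:latest --push .
+```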
+
+## One-time setup on the server
+
+```bash
+# Clone or copy the repo, then:
+cd /path/to/frigate
+make version
+make local-trt
+docker tag frigate:latest-tensorrt frigate-dev
+```
+
+Then in your compose use `image: frigate-dev`, publish port 5010 if you use the transcode proxy, and set `transcode_proxy` in Frigate config as in the main README.
diff --git a/transcode_proxy/README.md b/transcode_proxy/README.md
new file mode 100644
index 000000000..cfef07059
--- /dev/null
+++ b/transcode_proxy/README.md
@@ -0,0 +1,55 @@
+# Frigate VOD Transcode Proxy
+
+Optional proxy that runs **inside the Frigate container** and rewrites VOD HLS playback to an H.264 transport-stream rendition on the fly. Use it when recordings are HEVC (or high bitrate) and you want compatible or lower-bitrate playback.
+
+## How it works
+
+- **Manifest requests** (e.g. `.../master.m3u8` and `.../index-v1.m3u8`): Fetched from upstream and rewritten so the browser sees a proxy-owned H.264 HLS rendition.
+- **Segment requests**: The rewritten media playlist points to proxy-owned `.transcoded.ts` segment URLs. Those requests fetch the upstream source segment, transcode it to H.264 MPEG-TS with FFmpeg, cache it in memory (LRU, configurable size), then serve it.
+- **Init fragments**: The rewritten media playlist removes upstream `#EXT-X-MAP` usage, so the browser no longer depends on upstream fragmented MP4 init files for transcoded playback (see the example below).
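+
+For illustration (hypothetical names), an upstream media playlist fragment such as
+
+```
+#EXT-X-MAP:URI="init-v1-a1.mp4"
+#EXTINF:10.000,
+seg-1-v1-a1.m4s
+```
+
+is rewritten so the `#EXT-X-MAP` line is dropped and the segment URI becomes `seg-1-v1-a1.m4s.transcoded.ts`, which the proxy fetches, transcodes, and serves as H.264 MPEG-TS.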
+
+The proxy is an s6-managed service in the same Docker image as Frigate. It binds to port **5010** inside the container and starts after nginx is ready.
+
+## Configuration
+
+Environment variables (optional; defaults work when running in the same container):
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `TRANSCODE_PROXY_UPSTREAM` | `http://127.0.0.1:80` | Upstream Frigate VOD base URL (nginx internal port when in-container). |
+| `TRANSCODE_PROXY_PATH_PREFIX` | (empty) | If the proxy is mounted at a path (e.g. `/vod-transcoded`), set this so the proxy strips it when forwarding. |
+| `TRANSCODE_PROXY_HOST` | `0.0.0.0` | Bind host. |
+| `TRANSCODE_PROXY_PORT` | `5010` | Bind port. |
+| `TRANSCODE_PROXY_CACHE_MB` | `500` | Max in-memory cache size (MB). |
+| `TRANSCODE_PROXY_FFMPEG` | (system) | FFmpeg binary path; uses Frigate’s FFmpeg when not set. |
+| `TRANSCODE_PROXY_H264_BITRATE` | `128k` | H.264 bitrate for transcoded segments. |
+| `TRANSCODE_PROXY_MAX_WIDTH` | `640` | Max output width for transcoded playback; aspect ratio is preserved and smaller sources are not upscaled. |
+| `TRANSCODE_PROXY_MAX_HEIGHT` | `480` | Max output height for transcoded playback; aspect ratio is preserved and smaller sources are not upscaled. |
+
+## Enabling in Frigate
+
+1. Build Frigate from this repo (e.g. `frigate-dev`) so the image includes the proxy and config/UI support.
+2. Expose the proxy either internally through Frigate nginx (recommended, e.g. `/vod-transcoded`) or by publishing port **5010** for direct access.
+3. In Frigate config (YAML), add:
+ ```yaml
+ transcode_proxy:
+ enabled: true
+ vod_proxy_url: "http://YOUR_FRIGATE_HOST:5010" # same host as Frigate, port 5010
+ ```
+4. Restart Frigate. The UI will use the proxy for recording playback when enabled.
+
+If Frigate is behind a reverse proxy and you expose the transcode service at a path (e.g. `https://frigate.example.com/vod-transcoded`), set `TRANSCODE_PROXY_PATH_PREFIX=/vod-transcoded` in the container environment and use that full URL as `vod_proxy_url`.
+
+## Running (single container)
+
+The proxy runs automatically inside the Frigate container. No separate container or image is needed. For same-origin playback, keep the service internal and route it through Frigate nginx on the normal UI origin.
+
+See **transcode_proxy/DEV_WORKFLOW.md** for building the dev image (e.g. `frigate-dev`) and switching between stable and dev.
+
+## Endpoints
+
+- `GET /vod/.../master.m3u8` – Rewritten HLS master playlist for the transcoded rendition.
+- `GET /vod/.../index*.m3u8` – Rewritten HLS media playlist that points at proxy-owned transcoded transport-stream segments.
+- `GET /vod/.../*.transcoded.ts` – Transcoded H.264 MPEG-TS segments.
+- `GET /cache` – Cache stats (size, entry count).
+- `GET /health` – Health check.
diff --git a/transcode_proxy/__init__.py b/transcode_proxy/__init__.py
new file mode 100644
index 000000000..9b3ddc771
--- /dev/null
+++ b/transcode_proxy/__init__.py
@@ -0,0 +1 @@
+"""Transcode proxy: sits in front of Frigate VOD and transcodes segments on the fly to H.264."""
diff --git a/transcode_proxy/__main__.py b/transcode_proxy/__main__.py
new file mode 100644
index 000000000..063fa9b03
--- /dev/null
+++ b/transcode_proxy/__main__.py
@@ -0,0 +1,5 @@
+"""Run the transcode proxy: python -m transcode_proxy."""
+from transcode_proxy.main import run
+
+if __name__ == "__main__":
+ run()
diff --git a/transcode_proxy/cache.py b/transcode_proxy/cache.py
new file mode 100644
index 000000000..ab76f7ae4
--- /dev/null
+++ b/transcode_proxy/cache.py
@@ -0,0 +1,47 @@
+"""In-memory LRU cache for transcoded segments (byte-size limited)."""
+import logging
+import threading
+from collections import OrderedDict
+from typing import Optional
+
+logger = logging.getLogger(__name__)
+
+
+class ByteLRUCache:
+ """LRU cache that evicts by total byte size."""
+
+ def __init__(self, max_bytes: int):
+ self._max_bytes = max_bytes
+ self._current_bytes = 0
+ self._order: OrderedDict[str, bytes] = OrderedDict()
+ self._lock = threading.Lock()
+
+ def get(self, key: str) -> Optional[bytes]:
+ with self._lock:
+ data = self._order.pop(key, None)
+ if data is not None:
+ self._order[key] = data # move to end (most recent)
+ return data
+ return None
+
+ def set(self, key: str, value: bytes) -> None:
+ size = len(value)
+ if size > self._max_bytes:
+ logger.warning("Segment larger than cache max (%s bytes), not caching", size)
+ return
+ with self._lock:
+ while self._current_bytes + size > self._max_bytes and self._order:
+ evicted_key = next(iter(self._order))
+ evicted = self._order.pop(evicted_key)
+ self._current_bytes -= len(evicted)
+ logger.debug("Evicted %s from transcode cache", evicted_key)
+ self._order[key] = value
+ self._current_bytes += size
+
+ def size_bytes(self) -> int:
+ with self._lock:
+ return self._current_bytes
+
+ def count(self) -> int:
+ with self._lock:
+ return len(self._order)
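+
+
+# Illustrative usage (not wired into the proxy; names are hypothetical):
+#   cache = ByteLRUCache(max_bytes=10 * 1024 * 1024)
+#   cache.set("seg-1.transcoded.ts", b"...")
+#   data = cache.get("seg-1.transcoded.ts")  # bytes, or None on a miss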
diff --git a/transcode_proxy/config.py b/transcode_proxy/config.py
new file mode 100644
index 000000000..8f21ba01b
--- /dev/null
+++ b/transcode_proxy/config.py
@@ -0,0 +1,44 @@
+"""Configuration from environment."""
+import os
+from dataclasses import dataclass, field
+
+
+@dataclass
+class Config:
+ """Proxy configuration."""
+
+ # Upstream Frigate VOD base URL (e.g. http://nginx:80 or http://127.0.0.1:5001)
+ upstream_base: str = field(
+ default_factory=lambda: os.environ.get("TRANSCODE_PROXY_UPSTREAM", "http://127.0.0.1:80")
+ )
+ # Optional path prefix the proxy is mounted at (e.g. /vod-transcoded); strip when forwarding
+ path_prefix: str = field(
+ default_factory=lambda: os.environ.get("TRANSCODE_PROXY_PATH_PREFIX", "").rstrip("/")
+ )
+ # Host/port to bind
+ host: str = field(default_factory=lambda: os.environ.get("TRANSCODE_PROXY_HOST", "0.0.0.0"))
+ port: int = field(
+ default_factory=lambda: int(os.environ.get("TRANSCODE_PROXY_PORT", "5010"))
+ )
+ # In-memory cache max size in bytes
+ cache_max_bytes: int = field(
+ default_factory=lambda: int(os.environ.get("TRANSCODE_PROXY_CACHE_MB", "500")) * 1024 * 1024
+ )
+ # FFmpeg binary
+ ffmpeg_path: str = field(
+ default_factory=lambda: os.environ.get("TRANSCODE_PROXY_FFMPEG", "ffmpeg")
+ )
+ # H.264 bitrate for transcoded segments
+ h264_bitrate: str = field(
+ default_factory=lambda: os.environ.get("TRANSCODE_PROXY_H264_BITRATE", "128k")
+ )
+ # Max output size for transcoded playback; preserves aspect ratio and will not upscale
+ max_width: int = field(
+ default_factory=lambda: int(os.environ.get("TRANSCODE_PROXY_MAX_WIDTH", "640"))
+ )
+ max_height: int = field(
+ default_factory=lambda: int(os.environ.get("TRANSCODE_PROXY_MAX_HEIGHT", "480"))
+ )
+
+
+config = Config()
diff --git a/transcode_proxy/docker-compose.example.yml b/transcode_proxy/docker-compose.example.yml
new file mode 100644
index 000000000..410be2f73
--- /dev/null
+++ b/transcode_proxy/docker-compose.example.yml
@@ -0,0 +1,24 @@
+# Example: Frigate with in-container transcode proxy (single image).
+#
+# 1. Build Frigate from this repo (on Ubuntu recommended):
+# make version && make local-trt && docker tag frigate:latest-tensorrt frigate-dev
+#
+# 2. Use image: frigate-dev and publish port 5010 for the transcode proxy.
+# 3. In Frigate config (config.yml), set:
+# transcode_proxy:
+# enabled: true
+# vod_proxy_url: "http://YOUR_HOST:5010"
+
+services:
+ frigate:
+ container_name: frigate
+ restart: unless-stopped
+ image: frigate-dev
+ # ... your existing frigate config (gpus, shm_size, devices, volumes) ...
+ ports:
+ - "5000:5000" # or 8971:8971 depending on your setup
+ - "5010:5010" # transcode proxy (only needed if transcode_proxy.enabled is true)
+ # Optional: override proxy defaults
+ # environment:
+ # TRANSCODE_PROXY_PORT: "5010"
+ # TRANSCODE_PROXY_CACHE_MB: "500"
diff --git a/transcode_proxy/main.py b/transcode_proxy/main.py
new file mode 100644
index 000000000..97e69ea1b
--- /dev/null
+++ b/transcode_proxy/main.py
@@ -0,0 +1,419 @@
+"""FastAPI app: proxy VOD requests, transcode segments on the fly."""
+import logging
+import re
+from collections.abc import AsyncIterator
+from typing import Optional
+
+import httpx
+from fastapi import FastAPI, Request, Response
+from fastapi.responses import StreamingResponse
+from transcode_proxy.cache import ByteLRUCache
+from transcode_proxy.config import config
+from transcode_proxy.transcode import (
+ TranscodeError,
+ stream_transcode_segment_to_h264_ts,
+)
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+app = FastAPI(title="Frigate VOD Transcode Proxy", version="0.1.0")
+cache = ByteLRUCache(config.cache_max_bytes)
+
+# Segment extensions that the upstream VOD may expose.
+SEGMENT_EXTENSIONS = (".m4s", ".mp4", ".ts")
+FORWARD_HEADERS = ("cookie", "authorization", "referer")
+TRANSCODED_SEGMENT_SUFFIX = ".transcoded.ts"
+H264_CODEC = "avc1.64001f"
+LOCAL_QUERY_KEYS = {"bitrate", "max_width", "max_height"}
+
+
+def _upstream_path(path: str) -> Optional[str]:
+ """Strip path_prefix and only allow VOD paths through to upstream."""
+ p = path.lstrip("/")
+ if config.path_prefix:
+ prefix = config.path_prefix.strip("/")
+ if p.startswith(prefix + "/"):
+ p = p[len(prefix) + 1 :]
+ if p == "vod" or p.startswith("vod/"):
+ return "/" + p
+ if p.startswith("vod-transcoded/"):
+ return "/" + p[len("vod-transcoded/") :]
+ if p == "vod-transcoded":
+ return "/vod"
+ return None
+
+
+def _is_segment(path: str) -> bool:
+ return path.rstrip("/").endswith(TRANSCODED_SEGMENT_SUFFIX) or any(
+ path.rstrip("/").endswith(ext) for ext in SEGMENT_EXTENSIONS
+ )
+
+
+def _is_init_path(path: str) -> bool:
+ return bool(re.search(r"/init.*\.mp4$", path))
+
+
+def _is_master_playlist(path: str) -> bool:
+ return path.endswith("/master.m3u8") or path.endswith("master.m3u8")
+
+
+def _init_upstream_path(segment_path: str) -> Optional[str]:
+ """Infer the matching init fragment for an fMP4 media fragment path."""
+ match = re.search(r"/seg-\d+(?P.*)\.m4s$", segment_path)
+ if not match:
+ return None
+ suffix = match.group("suffix")
+ return re.sub(r"/seg-\d+.*\.m4s$", f"/init{suffix}.mp4", segment_path)
+
+
+async def _fetch_upstream_bytes(
+ client: httpx.AsyncClient, url: str, headers: dict[str, str]
+) -> Optional[bytes]:
+ try:
+ upstream_resp = await client.get(url, headers=headers)
+ upstream_resp.raise_for_status()
+ return upstream_resp.content
+ except Exception as e:
+ logger.warning("Upstream fetch failed %s: %s", url, e)
+ return None
+
+
+async def _fetch_source_init_bytes(
+ client: httpx.AsyncClient,
+ init_path: str,
+ query: str,
+ headers: dict[str, str],
+) -> Optional[bytes]:
+ init_url = f"{config.upstream_base.rstrip('/')}{init_path}"
+ if query:
+ init_url += f"?{query}"
+
+ cache_key = f"source-init:{init_url}"
+ cached = cache.get(cache_key)
+ if cached is not None:
+ return cached
+
+ init_bytes = await _fetch_upstream_bytes(client, init_url, headers)
+ if init_bytes is not None:
+ cache.set(cache_key, init_bytes)
+ return init_bytes
+
+
+async def _stream_source_segment_bytes(
+ source_url: str,
+ headers: dict[str, str],
+ init_bytes: Optional[bytes] = None,
+) -> AsyncIterator[bytes]:
+ if init_bytes is not None:
+ yield init_bytes
+
+ async with httpx.AsyncClient(timeout=60.0) as client:
+ async with client.stream("GET", source_url, headers=headers) as upstream_resp:
+ upstream_resp.raise_for_status()
+ async for chunk in upstream_resp.aiter_bytes():
+ if chunk:
+ yield chunk
+
+
+def _proxy_segment_uri(entry: str) -> str:
+ return f"{entry}{TRANSCODED_SEGMENT_SUFFIX}"
+
+
+def _source_segment_path(path: str) -> str:
+ if path.endswith(TRANSCODED_SEGMENT_SUFFIX):
+ return path[: -len(TRANSCODED_SEGMENT_SUFFIX)]
+ return path
+
+
+def _resolution_for_transcode(
+ width: int, height: int, max_width: int, max_height: int
+) -> tuple[int, int]:
+ if width <= 0 or height <= 0:
+ return (max_width, max_height)
+
+ max_width = max(max_width, 2)
+ max_height = max(max_height, 2)
+ scale = min(max_width / width, max_height / height, 1.0)
+ out_width = max(2, int(width * scale))
+ out_height = max(2, int(height * scale))
+
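+ # H.264 encoders generally require even frame dimensions, so round odd values down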
+ if out_width % 2:
+ out_width -= 1
+ if out_height % 2:
+ out_height -= 1
+
+ return (max(out_width, 2), max(out_height, 2))
+
+
+def _bandwidth_bits(bitrate: str) -> int:
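+ """Parse an FFmpeg-style bitrate string (e.g. "128k") into bits per second."""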
+ match = re.fullmatch(r"(?P\d+(?:\.\d+)?)(?P[kKmMgG]?)", bitrate.strip())
+ if not match:
+ return 2_000_000
+
+ value = float(match.group("value"))
+ suffix = match.group("suffix").upper()
+ multiplier = {
+ "": 1,
+ "K": 1_000,
+ "M": 1_000_000,
+ "G": 1_000_000_000,
+ }[suffix]
+ return int(value * multiplier)
+
+
+def _transcode_request_profile(request: Request) -> tuple[str, int, int, str]:
+ bitrate = request.query_params.get("bitrate", config.h264_bitrate)
+ max_width = int(request.query_params.get("max_width", config.max_width))
+ max_height = int(request.query_params.get("max_height", config.max_height))
+ upstream_query = "&".join(
+ f"{key}={value}"
+ for key, value in request.query_params.multi_items()
+ if key not in LOCAL_QUERY_KEYS
+ )
+ return bitrate, max_width, max_height, upstream_query
+
+
+def _rewrite_master_playlist(
+ upstream_bytes: bytes, bitrate: str, max_width: int, max_height: int
+) -> bytes:
+ playlist = upstream_bytes.decode("utf-8", errors="replace")
+ lines = [line.strip() for line in playlist.splitlines() if line.strip()]
+ child_uri: Optional[str] = None
+ stream_inf_line: Optional[str] = None
+
+ for idx, line in enumerate(lines):
+ if line.startswith("#EXT-X-STREAM-INF:"):
+ stream_inf_line = line
+ for child_line in lines[idx + 1 :]:
+ if child_line and not child_line.startswith("#"):
+ child_uri = child_line
+ break
+ break
+
+ if child_uri is None or stream_inf_line is None:
+ logger.warning("Unable to parse master playlist, returning upstream manifest")
+ return upstream_bytes
+
+ attrs = [
+ f'BANDWIDTH={max(_bandwidth_bits(bitrate), 1)}',
+ f'CODECS="{H264_CODEC}"',
+ ]
+
+ resolution_match = re.search(r"RESOLUTION=(\d+)x(\d+)", stream_inf_line)
+ if resolution_match:
+ width = int(resolution_match.group(1))
+ height = int(resolution_match.group(2))
+ out_width, out_height = _resolution_for_transcode(
+ width, height, max_width, max_height
+ )
+ attrs.insert(1, f"RESOLUTION={out_width}x{out_height}")
+
+ rewritten = [
+ "#EXTM3U",
+ "#EXT-X-STREAM-INF:" + ",".join(attrs),
+ child_uri,
+ "",
+ ]
+ return "\n".join(rewritten).encode()
+
+
+def _rewrite_media_playlist(upstream_bytes: bytes) -> bytes:
+ playlist = upstream_bytes.decode("utf-8", errors="replace")
+ output_lines: list[str] = []
+ segment_index = 0
+
+ for line in playlist.splitlines():
+ stripped = line.strip()
+ if stripped.startswith("#EXT-X-MAP:"):
+ continue
+
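+ # Each segment is transcoded by a separate FFmpeg run, so its TS timestamps restart;
+ # signal a discontinuity so players reset their timeline between segments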
+ if stripped.startswith("#EXTINF:") and segment_index > 0:
+ output_lines.append("#EXT-X-DISCONTINUITY")
+
+ if stripped and not stripped.startswith("#"):
+ output_lines.append(_proxy_segment_uri(stripped))
+ segment_index += 1
+ continue
+
+ output_lines.append(line)
+
+ if output_lines and output_lines[-1] != "":
+ output_lines.append("")
+
+ return "\n".join(output_lines).encode()
+
+
+async def _proxy_upstream_response(
+ client: httpx.AsyncClient, url: str, headers: dict[str, str]
+) -> Optional[httpx.Response]:
+ try:
+ upstream_resp = await client.get(url, headers=headers)
+ upstream_resp.raise_for_status()
+ return upstream_resp
+ except Exception as e:
+ logger.warning("Upstream fetch failed %s: %s", url, e)
+ return None
+
+
+async def _transcoded_segment_response(
+ source_url: str,
+ cache_key: str,
+ headers: dict[str, str],
+ init_bytes: Optional[bytes] = None,
+ bitrate: Optional[str] = None,
+ max_width: Optional[int] = None,
+ max_height: Optional[int] = None,
+) -> Response:
+ stream = await stream_transcode_segment_to_h264_ts(
+ _stream_source_segment_bytes(source_url, headers, init_bytes),
+ config.ffmpeg_path,
+ bitrate or config.h264_bitrate,
+ max_width or config.max_width,
+ max_height or config.max_height,
+ )
+
+ try:
+ first_chunk = await stream.first_chunk()
+ except TranscodeError as e:
+ await stream.aclose()
+ logger.warning("Transcode stream failed %s: %s", source_url, e)
+ return Response(status_code=502, content=b"Transcode failed")
+
+ async def body() -> AsyncIterator[bytes]:
+ try:
+ async for chunk in stream.iter_chunks(first_chunk):
+ yield chunk
+ except TranscodeError as e:
+ logger.warning("Transcode stream failed %s: %s", source_url, e)
+ raise
+ else:
+ cache.set(cache_key, stream.output_bytes)
+
+ return StreamingResponse(
+ body(),
+ media_type="video/mp2t",
+ headers={"Cache-Control": "private, max-age=300"},
+ )
+
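+ # Design note on _transcoded_segment_response above: first_chunk() is awaited
+ # before any StreamingResponse exists, so an immediate FFmpeg failure becomes
+ # a clean 502 rather than a truncated 200. Once streaming has begun, a
+ # failure can only abort the connection mid-body.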
+
+@app.get("/cache")
+async def cache_info() -> dict:
+ """Return cache size and entry count (for debugging)."""
+ return {
+ "size_bytes": cache.size_bytes(),
+ "size_mb": round(cache.size_bytes() / (1024 * 1024), 2),
+ "entries": cache.count(),
+ }
+
+
+@app.get("/health")
+async def health() -> dict:
+ return {"status": "ok"}
+
+
+@app.get("/{full_path:path}")
+async def vod_proxy(request: Request, full_path: str) -> Response:
+ """Handle /vod/... or /vod-transcoded/... (when path_prefix is set)."""
+ path = "/" + full_path.lstrip("/")
+ upstream_path = _upstream_path(path)
+ if upstream_path is None or not (
+ upstream_path == "/vod" or upstream_path.startswith("/vod/")
+ ):
+ return Response(status_code=404, content=b"Not found")
+ bitrate, max_width, max_height, upstream_query = _transcode_request_profile(request)
+ upstream_url = f"{config.upstream_base.rstrip('/')}{upstream_path}"
+ if upstream_query:
+ upstream_url += f"?{upstream_query}"
+
+ headers = {
+ k: v for k, v in request.headers.items() if k.lower() in FORWARD_HEADERS
+ }
+
+ if upstream_path.endswith(TRANSCODED_SEGMENT_SUFFIX):
+ # Key on the transcode profile too, so requests for different
+ # bitrates or resolutions never collide in the cache.
+ cache_key = f"{upstream_url}|{bitrate}|{max_width}x{max_height}"
+ cached = cache.get(cache_key)
+ if cached is not None:
+ return Response(
+ content=cached,
+ media_type="video/mp2t",
+ headers={"Cache-Control": "private, max-age=300"},
+ )
+
+ source_path = _source_segment_path(upstream_path)
+ source_url = f"{config.upstream_base.rstrip('/')}{source_path}"
+ if upstream_query:
+ source_url += f"?{upstream_query}"
+
+ # fMP4 media segments are not decodable standalone; fetch the stream's
+ # init segment so it can be prepended to the FFmpeg input.
+ init_bytes: Optional[bytes] = None
+ if source_path.endswith(".m4s"):
+ init_path = _init_upstream_path(source_path)
+ if init_path is None:
+ return Response(status_code=502, content=b"Init segment path inference failed")
+
+ async with httpx.AsyncClient(timeout=30.0) as client:
+ init_bytes = await _fetch_source_init_bytes(
+ client, init_path, upstream_query, headers
+ )
+
+ if init_bytes is None:
+ return Response(status_code=502, content=b"Init segment fetch failed")
+
+ return await _transcoded_segment_response(
+ source_url=source_url,
+ cache_key=cache_key,
+ headers=headers,
+ init_bytes=init_bytes,
+ bitrate=bitrate,
+ max_width=max_width,
+ max_height=max_height,
+ )
+
+ async with httpx.AsyncClient(timeout=30.0) as client:
+ if _is_master_playlist(upstream_path):
+ upstream_resp = await _proxy_upstream_response(client, upstream_url, headers)
+ if upstream_resp is None:
+ return Response(status_code=502, content=b"Upstream fetch failed")
+
+ return Response(
+ content=_rewrite_master_playlist(
+ upstream_resp.content, bitrate, max_width, max_height
+ ),
+ media_type="application/vnd.apple.mpegurl",
+ headers={"Cache-Control": "no-store"},
+ )
+
+ if upstream_path.endswith(".m3u8"):
+ upstream_resp = await _proxy_upstream_response(client, upstream_url, headers)
+ if upstream_resp is None:
+ return Response(status_code=502, content=b"Upstream fetch failed")
+
+ return Response(
+ content=_rewrite_media_playlist(upstream_resp.content),
+ media_type="application/vnd.apple.mpegurl",
+ headers={"Cache-Control": "no-store"},
+ )
+
+ upstream_resp = await _proxy_upstream_response(client, upstream_url, headers)
+ if upstream_resp is None:
+ return Response(status_code=502, content=b"Upstream fetch failed")
+
+ return Response(
+ content=upstream_resp.content,
+ media_type=upstream_resp.headers.get("content-type", "application/octet-stream"),
+ headers={"Cache-Control": "no-store"},
+ )
+
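+ # Illustrative request flow for vod_proxy above (camera name and layout are
+ # made up; the exact media-playlist paths depend on the upstream vod module):
+ #
+ # GET /vod/front/start/100/end/200/index.m3u8 -> master playlist, rewritten
+ # GET .../tracks-v1/index.m3u8 -> media playlist, rewritten
+ # GET <URI ending in TRANSCODED_SEGMENT_SUFFIX> -> transcoded TS, cached
+ # anything else under /vod -> proxied through verbatim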
+
+def run() -> None:
+ import uvicorn
+ uvicorn.run(
+ "transcode_proxy.main:app",
+ host=config.host,
+ port=config.port,
+ log_level="info",
+ )
+
+
+if __name__ == "__main__":
+ run()
diff --git a/transcode_proxy/requirements.txt b/transcode_proxy/requirements.txt
new file mode 100644
index 000000000..849740e1a
--- /dev/null
+++ b/transcode_proxy/requirements.txt
@@ -0,0 +1,5 @@
+# Dependencies for running the transcode proxy standalone (e.g. in a separate container).
+# Frigate's main container may already have these; the proxy can share the same env.
+fastapi>=0.100.0
+uvicorn>=0.22.0
+httpx>=0.24.0
diff --git a/transcode_proxy/transcode.py b/transcode_proxy/transcode.py
new file mode 100644
index 000000000..a72b8f612
--- /dev/null
+++ b/transcode_proxy/transcode.py
@@ -0,0 +1,256 @@
+"""Transcode media segments to H.264 transport stream bytes using FFmpeg."""
+import asyncio
+import logging
+import subprocess
+from collections.abc import AsyncIterable, AsyncIterator
+from typing import Optional
+
+logger = logging.getLogger(__name__)
+
+
+class TranscodeError(RuntimeError):
+ """Raised when FFmpeg cannot produce a valid transcoded segment."""
+
+
+def _build_scale_filter(max_width: int, max_height: int) -> Optional[str]:
+ if max_width <= 0 or max_height <= 0:
+ return None
+
+ return (
+ f"scale=w={max_width}:h={max_height}:"
+ "force_original_aspect_ratio=decrease:"
+ "force_divisible_by=2"
+ )
+
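+ # For example, _build_scale_filter(640, 480) returns
+ # "scale=w=640:h=480:force_original_aspect_ratio=decrease:force_divisible_by=2",
+ # i.e. fit within 640x480, preserve aspect ratio, and keep both dimensions
+ # even (libx264 requires even width/height for yuv420p).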
+
+def _build_ffmpeg_cmd(
+ ffmpeg_path: str,
+ bitrate: str,
+ max_width: int,
+ max_height: int,
+) -> list[str]:
+ cmd = [
+ ffmpeg_path,
+ "-hide_banner",
+ "-loglevel",
+ "error",
+ "-i",
+ "pipe:0",
+ "-an",
+ "-pix_fmt",
+ "yuv420p",
+ "-c:v",
+ "libx264",
+ "-preset",
+ "fast",
+ "-profile:v",
+ "high",
+ "-level:v",
+ "3.1",
+ "-b:v",
+ bitrate,
+ "-maxrate",
+ bitrate,
+ "-bufsize",
+ bitrate,
+ "-muxdelay",
+ "0",
+ "-muxpreload",
+ "0",
+ "-f",
+ "mpegts",
+ "-mpegts_flags",
+ "+initial_discontinuity",
+ "pipe:1",
+ ]
+
+ scale_filter = _build_scale_filter(max_width, max_height)
+ if scale_filter:
+ # Splice the filter in at index 7, right after "-an" and ahead of the
+ # encoder options, so scaling happens before x264 encoding.
+ cmd[7:7] = ["-vf", scale_filter]
+
+ return cmd
+
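+ # For example, _build_ffmpeg_cmd("ffmpeg", "2M", 640, 480) expands to roughly:
+ #
+ # ffmpeg -hide_banner -loglevel error -i pipe:0 -an \
+ # -vf scale=w=640:h=480:force_original_aspect_ratio=decrease:force_divisible_by=2 \
+ # -pix_fmt yuv420p -c:v libx264 -preset fast -profile:v high -level:v 3.1 \
+ # -b:v 2M -maxrate 2M -bufsize 2M -muxdelay 0 -muxpreload 0 \
+ # -f mpegts -mpegts_flags +initial_discontinuity pipe:1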
+
+class H264TSStream:
+ """Manage a streaming FFmpeg transcode process."""
+
+ def __init__(self, process: asyncio.subprocess.Process):
+ self._process = process
+ self._stderr = bytearray()
+ self._output = bytearray()
+ self._input_error: Exception | None = None
+ self._closed = False
+ self._stdin_task: asyncio.Task[None] | None = None
+ self._stderr_task: asyncio.Task[None] | None = None
+
+ @classmethod
+ async def start(
+ cls,
+ source_chunks: AsyncIterable[bytes],
+ ffmpeg_path: str,
+ bitrate: str = "2M",
+ max_width: int = 640,
+ max_height: int = 480,
+ ) -> "H264TSStream":
+ process = await asyncio.create_subprocess_exec(
+ *_build_ffmpeg_cmd(ffmpeg_path, bitrate, max_width, max_height),
+ stdin=asyncio.subprocess.PIPE,
+ stdout=asyncio.subprocess.PIPE,
+ stderr=asyncio.subprocess.PIPE,
+ )
+ stream = cls(process)
+ stream._stdin_task = asyncio.create_task(stream._feed_stdin(source_chunks))
+ stream._stderr_task = asyncio.create_task(stream._drain_stderr())
+ return stream
+
+ async def _feed_stdin(self, source_chunks: AsyncIterable[bytes]) -> None:
+ assert self._process.stdin is not None
+
+ try:
+ async for chunk in source_chunks:
+ if not chunk:
+ continue
+ self._process.stdin.write(chunk)
+ await self._process.stdin.drain()
+ except (BrokenPipeError, ConnectionResetError) as exc:
+ self._input_error = exc
+ except Exception as exc: # pragma: no cover - depends on upstream/network failures
+ self._input_error = exc
+ finally:
+ stdin = self._process.stdin
+ if stdin is not None and not stdin.is_closing():
+ stdin.close()
+ try:
+ await stdin.wait_closed()
+ except Exception:
+ pass
+
+ async def _drain_stderr(self) -> None:
+ assert self._process.stderr is not None
+
+ while True:
+ chunk = await self._process.stderr.read(8192)
+ if not chunk:
+ break
+ self._stderr.extend(chunk)
+
+ async def _read_stdout_chunk(self) -> bytes:
+ assert self._process.stdout is not None
+ chunk = await self._process.stdout.read(65536)
+ if chunk:
+ self._output.extend(chunk)
+ return chunk
+
+ def _error_message(self) -> str:
+ if self._input_error is not None:
+ return f"Source stream failed: {self._input_error}"
+ if self._stderr:
+ return self._stderr.decode(errors="replace")
+ return "unknown FFmpeg error"
+
+ async def _ensure_success(self) -> bytes:
+ if self._stdin_task is not None:
+ await self._stdin_task
+ if self._stderr_task is not None:
+ await self._stderr_task
+
+ returncode = await self._process.wait()
+ if returncode != 0:
+ raise TranscodeError(self._error_message())
+
+ return bytes(self._output)
+
+ async def first_chunk(self) -> bytes:
+ chunk = await self._read_stdout_chunk()
+ if chunk:
+ return chunk
+
+ try:
+ await self._ensure_success()
+ finally:
+ self._closed = True
+
+ raise TranscodeError("FFmpeg produced no output")
+
+ async def iter_chunks(self, first_chunk: bytes) -> AsyncIterator[bytes]:
+ # Re-yields the chunk already obtained via first_chunk(), then the rest.
+ try:
+ yield first_chunk
+ while True:
+ chunk = await self._read_stdout_chunk()
+ if not chunk:
+ break
+ yield chunk
+
+ await self._ensure_success()
+ finally:
+ await self.aclose()
+
+ async def aclose(self) -> None:
+ if self._closed:
+ return
+
+ self._closed = True
+
+ if self._process.returncode is None:
+ self._process.kill()
+ await self._process.wait()
+
+ for task in (self._stdin_task, self._stderr_task):
+ if task is None or task.done():
+ continue
+ task.cancel()
+ try:
+ await task
+ except asyncio.CancelledError:
+ pass
+
+ @property
+ def output_bytes(self) -> bytes:
+ return bytes(self._output)
+
+
+async def stream_transcode_segment_to_h264_ts(
+ source_chunks: AsyncIterable[bytes],
+ ffmpeg_path: str,
+ bitrate: str = "2M",
+ max_width: int = 640,
+ max_height: int = 480,
+) -> H264TSStream:
+ """Start an FFmpeg process that streams H.264 MPEG-TS output."""
+ return await H264TSStream.start(
+ source_chunks,
+ ffmpeg_path,
+ bitrate,
+ max_width,
+ max_height,
+ )
+
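+ # Minimal usage sketch (hypothetical caller; assumes "ffmpeg" is on PATH):
+ #
+ # async def collect(chunks: AsyncIterable[bytes]) -> bytes:
+ # stream = await stream_transcode_segment_to_h264_ts(chunks, "ffmpeg")
+ # try:
+ # first = await stream.first_chunk() # fails fast on FFmpeg errors
+ # out = bytearray()
+ # async for chunk in stream.iter_chunks(first): # re-yields `first`
+ # out.extend(chunk)
+ # return bytes(out)
+ # finally:
+ # await stream.aclose() # no-op if iter_chunks already closed it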
+
+def transcode_segment_to_h264_ts(
+ segment_bytes: bytes,
+ ffmpeg_path: str,
+ bitrate: str = "2M",
+ max_width: int = 640,
+ max_height: int = 480,
+) -> Optional[bytes]:
+ """Decode a segment and re-encode it as H.264 MPEG-TS bytes."""
+ try:
+ result = subprocess.run(
+ _build_ffmpeg_cmd(ffmpeg_path, bitrate, max_width, max_height),
+ input=segment_bytes,
+ capture_output=True,
+ timeout=60,
+ )
+ if result.returncode != 0:
+ logger.warning(
+ "FFmpeg transcode failed: %s",
+ result.stderr.decode(errors="replace") if result.stderr else "unknown",
+ )
+ return None
+ return result.stdout
+ except subprocess.TimeoutExpired:
+ logger.warning("FFmpeg transcode timed out")
+ return None
+ except Exception as e:
+ logger.warning("FFmpeg transcode error: %s", e)
+ return None
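+
+ # Minimal usage sketch for the blocking variant (file path is hypothetical):
+ #
+ # with open("segment.ts", "rb") as f:
+ # ts = transcode_segment_to_h264_ts(f.read(), "ffmpeg", "2M", 640, 480)
+ # if ts is None:
+ # ... # transcode failed; details were logged above
+ #
+ # Note that a bare .m4s fragment generally needs its init segment prepended
+ # before FFmpeg can decode it, as the async path does via init_bytes.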
diff --git a/web/src/components/overlay/ExportDialog.tsx b/web/src/components/overlay/ExportDialog.tsx
index 6912ebf46..8e81426d2 100644
--- a/web/src/components/overlay/ExportDialog.tsx
+++ b/web/src/components/overlay/ExportDialog.tsx
@@ -1,457 +1,469 @@
-import { useCallback, useState } from "react";
-import {
- Dialog,
- DialogContent,
- DialogDescription,
- DialogFooter,
- DialogHeader,
- DialogTitle,
- DialogTrigger,
-} from "../ui/dialog";
-import { Label } from "../ui/label";
-import { RadioGroup, RadioGroupItem } from "../ui/radio-group";
-import { Button } from "../ui/button";
-import { ExportMode } from "@/types/filter";
-import { FaArrowDown } from "react-icons/fa";
-import axios from "axios";
-import { toast } from "sonner";
-import { Input } from "../ui/input";
-import { TimeRange } from "@/types/timeline";
-import useSWR from "swr";
-import {
- Select,
- SelectContent,
- SelectItem,
- SelectSeparator,
- SelectTrigger,
- SelectValue,
-} from "../ui/select";
-import { isDesktop, isMobile } from "react-device-detect";
-import { Drawer, DrawerContent, DrawerTrigger } from "../ui/drawer";
-import SaveExportOverlay from "./SaveExportOverlay";
-import { baseUrl } from "@/api/baseUrl";
-import { cn } from "@/lib/utils";
-import { GenericVideoPlayer } from "../player/GenericVideoPlayer";
-import { useTranslation } from "react-i18next";
-import { ExportCase } from "@/types/export";
-import { CustomTimeSelector } from "./CustomTimeSelector";
-
-const EXPORT_OPTIONS = [
- "1",
- "4",
- "8",
- "12",
- "24",
- "timeline",
- "custom",
-] as const;
-type ExportOption = (typeof EXPORT_OPTIONS)[number];
-
-type ExportDialogProps = {
- camera: string;
- latestTime: number;
- currentTime: number;
- range?: TimeRange;
- mode: ExportMode;
- showPreview: boolean;
- setRange: (range: TimeRange | undefined) => void;
- setMode: (mode: ExportMode) => void;
- setShowPreview: (showPreview: boolean) => void;
-};
-export default function ExportDialog({
- camera,
- latestTime,
- currentTime,
- range,
- mode,
- showPreview,
- setRange,
- setMode,
- setShowPreview,
-}: ExportDialogProps) {
- const { t } = useTranslation(["components/dialog"]);
- const [name, setName] = useState("");
- const [selectedCaseId, setSelectedCaseId] = useState<string | undefined>(
- undefined,
- );
-
- const onStartExport = useCallback(() => {
- if (!range) {
- toast.error(t("export.toast.error.noVaildTimeSelected"), {
- position: "top-center",
- });
- return;
- }
-
- if (range.before < range.after) {
- toast.error(t("export.toast.error.endTimeMustAfterStartTime"), {
- position: "top-center",
- });
- return;
- }
-
- axios
- .post(
- `export/${camera}/start/${Math.round(range.after)}/end/${Math.round(range.before)}`,
- {
- playback: "realtime",
- name,
- export_case_id: selectedCaseId || undefined,
- },
- )
- .then((response) => {
- if (response.status == 200) {
- toast.success(t("export.toast.success"), {
- position: "top-center",
- action: (
-
- {t("export.toast.view")}
-
- ),
- });
- setName("");
- setSelectedCaseId(undefined);
- setRange(undefined);
- setMode("none");
- }
- })
- .catch((error) => {
- const errorMessage =
- error.response?.data?.message ||
- error.response?.data?.detail ||
- "Unknown error";
- toast.error(
- t("export.toast.error.failed", {
- error: errorMessage,
- }),
- { position: "top-center" },
- );
- });
- }, [camera, name, range, selectedCaseId, setRange, setName, setMode, t]);
-
- const handleCancel = useCallback(() => {
- setName("");
- setSelectedCaseId(undefined);
- setMode("none");
- setRange(undefined);
- }, [setMode, setRange]);
-
- const Overlay = isDesktop ? Dialog : Drawer;
- const Trigger = isDesktop ? DialogTrigger : DrawerTrigger;
- const Content = isDesktop ? DialogContent : DrawerContent;
-
- return (
- <>
-
- setShowPreview(true)}
- onSave={() => onStartExport()}
- onCancel={handleCancel}
- />
- {
- if (!open) {
- setMode("none");
- }
- }}
- >
- {!isDesktop && (
-
- {
- const now = new Date(latestTime * 1000);
- let start = 0;
- now.setHours(now.getHours() - 1);
- start = now.getTime() / 1000;
- setRange({
- before: latestTime,
- after: start,
- });
- setMode("select");
- }}
- >
-
- {isDesktop && (
-
- {t("menu.export", { ns: "common" })}
-
- )}
-
-
- )}
-
-
-
-
- >
- );
-}
-
-type ExportContentProps = {
- latestTime: number;
- currentTime: number;
- range?: TimeRange;
- name: string;
- selectedCaseId?: string;
- onStartExport: () => void;
- setName: (name: string) => void;
- setSelectedCaseId: (caseId: string | undefined) => void;
- setRange: (range: TimeRange | undefined) => void;
- setMode: (mode: ExportMode) => void;
- onCancel: () => void;
-};
-export function ExportContent({
- latestTime,
- currentTime,
- range,
- name,
- selectedCaseId,
- onStartExport,
- setName,
- setSelectedCaseId,
- setRange,
- setMode,
- onCancel,
-}: ExportContentProps) {
- const { t } = useTranslation(["components/dialog"]);
- const [selectedOption, setSelectedOption] = useState<ExportOption>("1");
- const { data: cases } = useSWR<ExportCase[]>("cases");
-
- const onSelectTime = useCallback(
- (option: ExportOption) => {
- setSelectedOption(option);
-
- const now = new Date(latestTime * 1000);
- let start = 0;
- switch (option) {
- case "1":
- now.setHours(now.getHours() - 1);
- start = now.getTime() / 1000;
- break;
- case "4":
- now.setHours(now.getHours() - 4);
- start = now.getTime() / 1000;
- break;
- case "8":
- now.setHours(now.getHours() - 8);
- start = now.getTime() / 1000;
- break;
- case "12":
- now.setHours(now.getHours() - 12);
- start = now.getTime() / 1000;
- break;
- case "24":
- now.setHours(now.getHours() - 24);
- start = now.getTime() / 1000;
- break;
- case "custom":
- start = latestTime - 3600;
- break;
- }
-
- setRange({
- before: latestTime,
- after: start,
- });
- },
- [latestTime, setRange],
- );
-
- return (
-
- {isDesktop && (
- <>
-
- {t("menu.export", { ns: "common" })}
-
-
- >
- )}
-
onSelectTime(value as ExportOption)}
- >
- {EXPORT_OPTIONS.map((opt) => {
- return (
-
-
-
- {isNaN(parseInt(opt))
- ? opt == "timeline"
- ? t("export.time.fromTimeline")
- : t("export.time." + opt)
- : t("export.time.lastHour", {
- count: parseInt(opt),
- })}
-
-
- );
- })}
-
- {selectedOption == "custom" && (
-
- )}
-
setName(e.target.value)}
- />
-
-
- {t("export.case.label", { defaultValue: "Case (optional)" })}
-
-
- setSelectedCaseId(value === "none" ? undefined : value)
- }
- >
-
-
-
-
-
- {t("label.none", { ns: "common" })}
-
- {cases
- ?.sort((a, b) => a.name.localeCompare(b.name))
- .map((caseItem) => (
-
- {caseItem.name}
-
- ))}
-
-
-
- {isDesktop &&
}
-
-
- {t("button.cancel", { ns: "common" })}
-
- {
- if (selectedOption == "timeline") {
- setRange({ before: currentTime + 30, after: currentTime - 30 });
- setMode("timeline");
- } else {
- onStartExport();
- setSelectedOption("1");
- setMode("none");
- }
- }}
- >
- {selectedOption == "timeline"
- ? t("export.select")
- : t("export.export")}
-
-
-
- );
-}
-
-type ExportPreviewDialogProps = {
- camera: string;
- range?: TimeRange;
- showPreview: boolean;
- setShowPreview: (showPreview: boolean) => void;
-};
-
-export function ExportPreviewDialog({
- camera,
- range,
- showPreview,
- setShowPreview,
-}: ExportPreviewDialogProps) {
- const { t } = useTranslation(["components/dialog"]);
- if (!range) {
- return null;
- }
-
- const source = `${baseUrl}vod/${camera}/start/${range.after}/end/${range.before}/index.m3u8`;
-
- return (
-
-
-
- {t("export.fromTimeline.previewExport")}
-
- {t("export.fromTimeline.previewExport")}
-
-
-
-
-
- );
-}
+import { useCallback, useState } from "react";
+import {
+ Dialog,
+ DialogContent,
+ DialogDescription,
+ DialogFooter,
+ DialogHeader,
+ DialogTitle,
+ DialogTrigger,
+} from "../ui/dialog";
+import { Label } from "../ui/label";
+import { RadioGroup, RadioGroupItem } from "../ui/radio-group";
+import { Button } from "../ui/button";
+import { ExportMode } from "@/types/filter";
+import { FaArrowDown } from "react-icons/fa";
+import axios from "axios";
+import { toast } from "sonner";
+import { Input } from "../ui/input";
+import { TimeRange } from "@/types/timeline";
+import useSWR from "swr";
+import {
+ Select,
+ SelectContent,
+ SelectItem,
+ SelectSeparator,
+ SelectTrigger,
+ SelectValue,
+} from "../ui/select";
+import { isDesktop, isMobile } from "react-device-detect";
+import { Drawer, DrawerContent, DrawerTrigger } from "../ui/drawer";
+import SaveExportOverlay from "./SaveExportOverlay";
+import { baseUrl } from "@/api/baseUrl";
+import { cn } from "@/lib/utils";
+import { GenericVideoPlayer } from "../player/GenericVideoPlayer";
+import { useTranslation } from "react-i18next";
+import { ExportCase } from "@/types/export";
+import { CustomTimeSelector } from "./CustomTimeSelector";
+import useRecordingPlaybackSource from "@/hooks/use-recording-playback-source";
+
+const EXPORT_OPTIONS = [
+ "1",
+ "4",
+ "8",
+ "12",
+ "24",
+ "timeline",
+ "custom",
+] as const;
+type ExportOption = (typeof EXPORT_OPTIONS)[number];
+
+type ExportDialogProps = {
+ camera: string;
+ latestTime: number;
+ currentTime: number;
+ range?: TimeRange;
+ mode: ExportMode;
+ showPreview: boolean;
+ setRange: (range: TimeRange | undefined) => void;
+ setMode: (mode: ExportMode) => void;
+ setShowPreview: (showPreview: boolean) => void;
+};
+export default function ExportDialog({
+ camera,
+ latestTime,
+ currentTime,
+ range,
+ mode,
+ showPreview,
+ setRange,
+ setMode,
+ setShowPreview,
+}: ExportDialogProps) {
+ const { t } = useTranslation(["components/dialog"]);
+ const [name, setName] = useState("");
+ const [selectedCaseId, setSelectedCaseId] = useState<string | undefined>(
+ undefined,
+ );
+
+ const onStartExport = useCallback(() => {
+ if (!range) {
+ toast.error(t("export.toast.error.noVaildTimeSelected"), {
+ position: "top-center",
+ });
+ return;
+ }
+
+ if (range.before < range.after) {
+ toast.error(t("export.toast.error.endTimeMustAfterStartTime"), {
+ position: "top-center",
+ });
+ return;
+ }
+
+ axios
+ .post(
+ `export/${camera}/start/${Math.round(range.after)}/end/${Math.round(range.before)}`,
+ {
+ playback: "realtime",
+ name,
+ export_case_id: selectedCaseId || undefined,
+ },
+ )
+ .then((response) => {
+ if (response.status == 200) {
+ toast.success(t("export.toast.success"), {
+ position: "top-center",
+ action: (
+
+ {t("export.toast.view")}
+
+ ),
+ });
+ setName("");
+ setSelectedCaseId(undefined);
+ setRange(undefined);
+ setMode("none");
+ }
+ })
+ .catch((error) => {
+ const errorMessage =
+ error.response?.data?.message ||
+ error.response?.data?.detail ||
+ "Unknown error";
+ toast.error(
+ t("export.toast.error.failed", {
+ error: errorMessage,
+ }),
+ { position: "top-center" },
+ );
+ });
+ }, [camera, name, range, selectedCaseId, setRange, setName, setMode, t]);
+
+ const handleCancel = useCallback(() => {
+ setName("");
+ setSelectedCaseId(undefined);
+ setMode("none");
+ setRange(undefined);
+ }, [setMode, setRange]);
+
+ const Overlay = isDesktop ? Dialog : Drawer;
+ const Trigger = isDesktop ? DialogTrigger : DrawerTrigger;
+ const Content = isDesktop ? DialogContent : DrawerContent;
+
+ return (
+ <>
+
+ setShowPreview(true)}
+ onSave={() => onStartExport()}
+ onCancel={handleCancel}
+ />
+ {
+ if (!open) {
+ setMode("none");
+ }
+ }}
+ >
+ {!isDesktop && (
+
+ {
+ const now = new Date(latestTime * 1000);
+ let start = 0;
+ now.setHours(now.getHours() - 1);
+ start = now.getTime() / 1000;
+ setRange({
+ before: latestTime,
+ after: start,
+ });
+ setMode("select");
+ }}
+ >
+
+ {isDesktop && (
+
+ {t("menu.export", { ns: "common" })}
+
+ )}
+
+
+ )}
+
+
+
+
+ >
+ );
+}
+
+type ExportContentProps = {
+ latestTime: number;
+ currentTime: number;
+ range?: TimeRange;
+ name: string;
+ selectedCaseId?: string;
+ onStartExport: () => void;
+ setName: (name: string) => void;
+ setSelectedCaseId: (caseId: string | undefined) => void;
+ setRange: (range: TimeRange | undefined) => void;
+ setMode: (mode: ExportMode) => void;
+ onCancel: () => void;
+};
+export function ExportContent({
+ latestTime,
+ currentTime,
+ range,
+ name,
+ selectedCaseId,
+ onStartExport,
+ setName,
+ setSelectedCaseId,
+ setRange,
+ setMode,
+ onCancel,
+}: ExportContentProps) {
+ const { t } = useTranslation(["components/dialog"]);
+ const [selectedOption, setSelectedOption] = useState<ExportOption>("1");
+ const { data: cases } = useSWR<ExportCase[]>("cases");
+
+ const onSelectTime = useCallback(
+ (option: ExportOption) => {
+ setSelectedOption(option);
+
+ const now = new Date(latestTime * 1000);
+ let start = 0;
+ switch (option) {
+ case "1":
+ now.setHours(now.getHours() - 1);
+ start = now.getTime() / 1000;
+ break;
+ case "4":
+ now.setHours(now.getHours() - 4);
+ start = now.getTime() / 1000;
+ break;
+ case "8":
+ now.setHours(now.getHours() - 8);
+ start = now.getTime() / 1000;
+ break;
+ case "12":
+ now.setHours(now.getHours() - 12);
+ start = now.getTime() / 1000;
+ break;
+ case "24":
+ now.setHours(now.getHours() - 24);
+ start = now.getTime() / 1000;
+ break;
+ case "custom":
+ start = latestTime - 3600;
+ break;
+ }
+
+ setRange({
+ before: latestTime,
+ after: start,
+ });
+ },
+ [latestTime, setRange],
+ );
+
+ return (
+
+ {isDesktop && (
+ <>
+
+ {t("menu.export", { ns: "common" })}
+
+
+ >
+ )}
+
onSelectTime(value as ExportOption)}
+ >
+ {EXPORT_OPTIONS.map((opt) => {
+ return (
+
+
+
+ {isNaN(parseInt(opt))
+ ? opt == "timeline"
+ ? t("export.time.fromTimeline")
+ : t("export.time." + opt)
+ : t("export.time.lastHour", {
+ count: parseInt(opt),
+ })}
+
+
+ );
+ })}
+
+ {selectedOption == "custom" && (
+
+ )}
+
setName(e.target.value)}
+ />
+
+
+ {t("export.case.label", { defaultValue: "Case (optional)" })}
+
+
+ setSelectedCaseId(value === "none" ? undefined : value)
+ }
+ >
+
+
+
+
+
+ {t("label.none", { ns: "common" })}
+
+ {cases
+ ?.sort((a, b) => a.name.localeCompare(b.name))
+ .map((caseItem) => (
+
+ {caseItem.name}
+
+ ))}
+
+
+
+ {isDesktop &&
}
+
+
+ {t("button.cancel", { ns: "common" })}
+
+ {
+ if (selectedOption == "timeline") {
+ setRange({ before: currentTime + 30, after: currentTime - 30 });
+ setMode("timeline");
+ } else {
+ onStartExport();
+ setSelectedOption("1");
+ setMode("none");
+ }
+ }}
+ >
+ {selectedOption == "timeline"
+ ? t("export.select")
+ : t("export.export")}
+
+
+
+ );
+}
+
+type ExportPreviewDialogProps = {
+ camera: string;
+ range?: TimeRange;
+ showPreview: boolean;
+ setShowPreview: (showPreview: boolean) => void;
+};
+
+export function ExportPreviewDialog({
+ camera,
+ range,
+ showPreview,
+ setShowPreview,
+}: ExportPreviewDialogProps) {
+ const { t } = useTranslation(["components/dialog"]);
+ // Hooks must run unconditionally, so build the playback source before the
+ // early return below; the placeholder path is never fetched (enabled: false).
+ const vodPath = range
+ ? `/vod/${camera}/start/${range.after}/end/${range.before}/index.m3u8`
+ : `/vod/${camera}/start/0/end/0/index.m3u8`;
+ const playbackSource = useRecordingPlaybackSource({
+ camera,
+ after: range?.after ?? 0,
+ before: range?.before ?? 0,
+ vodPath,
+ enabled: !!range,
+ });
+
+ if (!range) {
+ return null;
+ }
+
+ const source = playbackSource ?? `${baseUrl}${vodPath}`;
+
+ return (
+
+
+
+ {t("export.fromTimeline.previewExport")}
+
+ {t("export.fromTimeline.previewExport")}
+
+
+
+
+
+ );
+}
diff --git a/web/src/components/overlay/detail/SearchDetailDialog.tsx b/web/src/components/overlay/detail/SearchDetailDialog.tsx
index 1c58add7c..683eecb74 100644
--- a/web/src/components/overlay/detail/SearchDetailDialog.tsx
+++ b/web/src/components/overlay/detail/SearchDetailDialog.tsx
@@ -1,1878 +1,1887 @@
-import { isDesktop, isIOS, isMobile, isSafari } from "react-device-detect";
-import { SearchResult } from "@/types/search";
-import useSWR from "swr";
-import { FrigateConfig } from "@/types/frigateConfig";
-import { useFormattedTimestamp } from "@/hooks/use-date-utils";
-import { getIconForLabel } from "@/utils/iconUtil";
-import { useApiHost } from "@/api";
-import { Button } from "../../ui/button";
-import {
- useCallback,
- useEffect,
- useLayoutEffect,
- useMemo,
- useRef,
- useState,
-} from "react";
-import axios from "axios";
-import { toast } from "sonner";
-import { Textarea } from "../../ui/textarea";
-import { ScrollArea, ScrollBar } from "@/components/ui/scroll-area";
-import { ToggleGroup, ToggleGroupItem } from "@/components/ui/toggle-group";
-import useOptimisticState from "@/hooks/use-optimistic-state";
-import {
- Dialog,
- DialogContent,
- DialogDescription,
- DialogHeader,
- DialogTitle,
-} from "@/components/ui/dialog";
-import { Event } from "@/types/event";
-import { baseUrl } from "@/api/baseUrl";
-import { cn } from "@/lib/utils";
-import ActivityIndicator from "@/components/indicators/activity-indicator";
-import {
- FaArrowRight,
- FaCheckCircle,
- FaChevronLeft,
- FaChevronRight,
- FaMicrophone,
- FaCheck,
- FaTimes,
-} from "react-icons/fa";
-import { TrackingDetails } from "./TrackingDetails";
-import { AnnotationSettingsPane } from "./AnnotationSettingsPane";
-import { DetailStreamProvider } from "@/context/detail-stream-context";
-import {
- MobilePage,
- MobilePageContent,
- MobilePageDescription,
- MobilePageHeader,
- MobilePageTitle,
-} from "@/components/mobile/MobilePage";
-import {
- Tooltip,
- TooltipContent,
- TooltipTrigger,
-} from "@/components/ui/tooltip";
-import { REVIEW_PADDING } from "@/types/review";
-import { capitalizeAll } from "@/utils/stringUtil";
-import useGlobalMutation from "@/hooks/use-global-mutate";
-import DetailActionsMenu from "./DetailActionsMenu";
-import {
- DropdownMenu,
- DropdownMenuContent,
- DropdownMenuItem,
- DropdownMenuTrigger,
-} from "@/components/ui/dropdown-menu";
-import { TransformComponent, TransformWrapper } from "react-zoom-pan-pinch";
-import useImageLoaded from "@/hooks/use-image-loaded";
-import ImageLoadingIndicator from "@/components/indicators/ImageLoadingIndicator";
-import { GenericVideoPlayer } from "@/components/player/GenericVideoPlayer";
-import {
- Popover,
- PopoverContent,
- PopoverTrigger,
-} from "@/components/ui/popover";
-import {
- Drawer,
- DrawerContent,
- DrawerTitle,
- DrawerTrigger,
-} from "@/components/ui/drawer";
-import { LuInfo } from "react-icons/lu";
-import { TooltipPortal } from "@radix-ui/react-tooltip";
-import { FaPencilAlt } from "react-icons/fa";
-import TextEntryDialog from "@/components/overlay/dialog/TextEntryDialog";
-import AttributeSelectDialog from "@/components/overlay/dialog/AttributeSelectDialog";
-import { Trans, useTranslation } from "react-i18next";
-import { useIsAdmin } from "@/hooks/use-is-admin";
-import { getTranslatedLabel } from "@/utils/i18n";
-import { CameraNameLabel } from "@/components/camera/FriendlyNameLabel";
-import { DialogPortal } from "@radix-ui/react-dialog";
-import { useDetailStream } from "@/context/detail-stream-context";
-import { PiSlidersHorizontalBold } from "react-icons/pi";
-import { HiSparkles } from "react-icons/hi";
-import { useAudioTranscriptionProcessState } from "@/api/ws";
-
-const SEARCH_TABS = ["snapshot", "tracking_details"] as const;
-export type SearchTab = (typeof SEARCH_TABS)[number];
-
-type TabsWithActionsProps = {
- search: SearchResult;
- searchTabs: SearchTab[];
- pageToggle: SearchTab;
- setPageToggle: (v: SearchTab) => void;
- config?: FrigateConfig;
- setSearch: (s: SearchResult | undefined) => void;
- setSimilarity?: () => void;
- isPopoverOpen: boolean;
- setIsPopoverOpen: (open: boolean) => void;
- dialogContainer: HTMLDivElement | null;
-};
-
-function TabsWithActions({
- search,
- searchTabs,
- pageToggle,
- setPageToggle,
- config,
- setSearch,
- setSimilarity,
- isPopoverOpen,
- setIsPopoverOpen,
- dialogContainer,
-}: TabsWithActionsProps) {
- const { t } = useTranslation(["views/explore", "views/faceLibrary"]);
-
- useEffect(() => {
- if (pageToggle !== "tracking_details" && isPopoverOpen) {
- setIsPopoverOpen(false);
- }
- }, [pageToggle, isPopoverOpen, setIsPopoverOpen]);
-
- if (!search) return null;
-
- return (
-
-
-
-
{
- if (value) {
- setPageToggle(value);
- }
- }}
- >
- {Object.values(searchTabs).map((item) => (
-
-
- {item === "snapshot"
- ? search?.has_snapshot
- ? t("type.snapshot")
- : t("type.thumbnail")
- : t(`type.${item}`)}
-
-
- ))}
-
-
-
-
-
- {pageToggle === "tracking_details" && (
-
- )}
-
- );
-}
-
-type AnnotationSettingsProps = {
- search: SearchResult;
- open: boolean;
- setIsOpen: (open: boolean) => void;
- container?: HTMLElement | null;
-};
-
-function AnnotationSettings({
- search,
- open,
- setIsOpen,
- container,
-}: AnnotationSettingsProps) {
- const { t } = useTranslation(["views/explore"]);
- const { annotationOffset, setAnnotationOffset } = useDetailStream();
-
- const ignoreNextOpenRef = useRef(false);
-
- useEffect(() => {
- setIsOpen(false);
- ignoreNextOpenRef.current = false;
- }, [search, setIsOpen]);
-
- const handleOpenChange = useCallback(
- (nextOpen: boolean) => {
- if (nextOpen) {
- if (ignoreNextOpenRef.current) {
- ignoreNextOpenRef.current = false;
- return;
- }
- setIsOpen(true);
- } else {
- setIsOpen(false);
- }
- },
- [setIsOpen],
- );
-
- const registerTriggerCloseIntent = useCallback(() => {
- if (open) {
- ignoreNextOpenRef.current = true;
- }
- }, [open]);
-
- const Overlay = isDesktop ? Popover : Drawer;
- const Trigger = isDesktop ? PopoverTrigger : DrawerTrigger;
- const Content = isDesktop ? PopoverContent : DrawerContent;
- const Title = isDesktop ? "div" : DrawerTitle;
- const contentProps = isDesktop
- ? { align: "end" as const, container: container ?? undefined }
- : {};
-
- return (
-
-
-
- {
- if (open && (event.key === "Enter" || event.key === " ")) {
- registerTriggerCloseIntent();
- }
- }}
- >
-
-
-
-
- {t("trackingDetails.adjustAnnotationSettings")}
-
-
-
-
-
-
- );
-}
-
-type DialogContentComponentProps = {
- page: SearchTab;
- search: SearchResult;
- isDesktop: boolean;
- apiHost: string;
- config?: FrigateConfig;
- searchTabs: SearchTab[];
- pageToggle: SearchTab;
- setPageToggle: (v: SearchTab) => void;
- setSearch: (s: SearchResult | undefined) => void;
- setInputFocused: React.Dispatch>;
- setSimilarity?: () => void;
- isPopoverOpen: boolean;
- setIsPopoverOpen: (open: boolean) => void;
- dialogContainer: HTMLDivElement | null;
- setShowNavigationButtons: React.Dispatch>;
-};
-
-function DialogContentComponent({
- page,
- search,
- isDesktop,
- apiHost,
- config,
- searchTabs,
- pageToggle,
- setPageToggle,
- setSearch,
- setInputFocused,
- setSimilarity,
- isPopoverOpen,
- setIsPopoverOpen,
- dialogContainer,
- setShowNavigationButtons,
-}: DialogContentComponentProps) {
- if (page === "tracking_details") {
- return (
-
- ) : undefined
- }
- />
- );
- }
-
- // Snapshot page content
- const snapshotElement = search.has_snapshot ? (
-
- ) : (
-
-
-
- );
-
- if (isDesktop) {
- return (
-
-
- {snapshotElement}
-
-
-
- );
- }
-
- // mobile
- return (
- <>
- {snapshotElement}
-
- >
- );
-}
-
-type SearchDetailDialogProps = {
- search?: SearchResult;
- page: SearchTab;
- setSearch: (search: SearchResult | undefined) => void;
- setSearchPage: (page: SearchTab) => void;
- setSimilarity?: () => void;
- setInputFocused: React.Dispatch>;
- onPrevious?: () => void;
- onNext?: () => void;
-};
-
-export default function SearchDetailDialog({
- search,
- page,
- setSearch,
- setSearchPage,
- setSimilarity,
- setInputFocused,
- onPrevious,
- onNext,
-}: SearchDetailDialogProps) {
- const { t } = useTranslation(["views/explore", "views/faceLibrary"]);
- const { data: config } = useSWR("config", {
- revalidateOnFocus: false,
- });
- const apiHost = useApiHost();
-
- // tabs
-
- const [pageToggle, setPageToggle] = useOptimisticState(
- page,
- setSearchPage,
- 100,
- );
-
- // dialog and mobile page
-
- const [isOpen, setIsOpen] = useState(search != undefined);
- const [isPopoverOpen, setIsPopoverOpen] = useState(false);
- const [showNavigationButtons, setShowNavigationButtons] = useState(false);
- const dialogContentRef = useRef(null);
- const [dialogContainer, setDialogContainer] = useState(
- null,
- );
-
- const handleOpenChange = useCallback(
- (open: boolean) => {
- setIsOpen(open);
- if (!open) {
- setIsPopoverOpen(false);
- // short timeout to allow the mobile page animation
- // to complete before updating the state
- setTimeout(() => {
- setSearch(undefined);
- }, 300);
- }
- },
- [setSearch],
- );
-
- useLayoutEffect(() => {
- setDialogContainer(dialogContentRef.current);
- }, [isOpen, search?.id]);
-
- useEffect(() => {
- if (search) {
- setIsOpen(search != undefined);
- }
- }, [search]);
-
- useEffect(() => {
- if (!isDesktop || !onPrevious || !onNext) {
- setShowNavigationButtons(false);
- return;
- }
-
- setShowNavigationButtons(isOpen);
- }, [isOpen, onNext, onPrevious]);
-
- // show/hide annotation settings is handled inside TabsWithActions
-
- const searchTabs = useMemo(() => {
- if (!config || !search) {
- return [];
- }
-
- const views = [...SEARCH_TABS];
-
- if (!search.has_clip) {
- const index = views.indexOf("tracking_details");
- views.splice(index, 1);
- }
-
- return views;
- }, [config, search]);
-
- useEffect(() => {
- if (searchTabs.length == 0) {
- return;
- }
-
- if (!searchTabs.includes(pageToggle)) {
- setSearchPage("snapshot");
- }
- }, [pageToggle, searchTabs, setSearchPage]);
-
- if (!search) {
- return;
- }
-
- // content
-
- const Overlay = isDesktop ? Dialog : MobilePage;
- const Content = isDesktop ? DialogContent : MobilePageContent;
- const Header = isDesktop ? DialogHeader : MobilePageHeader;
- const Title = isDesktop ? DialogTitle : MobilePageTitle;
- const Description = isDesktop ? DialogDescription : MobilePageDescription;
-
- return (
-
-
- {isDesktop && onPrevious && onNext && showNavigationButtons && (
-
-
-
-
-
- {
- e.stopPropagation();
- onPrevious?.();
- }}
- className="nav-button pointer-events-auto absolute -left-16 rounded-lg border bg-secondary/60 p-2 text-primary-variant shadow-lg backdrop-blur-sm hover:bg-secondary/80 hover:text-primary"
- aria-label={t("searchResult.previousTrackedObject")}
- >
-
-
-
-
- {t("searchResult.previousTrackedObject")}
-
-
-
-
-
- {
- e.stopPropagation();
- onNext?.();
- }}
- className="nav-button pointer-events-auto absolute -right-16 rounded-lg border bg-secondary/60 p-2 text-primary-variant shadow-lg backdrop-blur-sm hover:bg-secondary/80 hover:text-primary"
- aria-label={t("searchResult.nextTrackedObject")}
- >
-
-
-
-
- {t("searchResult.nextTrackedObject")}
-
-
-
-
-
- )}
- {
- if (isPopoverOpen) {
- event.preventDefault();
- }
- }}
- onInteractOutside={(e) => {
- if (isPopoverOpen) {
- e.preventDefault();
- }
- const target = e.target as HTMLElement;
- if (target.closest(".nav-button")) {
- e.preventDefault();
- }
- }}
- >
-
- {t("trackedObjectDetails")}
-
- {t("trackedObjectDetails")}
-
-
-
-
- {!isDesktop && (
-
-
-
- )}
-
-
-
-
-
- );
-}
-
-type ObjectDetailsTabProps = {
- search: SearchResult;
- config?: FrigateConfig;
- setSearch: (search: SearchResult | undefined) => void;
- setInputFocused: React.Dispatch>;
- setShowNavigationButtons?: React.Dispatch>;
-};
-function ObjectDetailsTab({
- search,
- config,
- setSearch,
- setInputFocused,
- setShowNavigationButtons,
-}: ObjectDetailsTabProps) {
- const { t, i18n } = useTranslation([
- "views/explore",
- "views/faceLibrary",
- "components/dialog",
- ]);
-
- const apiHost = useApiHost();
- const hasCustomClassificationModels = useMemo(
- () => Object.keys(config?.classification?.custom ?? {}).length > 0,
- [config],
- );
- const { data: modelAttributes } = useSWR>(
- hasCustomClassificationModels && search
- ? `classification/attributes?object_type=${encodeURIComponent(search.label)}&group_by_model=true`
- : null,
- );
-
- // mutation / revalidation
-
- const mutate = useGlobalMutation();
-
- // Helper to map over SWR cached search results while preserving
- // either paginated format (SearchResult[][]) or flat format (SearchResult[])
- const mapSearchResults = useCallback(
- (
- currentData: SearchResult[][] | SearchResult[] | undefined,
- fn: (event: SearchResult) => SearchResult,
- ) => {
- if (!currentData) return currentData;
- if (Array.isArray(currentData[0])) {
- return (currentData as SearchResult[][]).map((page) => page.map(fn));
- }
- return (currentData as SearchResult[]).map(fn);
- },
- [],
- );
-
- // users
-
- const isAdmin = useIsAdmin();
-
- // data
-
- const [desc, setDesc] = useState(search?.data.description);
- const [isSubLabelDialogOpen, setIsSubLabelDialogOpen] = useState(false);
- const [isLPRDialogOpen, setIsLPRDialogOpen] = useState(false);
- const [isAttributesDialogOpen, setIsAttributesDialogOpen] = useState(false);
- const [isEditingDesc, setIsEditingDesc] = useState(false);
- const originalDescRef = useRef(null);
-
- const handleDescriptionFocus = useCallback(() => {
- setInputFocused(true);
- }, [setInputFocused]);
-
- const handleDescriptionBlur = useCallback(() => {
- setInputFocused(false);
- }, [setInputFocused]);
-
- // we have to make sure the current selected search item stays in sync
- useEffect(() => setDesc(search?.data.description ?? ""), [search]);
-
- useEffect(() => setIsAttributesDialogOpen(false), [search?.id]);
-
- useEffect(() => {
- const anyDialogOpen =
- isSubLabelDialogOpen || isLPRDialogOpen || isAttributesDialogOpen;
- setShowNavigationButtons?.(!anyDialogOpen);
- }, [
- isSubLabelDialogOpen,
- isLPRDialogOpen,
- isAttributesDialogOpen,
- setShowNavigationButtons,
- ]);
-
- const formattedDate = useFormattedTimestamp(
- search?.start_time ?? 0,
- config?.ui.time_format == "24hour"
- ? t("time.formattedTimestampMonthDayYearHourMinute.24hour", {
- ns: "common",
- })
- : t("time.formattedTimestampMonthDayYearHourMinute.12hour", {
- ns: "common",
- }),
- config?.ui.timezone,
- );
-
- const topScore = useMemo(() => {
- if (!search) {
- return 0;
- }
-
- const value = search.data.top_score ?? search.top_score ?? 0;
-
- return Math.round(value * 100);
- }, [search]);
-
- const subLabelScore = useMemo(() => {
- if (!search) {
- return undefined;
- }
-
- if (search.sub_label && search.data?.sub_label_score) {
- return Math.round((search.data?.sub_label_score ?? 0) * 100);
- } else {
- return undefined;
- }
- }, [search]);
-
- const recognizedLicensePlateScore = useMemo(() => {
- if (!search) {
- return undefined;
- }
-
- if (
- search.data.recognized_license_plate &&
- search.data?.recognized_license_plate_score
- ) {
- return Math.round(
- (search.data?.recognized_license_plate_score ?? 0) * 100,
- );
- } else {
- return undefined;
- }
- }, [search]);
-
- const snapScore = useMemo(() => {
- if (!search?.has_snapshot) {
- return undefined;
- }
-
- const value = search.data.score ?? search.score ?? 0;
-
- return Math.floor(value * 100);
- }, [search]);
-
- const averageEstimatedSpeed = useMemo(() => {
- if (!search || !search.data?.average_estimated_speed) {
- return undefined;
- }
-
- if (search.data?.average_estimated_speed != 0) {
- return search.data?.average_estimated_speed.toFixed(1);
- } else {
- return undefined;
- }
- }, [search]);
-
- const velocityAngle = useMemo(() => {
- if (!search || !search.data?.velocity_angle) {
- return undefined;
- }
-
- if (search.data?.velocity_angle != 0) {
- return search.data?.velocity_angle.toFixed(1);
- } else {
- return undefined;
- }
- }, [search]);
-
- // Extract current attribute selections grouped by model
- const selectedAttributesByModel = useMemo(() => {
- if (!search || !modelAttributes) {
- return {};
- }
-
- const dataAny = search.data as Record;
- const selections: Record = {};
-
- // Initialize all models with null
- Object.keys(modelAttributes).forEach((modelName) => {
- selections[modelName] = null;
- });
-
- // Find which attribute is selected for each model
- Object.keys(modelAttributes).forEach((modelName) => {
- const value = dataAny[modelName];
- if (
- typeof value === "string" &&
- modelAttributes[modelName].includes(value)
- ) {
- selections[modelName] = value;
- }
- });
-
- return selections;
- }, [search, modelAttributes]);
-
- // Get flat list of selected attributes for display
- const eventAttributes = useMemo(() => {
- return Object.values(selectedAttributesByModel)
- .filter((attr): attr is string => attr !== null)
- .sort((a, b) => a.localeCompare(b));
- }, [selectedAttributesByModel]);
-
- const isEventsKey = useCallback((key: unknown): boolean => {
- const candidate = Array.isArray(key) ? key[0] : key;
- const EVENTS_KEY_PATTERNS = ["events", "events/search", "events/explore"];
- return (
- typeof candidate === "string" &&
- EVENTS_KEY_PATTERNS.some((p) => candidate.includes(p))
- );
- }, []);
-
- const updateDescription = useCallback(() => {
- if (!search) {
- return;
- }
-
- axios
- .post(`events/${search.id}/description`, { description: desc })
- .then((resp) => {
- if (resp.status == 200) {
- toast.success(t("details.tips.descriptionSaved"), {
- position: "top-center",
- });
- }
- mutate(
- (key) => isEventsKey(key),
- (currentData: SearchResult[][] | SearchResult[] | undefined) =>
- mapSearchResults(currentData, (event) =>
- event.id === search.id
- ? { ...event, data: { ...event.data, description: desc } }
- : event,
- ),
- {
- optimisticData: true,
- rollbackOnError: true,
- revalidate: false,
- },
- );
- setSearch({ ...search, data: { ...search.data, description: desc } });
- })
- .catch((error) => {
- const errorMessage =
- error.response?.data?.message ||
- error.response?.data?.detail ||
- "Unknown error";
- toast.error(
- t("details.tips.saveDescriptionFailed", {
- errorMessage,
- }),
- {
- position: "top-center",
- },
- );
- setDesc(search.data.description);
- });
- }, [desc, search, mutate, t, mapSearchResults, isEventsKey, setSearch]);
-
- const regenerateDescription = useCallback(
- (source: "snapshot" | "thumbnails") => {
- if (!search) {
- return;
- }
-
- axios
- .put(`events/${search.id}/description/regenerate?source=${source}`)
- .then((resp) => {
- if (resp.status == 200) {
- toast.success(
- t("details.item.toast.success.regenerate", {
- provider: capitalizeAll(
- config?.genai.provider.replaceAll("_", " ") ??
- t("generativeAI"),
- ),
- }),
- {
- position: "top-center",
- duration: 7000,
- },
- );
- }
- })
- .catch((error) => {
- const errorMessage =
- error.response?.data?.message ||
- error.response?.data?.detail ||
- "Unknown error";
- toast.error(
- t("details.item.toast.error.regenerate", {
- provider: capitalizeAll(
- config?.genai.provider.replaceAll("_", " ") ??
- t("generativeAI"),
- ),
- errorMessage,
- }),
- { position: "top-center" },
- );
- });
- },
- [search, config, t],
- );
-
- const handleSubLabelSave = useCallback(
- (text: string) => {
- if (!search) return;
-
- // set score to 1.0 if we're manually entering a sub label
- const subLabelScore =
- text === "" ? undefined : search.data?.sub_label_score || 1.0;
-
- axios
- .post(`${apiHost}api/events/${search.id}/sub_label`, {
- camera: search.camera,
- subLabel: text,
- subLabelScore: subLabelScore,
- })
- .then((response) => {
- if (response.status === 200) {
- toast.success(t("details.item.toast.success.updatedSublabel"), {
- position: "top-center",
- });
-
- mutate(
- (key) => isEventsKey(key),
- (currentData: SearchResult[][] | SearchResult[] | undefined) =>
- mapSearchResults(currentData, (event) =>
- event.id === search.id
- ? {
- ...event,
- sub_label: text,
- data: {
- ...event.data,
- sub_label_score: subLabelScore,
- },
- }
- : event,
- ),
- {
- optimisticData: true,
- rollbackOnError: true,
- revalidate: false,
- },
- );
-
- setSearch({
- ...search,
- sub_label: text,
- data: {
- ...search.data,
- sub_label_score: subLabelScore,
- },
- });
- setIsSubLabelDialogOpen(false);
- }
- })
- .catch((error) => {
- const errorMessage =
- error.response?.data?.message ||
- error.response?.data?.detail ||
- "Unknown error";
- toast.error(
- t("details.item.toast.error.updatedSublabelFailed", {
- errorMessage,
- }),
- {
- position: "top-center",
- },
- );
- });
- },
- [search, apiHost, mutate, setSearch, t, mapSearchResults, isEventsKey],
- );
-
- // recognized plate
-
- const handleLPRSave = useCallback(
- (text: string) => {
- if (!search) return;
-
- // set score to 1.0 if we're manually entering a new plate
- const plateScore = text === "" ? undefined : 1.0;
-
- axios
- .post(`${apiHost}api/events/${search.id}/recognized_license_plate`, {
- recognizedLicensePlate: text,
- recognizedLicensePlateScore: plateScore,
- })
- .then((response) => {
- if (response.status === 200) {
- toast.success(t("details.item.toast.success.updatedLPR"), {
- position: "top-center",
- });
-
- mutate(
- (key) => isEventsKey(key),
- (currentData: SearchResult[][] | SearchResult[] | undefined) =>
- mapSearchResults(currentData, (event) =>
- event.id === search.id
- ? {
- ...event,
- data: {
- ...event.data,
- recognized_license_plate: text,
- recognized_license_plate_score: plateScore,
- },
- }
- : event,
- ),
- {
- optimisticData: true,
- rollbackOnError: true,
- revalidate: false,
- },
- );
-
- setSearch({
- ...search,
- data: {
- ...search.data,
- recognized_license_plate: text,
- recognized_license_plate_score: plateScore,
- },
- });
- setIsLPRDialogOpen(false);
- }
- })
- .catch((error) => {
- const errorMessage =
- error.response?.data?.message ||
- error.response?.data?.detail ||
- "Unknown error";
- toast.error(
- t("details.item.toast.error.updatedLPRFailed", {
- errorMessage,
- }),
- {
- position: "top-center",
- },
- );
- });
- },
- [search, apiHost, mutate, setSearch, t, mapSearchResults, isEventsKey],
- );
-
- const handleAttributesSave = useCallback(
- (selectedAttributes: string[]) => {
- if (!search) return;
-
- axios
- .post(`${apiHost}api/events/${search.id}/attributes`, {
- attributes: selectedAttributes,
- })
- .then((response) => {
- const applied = Array.isArray(response.data?.applied)
- ? (response.data.applied as {
- model?: string;
- label?: string | null;
- score?: number | null;
- }[])
- : [];
-
- toast.success(t("details.item.toast.success.updatedAttributes"), {
- position: "top-center",
- });
-
- const applyUpdatedAttributes = (event: SearchResult) => {
- if (event.id !== search.id) return event;
-
- const updatedData: Record = { ...event.data };
-
- applied.forEach(({ model, label, score }) => {
- if (!model) return;
- updatedData[model] = label ?? null;
- updatedData[`${model}_score`] = score ?? null;
- });
-
- return { ...event, data: updatedData } as SearchResult;
- };
-
- mutate(
- (key) => isEventsKey(key),
- (currentData: SearchResult[][] | SearchResult[] | undefined) =>
- mapSearchResults(currentData, applyUpdatedAttributes),
- {
- optimisticData: true,
- rollbackOnError: true,
- revalidate: false,
- },
- );
-
- setSearch(applyUpdatedAttributes(search));
- setIsAttributesDialogOpen(false);
- })
- .catch((error) => {
- const errorMessage =
- error.response?.data?.message ||
- error.response?.data?.detail ||
- "Unknown error";
-
- toast.error(
- t("details.item.toast.error.updatedAttributesFailed", {
- errorMessage,
- }),
- {
- position: "top-center",
- },
- );
- });
- },
- [search, apiHost, mutate, t, mapSearchResults, isEventsKey, setSearch],
- );
-
- // speech transcription
-
- const onTranscribe = useCallback(() => {
- axios
- .put(`/audio/transcribe`, { event_id: search.id })
- .then((resp) => {
- if (resp.status == 202) {
- toast.success(t("details.item.toast.success.audioTranscription"), {
- position: "top-center",
- });
- }
- })
- .catch((error) => {
- const errorMessage =
- error.response?.data?.message ||
- error.response?.data?.detail ||
- "Unknown error";
- toast.error(
- t("details.item.toast.error.audioTranscription", {
- errorMessage,
- }),
- {
- position: "top-center",
- },
- );
- });
- }, [search, t]);
-
- // audio transcription processing state
-
- const { payload: audioTranscriptionProcessState } =
- useAudioTranscriptionProcessState();
-
- // frigate+ submission
-
- type SubmissionState = "reviewing" | "uploading" | "submitted";
- const [state, setState] = useState(
- search?.plus_id ? "submitted" : "reviewing",
- );
-
- useEffect(
- () => setState(search?.plus_id ? "submitted" : "reviewing"),
- [search],
- );
-
- const onSubmitToPlus = useCallback(
- async (falsePositive: boolean) => {
- if (!search) {
- return;
- }
-
- falsePositive
- ? axios.put(`events/${search.id}/false_positive`)
- : axios.post(`events/${search.id}/plus`, {
- include_annotation: 1,
- });
-
- setState("submitted");
- setSearch({ ...search, plus_id: "new_upload" });
- mutate(
- (key) => isEventsKey(key),
- (currentData: SearchResult[][] | SearchResult[] | undefined) =>
- mapSearchResults(currentData, (event) =>
- event.id === search.id
- ? { ...event, plus_id: "new_upload" }
- : event,
- ),
- {
- optimisticData: true,
- rollbackOnError: true,
- revalidate: false,
- },
- );
- },
- [search, mutate, mapSearchResults, setSearch, isEventsKey],
- );
-
- const popoverContainerRef = useRef(null);
- const canRegenerate = !!(
- config?.cameras[search.camera].objects.genai.enabled && search.end_time
- );
- const showGenAIPlaceholder = !!(
- config?.cameras[search.camera].objects.genai.enabled &&
- !search.end_time &&
- (config.cameras[search.camera].objects.genai.required_zones.length === 0 ||
- search.zones.some((zone) =>
- config.cameras[search.camera].objects.genai.required_zones.includes(
- zone,
- ),
- )) &&
- (config.cameras[search.camera].objects.genai.objects.length === 0 ||
- config.cameras[search.camera].objects.genai.objects.includes(
- search.label,
- ))
- );
- return (
-
-
-
-
-
-
-
-
-
- {t("details.label")}
-
-
- {getIconForLabel(
- search.label,
- search.data.type,
- "size-4 text-primary",
- )}
- {getTranslatedLabel(search.label, search.data.type)}
- {search.sub_label && ` (${search.sub_label})`}
- {isAdmin && search.end_time && (
-
-
-
- setIsSubLabelDialogOpen(true)}
- />
-
-
-
-
- {t("details.editSubLabel.title")}
-
-
-
- )}
-
-
-
-
-
-
- {t("details.topScore.label")}
-
-
-
-
- Info
-
-
-
- {t("details.topScore.info")}
-
-
-
-
-
- {topScore}%{subLabelScore && ` (${subLabelScore}%)`}
-
-
-
-
-
- {t("details.camera")}
-
-
-
-
-
-
-
-
-
-
- {snapScore != undefined && (
-
-
-
- {t("details.snapshotScore.label")}
-
-
-
{snapScore}%
-
- )}
-
- {averageEstimatedSpeed && (
-
-
- {t("details.estimatedSpeed")}
-
-
-
- {averageEstimatedSpeed}{" "}
- {config?.ui.unit_system == "imperial"
- ? t("unit.speed.mph", { ns: "common" })
- : t("unit.speed.kph", { ns: "common" })}
- {velocityAngle != undefined && (
-
-
-
- )}
-
-
-
- )}
-
-
-
- {t("details.timestamp")}
-
-
{formattedDate}
-
-
-
-
-
- {search?.data.recognized_license_plate && (
-
-
- {t("details.recognizedLicensePlate")}
-
-
-
- {search.data.recognized_license_plate}{" "}
- {recognizedLicensePlateScore &&
- ` (${recognizedLicensePlateScore}%)`}
- {isAdmin && (
-
-
-
- setIsLPRDialogOpen(true)}
- />
-
-
-
-
- {t("details.editLPR.title")}
-
-
-
- )}
-
-
-
- )}
-
- {hasCustomClassificationModels &&
- modelAttributes &&
- Object.keys(modelAttributes).length > 0 && (
-
-
- {t("details.attributes")}
- {isAdmin && (
-
-
-
- setIsAttributesDialogOpen(true)}
- />
-
-
-
-
- {t("button.edit", { ns: "common" })}
-
-
-
- )}
-
-
- {eventAttributes.length > 0
- ? eventAttributes.join(", ")
- : t("label.none", { ns: "common" })}
-
-
- )}
-
-
-
- {isAdmin &&
- search.data.type === "object" &&
- config?.plus?.enabled &&
- search.end_time != undefined &&
- search.has_snapshot && (
-
-
-
- {t("explore.plus.submitToPlus.label", {
- ns: "components/dialog",
- })}
-
-
-
-
- Info
-
-
-
- {t("explore.plus.submitToPlus.desc", {
- ns: "components/dialog",
- })}
-
-
-
-
-
-
- {state == "reviewing" && (
- <>
-
- {i18n.language === "en" ? (
- // English with a/an logic plus label
- <>
- {/^[aeiou]/i.test(search?.label || "") ? (
-
- explore.plus.review.question.ask_an
-
- ) : (
-
- explore.plus.review.question.ask_a
-
- )}
- >
- ) : (
- // For other languages
-
- explore.plus.review.question.ask_full
-
- )}
-
-
- {
- setState("uploading");
- onSubmitToPlus(false);
- }}
- >
- {t("button.yes", { ns: "common" })}
-
- {
- setState("uploading");
- onSubmitToPlus(true);
- }}
- >
- {t("button.no", { ns: "common" })}
-
-
- >
- )}
- {state == "uploading" &&
}
- {state == "submitted" && (
-
-
- {t("explore.plus.review.state.submitted", {
- ns: "components/dialog",
- })}
-
- )}
-
-
- )}
-
-
-
- {t("details.description.label")}
-
-
-
-
- {
- originalDescRef.current = desc ?? "";
- setIsEditingDesc(true);
- }}
- >
-
-
-
-
- {t("button.edit", { ns: "common" })}
-
-
-
- {config?.cameras[search?.camera].audio_transcription.enabled &&
- search?.label == "speech" &&
- search?.end_time &&
- search?.has_clip && (
-
-
-
- {audioTranscriptionProcessState === "processing" ? (
-
- ) : (
-
- )}
-
-
-
- {t("itemMenu.audioTranscription.label")}
-
-
- )}
-
- {canRegenerate && (
-
-
-
-
-
-
-
-
-
-
-
- {t("details.button.regenerate.title")}
-
-
-
- {search.has_snapshot && (
- regenerateDescription("snapshot")}
- >
- {t("details.regenerateFromSnapshot")}
-
- )}
- regenerateDescription("thumbnails")}
- >
- {t("details.regenerateFromThumbnails")}
-
-
-
-
- )}
-
-
-
- {!isEditingDesc ? (
- showGenAIPlaceholder ? (
-
-
-
{t("details.description.aiTips")}
-
- ) : (
-
- {desc || t("label.none", { ns: "common" })}
-
- )
- ) : (
-
- )}
-
-
-
-
-
-
- );
-}
-
-type ObjectSnapshotTabProps = {
- search: Event;
- className?: string;
- onEventUploaded?: () => void;
-};
-export function ObjectSnapshotTab({
- search,
- className,
-}: ObjectSnapshotTabProps) {
- const [imgRef, imgLoaded, onImgLoad] = useImageLoaded();
-
- return (
-
-
-
-
-
-
- {search?.id && (
-
-
{
- onImgLoad();
- }}
- />
-
- )}
-
-
-
-
-
- );
-}
-
-type VideoTabProps = {
- search: SearchResult;
-};
-
-export function VideoTab({ search }: VideoTabProps) {
- const clipTimeRange = useMemo(() => {
- const startTime = search.start_time - REVIEW_PADDING;
- const endTime = (search.end_time ?? Date.now() / 1000) + REVIEW_PADDING;
- return `start/${startTime}/end/${endTime}`;
- }, [search]);
-
- const source = `${baseUrl}vod/${search.camera}/${clipTimeRange}/index.m3u8`;
-
- return (
- <>
-
-
- >
- );
-}
+import { isDesktop, isIOS, isMobile, isSafari } from "react-device-detect";
+import { SearchResult } from "@/types/search";
+import useSWR from "swr";
+import { FrigateConfig } from "@/types/frigateConfig";
+import { useFormattedTimestamp } from "@/hooks/use-date-utils";
+import { getIconForLabel } from "@/utils/iconUtil";
+import { useApiHost } from "@/api";
+import { Button } from "../../ui/button";
+import {
+ useCallback,
+ useEffect,
+ useLayoutEffect,
+ useMemo,
+ useRef,
+ useState,
+} from "react";
+import axios from "axios";
+import { toast } from "sonner";
+import { Textarea } from "../../ui/textarea";
+import { ScrollArea, ScrollBar } from "@/components/ui/scroll-area";
+import { ToggleGroup, ToggleGroupItem } from "@/components/ui/toggle-group";
+import useOptimisticState from "@/hooks/use-optimistic-state";
+import {
+ Dialog,
+ DialogContent,
+ DialogDescription,
+ DialogHeader,
+ DialogTitle,
+} from "@/components/ui/dialog";
+import { Event } from "@/types/event";
+import { baseUrl } from "@/api/baseUrl";
+import { cn } from "@/lib/utils";
+import ActivityIndicator from "@/components/indicators/activity-indicator";
+import {
+ FaArrowRight,
+ FaCheckCircle,
+ FaChevronLeft,
+ FaChevronRight,
+ FaMicrophone,
+ FaCheck,
+ FaTimes,
+} from "react-icons/fa";
+import { TrackingDetails } from "./TrackingDetails";
+import { AnnotationSettingsPane } from "./AnnotationSettingsPane";
+import { DetailStreamProvider } from "@/context/detail-stream-context";
+import {
+ MobilePage,
+ MobilePageContent,
+ MobilePageDescription,
+ MobilePageHeader,
+ MobilePageTitle,
+} from "@/components/mobile/MobilePage";
+import {
+ Tooltip,
+ TooltipContent,
+ TooltipTrigger,
+} from "@/components/ui/tooltip";
+import { REVIEW_PADDING } from "@/types/review";
+import { capitalizeAll } from "@/utils/stringUtil";
+import useGlobalMutation from "@/hooks/use-global-mutate";
+import DetailActionsMenu from "./DetailActionsMenu";
+import {
+ DropdownMenu,
+ DropdownMenuContent,
+ DropdownMenuItem,
+ DropdownMenuTrigger,
+} from "@/components/ui/dropdown-menu";
+import { TransformComponent, TransformWrapper } from "react-zoom-pan-pinch";
+import useImageLoaded from "@/hooks/use-image-loaded";
+import ImageLoadingIndicator from "@/components/indicators/ImageLoadingIndicator";
+import { GenericVideoPlayer } from "@/components/player/GenericVideoPlayer";
+import {
+ Popover,
+ PopoverContent,
+ PopoverTrigger,
+} from "@/components/ui/popover";
+import {
+ Drawer,
+ DrawerContent,
+ DrawerTitle,
+ DrawerTrigger,
+} from "@/components/ui/drawer";
+import useRecordingPlaybackSource from "@/hooks/use-recording-playback-source";
+import { LuInfo } from "react-icons/lu";
+import { TooltipPortal } from "@radix-ui/react-tooltip";
+import { FaPencilAlt } from "react-icons/fa";
+import TextEntryDialog from "@/components/overlay/dialog/TextEntryDialog";
+import AttributeSelectDialog from "@/components/overlay/dialog/AttributeSelectDialog";
+import { Trans, useTranslation } from "react-i18next";
+import { useIsAdmin } from "@/hooks/use-is-admin";
+import { getTranslatedLabel } from "@/utils/i18n";
+import { CameraNameLabel } from "@/components/camera/FriendlyNameLabel";
+import { DialogPortal } from "@radix-ui/react-dialog";
+import { useDetailStream } from "@/context/detail-stream-context";
+import { PiSlidersHorizontalBold } from "react-icons/pi";
+import { HiSparkles } from "react-icons/hi";
+import { useAudioTranscriptionProcessState } from "@/api/ws";
+
+const SEARCH_TABS = ["snapshot", "tracking_details"] as const;
+export type SearchTab = (typeof SEARCH_TABS)[number];
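+
+// Indexing the `as const` tuple by `number` derives the union of its
+// members, so SearchTab resolves to "snapshot" | "tracking_details"
+// without restating the literals.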
+
+type TabsWithActionsProps = {
+ search: SearchResult;
+ searchTabs: SearchTab[];
+ pageToggle: SearchTab;
+ setPageToggle: (v: SearchTab) => void;
+ config?: FrigateConfig;
+ setSearch: (s: SearchResult | undefined) => void;
+ setSimilarity?: () => void;
+ isPopoverOpen: boolean;
+ setIsPopoverOpen: (open: boolean) => void;
+ dialogContainer: HTMLDivElement | null;
+};
+
+function TabsWithActions({
+ search,
+ searchTabs,
+ pageToggle,
+ setPageToggle,
+ config,
+ setSearch,
+ setSimilarity,
+ isPopoverOpen,
+ setIsPopoverOpen,
+ dialogContainer,
+}: TabsWithActionsProps) {
+ const { t } = useTranslation(["views/explore", "views/faceLibrary"]);
+
+ useEffect(() => {
+ if (pageToggle !== "tracking_details" && isPopoverOpen) {
+ setIsPopoverOpen(false);
+ }
+ }, [pageToggle, isPopoverOpen, setIsPopoverOpen]);
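+
+ // The effect above closes the annotation-settings popover whenever the
+ // user navigates away from the tracking_details tab, since that is the
+ // only tab where the popover is relevant.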
+
+ if (!search) return null;
+
+ return (
+
+
+
+
{
+ if (value) {
+ setPageToggle(value);
+ }
+ }}
+ >
+ {Object.values(searchTabs).map((item) => (
+
+
+ {item === "snapshot"
+ ? search?.has_snapshot
+ ? t("type.snapshot")
+ : t("type.thumbnail")
+ : t(`type.${item}`)}
+
+
+ ))}
+
+
+
+
+
+ {pageToggle === "tracking_details" && (
+
+ )}
+
+ );
+}
+
+type AnnotationSettingsProps = {
+ search: SearchResult;
+ open: boolean;
+ setIsOpen: (open: boolean) => void;
+ container?: HTMLElement | null;
+};
+
+function AnnotationSettings({
+ search,
+ open,
+ setIsOpen,
+ container,
+}: AnnotationSettingsProps) {
+ const { t } = useTranslation(["views/explore"]);
+ const { annotationOffset, setAnnotationOffset } = useDetailStream();
+
+ const ignoreNextOpenRef = useRef(false);
+
+ useEffect(() => {
+ setIsOpen(false);
+ ignoreNextOpenRef.current = false;
+ }, [search, setIsOpen]);
+
+ const handleOpenChange = useCallback(
+ (nextOpen: boolean) => {
+ if (nextOpen) {
+ if (ignoreNextOpenRef.current) {
+ ignoreNextOpenRef.current = false;
+ return;
+ }
+ setIsOpen(true);
+ } else {
+ setIsOpen(false);
+ }
+ },
+ [setIsOpen],
+ );
+
+ const registerTriggerCloseIntent = useCallback(() => {
+ if (open) {
+ ignoreNextOpenRef.current = true;
+ }
+ }, [open]);
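+
+ // Rationale for ignoreNextOpenRef (inferred from the handlers above):
+ // clicking the trigger while the popover is open can emit a close
+ // immediately followed by a re-open. Recording the close intent lets
+ // handleOpenChange swallow that one spurious open, so the trigger
+ // behaves as a true toggle.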
+
+ const Overlay = isDesktop ? Popover : Drawer;
+ const Trigger = isDesktop ? PopoverTrigger : DrawerTrigger;
+ const Content = isDesktop ? PopoverContent : DrawerContent;
+ const Title = isDesktop ? "div" : DrawerTitle;
+ const contentProps = isDesktop
+ ? { align: "end" as const, container: container ?? undefined }
+ : {};
+
+ return (
+
+
+
+ {
+ if (open && (event.key === "Enter" || event.key === " ")) {
+ registerTriggerCloseIntent();
+ }
+ }}
+ >
+
+
+
+
+ {t("trackingDetails.adjustAnnotationSettings")}
+
+
+
+
+
+
+ );
+}
+
+type DialogContentComponentProps = {
+ page: SearchTab;
+ search: SearchResult;
+ isDesktop: boolean;
+ apiHost: string;
+ config?: FrigateConfig;
+ searchTabs: SearchTab[];
+ pageToggle: SearchTab;
+ setPageToggle: (v: SearchTab) => void;
+ setSearch: (s: SearchResult | undefined) => void;
+ setInputFocused: React.Dispatch<React.SetStateAction<boolean>>;
+ setSimilarity?: () => void;
+ isPopoverOpen: boolean;
+ setIsPopoverOpen: (open: boolean) => void;
+ dialogContainer: HTMLDivElement | null;
+ setShowNavigationButtons: React.Dispatch<React.SetStateAction<boolean>>;
+};
+
+function DialogContentComponent({
+ page,
+ search,
+ isDesktop,
+ apiHost,
+ config,
+ searchTabs,
+ pageToggle,
+ setPageToggle,
+ setSearch,
+ setInputFocused,
+ setSimilarity,
+ isPopoverOpen,
+ setIsPopoverOpen,
+ dialogContainer,
+ setShowNavigationButtons,
+}: DialogContentComponentProps) {
+ if (page === "tracking_details") {
+ return (
+
+ ) : undefined
+ }
+ />
+ );
+ }
+
+ // Snapshot page content
+ const snapshotElement = search.has_snapshot ? (
+
+ ) : (
+
+
+
+ );
+
+ if (isDesktop) {
+ return (
+
+
+ {snapshotElement}
+
+
+
+ );
+ }
+
+ // mobile
+ return (
+ <>
+ {snapshotElement}
+
+ >
+ );
+}
+
+type SearchDetailDialogProps = {
+ search?: SearchResult;
+ page: SearchTab;
+ setSearch: (search: SearchResult | undefined) => void;
+ setSearchPage: (page: SearchTab) => void;
+ setSimilarity?: () => void;
+ setInputFocused: React.Dispatch<React.SetStateAction<boolean>>;
+ onPrevious?: () => void;
+ onNext?: () => void;
+};
+
+export default function SearchDetailDialog({
+ search,
+ page,
+ setSearch,
+ setSearchPage,
+ setSimilarity,
+ setInputFocused,
+ onPrevious,
+ onNext,
+}: SearchDetailDialogProps) {
+ const { t } = useTranslation(["views/explore", "views/faceLibrary"]);
+ const { data: config } = useSWR<FrigateConfig>("config", {
+ revalidateOnFocus: false,
+ });
+ const apiHost = useApiHost();
+
+ // tabs
+
+ const [pageToggle, setPageToggle] = useOptimisticState(
+ page,
+ setSearchPage,
+ 100,
+ );
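+
+ // useOptimisticState presumably keeps a local mirror of `page` that
+ // updates immediately and propagates setSearchPage after ~100ms, so the
+ // tab toggle repaints without waiting on the parent state round trip.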
+
+ // dialog and mobile page
+
+ const [isOpen, setIsOpen] = useState(search != undefined);
+ const [isPopoverOpen, setIsPopoverOpen] = useState(false);
+ const [showNavigationButtons, setShowNavigationButtons] = useState(false);
+ const dialogContentRef = useRef<HTMLDivElement | null>(null);
+ const [dialogContainer, setDialogContainer] = useState<HTMLDivElement | null>(
+ null,
+ );
+
+ const handleOpenChange = useCallback(
+ (open: boolean) => {
+ setIsOpen(open);
+ if (!open) {
+ setIsPopoverOpen(false);
+ // short timeout to allow the mobile page animation
+ // to complete before updating the state
+ setTimeout(() => {
+ setSearch(undefined);
+ }, 300);
+ }
+ },
+ [setSearch],
+ );
+
+ useLayoutEffect(() => {
+ setDialogContainer(dialogContentRef.current);
+ }, [isOpen, search?.id]);
+
+ useEffect(() => {
+ if (search) {
+ setIsOpen(search != undefined);
+ }
+ }, [search]);
+
+ useEffect(() => {
+ if (!isDesktop || !onPrevious || !onNext) {
+ setShowNavigationButtons(false);
+ return;
+ }
+
+ setShowNavigationButtons(isOpen);
+ }, [isOpen, onNext, onPrevious]);
+
+ // show/hide annotation settings is handled inside TabsWithActions
+
+ const searchTabs = useMemo(() => {
+ if (!config || !search) {
+ return [];
+ }
+
+ const views = [...SEARCH_TABS];
+
+ if (!search.has_clip) {
+ const index = views.indexOf("tracking_details");
+ views.splice(index, 1);
+ }
+
+ return views;
+ }, [config, search]);
+
+ useEffect(() => {
+ if (searchTabs.length == 0) {
+ return;
+ }
+
+ if (!searchTabs.includes(pageToggle)) {
+ setSearchPage("snapshot");
+ }
+ }, [pageToggle, searchTabs, setSearchPage]);
+
+ if (!search) {
+ return;
+ }
+
+ // content
+
+ const Overlay = isDesktop ? Dialog : MobilePage;
+ const Content = isDesktop ? DialogContent : MobilePageContent;
+ const Header = isDesktop ? DialogHeader : MobilePageHeader;
+ const Title = isDesktop ? DialogTitle : MobilePageTitle;
+ const Description = isDesktop ? DialogDescription : MobilePageDescription;
+
+ return (
+
+
+ {isDesktop && onPrevious && onNext && showNavigationButtons && (
+
+
+
+
+
+ {
+ e.stopPropagation();
+ onPrevious?.();
+ }}
+ className="nav-button pointer-events-auto absolute -left-16 rounded-lg border bg-secondary/60 p-2 text-primary-variant shadow-lg backdrop-blur-sm hover:bg-secondary/80 hover:text-primary"
+ aria-label={t("searchResult.previousTrackedObject")}
+ >
+
+
+
+
+ {t("searchResult.previousTrackedObject")}
+
+
+
+
+
+ {
+ e.stopPropagation();
+ onNext?.();
+ }}
+ className="nav-button pointer-events-auto absolute -right-16 rounded-lg border bg-secondary/60 p-2 text-primary-variant shadow-lg backdrop-blur-sm hover:bg-secondary/80 hover:text-primary"
+ aria-label={t("searchResult.nextTrackedObject")}
+ >
+
+
+
+
+ {t("searchResult.nextTrackedObject")}
+
+
+
+
+
+ )}
+ {
+ if (isPopoverOpen) {
+ event.preventDefault();
+ }
+ }}
+ onInteractOutside={(e) => {
+ if (isPopoverOpen) {
+ e.preventDefault();
+ }
+ const target = e.target as HTMLElement;
+ if (target.closest(".nav-button")) {
+ e.preventDefault();
+ }
+ }}
+ >
+
+ {t("trackedObjectDetails")}
+
+ {t("trackedObjectDetails")}
+
+
+
+
+ {!isDesktop && (
+
+
+
+ )}
+
+
+
+
+
+ );
+}
+
+type ObjectDetailsTabProps = {
+ search: SearchResult;
+ config?: FrigateConfig;
+ setSearch: (search: SearchResult | undefined) => void;
+ setInputFocused: React.Dispatch<React.SetStateAction<boolean>>;
+ setShowNavigationButtons?: React.Dispatch<React.SetStateAction<boolean>>;
+};
+function ObjectDetailsTab({
+ search,
+ config,
+ setSearch,
+ setInputFocused,
+ setShowNavigationButtons,
+}: ObjectDetailsTabProps) {
+ const { t, i18n } = useTranslation([
+ "views/explore",
+ "views/faceLibrary",
+ "components/dialog",
+ ]);
+
+ const apiHost = useApiHost();
+ const hasCustomClassificationModels = useMemo(
+ () => Object.keys(config?.classification?.custom ?? {}).length > 0,
+ [config],
+ );
+ const { data: modelAttributes } = useSWR<Record<string, string[]>>(
+ hasCustomClassificationModels && search
+ ? `classification/attributes?object_type=${encodeURIComponent(search.label)}&group_by_model=true`
+ : null,
+ );
+
+ // mutation / revalidation
+
+ const mutate = useGlobalMutation();
+
+ // Helper to map over SWR cached search results while preserving
+ // either paginated format (SearchResult[][]) or flat format (SearchResult[])
+ const mapSearchResults = useCallback(
+ (
+ currentData: SearchResult[][] | SearchResult[] | undefined,
+ fn: (event: SearchResult) => SearchResult,
+ ) => {
+ if (!currentData) return currentData;
+ if (Array.isArray(currentData[0])) {
+ return (currentData as SearchResult[][]).map((page) => page.map(fn));
+ }
+ return (currentData as SearchResult[]).map(fn);
+ },
+ [],
+ );
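+
+ // A minimal usage sketch with hypothetical events a, b, c (not real
+ // cache contents):
+ //   mapSearchResults([[a, b], [c]], fn) // paginated -> [[fn(a), fn(b)], [fn(c)]]
+ //   mapSearchResults([a, b, c], fn)     // flat      -> [fn(a), fn(b), fn(c)]
+ // Checking only element 0 is sufficient because SWR infinite caches are
+ // arrays of pages and the two shapes are never mixed.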
+
+ // users
+
+ const isAdmin = useIsAdmin();
+
+ // data
+
+ const [desc, setDesc] = useState(search?.data.description);
+ const [isSubLabelDialogOpen, setIsSubLabelDialogOpen] = useState(false);
+ const [isLPRDialogOpen, setIsLPRDialogOpen] = useState(false);
+ const [isAttributesDialogOpen, setIsAttributesDialogOpen] = useState(false);
+ const [isEditingDesc, setIsEditingDesc] = useState(false);
+ const originalDescRef = useRef<string | null>(null);
+
+ const handleDescriptionFocus = useCallback(() => {
+ setInputFocused(true);
+ }, [setInputFocused]);
+
+ const handleDescriptionBlur = useCallback(() => {
+ setInputFocused(false);
+ }, [setInputFocused]);
+
+ // we have to make sure the current selected search item stays in sync
+ useEffect(() => setDesc(search?.data.description ?? ""), [search]);
+
+ useEffect(() => setIsAttributesDialogOpen(false), [search?.id]);
+
+ useEffect(() => {
+ const anyDialogOpen =
+ isSubLabelDialogOpen || isLPRDialogOpen || isAttributesDialogOpen;
+ setShowNavigationButtons?.(!anyDialogOpen);
+ }, [
+ isSubLabelDialogOpen,
+ isLPRDialogOpen,
+ isAttributesDialogOpen,
+ setShowNavigationButtons,
+ ]);
+
+ const formattedDate = useFormattedTimestamp(
+ search?.start_time ?? 0,
+ config?.ui.time_format == "24hour"
+ ? t("time.formattedTimestampMonthDayYearHourMinute.24hour", {
+ ns: "common",
+ })
+ : t("time.formattedTimestampMonthDayYearHourMinute.12hour", {
+ ns: "common",
+ }),
+ config?.ui.timezone,
+ );
+
+ const topScore = useMemo(() => {
+ if (!search) {
+ return 0;
+ }
+
+ const value = search.data.top_score ?? search.top_score ?? 0;
+
+ return Math.round(value * 100);
+ }, [search]);
+
+ const subLabelScore = useMemo(() => {
+ if (!search) {
+ return undefined;
+ }
+
+ if (search.sub_label && search.data?.sub_label_score) {
+ return Math.round((search.data?.sub_label_score ?? 0) * 100);
+ } else {
+ return undefined;
+ }
+ }, [search]);
+
+ const recognizedLicensePlateScore = useMemo(() => {
+ if (!search) {
+ return undefined;
+ }
+
+ if (
+ search.data.recognized_license_plate &&
+ search.data?.recognized_license_plate_score
+ ) {
+ return Math.round(
+ (search.data?.recognized_license_plate_score ?? 0) * 100,
+ );
+ } else {
+ return undefined;
+ }
+ }, [search]);
+
+ const snapScore = useMemo(() => {
+ if (!search?.has_snapshot) {
+ return undefined;
+ }
+
+ const value = search.data.score ?? search.score ?? 0;
+
+ return Math.floor(value * 100);
+ }, [search]);
+
+ const averageEstimatedSpeed = useMemo(() => {
+ if (!search || !search.data?.average_estimated_speed) {
+ return undefined;
+ }
+
+ if (search.data?.average_estimated_speed != 0) {
+ return search.data?.average_estimated_speed.toFixed(1);
+ } else {
+ return undefined;
+ }
+ }, [search]);
+
+ const velocityAngle = useMemo(() => {
+ if (!search || !search.data?.velocity_angle) {
+ return undefined;
+ }
+
+ if (search.data?.velocity_angle != 0) {
+ return search.data?.velocity_angle.toFixed(1);
+ } else {
+ return undefined;
+ }
+ }, [search]);
+
+ // Extract current attribute selections grouped by model
+ const selectedAttributesByModel = useMemo(() => {
+ if (!search || !modelAttributes) {
+ return {};
+ }
+
+ const dataAny = search.data as Record<string, unknown>;
+ const selections: Record<string, string | null> = {};
+
+ // Initialize all models with null
+ Object.keys(modelAttributes).forEach((modelName) => {
+ selections[modelName] = null;
+ });
+
+ // Find which attribute is selected for each model
+ Object.keys(modelAttributes).forEach((modelName) => {
+ const value = dataAny[modelName];
+ if (
+ typeof value === "string" &&
+ modelAttributes[modelName].includes(value)
+ ) {
+ selections[modelName] = value;
+ }
+ });
+
+ return selections;
+ }, [search, modelAttributes]);
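+
+ // Worked example with a hypothetical model: if modelAttributes is
+ // { shirt_color: ["red", "blue"] } and search.data.shirt_color === "red",
+ // this memo yields { shirt_color: "red" }; models without a matching
+ // string value stay null.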
+
+ // Get flat list of selected attributes for display
+ const eventAttributes = useMemo(() => {
+ return Object.values(selectedAttributesByModel)
+ .filter((attr): attr is string => attr !== null)
+ .sort((a, b) => a.localeCompare(b));
+ }, [selectedAttributesByModel]);
+
+ const isEventsKey = useCallback((key: unknown): boolean => {
+ const candidate = Array.isArray(key) ? key[0] : key;
+ const EVENTS_KEY_PATTERNS = ["events", "events/search", "events/explore"];
+ return (
+ typeof candidate === "string" &&
+ EVENTS_KEY_PATTERNS.some((p) => candidate.includes(p))
+ );
+ }, []);
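+
+ // Matches plain string keys (e.g. "events/search?...") as well as SWR
+ // array keys like ["events", { query }], where only the first element
+ // is inspected.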
+
+ const updateDescription = useCallback(() => {
+ if (!search) {
+ return;
+ }
+
+ axios
+ .post(`events/${search.id}/description`, { description: desc })
+ .then((resp) => {
+ if (resp.status == 200) {
+ toast.success(t("details.tips.descriptionSaved"), {
+ position: "top-center",
+ });
+ }
+ mutate(
+ (key) => isEventsKey(key),
+ (currentData: SearchResult[][] | SearchResult[] | undefined) =>
+ mapSearchResults(currentData, (event) =>
+ event.id === search.id
+ ? { ...event, data: { ...event.data, description: desc } }
+ : event,
+ ),
+ {
+ optimisticData: true,
+ rollbackOnError: true,
+ revalidate: false,
+ },
+ );
+ setSearch({ ...search, data: { ...search.data, description: desc } });
+ })
+ .catch((error) => {
+ const errorMessage =
+ error.response?.data?.message ||
+ error.response?.data?.detail ||
+ "Unknown error";
+ toast.error(
+ t("details.tips.saveDescriptionFailed", {
+ errorMessage,
+ }),
+ {
+ position: "top-center",
+ },
+ );
+ setDesc(search.data.description);
+ });
+ }, [desc, search, mutate, t, mapSearchResults, isEventsKey, setSearch]);
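+
+ // The mutate options above apply the mapped results optimistically to
+ // every cached events query, roll the cache back if the write fails,
+ // and skip revalidation to avoid a refetch after each edit; the sub
+ // label, license plate, and attribute handlers below reuse the same
+ // pattern.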
+
+ const regenerateDescription = useCallback(
+ (source: "snapshot" | "thumbnails") => {
+ if (!search) {
+ return;
+ }
+
+ axios
+ .put(`events/${search.id}/description/regenerate?source=${source}`)
+ .then((resp) => {
+ if (resp.status == 200) {
+ toast.success(
+ t("details.item.toast.success.regenerate", {
+ provider: capitalizeAll(
+ config?.genai.provider.replaceAll("_", " ") ??
+ t("generativeAI"),
+ ),
+ }),
+ {
+ position: "top-center",
+ duration: 7000,
+ },
+ );
+ }
+ })
+ .catch((error) => {
+ const errorMessage =
+ error.response?.data?.message ||
+ error.response?.data?.detail ||
+ "Unknown error";
+ toast.error(
+ t("details.item.toast.error.regenerate", {
+ provider: capitalizeAll(
+ config?.genai.provider.replaceAll("_", " ") ??
+ t("generativeAI"),
+ ),
+ errorMessage,
+ }),
+ { position: "top-center" },
+ );
+ });
+ },
+ [search, config, t],
+ );
+
+ const handleSubLabelSave = useCallback(
+ (text: string) => {
+ if (!search) return;
+
+ // set score to 1.0 if we're manually entering a sub label
+ const subLabelScore =
+ text === "" ? undefined : search.data?.sub_label_score || 1.0;
+
+ axios
+ .post(`${apiHost}api/events/${search.id}/sub_label`, {
+ camera: search.camera,
+ subLabel: text,
+ subLabelScore: subLabelScore,
+ })
+ .then((response) => {
+ if (response.status === 200) {
+ toast.success(t("details.item.toast.success.updatedSublabel"), {
+ position: "top-center",
+ });
+
+ mutate(
+ (key) => isEventsKey(key),
+ (currentData: SearchResult[][] | SearchResult[] | undefined) =>
+ mapSearchResults(currentData, (event) =>
+ event.id === search.id
+ ? {
+ ...event,
+ sub_label: text,
+ data: {
+ ...event.data,
+ sub_label_score: subLabelScore,
+ },
+ }
+ : event,
+ ),
+ {
+ optimisticData: true,
+ rollbackOnError: true,
+ revalidate: false,
+ },
+ );
+
+ setSearch({
+ ...search,
+ sub_label: text,
+ data: {
+ ...search.data,
+ sub_label_score: subLabelScore,
+ },
+ });
+ setIsSubLabelDialogOpen(false);
+ }
+ })
+ .catch((error) => {
+ const errorMessage =
+ error.response?.data?.message ||
+ error.response?.data?.detail ||
+ "Unknown error";
+ toast.error(
+ t("details.item.toast.error.updatedSublabelFailed", {
+ errorMessage,
+ }),
+ {
+ position: "top-center",
+ },
+ );
+ });
+ },
+ [search, apiHost, mutate, setSearch, t, mapSearchResults, isEventsKey],
+ );
+
+ // recognized plate
+
+ const handleLPRSave = useCallback(
+ (text: string) => {
+ if (!search) return;
+
+ // set score to 1.0 if we're manually entering a new plate
+ const plateScore = text === "" ? undefined : 1.0;
+
+ axios
+ .post(`${apiHost}api/events/${search.id}/recognized_license_plate`, {
+ recognizedLicensePlate: text,
+ recognizedLicensePlateScore: plateScore,
+ })
+ .then((response) => {
+ if (response.status === 200) {
+ toast.success(t("details.item.toast.success.updatedLPR"), {
+ position: "top-center",
+ });
+
+ mutate(
+ (key) => isEventsKey(key),
+ (currentData: SearchResult[][] | SearchResult[] | undefined) =>
+ mapSearchResults(currentData, (event) =>
+ event.id === search.id
+ ? {
+ ...event,
+ data: {
+ ...event.data,
+ recognized_license_plate: text,
+ recognized_license_plate_score: plateScore,
+ },
+ }
+ : event,
+ ),
+ {
+ optimisticData: true,
+ rollbackOnError: true,
+ revalidate: false,
+ },
+ );
+
+ setSearch({
+ ...search,
+ data: {
+ ...search.data,
+ recognized_license_plate: text,
+ recognized_license_plate_score: plateScore,
+ },
+ });
+ setIsLPRDialogOpen(false);
+ }
+ })
+ .catch((error) => {
+ const errorMessage =
+ error.response?.data?.message ||
+ error.response?.data?.detail ||
+ "Unknown error";
+ toast.error(
+ t("details.item.toast.error.updatedLPRFailed", {
+ errorMessage,
+ }),
+ {
+ position: "top-center",
+ },
+ );
+ });
+ },
+ [search, apiHost, mutate, setSearch, t, mapSearchResults, isEventsKey],
+ );
+
+ const handleAttributesSave = useCallback(
+ (selectedAttributes: string[]) => {
+ if (!search) return;
+
+ axios
+ .post(`${apiHost}api/events/${search.id}/attributes`, {
+ attributes: selectedAttributes,
+ })
+ .then((response) => {
+ const applied = Array.isArray(response.data?.applied)
+ ? (response.data.applied as {
+ model?: string;
+ label?: string | null;
+ score?: number | null;
+ }[])
+ : [];
+
+ toast.success(t("details.item.toast.success.updatedAttributes"), {
+ position: "top-center",
+ });
+
+ const applyUpdatedAttributes = (event: SearchResult) => {
+ if (event.id !== search.id) return event;
+
+ const updatedData: Record<string, unknown> = { ...event.data };
+
+ applied.forEach(({ model, label, score }) => {
+ if (!model) return;
+ updatedData[model] = label ?? null;
+ updatedData[`${model}_score`] = score ?? null;
+ });
+
+ return { ...event, data: updatedData } as SearchResult;
+ };
+
+ mutate(
+ (key) => isEventsKey(key),
+ (currentData: SearchResult[][] | SearchResult[] | undefined) =>
+ mapSearchResults(currentData, applyUpdatedAttributes),
+ {
+ optimisticData: true,
+ rollbackOnError: true,
+ revalidate: false,
+ },
+ );
+
+ setSearch(applyUpdatedAttributes(search));
+ setIsAttributesDialogOpen(false);
+ })
+ .catch((error) => {
+ const errorMessage =
+ error.response?.data?.message ||
+ error.response?.data?.detail ||
+ "Unknown error";
+
+ toast.error(
+ t("details.item.toast.error.updatedAttributesFailed", {
+ errorMessage,
+ }),
+ {
+ position: "top-center",
+ },
+ );
+ });
+ },
+ [search, apiHost, mutate, t, mapSearchResults, isEventsKey, setSearch],
+ );
+
+ // speech transcription
+
+ const onTranscribe = useCallback(() => {
+ axios
+ .put(`/audio/transcribe`, { event_id: search.id })
+ .then((resp) => {
+ if (resp.status == 202) {
+ toast.success(t("details.item.toast.success.audioTranscription"), {
+ position: "top-center",
+ });
+ }
+ })
+ .catch((error) => {
+ const errorMessage =
+ error.response?.data?.message ||
+ error.response?.data?.detail ||
+ "Unknown error";
+ toast.error(
+ t("details.item.toast.error.audioTranscription", {
+ errorMessage,
+ }),
+ {
+ position: "top-center",
+ },
+ );
+ });
+ }, [search, t]);
+
+ // audio transcription processing state
+
+ const { payload: audioTranscriptionProcessState } =
+ useAudioTranscriptionProcessState();
+
+ // frigate+ submission
+
+ type SubmissionState = "reviewing" | "uploading" | "submitted";
+ const [state, setState] = useState<SubmissionState>(
+ search?.plus_id ? "submitted" : "reviewing",
+ );
+
+ useEffect(
+ () => setState(search?.plus_id ? "submitted" : "reviewing"),
+ [search],
+ );
+
+ const onSubmitToPlus = useCallback(
+ async (falsePositive: boolean) => {
+ if (!search) {
+ return;
+ }
+
+ falsePositive
+ ? axios.put(`events/${search.id}/false_positive`)
+ : axios.post(`events/${search.id}/plus`, {
+ include_annotation: 1,
+ });
+
+ setState("submitted");
+ setSearch({ ...search, plus_id: "new_upload" });
+ mutate(
+ (key) => isEventsKey(key),
+ (currentData: SearchResult[][] | SearchResult[] | undefined) =>
+ mapSearchResults(currentData, (event) =>
+ event.id === search.id
+ ? { ...event, plus_id: "new_upload" }
+ : event,
+ ),
+ {
+ optimisticData: true,
+ rollbackOnError: true,
+ revalidate: false,
+ },
+ );
+ },
+ [search, mutate, mapSearchResults, setSearch, isEventsKey],
+ );
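+
+ // Note that the axios call above is fire-and-forget: local and cached
+ // state flip to "submitted"/"new_upload" immediately rather than
+ // awaiting the upload, so a slow Frigate+ submission never blocks the
+ // dialog.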
+
+ const popoverContainerRef = useRef<HTMLDivElement | null>(null);
+ const canRegenerate = !!(
+ config?.cameras[search.camera].objects.genai.enabled && search.end_time
+ );
+ const showGenAIPlaceholder = !!(
+ config?.cameras[search.camera].objects.genai.enabled &&
+ !search.end_time &&
+ (config.cameras[search.camera].objects.genai.required_zones.length === 0 ||
+ search.zones.some((zone) =>
+ config.cameras[search.camera].objects.genai.required_zones.includes(
+ zone,
+ ),
+ )) &&
+ (config.cameras[search.camera].objects.genai.objects.length === 0 ||
+ config.cameras[search.camera].objects.genai.objects.includes(
+ search.label,
+ ))
+ );
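+
+ // In plain terms: the GenAI description placeholder only shows while
+ // the object is still active (no end_time) on a camera with genai
+ // enabled, and only if the object passes any configured required_zones
+ // and objects filters, where an empty filter list matches everything.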
+ return (
+
+
+
+
+
+
+
+
+
+ {t("details.label")}
+
+
+ {getIconForLabel(
+ search.label,
+ search.data.type,
+ "size-4 text-primary",
+ )}
+ {getTranslatedLabel(search.label, search.data.type)}
+ {search.sub_label && ` (${search.sub_label})`}
+ {isAdmin && search.end_time && (
+
+
+
+ setIsSubLabelDialogOpen(true)}
+ />
+
+
+
+
+ {t("details.editSubLabel.title")}
+
+
+
+ )}
+
+
+
+
+
+
+ {t("details.topScore.label")}
+
+
+
+
+ Info
+
+
+
+ {t("details.topScore.info")}
+
+
+
+
+
+ {topScore}%{subLabelScore && ` (${subLabelScore}%)`}
+
+
+
+
+
+ {t("details.camera")}
+
+
+
+
+
+
+
+
+
+
+ {snapScore != undefined && (
+
+
+
+ {t("details.snapshotScore.label")}
+
+
+
{snapScore}%
+
+ )}
+
+ {averageEstimatedSpeed && (
+
+
+ {t("details.estimatedSpeed")}
+
+
+
+ {averageEstimatedSpeed}{" "}
+ {config?.ui.unit_system == "imperial"
+ ? t("unit.speed.mph", { ns: "common" })
+ : t("unit.speed.kph", { ns: "common" })}
+ {velocityAngle != undefined && (
+
+
+
+ )}
+
+
+
+ )}
+
+
+
+ {t("details.timestamp")}
+
+
{formattedDate}
+
+
+
+
+
+ {search?.data.recognized_license_plate && (
+
+
+ {t("details.recognizedLicensePlate")}
+
+
+
+ {search.data.recognized_license_plate}{" "}
+ {recognizedLicensePlateScore &&
+ ` (${recognizedLicensePlateScore}%)`}
+ {isAdmin && (
+
+
+
+ setIsLPRDialogOpen(true)}
+ />
+
+
+
+
+ {t("details.editLPR.title")}
+
+
+
+ )}
+
+
+
+ )}
+
+ {hasCustomClassificationModels &&
+ modelAttributes &&
+ Object.keys(modelAttributes).length > 0 && (
+
+
+ {t("details.attributes")}
+ {isAdmin && (
+
+
+
+ setIsAttributesDialogOpen(true)}
+ />
+
+
+
+
+ {t("button.edit", { ns: "common" })}
+
+
+
+ )}
+
+
+ {eventAttributes.length > 0
+ ? eventAttributes.join(", ")
+ : t("label.none", { ns: "common" })}
+
+
+ )}
+
+
+
+ {isAdmin &&
+ search.data.type === "object" &&
+ config?.plus?.enabled &&
+ search.end_time != undefined &&
+ search.has_snapshot && (
+
+
+
+ {t("explore.plus.submitToPlus.label", {
+ ns: "components/dialog",
+ })}
+
+
+
+
+ Info
+
+
+
+ {t("explore.plus.submitToPlus.desc", {
+ ns: "components/dialog",
+ })}
+
+
+
+
+
+
+ {state == "reviewing" && (
+ <>
+
+ {i18n.language === "en" ? (
+ // English with a/an logic plus label
+ <>
+ {/^[aeiou]/i.test(search?.label || "") ? (
+
+ explore.plus.review.question.ask_an
+
+ ) : (
+
+ explore.plus.review.question.ask_a
+
+ )}
+ >
+ ) : (
+ // For other languages
+
+ explore.plus.review.question.ask_full
+
+ )}
+
+
+ {
+ setState("uploading");
+ onSubmitToPlus(false);
+ }}
+ >
+ {t("button.yes", { ns: "common" })}
+
+ {
+ setState("uploading");
+ onSubmitToPlus(true);
+ }}
+ >
+ {t("button.no", { ns: "common" })}
+
+
+ >
+ )}
+ {state == "uploading" &&
}
+ {state == "submitted" && (
+
+
+ {t("explore.plus.review.state.submitted", {
+ ns: "components/dialog",
+ })}
+
+ )}
+
+
+ )}
+
+
+
+ {t("details.description.label")}
+
+
+
+
+ {
+ originalDescRef.current = desc ?? "";
+ setIsEditingDesc(true);
+ }}
+ >
+
+
+
+
+ {t("button.edit", { ns: "common" })}
+
+
+
+ {config?.cameras[search?.camera].audio_transcription.enabled &&
+ search?.label == "speech" &&
+ search?.end_time &&
+ search?.has_clip && (
+
+
+
+ {audioTranscriptionProcessState === "processing" ? (
+
+ ) : (
+
+ )}
+
+
+
+ {t("itemMenu.audioTranscription.label")}
+
+
+ )}
+
+ {canRegenerate && (
+
+
+
+
+
+
+
+
+
+
+
+ {t("details.button.regenerate.title")}
+
+
+
+ {search.has_snapshot && (
+ regenerateDescription("snapshot")}
+ >
+ {t("details.regenerateFromSnapshot")}
+
+ )}
+ regenerateDescription("thumbnails")}
+ >
+ {t("details.regenerateFromThumbnails")}
+
+
+
+
+ )}
+
+
+
+ {!isEditingDesc ? (
+ showGenAIPlaceholder ? (
+
+
+
{t("details.description.aiTips")}
+
+ ) : (
+
+ {desc || t("label.none", { ns: "common" })}
+
+ )
+ ) : (
+
+ )}
+
+
+
+
+
+
+ );
+}
+
+type ObjectSnapshotTabProps = {
+ search: Event;
+ className?: string;
+ onEventUploaded?: () => void;
+};
+export function ObjectSnapshotTab({
+ search,
+ className,
+}: ObjectSnapshotTabProps) {
+ const [imgRef, imgLoaded, onImgLoad] = useImageLoaded();
+
+ return (
+
+
+
+
+
+
+ {search?.id && (
+
+
{
+ onImgLoad();
+ }}
+ />
+
+ )}
+
+
+
+
+
+ );
+}
+
+type VideoTabProps = {
+ search: SearchResult;
+};
+
+export function VideoTab({ search }: VideoTabProps) {
+ const clipTimeRange = useMemo(() => {
+ const startTime = search.start_time - REVIEW_PADDING;
+ const endTime = (search.end_time ?? Date.now() / 1000) + REVIEW_PADDING;
+ return `start/${startTime}/end/${endTime}`;
+ }, [search]);
+ const startTime = search.start_time - REVIEW_PADDING;
+ const endTime = (search.end_time ?? Date.now() / 1000) + REVIEW_PADDING;
+ const vodPath = `/vod/${search.camera}/${clipTimeRange}/index.m3u8`;
+ const playbackSource = useRecordingPlaybackSource({
+ camera: search.camera,
+ after: startTime,
+ before: endTime,
+ vodPath,
+ });
+ const source = playbackSource ?? `${baseUrl}${vodPath}`;
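+
+ // useRecordingPlaybackSource presumably resolves a preferred playback
+ // source for the recording window when one is available; if it returns
+ // nothing, playback falls back to the plain VOD HLS endpoint. The
+ // unmemoized startTime/endTime mirror the values inside clipTimeRange
+ // so the hook receives raw numbers instead of the formatted path. Note
+ // that vodPath begins with "/" while the previous code interpolated
+ // `${baseUrl}vod/...`, so the fallback may produce a doubled slash if
+ // baseUrl already ends with one.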
+
+ return (
+ <>
+
+
+ >
+ );
+}
diff --git a/web/src/components/overlay/detail/TrackingDetails.tsx b/web/src/components/overlay/detail/TrackingDetails.tsx
index 351370ad8..7d8d4c5b8 100644
--- a/web/src/components/overlay/detail/TrackingDetails.tsx
+++ b/web/src/components/overlay/detail/TrackingDetails.tsx
@@ -1,1148 +1,1160 @@
-import useSWR from "swr";
-import { useCallback, useEffect, useMemo, useRef, useState } from "react";
-import { useResizeObserver } from "@/hooks/resize-observer";
-import { useFullscreen } from "@/hooks/use-fullscreen";
-import { Event } from "@/types/event";
-import ActivityIndicator from "@/components/indicators/activity-indicator";
-import { TrackingDetailsSequence } from "@/types/timeline";
-import { FrigateConfig } from "@/types/frigateConfig";
-import { formatUnixTimestampToDateTime } from "@/utils/dateUtil";
-import { getIconForLabel } from "@/utils/iconUtil";
-import { LuCircle, LuFolderX } from "react-icons/lu";
-import { cn } from "@/lib/utils";
-import HlsVideoPlayer from "@/components/player/HlsVideoPlayer";
-import { baseUrl } from "@/api/baseUrl";
-import { REVIEW_PADDING } from "@/types/review";
-import {
- ASPECT_PORTRAIT_LAYOUT,
- ASPECT_WIDE_LAYOUT,
- Recording,
-} from "@/types/record";
-import {
- DropdownMenu,
- DropdownMenuTrigger,
- DropdownMenuContent,
- DropdownMenuItem,
- DropdownMenuPortal,
-} from "@/components/ui/dropdown-menu";
-import { Link, useNavigate } from "react-router-dom";
-import { getLifecycleItemDescription } from "@/utils/lifecycleUtil";
-import { useTranslation } from "react-i18next";
-import { getTranslatedLabel } from "@/utils/i18n";
-import { resolveZoneName } from "@/hooks/use-zone-friendly-name";
-import { Badge } from "@/components/ui/badge";
-import { HiDotsHorizontal } from "react-icons/hi";
-import axios from "axios";
-import { toast } from "sonner";
-import { useDetailStream } from "@/context/detail-stream-context";
-import { isDesktop, isIOS, isMobileOnly, isSafari } from "react-device-detect";
-import { useApiHost } from "@/api";
-import ImageLoadingIndicator from "@/components/indicators/ImageLoadingIndicator";
-import ObjectTrackOverlay from "../ObjectTrackOverlay";
-import { useIsAdmin } from "@/hooks/use-is-admin";
-import { VideoResolutionType } from "@/types/live";
-
-type TrackingDetailsProps = {
- className?: string;
- event: Event;
- fullscreen?: boolean;
- tabs?: React.ReactNode;
- isAnnotationSettingsOpen?: boolean;
-};
-
-export function TrackingDetails({
- className,
- event,
- tabs,
- isAnnotationSettingsOpen = false,
-}: TrackingDetailsProps) {
- const videoRef = useRef(null);
- const { t } = useTranslation(["views/explore"]);
- const apiHost = useApiHost();
- const imgRef = useRef(null);
- const [imgLoaded, setImgLoaded] = useState(false);
- const [isVideoLoading, setIsVideoLoading] = useState(true);
- const [displaySource, _setDisplaySource] = useState<"video" | "image">(
- "video",
- );
- const { setSelectedObjectIds, annotationOffset } = useDetailStream();
-
- // manualOverride holds a record-stream timestamp explicitly chosen by the
- // user (eg, clicking a lifecycle row). When null we display `currentTime`.
- const [manualOverride, setManualOverride] = useState(null);
-
- // Capture the annotation offset used for building the video source URL.
- // This only updates when the event changes, NOT on every slider drag,
- // so the HLS player doesn't reload while the user is adjusting the offset.
- const sourceOffsetRef = useRef(annotationOffset);
- useEffect(() => {
- sourceOffsetRef.current = annotationOffset;
- }, [event.id]); // eslint-disable-line react-hooks/exhaustive-deps
-
- // event.start_time is detect time, convert to record, then subtract padding
- const [currentTime, setCurrentTime] = useState(
- (event.start_time ?? 0) + annotationOffset / 1000 - REVIEW_PADDING,
- );
-
- useEffect(() => {
- setIsVideoLoading(true);
- }, [event.id]);
-
- const { data: eventSequence } = useSWR(
- ["timeline", { source_id: event.id }],
- null,
- {
- revalidateOnFocus: false,
- revalidateOnReconnect: false,
- dedupingInterval: 30000,
- },
- );
-
- const { data: config } = useSWR("config");
-
- // Fetch recording segments for the event's time range to handle motion-only gaps.
- // Use the source offset (stable per event) so recordings don't refetch on every
- // slider drag while adjusting annotation offset.
- const eventStartRecord = useMemo(
- () => (event.start_time ?? 0) + sourceOffsetRef.current / 1000,
- // eslint-disable-next-line react-hooks/exhaustive-deps
- [event.start_time, event.id],
- );
- const eventEndRecord = useMemo(
- () =>
- (event.end_time ?? Date.now() / 1000) + sourceOffsetRef.current / 1000,
- // eslint-disable-next-line react-hooks/exhaustive-deps
- [event.end_time, event.id],
- );
-
- const { data: recordings } = useSWR(
- event.camera
- ? [
- `${event.camera}/recordings`,
- {
- after: eventStartRecord - REVIEW_PADDING,
- before: eventEndRecord + REVIEW_PADDING,
- },
- ]
- : null,
- null,
- {
- revalidateOnFocus: false,
- revalidateOnReconnect: false,
- dedupingInterval: 30000,
- },
- );
-
- // Convert a timeline timestamp to actual video player time, accounting for
- // motion-only recording gaps. Uses the same algorithm as DynamicVideoController.
- const timestampToVideoTime = useCallback(
- (timestamp: number): number => {
- if (!recordings || recordings.length === 0) {
- // Fallback to simple calculation if no recordings data
- return timestamp - (eventStartRecord - REVIEW_PADDING);
- }
-
- const videoStartTime = eventStartRecord - REVIEW_PADDING;
-
- // If timestamp is before video start, return 0
- if (timestamp < videoStartTime) return 0;
-
- // Check if timestamp is before the first recording or after the last
- if (
- timestamp < recordings[0].start_time ||
- timestamp > recordings[recordings.length - 1].end_time
- ) {
- // No recording available at this timestamp
- return 0;
- }
-
- // Calculate the inpoint offset - the HLS video may start partway through the first segment
- let inpointOffset = 0;
- if (
- videoStartTime > recordings[0].start_time &&
- videoStartTime < recordings[0].end_time
- ) {
- inpointOffset = videoStartTime - recordings[0].start_time;
- }
-
- let seekSeconds = 0;
- for (const segment of recordings) {
- // Skip segments that end before our timestamp
- if (segment.end_time <= timestamp) {
- // Add this segment's duration, but subtract inpoint offset from first segment
- if (segment === recordings[0]) {
- seekSeconds += segment.duration - inpointOffset;
- } else {
- seekSeconds += segment.duration;
- }
- } else if (segment.start_time <= timestamp) {
- // The timestamp is within this segment
- if (segment === recordings[0]) {
- // For the first segment, account for the inpoint offset
- seekSeconds +=
- timestamp - Math.max(segment.start_time, videoStartTime);
- } else {
- seekSeconds += timestamp - segment.start_time;
- }
- break;
- }
- }
-
- return seekSeconds;
- },
- [recordings, eventStartRecord],
- );
-
- // Convert video player time back to timeline timestamp, accounting for
- // motion-only recording gaps. Reverse of timestampToVideoTime.
- const videoTimeToTimestamp = useCallback(
- (playerTime: number): number => {
- if (!recordings || recordings.length === 0) {
- // Fallback to simple calculation if no recordings data
- const videoStartTime = eventStartRecord - REVIEW_PADDING;
- return playerTime + videoStartTime;
- }
-
- const videoStartTime = eventStartRecord - REVIEW_PADDING;
-
- // Calculate the inpoint offset - the video may start partway through the first segment
- let inpointOffset = 0;
- if (
- videoStartTime > recordings[0].start_time &&
- videoStartTime < recordings[0].end_time
- ) {
- inpointOffset = videoStartTime - recordings[0].start_time;
- }
-
- let timestamp = 0;
- let totalTime = 0;
-
- for (const segment of recordings) {
- const segmentDuration =
- segment === recordings[0]
- ? segment.duration - inpointOffset
- : segment.duration;
-
- if (totalTime + segmentDuration > playerTime) {
- // The player time is within this segment
- if (segment === recordings[0]) {
- // For the first segment, add the inpoint offset
- timestamp =
- Math.max(segment.start_time, videoStartTime) +
- (playerTime - totalTime);
- } else {
- timestamp = segment.start_time + (playerTime - totalTime);
- }
- break;
- } else {
- totalTime += segmentDuration;
- }
- }
-
- return timestamp;
- },
- [recordings, eventStartRecord],
- );
-
- eventSequence?.map((event) => {
- event.data.zones_friendly_names = event.data?.zones?.map((zone) => {
- return resolveZoneName(config, zone);
- });
- });
-
- // Use manualOverride (set when seeking in image mode) if present so
- // lifecycle rows and overlays follow image-mode seeks. Otherwise fall
- // back to currentTime used for video mode.
- const effectiveTime = useMemo(() => {
- const displayedRecordTime = manualOverride ?? currentTime;
- return displayedRecordTime - annotationOffset / 1000;
- }, [manualOverride, currentTime, annotationOffset]);
-
- const containerRef = useRef(null);
- const { fullscreen, toggleFullscreen, supportsFullScreen } =
- useFullscreen(containerRef);
- const timelineContainerRef = useRef(null);
- const rowRefs = useRef<(HTMLDivElement | null)[]>([]);
- const [_selectedZone, setSelectedZone] = useState("");
- const [_lifecycleZones, setLifecycleZones] = useState([]);
- const [seekToTimestamp, setSeekToTimestamp] = useState(null);
- const [lineBottomOffsetPx, setLineBottomOffsetPx] = useState(32);
- const [lineTopOffsetPx, setLineTopOffsetPx] = useState(8);
- const [blueLineHeightPx, setBlueLineHeightPx] = useState(0);
-
- const [timelineSize] = useResizeObserver(timelineContainerRef);
-
- const [fullResolution, setFullResolution] = useState({
- width: 0,
- height: 0,
- });
-
- const aspectRatio = useMemo(() => {
- if (!config) {
- return 16 / 9;
- }
-
- if (fullResolution.width && fullResolution.height) {
- return fullResolution.width / fullResolution.height;
- }
-
- return (
- config.cameras[event.camera].detect.width /
- config.cameras[event.camera].detect.height
- );
- }, [config, event, fullResolution]);
-
- const label = event.sub_label
- ? event.sub_label
- : getTranslatedLabel(event.label, event.data.type);
-
- const getZoneColor = useCallback(
- (zoneName: string) => {
- const zoneColor =
- config?.cameras?.[event.camera]?.zones?.[zoneName]?.color;
- if (zoneColor) {
- const reversed = [...zoneColor].reverse();
- return reversed;
- }
- },
- [config, event],
- );
-
- // Set the selected object ID in the context so ObjectTrackOverlay can display it
- useEffect(() => {
- setSelectedObjectIds([event.id]);
- }, [event.id, setSelectedObjectIds]);
-
- // When the annotation settings popover is open, pin the video to a specific
- // lifecycle event (detect-stream timestamp). As the user drags the offset
- // slider, the video re-seeks to show the recording frame at
- // pinnedTimestamp + newOffset, while the bounding box stays fixed at the
- // pinned detect timestamp. This lets the user visually align the box to
- // the car in the video.
- const pinnedDetectTimestampRef = useRef(null);
- const wasAnnotationOpenRef = useRef(false);
-
- // On popover open: pause, pin first lifecycle item, and seek.
- useEffect(() => {
- if (isAnnotationSettingsOpen && !wasAnnotationOpenRef.current) {
- if (videoRef.current && displaySource === "video") {
- videoRef.current.pause();
- }
- if (eventSequence && eventSequence.length > 0) {
- pinnedDetectTimestampRef.current = eventSequence[0].timestamp;
- }
- }
- if (!isAnnotationSettingsOpen) {
- pinnedDetectTimestampRef.current = null;
- }
- wasAnnotationOpenRef.current = isAnnotationSettingsOpen;
- }, [isAnnotationSettingsOpen, displaySource, eventSequence]);
-
- // When the pinned timestamp or offset changes, re-seek the video and
- // explicitly update currentTime so the overlay shows the pinned event's box.
- useEffect(() => {
- const pinned = pinnedDetectTimestampRef.current;
- if (!isAnnotationSettingsOpen || pinned == null) return;
- if (!videoRef.current || displaySource !== "video") return;
-
- const targetTimeRecord = pinned + annotationOffset / 1000;
- const relativeTime = timestampToVideoTime(targetTimeRecord);
- videoRef.current.currentTime = relativeTime;
-
- // Explicitly update currentTime state so the overlay's effectiveCurrentTime
- // resolves back to the pinned detect timestamp:
- // effectiveCurrentTime = targetTimeRecord - annotationOffset/1000 = pinned
- setCurrentTime(targetTimeRecord);
- }, [
- isAnnotationSettingsOpen,
- annotationOffset,
- displaySource,
- timestampToVideoTime,
- ]);
-
- const handleLifecycleClick = useCallback(
- (item: TrackingDetailsSequence) => {
- if (!videoRef.current && !imgRef.current) return;
-
- // Convert lifecycle timestamp (detect stream) to record stream time
- const targetTimeRecord = item.timestamp + annotationOffset / 1000;
-
- if (displaySource === "image") {
- // For image mode: set a manual override timestamp and update
- // currentTime so overlays render correctly.
- setManualOverride(targetTimeRecord);
- setCurrentTime(targetTimeRecord);
- return;
- }
-
- // For video mode: convert to video-relative time (accounting for motion-only gaps)
- const relativeTime = timestampToVideoTime(targetTimeRecord);
-
- if (videoRef.current) {
- videoRef.current.currentTime = relativeTime;
- }
- },
- [annotationOffset, displaySource, timestampToVideoTime],
- );
-
- const formattedStart = config
- ? formatUnixTimestampToDateTime(event.start_time ?? 0, {
- timezone: config.ui.timezone,
- date_format:
- config.ui.time_format == "24hour"
- ? t("time.formattedTimestamp.24hour", {
- ns: "common",
- })
- : t("time.formattedTimestamp.12hour", {
- ns: "common",
- }),
- time_style: "medium",
- date_style: "medium",
- })
- : "";
-
- const formattedEnd =
- config && event.end_time != null
- ? formatUnixTimestampToDateTime(event.end_time, {
- timezone: config.ui.timezone,
- date_format:
- config.ui.time_format == "24hour"
- ? t("time.formattedTimestamp.24hour", {
- ns: "common",
- })
- : t("time.formattedTimestamp.12hour", {
- ns: "common",
- }),
- time_style: "medium",
- date_style: "medium",
- })
- : "";
-
- useEffect(() => {
- if (!eventSequence || eventSequence.length === 0) return;
- setLifecycleZones(eventSequence[0]?.data.zones);
- }, [eventSequence]);
-
- useEffect(() => {
- if (seekToTimestamp === null) return;
-
- if (displaySource === "image") {
- // For image mode, set the manual override so the snapshot updates to
- // the exact record timestamp.
- setManualOverride(seekToTimestamp);
- setSeekToTimestamp(null);
- return;
- }
-
- // seekToTimestamp is a record stream timestamp
- // Convert to video position (accounting for motion-only recording gaps)
- if (!videoRef.current) return;
- const relativeTime = timestampToVideoTime(seekToTimestamp);
- if (relativeTime >= 0) {
- videoRef.current.currentTime = relativeTime;
- }
- setSeekToTimestamp(null);
- }, [seekToTimestamp, displaySource, timestampToVideoTime]);
-
- const isWithinEventRange = useMemo(() => {
- if (effectiveTime === undefined || event.start_time === undefined) {
- return false;
- }
- // If an event has not ended yet, fall back to last timestamp in eventSequence
- let eventEnd = event.end_time;
- if (eventEnd == null && eventSequence && eventSequence.length > 0) {
- const last = eventSequence[eventSequence.length - 1];
- if (last && last.timestamp !== undefined) {
- eventEnd = last.timestamp;
- }
- }
-
- if (eventEnd == null) {
- return false;
- }
- return effectiveTime >= event.start_time && effectiveTime <= eventEnd;
- }, [effectiveTime, event.start_time, event.end_time, eventSequence]);
-
- // Dynamically compute pixel offsets so the timeline line starts at the
- // first row midpoint and ends at the last row midpoint. For accuracy,
- // measure the center Y of each lifecycle row and interpolate the current
- // effective time into a pixel position; then set the blue line height
- // so it reaches the center dot at the same time the dot becomes active.
- useEffect(() => {
- if (!timelineContainerRef.current || !eventSequence) return;
-
- const containerRect = timelineContainerRef.current.getBoundingClientRect();
- const validRefs = rowRefs.current.filter((r) => r !== null);
- if (validRefs.length === 0) return;
-
- const centers = validRefs.map((n) => {
- const r = n.getBoundingClientRect();
- return r.top + r.height / 2 - containerRect.top;
- });
-
- const topOffset = Math.max(0, centers[0]);
- const bottomOffset = Math.max(
- 0,
- containerRect.height - centers[centers.length - 1],
- );
-
- setLineTopOffsetPx(Math.round(topOffset));
- setLineBottomOffsetPx(Math.round(bottomOffset));
-
- const eff = effectiveTime ?? 0;
- const timestamps = eventSequence.map((s) => s.timestamp ?? 0);
-
- let pixelPos = centers[0];
- if (eff <= timestamps[0]) {
- pixelPos = centers[0];
- } else if (eff >= timestamps[timestamps.length - 1]) {
- pixelPos = centers[centers.length - 1];
- } else {
- for (let i = 0; i < timestamps.length - 1; i++) {
- const t1 = timestamps[i];
- const t2 = timestamps[i + 1];
- if (eff >= t1 && eff <= t2) {
- const ratio = t2 > t1 ? (eff - t1) / (t2 - t1) : 0;
- pixelPos = centers[i] + ratio * (centers[i + 1] - centers[i]);
- break;
- }
- }
- }
-
- const bluePx = Math.round(Math.max(0, pixelPos - topOffset));
- setBlueLineHeightPx(bluePx);
- }, [eventSequence, timelineSize.width, timelineSize.height, effectiveTime]);
-
- const videoSource = useMemo(() => {
- // event.start_time and event.end_time are in DETECT stream time
- // Convert to record stream time, then create video clip with padding.
- // Use sourceOffsetRef (stable per event) so the HLS player doesn't
- // reload while the user is dragging the annotation offset slider.
- const sourceOffset = sourceOffsetRef.current;
- const eventStartRec = event.start_time + sourceOffset / 1000;
- const eventEndRec =
- (event.end_time ?? Date.now() / 1000) + sourceOffset / 1000;
- const startTime = eventStartRec - REVIEW_PADDING;
- const endTime = eventEndRec + REVIEW_PADDING;
- const playlist = `${baseUrl}vod/clip/${event.camera}/start/${startTime}/end/${endTime}/index.m3u8`;
-
- return {
- playlist,
- startPosition: 0,
- };
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [event]);
-
- // Determine camera aspect ratio category
- const cameraAspect = useMemo(() => {
- if (!aspectRatio) {
- return "normal";
- } else if (aspectRatio > ASPECT_WIDE_LAYOUT) {
- return "wide";
- } else if (aspectRatio < ASPECT_PORTRAIT_LAYOUT) {
- return "tall";
- } else {
- return "normal";
- }
- }, [aspectRatio]);
-
- const handleSeekToTime = useCallback((timestamp: number, _play?: boolean) => {
- // Set the target timestamp to seek to
- setSeekToTimestamp(timestamp);
- }, []);
-
- const handleTimeUpdate = useCallback(
- (time: number) => {
- // Convert video player time back to timeline timestamp
- // accounting for motion-only recording gaps
- const absoluteTime = videoTimeToTimestamp(time);
-
- setCurrentTime(absoluteTime);
- },
- [videoTimeToTimestamp],
- );
-
- const [src, setSrc] = useState(
- `${apiHost}api/${event.camera}/recordings/${currentTime + REVIEW_PADDING}/snapshot.jpg?height=500`,
- );
- const [hasError, setHasError] = useState(false);
-
- // Derive the record timestamp to display: manualOverride if present,
- // otherwise use currentTime.
- const displayedRecordTime = manualOverride ?? currentTime;
-
- useEffect(() => {
- if (displayedRecordTime) {
- const newSrc = `${apiHost}api/${event.camera}/recordings/${displayedRecordTime}/snapshot.jpg?height=500`;
- setSrc(newSrc);
- }
- setImgLoaded(false);
- setHasError(false);
-
- // we know that these deps are correct
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [displayedRecordTime]);
-
- const onUploadFrameToPlus = useCallback(() => {
- return axios.post(`/${event.camera}/plus/${currentTime}`);
- }, [event.camera, currentTime]);
-
- if (!config) {
- return ;
- }
-
- return (
-
-
-
-
-
- {displaySource == "video" && (
- <>
-
setIsVideoLoading(false)}
- setFullResolution={setFullResolution}
- toggleFullscreen={toggleFullscreen}
- isDetailMode={true}
- camera={event.camera}
- currentTimeOverride={currentTime}
- />
- {isVideoLoading && (
-
- )}
- >
- )}
- {displaySource == "image" && (
- <>
-
- {hasError && (
-
-
-
- {t("objectLifecycle.noImageFound")}
-
-
- )}
-
-
-
-
-
setImgLoaded(true)}
- onError={() => setHasError(true)}
- />
-
- >
- )}
-
-
-
-
1 && aspectRatio < ASPECT_PORTRAIT_LAYOUT
- ? "lg:basis-3/5"
- : "lg:basis-2/5",
- )}
- >
- {isDesktop && tabs && (
-
- )}
-
- {config?.cameras[event.camera]?.onvif.autotracking
- .enabled_in_config && (
-
- {t("trackingDetails.autoTrackingTips")}
-
- )}
-
-
-
-
{
- e.stopPropagation();
- // event.start_time is detect time, convert to record
- handleSeekToTime(
- (event.start_time ?? 0) + annotationOffset / 1000,
- );
- }}
- role="button"
- >
-
- {getIconForLabel(
- event.sub_label ? event.label + "-verified" : event.label,
- event.data.type,
- "size-4 text-white",
- )}
-
-
-
{label}
-
- {formattedStart ?? ""}
- {event.end_time != null ? (
- <> - {formattedEnd}>
- ) : (
-
- )}
-
- {event.data?.recognized_license_plate && (
- <>
-
·
-
-
- {event.data.recognized_license_plate}
-
-
- >
- )}
-
-
-
-
-
- {!eventSequence ? (
-
- ) : eventSequence.length === 0 ? (
-
- {t("detail.noObjectDetailData", { ns: "views/events" })}
-
- ) : (
-
-
- {isWithinEventRange && (
-
- )}
-
- {eventSequence.map((item, idx) => {
- return (
-
{
- rowRefs.current[idx] = el;
- }}
- >
- handleLifecycleClick(item)}
- setSelectedZone={setSelectedZone}
- getZoneColor={getZoneColor}
- effectiveTime={effectiveTime}
- isTimelineActive={isWithinEventRange}
- />
-
- );
- })}
-
-
- )}
-
-
-
-
-
- );
-}
-
-type LifecycleIconRowProps = {
- item: TrackingDetailsSequence;
- event: Event;
- onClick: () => void;
- setSelectedZone: (z: string) => void;
- getZoneColor: (zoneName: string) => number[] | undefined;
- effectiveTime?: number;
- isTimelineActive?: boolean;
-};
-
-function LifecycleIconRow({
- item,
- event,
- onClick,
- setSelectedZone,
- getZoneColor,
- effectiveTime,
- isTimelineActive,
-}: LifecycleIconRowProps) {
- const { t } = useTranslation(["views/explore", "components/player"]);
- const { data: config } = useSWR("config");
- const [isOpen, setIsOpen] = useState(false);
- const navigate = useNavigate();
- const isAdmin = useIsAdmin();
-
- const aspectRatio = useMemo(() => {
- if (!config) {
- return 16 / 9;
- }
-
- return (
- config.cameras[event.camera].detect.width /
- config.cameras[event.camera].detect.height
- );
- }, [config, event]);
-
- const isActive = useMemo(
- () => Math.abs((effectiveTime ?? 0) - (item.timestamp ?? 0)) <= 0.5,
- [effectiveTime, item.timestamp],
- );
-
- const formattedEventTimestamp = useMemo(
- () =>
- config
- ? formatUnixTimestampToDateTime(item.timestamp ?? 0, {
- timezone: config.ui.timezone,
- date_format:
- config.ui.time_format == "24hour"
- ? t("time.formattedTimestampHourMinuteSecond.24hour", {
- ns: "common",
- })
- : t("time.formattedTimestampHourMinuteSecond.12hour", {
- ns: "common",
- }),
- time_style: "medium",
- date_style: "medium",
- })
- : "",
- [config, item.timestamp, t],
- );
-
- const ratio = useMemo(
- () =>
- Array.isArray(item.data.box) && item.data.box.length >= 4
- ? (aspectRatio * (item.data.box[2] / item.data.box[3])).toFixed(2)
- : "N/A",
- [aspectRatio, item.data.box],
- );
-
- const areaPx = useMemo(
- () =>
- Array.isArray(item.data.box) && item.data.box.length >= 4
- ? Math.round(
- (config?.cameras[event.camera]?.detect?.width ?? 0) *
- (config?.cameras[event.camera]?.detect?.height ?? 0) *
- (item.data.box[2] * item.data.box[3]),
- )
- : undefined,
- [config, event.camera, item.data.box],
- );
-
- const attributeAreaPx = useMemo(
- () =>
- Array.isArray(item.data.attribute_box) &&
- item.data.attribute_box.length >= 4
- ? Math.round(
- (config?.cameras[event.camera]?.detect?.width ?? 0) *
- (config?.cameras[event.camera]?.detect?.height ?? 0) *
- (item.data.attribute_box[2] * item.data.attribute_box[3]),
- )
- : undefined,
- [config, event.camera, item.data.attribute_box],
- );
-
- const attributeAreaPct = useMemo(
- () =>
- Array.isArray(item.data.attribute_box) &&
- item.data.attribute_box.length >= 4
- ? (
- item.data.attribute_box[2] *
- item.data.attribute_box[3] *
- 100
- ).toFixed(2)
- : undefined,
- [item.data.attribute_box],
- );
-
- const areaPct = useMemo(
- () =>
- Array.isArray(item.data.box) && item.data.box.length >= 4
- ? (item.data.box[2] * item.data.box[3] * 100).toFixed(2)
- : undefined,
- [item.data.box],
- );
-
- const score = useMemo(() => {
- if (item.data.score !== undefined) {
- return (item.data.score * 100).toFixed(0) + "%";
- }
- return "N/A";
- }, [item.data.score]);
-
- return (
-
-
-
- = (item?.timestamp ?? 0)) &&
- isTimelineActive &&
- "fill-selected duration-300",
- )}
- />
-
-
-
-
-
- {getLifecycleItemDescription(item)}
-
- {/* Only show Score/Ratio/Area for object events, not for audio (heard) or manual API (external) events */}
- {item.class_type !== "heard" && item.class_type !== "external" && (
-
-
-
- {t("trackingDetails.lifecycleItemDesc.header.score")}
-
- {score}
-
-
-
- {t("trackingDetails.lifecycleItemDesc.header.ratio")}
-
- {ratio}
-
-
-
- {t("trackingDetails.lifecycleItemDesc.header.area")}{" "}
- {attributeAreaPx !== undefined &&
- attributeAreaPct !== undefined && (
-
- ({getTranslatedLabel(item.data.label)})
-
- )}
-
- {areaPx !== undefined && areaPct !== undefined ? (
-
- {t("information.pixels", { ns: "common", area: areaPx })}{" "}
- · {areaPct}%
-
- ) : (
- N/A
- )}
-
- {attributeAreaPx !== undefined &&
- attributeAreaPct !== undefined && (
-
-
- {t("trackingDetails.lifecycleItemDesc.header.area")} (
- {getTranslatedLabel(item.data.attribute)})
-
-
- {t("information.pixels", {
- ns: "common",
- area: attributeAreaPx,
- })}{" "}
- · {attributeAreaPct}%
-
-
- )}
-
- )}
-
- {item.data?.zones && item.data.zones.length > 0 && (
-
- {item.data.zones.map((zone, zidx) => {
- const color = getZoneColor(zone)?.join(",") ?? "0,0,0";
- return (
- {
- e.stopPropagation();
- setSelectedZone(zone);
- }}
- style={{
- borderColor: `rgba(${color}, 0.6)`,
- background: `rgba(${color}, 0.08)`,
- }}
- >
-
-
- {item.data?.zones_friendly_names?.[zidx]}
-
-
- );
- })}
-
- )}
-
-
-
-
-
{formattedEventTimestamp}
- {isAdmin && config?.plus?.enabled && item.data.box && (
-
-
-
-
-
-
-
-
- {isAdmin && config?.plus?.enabled && (
- {
- const resp = await axios.post(
- `/${item.camera}/plus/${item.timestamp}`,
- );
-
- if (resp && resp.status == 200) {
- toast.success(
- t("toast.success.submittedFrigatePlus", {
- ns: "components/player",
- }),
- {
- position: "top-center",
- },
- );
- } else {
- toast.success(
- t("toast.error.submitFrigatePlusFailed", {
- ns: "components/player",
- }),
- {
- position: "top-center",
- },
- );
- }
- }}
- >
- {t("itemMenu.submitToPlus.label")}
-
- )}
- {item.data.box && (
- {
- setIsOpen(false);
- setTimeout(() => {
- navigate(
- `/settings?page=masksAndZones&camera=${item.camera}&object_mask=${item.data.box}`,
- );
- }, 0);
- }}
- >
- {t("trackingDetails.createObjectMask")}
-
- )}
-
-
-
- )}
-
-
-
-
- );
-}
+import useSWR from "swr";
+import { useCallback, useEffect, useMemo, useRef, useState } from "react";
+import { useResizeObserver } from "@/hooks/resize-observer";
+import { useFullscreen } from "@/hooks/use-fullscreen";
+import { Event } from "@/types/event";
+import ActivityIndicator from "@/components/indicators/activity-indicator";
+import { TrackingDetailsSequence } from "@/types/timeline";
+import { FrigateConfig } from "@/types/frigateConfig";
+import { formatUnixTimestampToDateTime } from "@/utils/dateUtil";
+import { getIconForLabel } from "@/utils/iconUtil";
+import { LuCircle, LuFolderX } from "react-icons/lu";
+import { cn } from "@/lib/utils";
+import HlsVideoPlayer from "@/components/player/HlsVideoPlayer";
+import { baseUrl } from "@/api/baseUrl";
+import { REVIEW_PADDING } from "@/types/review";
+import {
+ ASPECT_PORTRAIT_LAYOUT,
+ ASPECT_WIDE_LAYOUT,
+ Recording,
+} from "@/types/record";
+import {
+ DropdownMenu,
+ DropdownMenuTrigger,
+ DropdownMenuContent,
+ DropdownMenuItem,
+ DropdownMenuPortal,
+} from "@/components/ui/dropdown-menu";
+import { Link, useNavigate } from "react-router-dom";
+import { getLifecycleItemDescription } from "@/utils/lifecycleUtil";
+import { useTranslation } from "react-i18next";
+import { getTranslatedLabel } from "@/utils/i18n";
+import { resolveZoneName } from "@/hooks/use-zone-friendly-name";
+import { Badge } from "@/components/ui/badge";
+import { HiDotsHorizontal } from "react-icons/hi";
+import axios from "axios";
+import { toast } from "sonner";
+import { useDetailStream } from "@/context/detail-stream-context";
+import { isDesktop, isIOS, isMobileOnly, isSafari } from "react-device-detect";
+import { useApiHost } from "@/api";
+import ImageLoadingIndicator from "@/components/indicators/ImageLoadingIndicator";
+import ObjectTrackOverlay from "../ObjectTrackOverlay";
+import { useIsAdmin } from "@/hooks/use-is-admin";
+import { VideoResolutionType } from "@/types/live";
+import useRecordingPlaybackSource from "@/hooks/use-recording-playback-source";
+
+type TrackingDetailsProps = {
+ className?: string;
+ event: Event;
+ fullscreen?: boolean;
+ tabs?: React.ReactNode;
+ isAnnotationSettingsOpen?: boolean;
+};
+
+export function TrackingDetails({
+ className,
+ event,
+ tabs,
+ isAnnotationSettingsOpen = false,
+}: TrackingDetailsProps) {
+ const videoRef = useRef(null);
+ const { t } = useTranslation(["views/explore"]);
+ const apiHost = useApiHost();
+ const imgRef = useRef(null);
+ const [imgLoaded, setImgLoaded] = useState(false);
+ const [isVideoLoading, setIsVideoLoading] = useState(true);
+ const [displaySource, _setDisplaySource] = useState<"video" | "image">(
+ "video",
+ );
+ const { setSelectedObjectIds, annotationOffset } = useDetailStream();
+
+ // manualOverride holds a record-stream timestamp explicitly chosen by the
+ // user (eg, clicking a lifecycle row). When null we display `currentTime`.
+ const [manualOverride, setManualOverride] = useState(null);
+
+ // Capture the annotation offset used for building the video source URL.
+ // This only updates when the event changes, NOT on every slider drag,
+ // so the HLS player doesn't reload while the user is adjusting the offset.
+ const sourceOffsetRef = useRef(annotationOffset);
+ useEffect(() => {
+ sourceOffsetRef.current = annotationOffset;
+ }, [event.id]); // eslint-disable-line react-hooks/exhaustive-deps
+
+ // event.start_time is detect time; convert to record time, then subtract padding
+ const [currentTime, setCurrentTime] = useState(
+ (event.start_time ?? 0) + annotationOffset / 1000 - REVIEW_PADDING,
+ );
+
+ useEffect(() => {
+ setIsVideoLoading(true);
+ }, [event.id]);
+
+ const { data: eventSequence } = useSWR(
+ ["timeline", { source_id: event.id }],
+ null,
+ {
+ revalidateOnFocus: false,
+ revalidateOnReconnect: false,
+ dedupingInterval: 30000,
+ },
+ );
+
+ const { data: config } = useSWR("config");
+
+ // Fetch recording segments for the event's time range to handle motion-only gaps.
+ // Use the source offset (stable per event) so recordings don't refetch on every
+ // slider drag while adjusting annotation offset.
+ const eventStartRecord = useMemo(
+ () => (event.start_time ?? 0) + sourceOffsetRef.current / 1000,
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ [event.start_time, event.id],
+ );
+ const eventEndRecord = useMemo(
+ () =>
+ (event.end_time ?? Date.now() / 1000) + sourceOffsetRef.current / 1000,
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ [event.end_time, event.id],
+ );
+
+ const { data: recordings } = useSWR(
+ event.camera
+ ? [
+ `${event.camera}/recordings`,
+ {
+ after: eventStartRecord - REVIEW_PADDING,
+ before: eventEndRecord + REVIEW_PADDING,
+ },
+ ]
+ : null,
+ null,
+ {
+ revalidateOnFocus: false,
+ revalidateOnReconnect: false,
+ dedupingInterval: 30000,
+ },
+ );
+
+ // Convert a timeline timestamp to actual video player time, accounting for
+ // motion-only recording gaps. Uses the same algorithm as DynamicVideoController.
+ const timestampToVideoTime = useCallback(
+ (timestamp: number): number => {
+ if (!recordings || recordings.length === 0) {
+ // Fallback to simple calculation if no recordings data
+ return timestamp - (eventStartRecord - REVIEW_PADDING);
+ }
+
+ const videoStartTime = eventStartRecord - REVIEW_PADDING;
+
+ // If timestamp is before video start, return 0
+ if (timestamp < videoStartTime) return 0;
+
+ // Check if timestamp is before the first recording or after the last
+ if (
+ timestamp < recordings[0].start_time ||
+ timestamp > recordings[recordings.length - 1].end_time
+ ) {
+ // No recording available at this timestamp
+ return 0;
+ }
+
+ // Calculate the inpoint offset - the HLS video may start partway through the first segment
+ let inpointOffset = 0;
+ if (
+ videoStartTime > recordings[0].start_time &&
+ videoStartTime < recordings[0].end_time
+ ) {
+ inpointOffset = videoStartTime - recordings[0].start_time;
+ }
+
+ let seekSeconds = 0;
+ for (const segment of recordings) {
+ // Skip segments that end before our timestamp
+ if (segment.end_time <= timestamp) {
+ // Add this segment's duration, but subtract inpoint offset from first segment
+ if (segment === recordings[0]) {
+ seekSeconds += segment.duration - inpointOffset;
+ } else {
+ seekSeconds += segment.duration;
+ }
+ } else if (segment.start_time <= timestamp) {
+ // The timestamp is within this segment
+ if (segment === recordings[0]) {
+ // For the first segment, account for the inpoint offset
+ seekSeconds +=
+ timestamp - Math.max(segment.start_time, videoStartTime);
+ } else {
+ seekSeconds += timestamp - segment.start_time;
+ }
+ break;
+ }
+ }
+
+ return seekSeconds;
+ },
+ [recordings, eventStartRecord],
+ );
+
+ // Convert video player time back to timeline timestamp, accounting for
+ // motion-only recording gaps. Reverse of timestampToVideoTime.
+ const videoTimeToTimestamp = useCallback(
+ (playerTime: number): number => {
+ if (!recordings || recordings.length === 0) {
+ // Fallback to simple calculation if no recordings data
+ const videoStartTime = eventStartRecord - REVIEW_PADDING;
+ return playerTime + videoStartTime;
+ }
+
+ const videoStartTime = eventStartRecord - REVIEW_PADDING;
+
+ // Calculate the inpoint offset - the video may start partway through the first segment
+ let inpointOffset = 0;
+ if (
+ videoStartTime > recordings[0].start_time &&
+ videoStartTime < recordings[0].end_time
+ ) {
+ inpointOffset = videoStartTime - recordings[0].start_time;
+ }
+
+ let timestamp = 0;
+ let totalTime = 0;
+
+ for (const segment of recordings) {
+ const segmentDuration =
+ segment === recordings[0]
+ ? segment.duration - inpointOffset
+ : segment.duration;
+
+ if (totalTime + segmentDuration > playerTime) {
+ // The player time is within this segment
+ if (segment === recordings[0]) {
+ // For the first segment, add the inpoint offset
+ timestamp =
+ Math.max(segment.start_time, videoStartTime) +
+ (playerTime - totalTime);
+ } else {
+ timestamp = segment.start_time + (playerTime - totalTime);
+ }
+ break;
+ } else {
+ totalTime += segmentDuration;
+ }
+ }
+
+ return timestamp;
+ },
+ [recordings, eventStartRecord],
+ );
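The two callbacks above are inverses of each other across motion-only recording gaps: wall-clock time that falls inside a gap contributes nothing to player time, and the first segment is shortened by any inpoint offset. A minimal standalone sketch of the same walk, assuming hypothetical segment data (the Seg type, the sample values, and the toPlayerTime name are illustrative, not from this diff):

    // Sketch only: mirrors the segment walk in timestampToVideoTime.
    type Seg = { start_time: number; end_time: number; duration: number };

    // Two 10 s segments with a 5 s motion-only gap between them.
    const segs: Seg[] = [
      { start_time: 100, end_time: 110, duration: 10 },
      { start_time: 115, end_time: 125, duration: 10 },
    ];

    function toPlayerTime(ts: number, videoStart: number): number {
      // A video that starts partway through the first segment skips an
      // "inpoint" lead-in that is not part of the player timeline.
      const inpoint =
        videoStart > segs[0].start_time && videoStart < segs[0].end_time
          ? videoStart - segs[0].start_time
          : 0;
      let seek = 0;
      for (const s of segs) {
        if (s.end_time <= ts) {
          // Segment entirely before the target: add its adjusted duration.
          seek += s === segs[0] ? s.duration - inpoint : s.duration;
        } else if (s.start_time <= ts) {
          // Target lies inside this segment.
          seek += ts - Math.max(s.start_time, s === segs[0] ? videoStart : 0);
          break;
        }
        // A timestamp inside a gap matches neither branch and adds nothing.
      }
      return seek;
    }

    // ts = 120 is 5 s into the second segment; the 5 s gap is skipped,
    // so the player time is 10 + 5 = 15 rather than 20.
    console.log(toPlayerTime(120, 100)); // 15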
+
+ // forEach (not map): this runs purely for side effects, and "seq" avoids
+ // shadowing the component's `event` prop.
+ eventSequence?.forEach((seq) => {
+ seq.data.zones_friendly_names = seq.data?.zones?.map((zone) => {
+ return resolveZoneName(config, zone);
+ });
+ });
+
+ // Use manualOverride (set when seeking in image mode) if present so
+ // lifecycle rows and overlays follow image-mode seeks. Otherwise fall
+ // back to currentTime used for video mode.
+ const effectiveTime = useMemo(() => {
+ const displayedRecordTime = manualOverride ?? currentTime;
+ return displayedRecordTime - annotationOffset / 1000;
+ }, [manualOverride, currentTime, annotationOffset]);
+
+ const containerRef = useRef(null);
+ const { fullscreen, toggleFullscreen, supportsFullScreen } =
+ useFullscreen(containerRef);
+ const timelineContainerRef = useRef(null);
+ const rowRefs = useRef<(HTMLDivElement | null)[]>([]);
+ const [_selectedZone, setSelectedZone] = useState("");
+ const [_lifecycleZones, setLifecycleZones] = useState([]);
+ const [seekToTimestamp, setSeekToTimestamp] = useState(null);
+ const [lineBottomOffsetPx, setLineBottomOffsetPx] = useState(32);
+ const [lineTopOffsetPx, setLineTopOffsetPx] = useState(8);
+ const [blueLineHeightPx, setBlueLineHeightPx] = useState(0);
+
+ const [timelineSize] = useResizeObserver(timelineContainerRef);
+
+ const [fullResolution, setFullResolution] = useState({
+ width: 0,
+ height: 0,
+ });
+
+ const aspectRatio = useMemo(() => {
+ if (!config) {
+ return 16 / 9;
+ }
+
+ if (fullResolution.width && fullResolution.height) {
+ return fullResolution.width / fullResolution.height;
+ }
+
+ return (
+ config.cameras[event.camera].detect.width /
+ config.cameras[event.camera].detect.height
+ );
+ }, [config, event, fullResolution]);
+
+ const label = event.sub_label
+ ? event.sub_label
+ : getTranslatedLabel(event.label, event.data.type);
+
+ const getZoneColor = useCallback(
+ (zoneName: string) => {
+ const zoneColor =
+ config?.cameras?.[event.camera]?.zones?.[zoneName]?.color;
+ if (zoneColor) {
+ const reversed = [...zoneColor].reverse();
+ return reversed;
+ }
+ },
+ [config, event],
+ );
+
+ // Set the selected object ID in the context so ObjectTrackOverlay can display it
+ useEffect(() => {
+ setSelectedObjectIds([event.id]);
+ }, [event.id, setSelectedObjectIds]);
+
+ // When the annotation settings popover is open, pin the video to a specific
+ // lifecycle event (detect-stream timestamp). As the user drags the offset
+ // slider, the video re-seeks to show the recording frame at
+ // pinnedTimestamp + newOffset, while the bounding box stays fixed at the
+ // pinned detect timestamp. This lets the user visually align the box to
+ // the tracked object in the video.
+ const pinnedDetectTimestampRef = useRef(null);
+ const wasAnnotationOpenRef = useRef(false);
+
+ // On popover open: pause, pin first lifecycle item, and seek.
+ useEffect(() => {
+ if (isAnnotationSettingsOpen && !wasAnnotationOpenRef.current) {
+ if (videoRef.current && displaySource === "video") {
+ videoRef.current.pause();
+ }
+ if (eventSequence && eventSequence.length > 0) {
+ pinnedDetectTimestampRef.current = eventSequence[0].timestamp;
+ }
+ }
+ if (!isAnnotationSettingsOpen) {
+ pinnedDetectTimestampRef.current = null;
+ }
+ wasAnnotationOpenRef.current = isAnnotationSettingsOpen;
+ }, [isAnnotationSettingsOpen, displaySource, eventSequence]);
+
+ // When the pinned timestamp or offset changes, re-seek the video and
+ // explicitly update currentTime so the overlay shows the pinned event's box.
+ useEffect(() => {
+ const pinned = pinnedDetectTimestampRef.current;
+ if (!isAnnotationSettingsOpen || pinned == null) return;
+ if (!videoRef.current || displaySource !== "video") return;
+
+ const targetTimeRecord = pinned + annotationOffset / 1000;
+ const relativeTime = timestampToVideoTime(targetTimeRecord);
+ videoRef.current.currentTime = relativeTime;
+
+ // Explicitly update currentTime state so the overlay's effectiveCurrentTime
+ // resolves back to the pinned detect timestamp:
+ // effectiveCurrentTime = targetTimeRecord - annotationOffset/1000 = pinned
+ setCurrentTime(targetTimeRecord);
+ }, [
+ isAnnotationSettingsOpen,
+ annotationOffset,
+ displaySource,
+ timestampToVideoTime,
+ ]);
+
+ const handleLifecycleClick = useCallback(
+ (item: TrackingDetailsSequence) => {
+ if (!videoRef.current && !imgRef.current) return;
+
+ // Convert lifecycle timestamp (detect stream) to record stream time
+ const targetTimeRecord = item.timestamp + annotationOffset / 1000;
+
+ if (displaySource === "image") {
+ // For image mode: set a manual override timestamp and update
+ // currentTime so overlays render correctly.
+ setManualOverride(targetTimeRecord);
+ setCurrentTime(targetTimeRecord);
+ return;
+ }
+
+ // For video mode: convert to video-relative time (accounting for motion-only gaps)
+ const relativeTime = timestampToVideoTime(targetTimeRecord);
+
+ if (videoRef.current) {
+ videoRef.current.currentTime = relativeTime;
+ }
+ },
+ [annotationOffset, displaySource, timestampToVideoTime],
+ );
+
+ const formattedStart = config
+ ? formatUnixTimestampToDateTime(event.start_time ?? 0, {
+ timezone: config.ui.timezone,
+ date_format:
+ config.ui.time_format == "24hour"
+ ? t("time.formattedTimestamp.24hour", {
+ ns: "common",
+ })
+ : t("time.formattedTimestamp.12hour", {
+ ns: "common",
+ }),
+ time_style: "medium",
+ date_style: "medium",
+ })
+ : "";
+
+ const formattedEnd =
+ config && event.end_time != null
+ ? formatUnixTimestampToDateTime(event.end_time, {
+ timezone: config.ui.timezone,
+ date_format:
+ config.ui.time_format == "24hour"
+ ? t("time.formattedTimestamp.24hour", {
+ ns: "common",
+ })
+ : t("time.formattedTimestamp.12hour", {
+ ns: "common",
+ }),
+ time_style: "medium",
+ date_style: "medium",
+ })
+ : "";
+
+ useEffect(() => {
+ if (!eventSequence || eventSequence.length === 0) return;
+ setLifecycleZones(eventSequence[0]?.data.zones);
+ }, [eventSequence]);
+
+ useEffect(() => {
+ if (seekToTimestamp === null) return;
+
+ if (displaySource === "image") {
+ // For image mode, set the manual override so the snapshot updates to
+ // the exact record timestamp.
+ setManualOverride(seekToTimestamp);
+ setSeekToTimestamp(null);
+ return;
+ }
+
+ // seekToTimestamp is a record stream timestamp
+ // Convert to video position (accounting for motion-only recording gaps)
+ if (!videoRef.current) return;
+ const relativeTime = timestampToVideoTime(seekToTimestamp);
+ if (relativeTime >= 0) {
+ videoRef.current.currentTime = relativeTime;
+ }
+ setSeekToTimestamp(null);
+ }, [seekToTimestamp, displaySource, timestampToVideoTime]);
+
+ const isWithinEventRange = useMemo(() => {
+ if (effectiveTime === undefined || event.start_time === undefined) {
+ return false;
+ }
+ // If the event has not ended yet, fall back to the last timestamp in eventSequence
+ let eventEnd = event.end_time;
+ if (eventEnd == null && eventSequence && eventSequence.length > 0) {
+ const last = eventSequence[eventSequence.length - 1];
+ if (last && last.timestamp !== undefined) {
+ eventEnd = last.timestamp;
+ }
+ }
+
+ if (eventEnd == null) {
+ return false;
+ }
+ return effectiveTime >= event.start_time && effectiveTime <= eventEnd;
+ }, [effectiveTime, event.start_time, event.end_time, eventSequence]);
+
+ // Dynamically compute pixel offsets so the timeline line starts at the
+ // first row midpoint and ends at the last row midpoint. For accuracy,
+ // measure the center Y of each lifecycle row and interpolate the current
+ // effective time into a pixel position; then set the blue line height
+ // so it reaches the center dot at the same time the dot becomes active.
+ useEffect(() => {
+ if (!timelineContainerRef.current || !eventSequence) return;
+
+ const containerRect = timelineContainerRef.current.getBoundingClientRect();
+ const validRefs = rowRefs.current.filter((r) => r !== null);
+ if (validRefs.length === 0) return;
+
+ const centers = validRefs.map((n) => {
+ const r = n.getBoundingClientRect();
+ return r.top + r.height / 2 - containerRect.top;
+ });
+
+ const topOffset = Math.max(0, centers[0]);
+ const bottomOffset = Math.max(
+ 0,
+ containerRect.height - centers[centers.length - 1],
+ );
+
+ setLineTopOffsetPx(Math.round(topOffset));
+ setLineBottomOffsetPx(Math.round(bottomOffset));
+
+ const eff = effectiveTime ?? 0;
+ const timestamps = eventSequence.map((s) => s.timestamp ?? 0);
+
+ let pixelPos = centers[0];
+ if (eff <= timestamps[0]) {
+ pixelPos = centers[0];
+ } else if (eff >= timestamps[timestamps.length - 1]) {
+ pixelPos = centers[centers.length - 1];
+ } else {
+ for (let i = 0; i < timestamps.length - 1; i++) {
+ const t1 = timestamps[i];
+ const t2 = timestamps[i + 1];
+ if (eff >= t1 && eff <= t2) {
+ const ratio = t2 > t1 ? (eff - t1) / (t2 - t1) : 0;
+ pixelPos = centers[i] + ratio * (centers[i + 1] - centers[i]);
+ break;
+ }
+ }
+ }
+
+ const bluePx = Math.round(Math.max(0, pixelPos - topOffset));
+ setBlueLineHeightPx(bluePx);
+ }, [eventSequence, timelineSize.width, timelineSize.height, effectiveTime]);
+
+ const videoWindow = useMemo(() => {
+ const sourceOffset = sourceOffsetRef.current;
+ const eventStartRec = event.start_time + sourceOffset / 1000;
+ const eventEndRec =
+ (event.end_time ?? Date.now() / 1000) + sourceOffset / 1000;
+ const startTime = eventStartRec - REVIEW_PADDING;
+ const endTime = eventEndRec + REVIEW_PADDING;
+
+ return {
+ startTime,
+ endTime,
+ vodPath: `/vod/clip/${event.camera}/start/${startTime}/end/${endTime}/index.m3u8`,
+ };
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [event]);
+ const playbackSource = useRecordingPlaybackSource({
+ camera: event.camera,
+ after: videoWindow.startTime,
+ before: videoWindow.endTime,
+ vodPath: videoWindow.vodPath,
+ });
+ const videoSource = useMemo(() => {
+ const playlist = playbackSource ?? `${baseUrl}${videoWindow.vodPath}`;
+
+ return {
+ playlist,
+ startPosition: 0,
+ };
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [playbackSource, videoWindow]);
+
+ // Determine camera aspect ratio category
+ const cameraAspect = useMemo(() => {
+ if (!aspectRatio) {
+ return "normal";
+ } else if (aspectRatio > ASPECT_WIDE_LAYOUT) {
+ return "wide";
+ } else if (aspectRatio < ASPECT_PORTRAIT_LAYOUT) {
+ return "tall";
+ } else {
+ return "normal";
+ }
+ }, [aspectRatio]);
+
+ const handleSeekToTime = useCallback((timestamp: number, _play?: boolean) => {
+ // Set the target timestamp to seek to
+ setSeekToTimestamp(timestamp);
+ }, []);
+
+ const handleTimeUpdate = useCallback(
+ (time: number) => {
+ // Convert video player time back to timeline timestamp
+ // accounting for motion-only recording gaps
+ const absoluteTime = videoTimeToTimestamp(time);
+
+ setCurrentTime(absoluteTime);
+ },
+ [videoTimeToTimestamp],
+ );
+
+ const [src, setSrc] = useState(
+ `${apiHost}api/${event.camera}/recordings/${currentTime + REVIEW_PADDING}/snapshot.jpg?height=500`,
+ );
+ const [hasError, setHasError] = useState(false);
+
+ // Derive the record timestamp to display: manualOverride if present,
+ // otherwise use currentTime.
+ const displayedRecordTime = manualOverride ?? currentTime;
+
+ useEffect(() => {
+ if (displayedRecordTime) {
+ const newSrc = `${apiHost}api/${event.camera}/recordings/${displayedRecordTime}/snapshot.jpg?height=500`;
+ setSrc(newSrc);
+ }
+ setImgLoaded(false);
+ setHasError(false);
+
+ // we know that these deps are correct
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [displayedRecordTime]);
+
+ const onUploadFrameToPlus = useCallback(() => {
+ return axios.post(`/${event.camera}/plus/${currentTime}`);
+ }, [event.camera, currentTime]);
+
+ if (!config) {
+ return ;
+ }
+
+ return (
+
+
+
+
+
+ {displaySource == "video" && (
+ <>
+
setIsVideoLoading(false)}
+ setFullResolution={setFullResolution}
+ toggleFullscreen={toggleFullscreen}
+ isDetailMode={true}
+ camera={event.camera}
+ currentTimeOverride={currentTime}
+ />
+ {isVideoLoading && (
+
+ )}
+ >
+ )}
+ {displaySource == "image" && (
+ <>
+
+ {hasError && (
+
+
+
+ {t("objectLifecycle.noImageFound")}
+
+
+ )}
+
+
+
+
+
setImgLoaded(true)}
+ onError={() => setHasError(true)}
+ />
+
+ >
+ )}
+
+
+
+
1 && aspectRatio < ASPECT_PORTRAIT_LAYOUT
+ ? "lg:basis-3/5"
+ : "lg:basis-2/5",
+ )}
+ >
+ {isDesktop && tabs && (
+
+ )}
+
+ {config?.cameras[event.camera]?.onvif.autotracking
+ .enabled_in_config && (
+
+ {t("trackingDetails.autoTrackingTips")}
+
+ )}
+
+
+
+
{
+ e.stopPropagation();
+ // event.start_time is detect time; convert it to record time
+ handleSeekToTime(
+ (event.start_time ?? 0) + annotationOffset / 1000,
+ );
+ }}
+ role="button"
+ >
+
+ {getIconForLabel(
+ event.sub_label ? event.label + "-verified" : event.label,
+ event.data.type,
+ "size-4 text-white",
+ )}
+
+
+
{label}
+
+ {formattedStart ?? ""}
+ {event.end_time != null ? (
+ <> - {formattedEnd}>
+ ) : (
+
+ )}
+
+ {event.data?.recognized_license_plate && (
+ <>
+
·
+
+
+ {event.data.recognized_license_plate}
+
+
+ >
+ )}
+
+
+
+
+
+ {!eventSequence ? (
+
+ ) : eventSequence.length === 0 ? (
+
+ {t("detail.noObjectDetailData", { ns: "views/events" })}
+
+ ) : (
+
+
+ {isWithinEventRange && (
+
+ )}
+
+ {eventSequence.map((item, idx) => {
+ return (
+
{
+ rowRefs.current[idx] = el;
+ }}
+ >
+ handleLifecycleClick(item)}
+ setSelectedZone={setSelectedZone}
+ getZoneColor={getZoneColor}
+ effectiveTime={effectiveTime}
+ isTimelineActive={isWithinEventRange}
+ />
+
+ );
+ })}
+
+
+ )}
+
+
+
+
+
+ );
+}
+
+type LifecycleIconRowProps = {
+ item: TrackingDetailsSequence;
+ event: Event;
+ onClick: () => void;
+ setSelectedZone: (z: string) => void;
+ getZoneColor: (zoneName: string) => number[] | undefined;
+ effectiveTime?: number;
+ isTimelineActive?: boolean;
+};
+
+function LifecycleIconRow({
+ item,
+ event,
+ onClick,
+ setSelectedZone,
+ getZoneColor,
+ effectiveTime,
+ isTimelineActive,
+}: LifecycleIconRowProps) {
+ const { t } = useTranslation(["views/explore", "components/player"]);
+ const { data: config } = useSWR("config");
+ const [isOpen, setIsOpen] = useState(false);
+ const navigate = useNavigate();
+ const isAdmin = useIsAdmin();
+
+ const aspectRatio = useMemo(() => {
+ if (!config) {
+ return 16 / 9;
+ }
+
+ return (
+ config.cameras[event.camera].detect.width /
+ config.cameras[event.camera].detect.height
+ );
+ }, [config, event]);
+
+ const isActive = useMemo(
+ () => Math.abs((effectiveTime ?? 0) - (item.timestamp ?? 0)) <= 0.5,
+ [effectiveTime, item.timestamp],
+ );
+
+ const formattedEventTimestamp = useMemo(
+ () =>
+ config
+ ? formatUnixTimestampToDateTime(item.timestamp ?? 0, {
+ timezone: config.ui.timezone,
+ date_format:
+ config.ui.time_format == "24hour"
+ ? t("time.formattedTimestampHourMinuteSecond.24hour", {
+ ns: "common",
+ })
+ : t("time.formattedTimestampHourMinuteSecond.12hour", {
+ ns: "common",
+ }),
+ time_style: "medium",
+ date_style: "medium",
+ })
+ : "",
+ [config, item.timestamp, t],
+ );
+
+ const ratio = useMemo(
+ () =>
+ Array.isArray(item.data.box) && item.data.box.length >= 4
+ ? (aspectRatio * (item.data.box[2] / item.data.box[3])).toFixed(2)
+ : "N/A",
+ [aspectRatio, item.data.box],
+ );
+
+ const areaPx = useMemo(
+ () =>
+ Array.isArray(item.data.box) && item.data.box.length >= 4
+ ? Math.round(
+ (config?.cameras[event.camera]?.detect?.width ?? 0) *
+ (config?.cameras[event.camera]?.detect?.height ?? 0) *
+ (item.data.box[2] * item.data.box[3]),
+ )
+ : undefined,
+ [config, event.camera, item.data.box],
+ );
+
+ const attributeAreaPx = useMemo(
+ () =>
+ Array.isArray(item.data.attribute_box) &&
+ item.data.attribute_box.length >= 4
+ ? Math.round(
+ (config?.cameras[event.camera]?.detect?.width ?? 0) *
+ (config?.cameras[event.camera]?.detect?.height ?? 0) *
+ (item.data.attribute_box[2] * item.data.attribute_box[3]),
+ )
+ : undefined,
+ [config, event.camera, item.data.attribute_box],
+ );
+
+ const attributeAreaPct = useMemo(
+ () =>
+ Array.isArray(item.data.attribute_box) &&
+ item.data.attribute_box.length >= 4
+ ? (
+ item.data.attribute_box[2] *
+ item.data.attribute_box[3] *
+ 100
+ ).toFixed(2)
+ : undefined,
+ [item.data.attribute_box],
+ );
+
+ const areaPct = useMemo(
+ () =>
+ Array.isArray(item.data.box) && item.data.box.length >= 4
+ ? (item.data.box[2] * item.data.box[3] * 100).toFixed(2)
+ : undefined,
+ [item.data.box],
+ );
+
+ const score = useMemo(() => {
+ if (item.data.score !== undefined) {
+ return (item.data.score * 100).toFixed(0) + "%";
+ }
+ return "N/A";
+ }, [item.data.score]);
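Each of the memos above derives from a normalized box [x, y, w, h] given as fractions of the detect frame, so pixel area needs the camera's detect resolution while percentage and ratio do not. A quick worked sketch with assumed values (the resolution and box are illustrative):

    // Sketch: the box math above, with hypothetical inputs.
    const detect = { width: 1280, height: 720 }; // assumed detect resolution
    const box = [0.1, 0.2, 0.25, 0.5]; // assumed normalized [x, y, w, h]

    // Pixel area: fraction of the frame times the frame's pixel count.
    const areaPx = Math.round(detect.width * detect.height * (box[2] * box[3]));
    // 921600 * 0.125 = 115200 px

    // Percentage needs no resolution at all.
    const areaPct = (box[2] * box[3] * 100).toFixed(2); // "12.50"

    // Width/height ratio in real pixels: normalized w/h scaled by frame aspect.
    const ratio = ((detect.width / detect.height) * (box[2] / box[3])).toFixed(2);
    // (16 / 9) * 0.5 = "0.89"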
+
+ return (
+
+
+
+ = (item?.timestamp ?? 0)) &&
+ isTimelineActive &&
+ "fill-selected duration-300",
+ )}
+ />
+
+
+
+
+
+ {getLifecycleItemDescription(item)}
+
+ {/* Only show Score/Ratio/Area for object events, not for audio (heard) or manual API (external) events */}
+ {item.class_type !== "heard" && item.class_type !== "external" && (
+
+
+
+ {t("trackingDetails.lifecycleItemDesc.header.score")}
+
+ {score}
+
+
+
+ {t("trackingDetails.lifecycleItemDesc.header.ratio")}
+
+ {ratio}
+
+
+
+ {t("trackingDetails.lifecycleItemDesc.header.area")}{" "}
+ {attributeAreaPx !== undefined &&
+ attributeAreaPct !== undefined && (
+
+ ({getTranslatedLabel(item.data.label)})
+
+ )}
+
+ {areaPx !== undefined && areaPct !== undefined ? (
+
+ {t("information.pixels", { ns: "common", area: areaPx })}{" "}
+ · {areaPct}%
+
+ ) : (
+ N/A
+ )}
+
+ {attributeAreaPx !== undefined &&
+ attributeAreaPct !== undefined && (
+
+
+ {t("trackingDetails.lifecycleItemDesc.header.area")} (
+ {getTranslatedLabel(item.data.attribute)})
+
+
+ {t("information.pixels", {
+ ns: "common",
+ area: attributeAreaPx,
+ })}{" "}
+ · {attributeAreaPct}%
+
+
+ )}
+
+ )}
+
+ {item.data?.zones && item.data.zones.length > 0 && (
+
+ {item.data.zones.map((zone, zidx) => {
+ const color = getZoneColor(zone)?.join(",") ?? "0,0,0";
+ return (
+ {
+ e.stopPropagation();
+ setSelectedZone(zone);
+ }}
+ style={{
+ borderColor: `rgba(${color}, 0.6)`,
+ background: `rgba(${color}, 0.08)`,
+ }}
+ >
+
+
+ {item.data?.zones_friendly_names?.[zidx]}
+
+
+ );
+ })}
+
+ )}
+
+
+
+
+
{formattedEventTimestamp}
+ {isAdmin && config?.plus?.enabled && item.data.box && (
+
+
+
+
+
+
+
+
+ {isAdmin && config?.plus?.enabled && (
+ {
+ const resp = await axios.post(
+ `/${item.camera}/plus/${item.timestamp}`,
+ );
+
+ if (resp && resp.status == 200) {
+ toast.success(
+ t("toast.success.submittedFrigatePlus", {
+ ns: "components/player",
+ }),
+ {
+ position: "top-center",
+ },
+ );
+ } else {
+ toast.error(
+ t("toast.error.submitFrigatePlusFailed", {
+ ns: "components/player",
+ }),
+ {
+ position: "top-center",
+ },
+ );
+ }
+ }}
+ >
+ {t("itemMenu.submitToPlus.label")}
+
+ )}
+ {item.data.box && (
+ {
+ setIsOpen(false);
+ setTimeout(() => {
+ navigate(
+ `/settings?page=masksAndZones&camera=${item.camera}&object_mask=${item.data.box}`,
+ );
+ }, 0);
+ }}
+ >
+ {t("trackingDetails.createObjectMask")}
+
+ )}
+
+
+
+ )}
+
+
+
+
+ );
+}
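Throughout TrackingDetails, lifecycle timestamps live on the detect stream while the player and snapshot endpoints live on the record stream; the two are related by the annotation offset, which is stored in milliseconds but applied to second-based timestamps. A hedged sketch of that conversion (the 500 ms value is an assumption for illustration):

    // Sketch: detect-stream <-> record-stream conversion.
    const annotationOffsetMs = 500; // hypothetical; the real value comes from context

    const toRecordTime = (detectTs: number) => detectTs + annotationOffsetMs / 1000;
    const toDetectTime = (recordTs: number) => recordTs - annotationOffsetMs / 1000;

    // Clicking a lifecycle row at detect time 1700000000.0 seeks the recording
    // to 1700000000.5; effectiveTime maps it back to the detect timestamp so
    // the overlay draws the matching box.
    const record = toRecordTime(1700000000.0); // 1700000000.5
    console.log(toDetectTime(record)); // 1700000000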
diff --git a/web/src/components/player/dynamic/DynamicVideoPlayer.tsx b/web/src/components/player/dynamic/DynamicVideoPlayer.tsx
index c8d95090d..6cae2ef5e 100644
--- a/web/src/components/player/dynamic/DynamicVideoPlayer.tsx
+++ b/web/src/components/player/dynamic/DynamicVideoPlayer.tsx
@@ -1,351 +1,429 @@
-import {
- ReactNode,
- useCallback,
- useEffect,
- useMemo,
- useRef,
- useState,
-} from "react";
-import { useApiHost } from "@/api";
-import useSWR from "swr";
-import { FrigateConfig } from "@/types/frigateConfig";
-import { Recording } from "@/types/record";
-import { Preview } from "@/types/preview";
-import PreviewPlayer, { PreviewController } from "../PreviewPlayer";
-import { DynamicVideoController } from "./DynamicVideoController";
-import HlsVideoPlayer, { HlsSource } from "../HlsVideoPlayer";
-import { useDetailStream } from "@/context/detail-stream-context";
-import { TimeRange } from "@/types/timeline";
-import ActivityIndicator from "@/components/indicators/activity-indicator";
-import { VideoResolutionType } from "@/types/live";
-import axios from "axios";
-import { cn } from "@/lib/utils";
-import { useTranslation } from "react-i18next";
-import {
- calculateInpointOffset,
- calculateSeekPosition,
-} from "@/utils/videoUtil";
-import { isFirefox } from "react-device-detect";
-
-/**
- * Dynamically switches between video playback and scrubbing preview player.
- */
-type DynamicVideoPlayerProps = {
- className?: string;
- camera: string;
- timeRange: TimeRange;
- cameraPreviews: Preview[];
- startTimestamp?: number;
- isScrubbing: boolean;
- hotKeys: boolean;
- supportsFullscreen: boolean;
- fullscreen: boolean;
- onControllerReady: (controller: DynamicVideoController) => void;
- onTimestampUpdate?: (timestamp: number) => void;
- onClipEnded?: () => void;
- onSeekToTime?: (timestamp: number, play?: boolean) => void;
- setFullResolution: React.Dispatch>;
- toggleFullscreen: () => void;
- containerRef?: React.MutableRefObject;
- transformedOverlay?: ReactNode;
-};
-export default function DynamicVideoPlayer({
- className,
- camera,
- timeRange,
- cameraPreviews,
- startTimestamp,
- isScrubbing,
- hotKeys,
- supportsFullscreen,
- fullscreen,
- onControllerReady,
- onTimestampUpdate,
- onClipEnded,
- onSeekToTime,
- setFullResolution,
- toggleFullscreen,
- containerRef,
- transformedOverlay,
-}: DynamicVideoPlayerProps) {
- const { t } = useTranslation(["components/player"]);
- const apiHost = useApiHost();
- const { data: config } = useSWR("config");
-
- // for detail stream context in History
- const {
- isDetailMode,
- camera: contextCamera,
- currentTime,
- } = useDetailStream();
-
- // controlling playback
-
- const playerRef = useRef(null);
- const [previewController, setPreviewController] =
- useState(null);
- const [noRecording, setNoRecording] = useState(false);
- const controller = useMemo(() => {
- if (!config || !playerRef.current || !previewController) {
- return undefined;
- }
-
- return new DynamicVideoController(
- camera,
- playerRef.current,
- previewController,
- (config.cameras[camera]?.detect?.annotation_offset || 0) / 1000,
- isScrubbing ? "scrubbing" : "playback",
- setNoRecording,
- () => {},
- );
- // we only want to fire once when players are ready
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [camera, config, playerRef.current, previewController]);
-
- useEffect(() => {
- if (!controller) {
- return;
- }
-
- if (controller) {
- onControllerReady(controller);
- }
-
- // we only want to fire once when players are ready
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [controller]);
-
- // initial state
-
- const [isLoading, setIsLoading] = useState(false);
- const [isBuffering, setIsBuffering] = useState(false);
- const [loadingTimeout, setLoadingTimeout] = useState();
-
- // Don't set source until recordings load - we need accurate startPosition
- // to avoid hls.js clamping to video end when startPosition exceeds duration
- const [source, setSource] = useState(undefined);
-
- // start at correct time
-
- useEffect(() => {
- if (!isScrubbing) {
- setLoadingTimeout(setTimeout(() => setIsLoading(true), 1000));
- }
-
- return () => {
- if (loadingTimeout) {
- clearTimeout(loadingTimeout);
- }
- };
- // we only want trigger when scrubbing state changes
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [camera, isScrubbing]);
-
- const onPlayerLoaded = useCallback(() => {
- if (!controller || !startTimestamp) {
- return;
- }
-
- controller.seekToTimestamp(startTimestamp, true);
- }, [startTimestamp, controller]);
-
- const onTimeUpdate = useCallback(
- (time: number) => {
- if (isScrubbing || !controller || !onTimestampUpdate || time == 0) {
- return;
- }
-
- if (isLoading) {
- setIsLoading(false);
- }
-
- if (isBuffering) {
- setIsBuffering(false);
- }
-
- onTimestampUpdate(controller.getProgress(time));
- },
- [controller, onTimestampUpdate, isBuffering, isLoading, isScrubbing],
- );
-
- const onUploadFrameToPlus = useCallback(
- (playTime: number) => {
- if (!controller) {
- return;
- }
-
- const time = controller.getProgress(playTime);
- return axios.post(`/${camera}/plus/${time}`);
- },
- [camera, controller],
- );
-
- // state of playback player
-
- const recordingParams = useMemo(
- () => ({
- before: timeRange.before,
- after: timeRange.after,
- }),
- [timeRange],
- );
- const { data: recordings } = useSWR(
- [`${camera}/recordings`, recordingParams],
- { revalidateOnFocus: false },
- );
-
- useEffect(() => {
- if (!recordings?.length) {
- if (recordings?.length == 0) {
- setNoRecording(true);
- }
-
- return;
- }
-
- let startPosition = undefined;
-
- if (startTimestamp) {
- const inpointOffset = calculateInpointOffset(
- recordingParams.after,
- (recordings || [])[0],
- );
-
- startPosition = calculateSeekPosition(
- startTimestamp,
- recordings,
- inpointOffset,
- );
- }
-
- setSource({
- playlist: `${apiHost}vod/${camera}/start/${recordingParams.after}/end/${recordingParams.before}/master.m3u8`,
- startPosition,
- });
-
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [recordings]);
-
- useEffect(() => {
- if (!controller || !recordings?.length) {
- return;
- }
-
- if (playerRef.current) {
- playerRef.current.autoplay = !isScrubbing;
- }
-
- setLoadingTimeout(setTimeout(() => setIsLoading(true), 1000));
-
- controller.newPlayback({
- recordings: recordings ?? [],
- timeRange,
- });
-
- // we only want this to change when controller or recordings update
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [controller, recordings]);
-
- const inpointOffset = useMemo(
- () => calculateInpointOffset(recordingParams.after, (recordings || [])[0]),
- [recordingParams, recordings],
- );
-
- const onValidateClipEnd = useCallback(
- (currentTime: number) => {
- if (!onClipEnded || !controller || !recordings) {
- return;
- }
-
- if (!isFirefox) {
- onClipEnded();
- }
-
- // Firefox has a bug where clipEnded can be called prematurely due to buffering
- // we need to validate if the current play-point is truly at the end of available recordings
-
- const lastRecordingTime = recordings.at(-1)?.start_time;
-
- if (
- !lastRecordingTime ||
- controller.getProgress(currentTime) < lastRecordingTime
- ) {
- return;
- }
-
- onClipEnded();
- },
- [onClipEnded, controller, recordings],
- );
-
- return (
- <>
- {source && (
- {
- if (onSeekToTime) {
- onSeekToTime(timestamp, play);
- }
- }}
- onPlaying={() => {
- if (isScrubbing) {
- playerRef.current?.pause();
- }
-
- if (loadingTimeout) {
- clearTimeout(loadingTimeout);
- }
-
- setNoRecording(false);
- }}
- setFullResolution={setFullResolution}
- onUploadFrame={onUploadFrameToPlus}
- toggleFullscreen={toggleFullscreen}
- onError={(error) => {
- if (error == "stalled" && !isScrubbing) {
- setIsBuffering(true);
- }
- }}
- isDetailMode={isDetailMode}
- camera={contextCamera || camera}
- currentTimeOverride={currentTime}
- transformedOverlay={transformedOverlay}
- />
- )}
-
- setPreviewController(previewController)
- }
- />
- {!isScrubbing && (isLoading || isBuffering) && !noRecording && (
-
- )}
- {!isScrubbing && !isLoading && noRecording && (
-
- {t("noRecordingsFoundForThisTime")}
-
- )}
- >
- );
-}
+import {
+ ReactNode,
+ useCallback,
+ useEffect,
+ useMemo,
+ useRef,
+ useState,
+} from "react";
+import { useApiHost } from "@/api";
+import useSWR from "swr";
+import { FrigateConfig } from "@/types/frigateConfig";
+import {
+ Recording,
+ RecordingPlaybackPreference,
+} from "@/types/record";
+import { Preview } from "@/types/preview";
+import PreviewPlayer, { PreviewController } from "../PreviewPlayer";
+import { DynamicVideoController } from "./DynamicVideoController";
+import HlsVideoPlayer, { HlsSource } from "../HlsVideoPlayer";
+import { useDetailStream } from "@/context/detail-stream-context";
+import { TimeRange } from "@/types/timeline";
+import ActivityIndicator from "@/components/indicators/activity-indicator";
+import { VideoResolutionType } from "@/types/live";
+import axios from "axios";
+import { cn } from "@/lib/utils";
+import { useTranslation } from "react-i18next";
+import { useUserPersistence } from "@/hooks/use-user-persistence";
+import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";
+import { chooseRecordingPlayback } from "@/utils/recordingPlayback";
+import {
+ calculateInpointOffset,
+ calculateSeekPosition,
+} from "@/utils/videoUtil";
+import { isFirefox } from "react-device-detect";
+import {
+ Select,
+ SelectContent,
+ SelectItem,
+ SelectTrigger,
+ SelectValue,
+} from "@/components/ui/select";
+
+/**
+ * Dynamically switches between video playback and scrubbing preview player.
+ */
+type DynamicVideoPlayerProps = {
+ className?: string;
+ camera: string;
+ timeRange: TimeRange;
+ cameraPreviews: Preview[];
+ startTimestamp?: number;
+ isScrubbing: boolean;
+ hotKeys: boolean;
+ supportsFullscreen: boolean;
+ fullscreen: boolean;
+ onControllerReady: (controller: DynamicVideoController) => void;
+ onTimestampUpdate?: (timestamp: number) => void;
+ onClipEnded?: () => void;
+ onSeekToTime?: (timestamp: number, play?: boolean) => void;
+ setFullResolution: React.Dispatch>;
+ toggleFullscreen: () => void;
+ containerRef?: React.MutableRefObject;
+ transformedOverlay?: ReactNode;
+};
+export default function DynamicVideoPlayer({
+ className,
+ camera,
+ timeRange,
+ cameraPreviews,
+ startTimestamp,
+ isScrubbing,
+ hotKeys,
+ supportsFullscreen,
+ fullscreen,
+ onControllerReady,
+ onTimestampUpdate,
+ onClipEnded,
+ onSeekToTime,
+ setFullResolution,
+ toggleFullscreen,
+ containerRef,
+ transformedOverlay,
+}: DynamicVideoPlayerProps) {
+ const { t } = useTranslation(["components/player"]);
+ const apiHost = useApiHost();
+ const { data: config } = useSWR("config");
+
+ // for detail stream context in History
+ const {
+ isDetailMode,
+ camera: contextCamera,
+ currentTime,
+ } = useDetailStream();
+
+ // controlling playback
+
+ const playerRef = useRef(null);
+ const [previewController, setPreviewController] =
+ useState(null);
+ const [noRecording, setNoRecording] = useState(false);
+ const controller = useMemo(() => {
+ if (!config || !playerRef.current || !previewController) {
+ return undefined;
+ }
+
+ return new DynamicVideoController(
+ camera,
+ playerRef.current,
+ previewController,
+ (config.cameras[camera]?.detect?.annotation_offset || 0) / 1000,
+ isScrubbing ? "scrubbing" : "playback",
+ setNoRecording,
+ () => {},
+ );
+ // we only want to fire once when players are ready
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [camera, config, playerRef.current, previewController]);
+
+ useEffect(() => {
+ if (!controller) {
+ return;
+ }
+
+ if (controller) {
+ onControllerReady(controller);
+ }
+
+ // we only want to fire once when players are ready
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [controller]);
+
+ // initial state
+
+ const [isLoading, setIsLoading] = useState(false);
+ const [isBuffering, setIsBuffering] = useState(false);
+ const [loadingTimeout, setLoadingTimeout] = useState();
+ const [playbackPreference, setPlaybackPreference] =
+ useUserPersistence(
+ `${camera}-recording-playback-v2`,
+ "sub",
+ );
+
+ // Don't set source until recordings load - we need accurate startPosition
+ // to avoid hls.js clamping to video end when startPosition exceeds duration
+ const [source, setSource] = useState(undefined);
+
+ // start at correct time
+
+ useEffect(() => {
+ if (!isScrubbing) {
+ setLoadingTimeout(setTimeout(() => setIsLoading(true), 1000));
+ }
+
+ return () => {
+ if (loadingTimeout) {
+ clearTimeout(loadingTimeout);
+ }
+ };
+ // we only want to trigger when the scrubbing state changes
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [camera, isScrubbing]);
+
+ const onPlayerLoaded = useCallback(() => {
+ if (!controller || !startTimestamp) {
+ return;
+ }
+
+ controller.seekToTimestamp(startTimestamp, true);
+ }, [startTimestamp, controller]);
+
+ const onTimeUpdate = useCallback(
+ (time: number) => {
+ if (isScrubbing || !controller || !onTimestampUpdate || time == 0) {
+ return;
+ }
+
+ if (isLoading) {
+ setIsLoading(false);
+ }
+
+ if (isBuffering) {
+ setIsBuffering(false);
+ }
+
+ onTimestampUpdate(controller.getProgress(time));
+ },
+ [controller, onTimestampUpdate, isBuffering, isLoading, isScrubbing],
+ );
+
+ const onUploadFrameToPlus = useCallback(
+ (playTime: number) => {
+ if (!controller) {
+ return;
+ }
+
+ const time = controller.getProgress(playTime);
+ return axios.post(`/${camera}/plus/${time}`);
+ },
+ [camera, controller],
+ );
+
+ // state of playback player
+
+ const recordingParams = useMemo(
+ () => ({
+ before: timeRange.before,
+ after: timeRange.after,
+ }),
+ [timeRange],
+ );
+ const { data: allRecordings } = useSWR(
+ [`${camera}/recordings`, { ...recordingParams, variant: "all" }],
+ { revalidateOnFocus: false },
+ );
+ const recordings = useMemo(() => {
+ if (!allRecordings?.length) {
+ return allRecordings;
+ }
+
+ const mainRecordings = allRecordings.filter(
+ (recording) => (recording.variant || "main") === "main",
+ );
+
+ return mainRecordings.length > 0 ? mainRecordings : allRecordings;
+ }, [allRecordings]);
+ const codecNames = useMemo(
+ () =>
+ Array.from(
+ new Set((allRecordings ?? []).map((recording) => recording.codec_name)),
+ ),
+ [allRecordings],
+ );
+ const playbackCapabilities = usePlaybackCapabilities(codecNames);
+
+ useEffect(() => {
+ if (!recordings?.length) {
+ if (recordings?.length == 0) {
+ setNoRecording(true);
+ }
+
+ return;
+ }
+
+ let startPosition = undefined;
+
+ if (startTimestamp) {
+ const inpointOffset = calculateInpointOffset(
+ recordingParams.after,
+ (recordings || [])[0],
+ );
+
+ startPosition = calculateSeekPosition(
+ startTimestamp,
+ recordings,
+ inpointOffset,
+ );
+ }
+
+ const vodPath = `/vod/${camera}/start/${recordingParams.after}/end/${recordingParams.before}/master.m3u8`;
+ const decision = chooseRecordingPlayback({
+ apiHost,
+ config,
+ recordings: allRecordings ?? recordings,
+ preference: playbackPreference ?? "sub",
+ vodPath,
+ capabilities: playbackCapabilities,
+ });
+ setSource({
+ playlist: decision.url,
+ startPosition,
+ });
+
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [
+ apiHost,
+ camera,
+ recordingParams.after,
+ recordingParams.before,
+ allRecordings,
+ recordings,
+ startTimestamp,
+ playbackPreference,
+ playbackCapabilities,
+ config?.transcode_proxy?.enabled,
+ config?.transcode_proxy?.vod_proxy_url,
+ ]);
+
+ useEffect(() => {
+ if (!controller || !recordings?.length) {
+ return;
+ }
+
+ if (playerRef.current) {
+ playerRef.current.autoplay = !isScrubbing;
+ }
+
+ setLoadingTimeout(setTimeout(() => setIsLoading(true), 1000));
+
+ controller.newPlayback({
+ recordings: recordings ?? [],
+ timeRange,
+ });
+
+ // we only want this to change when controller or recordings update
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [controller, recordings]);
+
+ const inpointOffset = useMemo(
+ () => calculateInpointOffset(recordingParams.after, (recordings || [])[0]),
+ [recordingParams, recordings],
+ );
+
+ const onValidateClipEnd = useCallback(
+ (currentTime: number) => {
+ if (!onClipEnded || !controller || !recordings) {
+ return;
+ }
+
+ if (!isFirefox) {
+ onClipEnded();
+ return;
+ }
+
+ // Firefox has a bug where clipEnded can be called prematurely due to buffering
+ // we need to validate if the current play-point is truly at the end of available recordings
+
+ const lastRecordingTime = recordings.at(-1)?.start_time;
+
+ if (
+ !lastRecordingTime ||
+ controller.getProgress(currentTime) < lastRecordingTime
+ ) {
+ return;
+ }
+
+ onClipEnded();
+ },
+ [onClipEnded, controller, recordings],
+ );
+
+ return (
+ <>
+ {source && (
+ {
+ if (onSeekToTime) {
+ onSeekToTime(timestamp, play);
+ }
+ }}
+ onPlaying={() => {
+ if (isScrubbing) {
+ playerRef.current?.pause();
+ }
+
+ if (loadingTimeout) {
+ clearTimeout(loadingTimeout);
+ }
+
+ setNoRecording(false);
+ }}
+ setFullResolution={setFullResolution}
+ onUploadFrame={onUploadFrameToPlus}
+ toggleFullscreen={toggleFullscreen}
+ onError={(error) => {
+ if (error == "stalled" && !isScrubbing) {
+ setIsBuffering(true);
+ }
+ }}
+ isDetailMode={isDetailMode}
+ camera={contextCamera || camera}
+ currentTimeOverride={currentTime}
+ transformedOverlay={transformedOverlay}
+ />
+ )}
+ {!isScrubbing && source && (
+
+
+ setPlaybackPreference(value as RecordingPlaybackPreference)
+ }
+ >
+
+
+
+
+ Auto
+ Main
+ Sub
+ Transcoded
+
+
+
+ )}
+
+ setPreviewController(previewController)
+ }
+ />
+ {!isScrubbing && (isLoading || isBuffering) && !noRecording && (
+
+ )}
+ {!isScrubbing && !isLoading && noRecording && (
+
+ {t("noRecordingsFoundForThisTime")}
+
+ )}
+ >
+ );
+}
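One detail in the source-selection effect above is easy to miss: segments are fetched with variant "all" so every rendition's codec is known, but seek math runs against the "main"-variant subset, falling back to the full list when a camera has no main rendition. The same selection as a standalone sketch (the trimmed Rec type is illustrative):

    // Sketch: prefer "main"-variant segments, fall back to all variants.
    type Rec = { variant?: string };

    function pickSeekSegments<T extends Rec>(all: T[]): T[] {
      // A missing variant field counts as "main", matching the effect above.
      const main = all.filter((r) => (r.variant || "main") === "main");
      return main.length > 0 ? main : all;
    }

    // A mixed listing keeps only the main segments for duration math...
    console.log(pickSeekSegments([{ variant: "main" }, { variant: "sub" }]).length); // 1
    // ...while a sub-only camera still gets playable segments.
    console.log(pickSeekSegments([{ variant: "sub" }]).length); // 1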
diff --git a/web/src/hooks/use-playback-capabilities.ts b/web/src/hooks/use-playback-capabilities.ts
new file mode 100644
index 000000000..ad8709246
--- /dev/null
+++ b/web/src/hooks/use-playback-capabilities.ts
@@ -0,0 +1,77 @@
+import { useMemo } from "react";
+import {
+ getCodecMimeTypes,
+ normalizeCodecName,
+ PlaybackCapabilities,
+} from "@/utils/recordingPlayback";
+
+type NavigatorConnection = {
+ downlink?: number;
+ effectiveType?: string;
+ rtt?: number;
+ saveData?: boolean;
+};
+
+declare global {
+ interface Navigator {
+ connection?: NavigatorConnection;
+ mozConnection?: NavigatorConnection;
+ webkitConnection?: NavigatorConnection;
+ }
+
+ interface Window {
+ ManagedMediaSource?: typeof MediaSource;
+ }
+}
+
+function canPlayMimeType(mimeType?: string): boolean {
+ if (!mimeType || typeof window === "undefined") {
+ return false;
+ }
+
+ if (window.ManagedMediaSource?.isTypeSupported(mimeType)) {
+ return true;
+ }
+
+ if (window.MediaSource?.isTypeSupported(mimeType)) {
+ return true;
+ }
+
+ const video = document.createElement("video");
+ return video.canPlayType(mimeType) !== "";
+}
+
+function canPlayAnyMimeType(mimeTypes: string[]): boolean {
+ return mimeTypes.some((mimeType) => canPlayMimeType(mimeType));
+}
+
+export default function usePlaybackCapabilities(codecNames: Array) {
+ return useMemo(() => {
+ if (typeof window === "undefined") {
+ return { estimatedBandwidthBps: undefined, saveData: false, supports: {} };
+ }
+
+ const connection =
+ navigator.connection ?? navigator.mozConnection ?? navigator.webkitConnection;
+ const supports: Record = {};
+
+ codecNames.forEach((codecName) => {
+ const normalized = normalizeCodecName(codecName);
+ if (!normalized || normalized in supports) {
+ return;
+ }
+
+ supports[normalized] = canPlayAnyMimeType(getCodecMimeTypes(normalized));
+ });
+
+ const downlinkMbps = connection?.downlink;
+ return {
+ estimatedBandwidthBps:
+ typeof downlinkMbps === "number" && downlinkMbps > 0
+ ? downlinkMbps * 1_000_000
+ : undefined,
+ saveData: connection?.saveData === true,
+ supports,
+ };
+ }, [codecNames]);
+}
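The hook reports per-codec playability plus coarse network hints (downlink-derived bandwidth and the Save-Data flag). A hedged usage sketch; the component, the "hevc" key, and the 2 Mbps threshold are illustrative assumptions, and supports is keyed by whatever normalizeCodecName returns for each input:

    // Sketch: gate a playback hint on codec support and network conditions.
    import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";

    function PlaybackHint({ codecs }: { codecs: Array<string | undefined> }) {
      const caps = usePlaybackCapabilities(codecs);

      // Hypothetical decision: prefer direct play only when the browser can
      // decode HEVC and the connection is neither metered nor slow.
      const hevcPlayable = caps.supports["hevc"] === true;
      const constrained =
        caps.saveData || (caps.estimatedBandwidthBps ?? Infinity) < 2_000_000;

      return (
        <span>{hevcPlayable && !constrained ? "direct play" : "transcode"}</span>
      );
    }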
diff --git a/web/src/hooks/use-recording-playback-source.ts b/web/src/hooks/use-recording-playback-source.ts
new file mode 100644
index 000000000..5d90082b0
--- /dev/null
+++ b/web/src/hooks/use-recording-playback-source.ts
@@ -0,0 +1,72 @@
+import { useApiHost } from "@/api";
+import useSWR from "swr";
+import { FrigateConfig } from "@/types/frigateConfig";
+import {
+ Recording,
+ RecordingPlaybackPreference,
+} from "@/types/record";
+import { useMemo } from "react";
+import { useUserPersistence } from "@/hooks/use-user-persistence";
+import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";
+import { chooseRecordingPlayback } from "@/utils/recordingPlayback";
+
+type RecordingPlaybackSourceOptions = {
+ camera: string;
+ after: number;
+ before: number;
+ vodPath: string;
+ preference?: RecordingPlaybackPreference;
+ enabled?: boolean;
+};
+
+export default function useRecordingPlaybackSource({
+ camera,
+ after,
+ before,
+ vodPath,
+ preference,
+ enabled = true,
+}: RecordingPlaybackSourceOptions) {
+ const apiHost = useApiHost();
+ const { data: config } = useSWR("config");
+ const [storedPreference] = useUserPersistence(
+ `${camera}-recording-playback-v2`,
+ "sub",
+ );
+ const { data: recordings } = useSWR(
+ enabled ? [`${camera}/recordings`, { after, before, variant: "all" }] : null,
+ { revalidateOnFocus: false },
+ );
+
+ const codecNames = useMemo(
+ () =>
+ Array.from(
+ new Set((recordings ?? []).map((recording) => recording.codec_name)),
+ ),
+ [recordings],
+ );
+ const capabilities = usePlaybackCapabilities(codecNames);
+
+ return useMemo(() => {
+ if (!recordings?.length) {
+ return undefined;
+ }
+
+ return chooseRecordingPlayback({
+ apiHost,
+ config,
+ recordings,
+ preference: preference ?? storedPreference ?? "sub",
+ vodPath,
+ capabilities,
+ }).url;
+ }, [
+ apiHost,
+ capabilities,
+ config,
+ preference,
+ recordings,
+ storedPreference,
+ vodPath,
+ ]);
+}
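Callers treat the hook's undefined return (recordings not yet loaded, or none found) as "use the plain VOD playlist", which is exactly the fallback TrackingDetails applies earlier in this diff. A condensed sketch of that pattern (the camera name and time window are illustrative):

    // Sketch of the caller-side fallback used by TrackingDetails above.
    import useRecordingPlaybackSource from "@/hooks/use-recording-playback-source";
    import { baseUrl } from "@/api/baseUrl";

    function useClipPlaylist() {
      const vodPath = "/vod/clip/front/start/100/end/200/index.m3u8";
      const source = useRecordingPlaybackSource({
        camera: "front",
        after: 100,
        before: 200,
        vodPath,
      });

      // undefined until the recordings query resolves; fall back to raw VOD.
      return source ?? `${baseUrl}${vodPath}`;
    }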
diff --git a/web/src/types/frigateConfig.ts b/web/src/types/frigateConfig.ts
index dcf3c312f..a995f86af 100644
--- a/web/src/types/frigateConfig.ts
+++ b/web/src/types/frigateConfig.ts
@@ -1,620 +1,625 @@
-import { IconName } from "@/components/icons/IconPicker";
-import { TriggerAction, TriggerType } from "./trigger";
-
-export interface UiConfig {
- timezone?: string;
- time_format?: "browser" | "12hour" | "24hour";
- date_style?: "full" | "long" | "medium" | "short";
- time_style?: "full" | "long" | "medium" | "short";
- dashboard: boolean;
- order: number;
- unit_system?: "metric" | "imperial";
-}
-
-export interface BirdseyeConfig {
- enabled: boolean;
- height: number;
- mode: "objects" | "continuous" | "motion";
- quality: number;
- restream: boolean;
- width: number;
-}
-
-export interface FaceRecognitionConfig {
- enabled: boolean;
- model_size: SearchModelSize;
- unknown_score: number;
- detection_threshold: number;
- recognition_threshold: number;
-}
-
-export type SearchModel = "jinav1" | "jinav2";
-export type SearchModelSize = "small" | "large";
-
-export interface CameraConfig {
- friendly_name: string;
- audio: {
- enabled: boolean;
- enabled_in_config: boolean;
- filters: string[] | null;
- listen: string[];
- max_not_heard: number;
- min_volume: number;
- num_threads: number;
- };
- audio_transcription: {
- enabled: boolean;
- enabled_in_config: boolean;
- live_enabled: boolean;
- };
- best_image_timeout: number;
- birdseye: {
- enabled: boolean;
- mode: "objects" | "continuous" | "motion";
- order: number;
- };
- detect: {
- annotation_offset: number;
- enabled: boolean;
- fps: number;
- height: number;
- max_disappeared: number;
- min_initialized: number;
- stationary: {
- interval: number;
- max_frames: {
- default: number | null;
- objects: Record;
- };
- threshold: number;
- };
- width: number;
- };
- enabled: boolean;
- enabled_in_config: boolean;
- ffmpeg: {
- global_args: string[];
- hwaccel_args: string;
- input_args: string;
- inputs: {
- global_args: string[];
- hwaccel_args: string[];
- input_args: string;
- path: string;
- roles: string[];
- }[];
- output_args: {
- detect: string[];
- record: string;
- rtmp: string;
- };
- retry_interval: number;
- };
- ffmpeg_cmds: {
- cmd: string;
- roles: string[];
- }[];
- live: {
- height: number;
- quality: number;
- streams: { [key: string]: string };
- };
- motion: {
- contour_area: number;
- delta_alpha: number;
- frame_alpha: number;
- frame_height: number;
- improve_contrast: boolean;
- lightning_threshold: number;
- skip_motion_threshold: number | null;
- mask: {
- [maskId: string]: {
- friendly_name?: string;
- enabled: boolean;
- enabled_in_config?: boolean;
- coordinates: string;
- };
- };
- mqtt_off_delay: number;
- threshold: number;
- };
- mqtt: {
- bounding_box: boolean;
- crop: boolean;
- enabled: boolean;
- height: number;
- quality: number;
- required_zones: string[];
- timestamp: boolean;
- };
- name: string;
- notifications: {
- enabled: boolean;
- email?: string;
- enabled_in_config: boolean;
- };
- objects: {
- filters: {
- [objectName: string]: {
- mask: {
- [maskId: string]: {
- friendly_name?: string;
- enabled: boolean;
- enabled_in_config?: boolean;
- coordinates: string;
- };
- };
- max_area: number;
- max_ratio: number;
- min_area: number;
- min_ratio: number;
- min_score: number;
- threshold: number;
- };
- };
- mask: {
- [maskId: string]: {
- friendly_name?: string;
- enabled: boolean;
- enabled_in_config?: boolean;
- coordinates: string;
- };
- };
- track: string[];
- genai: {
- enabled: boolean;
- enabled_in_config: boolean;
- prompt: string;
- object_prompts: { [key: string]: string };
- required_zones: string[];
- objects: string[];
- };
- };
- onvif: {
- autotracking: {
- calibrate_on_startup: boolean;
- enabled: boolean;
- enabled_in_config: boolean;
- movement_weights: string[];
- required_zones: string[];
- return_preset: string;
- timeout: number;
- track: string[];
- zoom_factor: number;
- zooming: string;
- };
- host: string;
- password: string | null;
- port: number;
- user: string | null;
- tls_insecure: boolean;
- };
- record: {
- enabled: boolean;
- enabled_in_config: boolean;
- alerts: {
- post_capture: number;
- pre_capture: number;
- retain: {
- days: number;
- mode: string;
- };
- };
- detections: {
- post_capture: number;
- pre_capture: number;
- retain: {
- days: number;
- mode: string;
- };
- };
- expire_interval: number;
- export: {
- timelapse_args: string;
- };
- preview: {
- quality: string;
- };
- retain: {
- days: number;
- mode: string;
- };
- };
- review: {
- alerts: {
- enabled: boolean;
- required_zones: string[];
- labels: string[];
- retain: {
- days: number;
- mode: string;
- };
- };
- detections: {
- enabled: boolean;
- required_zones: string[];
- labels: string[];
- retain: {
- days: number;
- mode: string;
- };
- };
- genai?: {
- enabled: boolean;
- enabled_in_config: boolean;
- alerts: boolean;
- detections: boolean;
- };
- };
- rtmp: {
- enabled: boolean;
- };
- semantic_search: {
- triggers: {
- [triggerName: string]: {
- enabled: boolean;
- type: TriggerType;
- data: string;
- threshold: number;
- actions: TriggerAction[];
- friendly_name: string;
- };
- };
- };
- snapshots: {
- bounding_box: boolean;
- clean_copy: boolean;
- crop: boolean;
- enabled: boolean;
- height: number | null;
- quality: number;
- required_zones: string[];
- retain: {
- default: number;
- mode: string;
- objects: Record;
- };
- timestamp: boolean;
- };
- timestamp_style: {
- color: {
- blue: number;
- green: number;
- red: number;
- };
- effect: string | null;
- format: string;
- position: string;
- thickness: number;
- };
- type: string;
- ui: UiConfig;
- webui_url: string | null;
- zones: {
- [zoneName: string]: {
- coordinates: string;
- distances: string[];
- enabled: boolean;
- enabled_in_config?: boolean;
- filters: Record;
- inertia: number;
- loitering_time: number;
- speed_threshold: number;
- objects: string[];
- color: number[];
- friendly_name?: string;
- };
- };
-}
-
-export type CameraGroupConfig = {
- cameras: string[];
- icon: IconName;
- order: number;
-};
-
-export type StreamType = "no-streaming" | "smart" | "continuous";
-
-export type CameraStreamingSettings = {
- streamName: string;
- streamType: StreamType;
- compatibilityMode: boolean;
- playAudio: boolean;
- volume: number;
-};
-
-export type CustomClassificationModelConfig = {
- enabled: boolean;
- name: string;
- threshold: number;
- save_attempts?: number;
- object_config?: {
- objects: string[];
- classification_type: string;
- };
- state_config?: {
- cameras: {
- [cameraName: string]: {
- crop: [number, number, number, number];
- };
- };
- motion: boolean;
- };
-};
-
-export type GroupStreamingSettings = {
- [cameraName: string]: CameraStreamingSettings;
-};
-
-export type AllGroupsStreamingSettings = {
- [groupName: string]: GroupStreamingSettings;
-};
-
-export interface FrigateConfig {
- version: string;
- safe_mode: boolean;
-
- audio: {
- enabled: boolean;
- enabled_in_config: boolean | null;
- filters: string[] | null;
- listen: string[];
- max_not_heard: number;
- min_volume: number;
- num_threads: number;
- };
-
- audio_transcription: {
- enabled: boolean;
- };
-
- auth: {
- roles: {
- [roleName: string]: string[];
- };
- };
-
- birdseye: BirdseyeConfig;
-
- cameras: {
- [cameraName: string]: CameraConfig;
- };
-
- classification: {
- bird: {
- enabled: boolean;
- threshold: number;
- };
- custom: {
- [modelKey: string]: CustomClassificationModelConfig;
- };
- };
-
- database: {
- path: string;
- };
-
- detect: {
- annotation_offset: number;
- enabled: boolean;
- fps: number;
- height: number | null;
- max_disappeared: number | null;
- min_initialized: number | null;
- stationary: {
- interval: number | null;
- max_frames: {
- default: number | null;
- objects: Record;
- };
- threshold: number | null;
- };
- width: number | null;
- };
-
- detectors: {
- coral: {
- device: string;
- model: {
- height: number;
- input_pixel_format: string;
- input_tensor: string;
- labelmap: Record<number, string>;
- labelmap_path: string | null;
- model_type: string;
- path: string;
- width: number;
- };
- type: string;
- };
- };
-
- environment_vars: Record<string, string>;
-
- face_recognition: FaceRecognitionConfig;
-
- ffmpeg: {
- global_args: string[];
- hwaccel_args: string;
- input_args: string;
- output_args: {
- detect: string[];
- record: string;
- rtmp: string;
- };
- retry_interval: number;
- };
-
- genai: {
- provider: string;
- base_url?: string;
- api_key?: string;
- model: string;
- };
-
- go2rtc: {
- streams: string[];
- webrtc: {
- candidates: string[];
- };
- };
-
- camera_groups: { [groupName: string]: CameraGroupConfig };
-
- lpr: {
- enabled: boolean;
- };
-
- logger: {
- default: string;
- logs: Record<string, string>;
- };
-
- model: {
- height: number;
- input_pixel_format: string;
- input_tensor: string;
- labelmap: Record<number, string>;
- labelmap_path: string | null;
- model_type: string;
- path: string | null;
- width: number;
- colormap: { [key: string]: [number, number, number] };
- attributes_map: { [key: string]: [string] };
- all_attributes: [string];
- plus?: {
- name: string;
- id: string;
- trainDate: string;
- baseModel: string;
- isBaseModel: boolean;
- supportedDetectors: string[];
- width: number;
- height: number;
- } | null;
- };
-
- motion: Record<string, unknown> | null;
-
- mqtt: {
- client_id: string;
- enabled: boolean;
- host: string;
- port: number;
- stats_interval: number;
- tls_ca_certs: string | null;
- tls_client_cert: string | null;
- tls_client_key: string | null;
- tls_insecure: boolean | null;
- topic_prefix: string;
- user: string | null;
- };
-
- notifications: {
- enabled: boolean;
- email?: string;
- enabled_in_config: boolean;
- };
-
- objects: {
- filters: {
- [objectName: string]: {
- mask: string[] | null;
- max_area: number;
- max_ratio: number;
- min_area: number;
- min_ratio: number;
- min_score: number;
- threshold: number;
- };
- };
- mask: string[];
- track: string[];
- };
-
- plus: {
- enabled: boolean;
- };
-
- proxy: {
- logout_url?: string;
- };
-
- record: {
- enabled: boolean;
- enabled_in_config: boolean | null;
- events: {
- objects: string[] | null;
- post_capture: number;
- pre_capture: number;
- required_zones: string[];
- retain: {
- default: number;
- mode: string;
- objects: Record<string, number>;
- };
- };
- expire_interval: number;
- export: {
- timelapse_args: string;
- };
- preview: {
- quality: string;
- };
- retain: {
- days: number;
- mode: string;
- };
- };
-
- rtmp: {
- enabled: boolean;
- };
-
- semantic_search: {
- enabled: boolean;
- reindex: boolean;
- model: SearchModel;
- model_size: SearchModelSize;
- };
-
- snapshots: {
- bounding_box: boolean;
- clean_copy: boolean;
- crop: boolean;
- enabled: boolean;
- height: number | null;
- quality: number;
- required_zones: string[];
- retain: {
- default: number;
- mode: string;
- objects: Record<string, number>;
- };
- timestamp: boolean;
- };
-
- telemetry: {
- network_interfaces: string[];
- stats: {
- amd_gpu_stats: boolean;
- intel_gpu_stats: boolean;
- network_bandwidth: boolean;
- };
- version_check: boolean;
- };
-
- timestamp_style: {
- color: {
- blue: number;
- green: number;
- red: number;
- };
- effect: string | null;
- format: string;
- position: string;
- thickness: number;
- };
-
- ui: UiConfig;
-}
+import { IconName } from "@/components/icons/IconPicker";
+import { TriggerAction, TriggerType } from "./trigger";
+
+export interface UiConfig {
+ timezone?: string;
+ time_format?: "browser" | "12hour" | "24hour";
+ date_style?: "full" | "long" | "medium" | "short";
+ time_style?: "full" | "long" | "medium" | "short";
+ dashboard: boolean;
+ order: number;
+ unit_system?: "metric" | "imperial";
+}
+
+export interface BirdseyeConfig {
+ enabled: boolean;
+ height: number;
+ mode: "objects" | "continuous" | "motion";
+ quality: number;
+ restream: boolean;
+ width: number;
+}
+
+export interface FaceRecognitionConfig {
+ enabled: boolean;
+ model_size: SearchModelSize;
+ unknown_score: number;
+ detection_threshold: number;
+ recognition_threshold: number;
+}
+
+export type SearchModel = "jinav1" | "jinav2";
+export type SearchModelSize = "small" | "large";
+
+export interface CameraConfig {
+ friendly_name: string;
+ audio: {
+ enabled: boolean;
+ enabled_in_config: boolean;
+ filters: string[] | null;
+ listen: string[];
+ max_not_heard: number;
+ min_volume: number;
+ num_threads: number;
+ };
+ audio_transcription: {
+ enabled: boolean;
+ enabled_in_config: boolean;
+ live_enabled: boolean;
+ };
+ best_image_timeout: number;
+ birdseye: {
+ enabled: boolean;
+ mode: "objects" | "continuous" | "motion";
+ order: number;
+ };
+ detect: {
+ annotation_offset: number;
+ enabled: boolean;
+ fps: number;
+ height: number;
+ max_disappeared: number;
+ min_initialized: number;
+ stationary: {
+ interval: number;
+ max_frames: {
+ default: number | null;
+ objects: Record<string, number>;
+ };
+ threshold: number;
+ };
+ width: number;
+ };
+ enabled: boolean;
+ enabled_in_config: boolean;
+ ffmpeg: {
+ global_args: string[];
+ hwaccel_args: string;
+ input_args: string;
+ inputs: {
+ global_args: string[];
+ hwaccel_args: string[];
+ input_args: string;
+ path: string;
+ roles: string[];
+ }[];
+ output_args: {
+ detect: string[];
+ record: string;
+ rtmp: string;
+ };
+ retry_interval: number;
+ };
+ ffmpeg_cmds: {
+ cmd: string;
+ roles: string[];
+ }[];
+ live: {
+ height: number;
+ quality: number;
+ streams: { [key: string]: string };
+ };
+ motion: {
+ contour_area: number;
+ delta_alpha: number;
+ frame_alpha: number;
+ frame_height: number;
+ improve_contrast: boolean;
+ lightning_threshold: number;
+ skip_motion_threshold: number | null;
+ mask: {
+ [maskId: string]: {
+ friendly_name?: string;
+ enabled: boolean;
+ enabled_in_config?: boolean;
+ coordinates: string;
+ };
+ };
+ mqtt_off_delay: number;
+ threshold: number;
+ };
+ mqtt: {
+ bounding_box: boolean;
+ crop: boolean;
+ enabled: boolean;
+ height: number;
+ quality: number;
+ required_zones: string[];
+ timestamp: boolean;
+ };
+ name: string;
+ notifications: {
+ enabled: boolean;
+ email?: string;
+ enabled_in_config: boolean;
+ };
+ objects: {
+ filters: {
+ [objectName: string]: {
+ mask: {
+ [maskId: string]: {
+ friendly_name?: string;
+ enabled: boolean;
+ enabled_in_config?: boolean;
+ coordinates: string;
+ };
+ };
+ max_area: number;
+ max_ratio: number;
+ min_area: number;
+ min_ratio: number;
+ min_score: number;
+ threshold: number;
+ };
+ };
+ mask: {
+ [maskId: string]: {
+ friendly_name?: string;
+ enabled: boolean;
+ enabled_in_config?: boolean;
+ coordinates: string;
+ };
+ };
+ track: string[];
+ genai: {
+ enabled: boolean;
+ enabled_in_config: boolean;
+ prompt: string;
+ object_prompts: { [key: string]: string };
+ required_zones: string[];
+ objects: string[];
+ };
+ };
+ onvif: {
+ autotracking: {
+ calibrate_on_startup: boolean;
+ enabled: boolean;
+ enabled_in_config: boolean;
+ movement_weights: string[];
+ required_zones: string[];
+ return_preset: string;
+ timeout: number;
+ track: string[];
+ zoom_factor: number;
+ zooming: string;
+ };
+ host: string;
+ password: string | null;
+ port: number;
+ user: string | null;
+ tls_insecure: boolean;
+ };
+ record: {
+ enabled: boolean;
+ enabled_in_config: boolean;
+ alerts: {
+ post_capture: number;
+ pre_capture: number;
+ retain: {
+ days: number;
+ mode: string;
+ };
+ };
+ detections: {
+ post_capture: number;
+ pre_capture: number;
+ retain: {
+ days: number;
+ mode: string;
+ };
+ };
+ expire_interval: number;
+ export: {
+ timelapse_args: string;
+ };
+ preview: {
+ quality: string;
+ };
+ retain: {
+ days: number;
+ mode: string;
+ };
+ };
+ review: {
+ alerts: {
+ enabled: boolean;
+ required_zones: string[];
+ labels: string[];
+ retain: {
+ days: number;
+ mode: string;
+ };
+ };
+ detections: {
+ enabled: boolean;
+ required_zones: string[];
+ labels: string[];
+ retain: {
+ days: number;
+ mode: string;
+ };
+ };
+ genai?: {
+ enabled: boolean;
+ enabled_in_config: boolean;
+ alerts: boolean;
+ detections: boolean;
+ };
+ };
+ rtmp: {
+ enabled: boolean;
+ };
+ semantic_search: {
+ triggers: {
+ [triggerName: string]: {
+ enabled: boolean;
+ type: TriggerType;
+ data: string;
+ threshold: number;
+ actions: TriggerAction[];
+ friendly_name: string;
+ };
+ };
+ };
+ snapshots: {
+ bounding_box: boolean;
+ clean_copy: boolean;
+ crop: boolean;
+ enabled: boolean;
+ height: number | null;
+ quality: number;
+ required_zones: string[];
+ retain: {
+ default: number;
+ mode: string;
+ objects: Record<string, number>;
+ };
+ timestamp: boolean;
+ };
+ timestamp_style: {
+ color: {
+ blue: number;
+ green: number;
+ red: number;
+ };
+ effect: string | null;
+ format: string;
+ position: string;
+ thickness: number;
+ };
+ type: string;
+ ui: UiConfig;
+ webui_url: string | null;
+ zones: {
+ [zoneName: string]: {
+ coordinates: string;
+ distances: string[];
+ enabled: boolean;
+ enabled_in_config?: boolean;
+ filters: Record<string, unknown>;
+ inertia: number;
+ loitering_time: number;
+ speed_threshold: number;
+ objects: string[];
+ color: number[];
+ friendly_name?: string;
+ };
+ };
+}
+
+export type CameraGroupConfig = {
+ cameras: string[];
+ icon: IconName;
+ order: number;
+};
+
+export type StreamType = "no-streaming" | "smart" | "continuous";
+
+export type CameraStreamingSettings = {
+ streamName: string;
+ streamType: StreamType;
+ compatibilityMode: boolean;
+ playAudio: boolean;
+ volume: number;
+};
+
+export type CustomClassificationModelConfig = {
+ enabled: boolean;
+ name: string;
+ threshold: number;
+ save_attempts?: number;
+ object_config?: {
+ objects: string[];
+ classification_type: string;
+ };
+ state_config?: {
+ cameras: {
+ [cameraName: string]: {
+ crop: [number, number, number, number];
+ };
+ };
+ motion: boolean;
+ };
+};
+
+export type GroupStreamingSettings = {
+ [cameraName: string]: CameraStreamingSettings;
+};
+
+export type AllGroupsStreamingSettings = {
+ [groupName: string]: GroupStreamingSettings;
+};
+
+export interface FrigateConfig {
+ version: string;
+ safe_mode: boolean;
+
+ audio: {
+ enabled: boolean;
+ enabled_in_config: boolean | null;
+ filters: string[] | null;
+ listen: string[];
+ max_not_heard: number;
+ min_volume: number;
+ num_threads: number;
+ };
+
+ audio_transcription: {
+ enabled: boolean;
+ };
+
+ auth: {
+ roles: {
+ [roleName: string]: string[];
+ };
+ };
+
+ birdseye: BirdseyeConfig;
+
+ cameras: {
+ [cameraName: string]: CameraConfig;
+ };
+
+ classification: {
+ bird: {
+ enabled: boolean;
+ threshold: number;
+ };
+ custom: {
+ [modelKey: string]: CustomClassificationModelConfig;
+ };
+ };
+
+ database: {
+ path: string;
+ };
+
+ detect: {
+ annotation_offset: number;
+ enabled: boolean;
+ fps: number;
+ height: number | null;
+ max_disappeared: number | null;
+ min_initialized: number | null;
+ stationary: {
+ interval: number | null;
+ max_frames: {
+ default: number | null;
+ objects: Record<string, number>;
+ };
+ threshold: number | null;
+ };
+ width: number | null;
+ };
+
+ detectors: {
+ coral: {
+ device: string;
+ model: {
+ height: number;
+ input_pixel_format: string;
+ input_tensor: string;
+ labelmap: Record<number, string>;
+ labelmap_path: string | null;
+ model_type: string;
+ path: string;
+ width: number;
+ };
+ type: string;
+ };
+ };
+
+ environment_vars: Record<string, string>;
+
+ face_recognition: FaceRecognitionConfig;
+
+ ffmpeg: {
+ global_args: string[];
+ hwaccel_args: string;
+ input_args: string;
+ output_args: {
+ detect: string[];
+ record: string;
+ rtmp: string;
+ };
+ retry_interval: number;
+ };
+
+ genai: {
+ provider: string;
+ base_url?: string;
+ api_key?: string;
+ model: string;
+ };
+
+ go2rtc: {
+ streams: string[];
+ webrtc: {
+ candidates: string[];
+ };
+ };
+
+ camera_groups: { [groupName: string]: CameraGroupConfig };
+
+ lpr: {
+ enabled: boolean;
+ };
+
+ logger: {
+ default: string;
+ logs: Record<string, string>;
+ };
+
+ model: {
+ height: number;
+ input_pixel_format: string;
+ input_tensor: string;
+ labelmap: Record<number, string>;
+ labelmap_path: string | null;
+ model_type: string;
+ path: string | null;
+ width: number;
+ colormap: { [key: string]: [number, number, number] };
+ attributes_map: { [key: string]: [string] };
+ all_attributes: [string];
+ plus?: {
+ name: string;
+ id: string;
+ trainDate: string;
+ baseModel: string;
+ isBaseModel: boolean;
+ supportedDetectors: string[];
+ width: number;
+ height: number;
+ } | null;
+ };
+
+ motion: Record<string, unknown> | null;
+
+ mqtt: {
+ client_id: string;
+ enabled: boolean;
+ host: string;
+ port: number;
+ stats_interval: number;
+ tls_ca_certs: string | null;
+ tls_client_cert: string | null;
+ tls_client_key: string | null;
+ tls_insecure: boolean | null;
+ topic_prefix: string;
+ user: string | null;
+ };
+
+ notifications: {
+ enabled: boolean;
+ email?: string;
+ enabled_in_config: boolean;
+ };
+
+ objects: {
+ filters: {
+ [objectName: string]: {
+ mask: string[] | null;
+ max_area: number;
+ max_ratio: number;
+ min_area: number;
+ min_ratio: number;
+ min_score: number;
+ threshold: number;
+ };
+ };
+ mask: string[];
+ track: string[];
+ };
+
+ plus: {
+ enabled: boolean;
+ };
+
+ proxy: {
+ logout_url?: string;
+ };
+
+ transcode_proxy?: {
+ enabled: boolean;
+ vod_proxy_url: string;
+ };
+
+ record: {
+ enabled: boolean;
+ enabled_in_config: boolean | null;
+ events: {
+ objects: string[] | null;
+ post_capture: number;
+ pre_capture: number;
+ required_zones: string[];
+ retain: {
+ default: number;
+ mode: string;
+ objects: Record<string, number>;
+ };
+ };
+ expire_interval: number;
+ export: {
+ timelapse_args: string;
+ };
+ preview: {
+ quality: string;
+ };
+ retain: {
+ days: number;
+ mode: string;
+ };
+ };
+
+ rtmp: {
+ enabled: boolean;
+ };
+
+ semantic_search: {
+ enabled: boolean;
+ reindex: boolean;
+ model: SearchModel;
+ model_size: SearchModelSize;
+ };
+
+ snapshots: {
+ bounding_box: boolean;
+ clean_copy: boolean;
+ crop: boolean;
+ enabled: boolean;
+ height: number | null;
+ quality: number;
+ required_zones: string[];
+ retain: {
+ default: number;
+ mode: string;
+ objects: Record<string, number>;
+ };
+ timestamp: boolean;
+ };
+
+ telemetry: {
+ network_interfaces: string[];
+ stats: {
+ amd_gpu_stats: boolean;
+ intel_gpu_stats: boolean;
+ network_bandwidth: boolean;
+ };
+ version_check: boolean;
+ };
+
+ timestamp_style: {
+ color: {
+ blue: number;
+ green: number;
+ red: number;
+ };
+ effect: string | null;
+ format: string;
+ position: string;
+ thickness: number;
+ };
+
+ ui: UiConfig;
+}
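+
+// transcode_proxy is the one genuinely new block in this interface; mirrored
+// into the YAML config it would presumably look like (illustrative values):
+//
+// transcode_proxy:
+// enabled: true
+// vod_proxy_url: http://frigate.local:8889/vod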
diff --git a/web/src/types/record.ts b/web/src/types/record.ts
index 107a8d86e..af4f4c481 100644
--- a/web/src/types/record.ts
+++ b/web/src/types/record.ts
@@ -1,49 +1,60 @@
-import { ReviewSeverity } from "./review";
-import { TimelineType } from "./timeline";
-
-export type Recording = {
- id: string;
- camera: string;
- start_time: number;
- end_time: number;
- path: string;
- segment_size: number;
- duration: number;
- motion: number;
- objects: number;
- motion_heatmap?: Record<string, unknown> | null;
- dBFS: number;
-};
-
-export type RecordingSegment = {
- id: string;
- start_time: number;
- end_time: number;
- motion: number;
- objects: number;
- segment_size: number;
- duration: number;
-};
-
-export type RecordingActivity = {
- [hour: number]: RecordingSegmentActivity[];
-};
-
-type RecordingSegmentActivity = {
- date: number;
- count: number;
- hasObjects: boolean;
-};
-
-export type RecordingStartingPoint = {
- camera: string;
- startTime: number;
- severity: ReviewSeverity;
- timelineType?: TimelineType;
-};
-
-export type RecordingPlayerError = "stalled" | "startup";
-
-export const ASPECT_VERTICAL_LAYOUT = 1.5;
-export const ASPECT_PORTRAIT_LAYOUT = 1.333;
-export const ASPECT_WIDE_LAYOUT = 2;
+import { ReviewSeverity } from "./review";
+import { TimelineType } from "./timeline";
+
+export type Recording = {
+ id: string;
+ camera: string;
+ start_time: number;
+ end_time: number;
+ path: string;
+ variant?: string;
+ segment_size: number;
+ duration: number;
+ motion: number;
+ objects: number;
+ motion_heatmap?: Record<string, unknown> | null;
+ dBFS: number;
+ codec_name?: string | null;
+ width?: number | null;
+ height?: number | null;
+ bitrate?: number | null;
+};
+
+export type RecordingSegment = {
+ id: string;
+ start_time: number;
+ end_time: number;
+ motion: number;
+ objects: number;
+ segment_size: number;
+ duration: number;
+};
+
+export type RecordingActivity = {
+ [hour: number]: RecordingSegmentActivity[];
+};
+
+type RecordingSegmentActivity = {
+ date: number;
+ count: number;
+ hasObjects: boolean;
+};
+
+export type RecordingStartingPoint = {
+ camera: string;
+ startTime: number;
+ severity: ReviewSeverity;
+ timelineType?: TimelineType;
+};
+
+export type RecordingPlayerError = "stalled" | "startup";
+
+export type RecordingPlaybackPreference =
+ | "auto"
+ | "main"
+ | "sub"
+ | "transcoded";
+
+export const ASPECT_VERTICAL_LAYOUT = 1.5;
+export const ASPECT_PORTRAIT_LAYOUT = 1.333;
+export const ASPECT_WIDE_LAYOUT = 2;
diff --git a/web/src/utils/liveStreamSelection.ts b/web/src/utils/liveStreamSelection.ts
new file mode 100644
index 000000000..a0baeb682
--- /dev/null
+++ b/web/src/utils/liveStreamSelection.ts
@@ -0,0 +1,44 @@
+const LOW_BANDWIDTH_PATTERN = /\b(sub|low|mobile|small|sd|lowres|low-res)\b/i;
+const HIGH_BANDWIDTH_PATTERN = /\b(main|high|hd|full|primary)\b/i;
+
+function rankStreamLabel(label: string, preferLowBandwidth: boolean): number {
+ if (preferLowBandwidth && LOW_BANDWIDTH_PATTERN.test(label)) {
+ return 3;
+ }
+
+ if (!preferLowBandwidth && HIGH_BANDWIDTH_PATTERN.test(label)) {
+ return 3;
+ }
+
+ if (preferLowBandwidth && HIGH_BANDWIDTH_PATTERN.test(label)) {
+ return 1;
+ }
+
+ if (!preferLowBandwidth && LOW_BANDWIDTH_PATTERN.test(label)) {
+ return 1;
+ }
+
+ return 2;
+}
+
+export function chooseAutoLiveStream(
+ streams: Record<string, string>,
+ estimatedBandwidthBps?: number,
+ saveData = false,
+): string {
+ const entries = Object.entries(streams || {});
+ if (entries.length === 0) {
+ return "";
+ }
+
+ const preferLowBandwidth =
+ saveData || !!(estimatedBandwidthBps && estimatedBandwidthBps <= 3_000_000);
+
+ return [...entries]
+ .sort(([leftLabel], [rightLabel]) => {
+ return (
+ rankStreamLabel(rightLabel, preferLowBandwidth) -
+ rankStreamLabel(leftLabel, preferLowBandwidth)
+ );
+ })[0][1];
+}
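+
+// Example (labels and stream names illustrative): ranking keys off the
+// stream label, so a Save-Data client lands on the sub stream while an
+// unconstrained client keeps the main stream.
+//
+// const streams = { main: "front_main", sub: "front_sub" };
+// chooseAutoLiveStream(streams, undefined, true); // "front_sub"
+// chooseAutoLiveStream(streams, 6_000_000, false); // "front_main"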
diff --git a/web/src/utils/recordingPlayback.ts b/web/src/utils/recordingPlayback.ts
new file mode 100644
index 000000000..42091ea77
--- /dev/null
+++ b/web/src/utils/recordingPlayback.ts
@@ -0,0 +1,324 @@
+import { FrigateConfig } from "@/types/frigateConfig";
+import {
+ Recording,
+ RecordingPlaybackPreference,
+} from "@/types/record";
+
+export type PlaybackCapabilities = {
+ estimatedBandwidthBps?: number;
+ saveData: boolean;
+ supports: Record<string, boolean>;
+};
+
+export type RecordingPlaybackDecision = {
+ mode: "direct" | "transcoded";
+ variant: string;
+ url: string;
+ reason: string;
+};
+
+type DecisionOptions = {
+ apiHost: string;
+ config?: FrigateConfig;
+ recordings: Recording[];
+ preference: RecordingPlaybackPreference;
+ vodPath: string;
+ capabilities: PlaybackCapabilities;
+};
+
+const CODEC_SAMPLES: Record<string, string[]> = {
+ h264: ['video/mp4; codecs="avc1.42E01E"', 'video/mp4; codecs="avc1.64001F"'],
+ avc1: ['video/mp4; codecs="avc1.42E01E"', 'video/mp4; codecs="avc1.64001F"'],
+ hevc: [
+ 'video/mp4; codecs="hev1.1.6.L120.90"',
+ 'video/mp4; codecs="hvc1.1.6.L120.90"',
+ 'video/mp4; codecs="hev1.1.6.L93.B0"',
+ 'video/mp4; codecs="hvc1.1.6.L93.B0"',
+ ],
+ h265: [
+ 'video/mp4; codecs="hev1.1.6.L120.90"',
+ 'video/mp4; codecs="hvc1.1.6.L120.90"',
+ 'video/mp4; codecs="hev1.1.6.L93.B0"',
+ 'video/mp4; codecs="hvc1.1.6.L93.B0"',
+ ],
+ hev1: [
+ 'video/mp4; codecs="hev1.1.6.L120.90"',
+ 'video/mp4; codecs="hvc1.1.6.L120.90"',
+ 'video/mp4; codecs="hev1.1.6.L93.B0"',
+ 'video/mp4; codecs="hvc1.1.6.L93.B0"',
+ ],
+ hvc1: [
+ 'video/mp4; codecs="hev1.1.6.L120.90"',
+ 'video/mp4; codecs="hvc1.1.6.L120.90"',
+ 'video/mp4; codecs="hev1.1.6.L93.B0"',
+ 'video/mp4; codecs="hvc1.1.6.L93.B0"',
+ ],
+ av1: ['video/mp4; codecs="av01.0.05M.08"'],
+ av01: ['video/mp4; codecs="av01.0.05M.08"'],
+ vp9: ['video/mp4; codecs="vp09.00.10.08"'],
+ vp09: ['video/mp4; codecs="vp09.00.10.08"'],
+};
+
+function trimTrailingSlash(value: string): string {
+ return value.replace(/\/$/, "");
+}
+
+function appendQuery(url: string, params: Record<string, string | undefined>): string {
+ const entries = Object.entries(params).filter(([, value]) => value);
+ if (entries.length === 0) {
+ return url;
+ }
+
+ const search = new URLSearchParams(entries as [string, string][]);
+ return `${url}${url.includes("?") ? "&" : "?"}${search.toString()}`;
+}
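+
+// e.g. appendQuery("/vod/cam/index.m3u8", { variant: "sub", bitrate: undefined })
+// yields "/vod/cam/index.m3u8?variant=sub": falsy params are dropped, and an
+// existing "?" in the URL switches the separator to "&". (Path is illustrative.)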
+
+function average(values: number[]): number | undefined {
+ if (!values.length) {
+ return undefined;
+ }
+
+ return values.reduce((sum, value) => sum + value, 0) / values.length;
+}
+
+export function normalizeCodecName(codecName?: string | null): string | undefined {
+ return codecName?.toLowerCase().trim() || undefined;
+}
+
+export function getCodecMimeTypes(codecName?: string | null): string[] {
+ const normalized = normalizeCodecName(codecName);
+ if (!normalized) {
+ return [];
+ }
+
+ return CODEC_SAMPLES[normalized] ?? [];
+}
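+
+// A minimal sketch (not part of the shipped hook) of how a
+// PlaybackCapabilities "supports" map could be built from these samples: a
+// codec counts as playable when any of its MIME strings passes
+// MediaSource.isTypeSupported. The real probing lives in
+// usePlaybackCapabilities, outside this file.
+export function probeCodecSupport(codecNames: string[]): Record<string, boolean> {
+ const supports: Record<string, boolean> = {};
+ for (const codecName of codecNames) {
+ const samples = getCodecMimeTypes(codecName);
+ // Guard for non-browser contexts (tests, SSR) where MediaSource is absent.
+ supports[codecName] =
+ typeof MediaSource !== "undefined" &&
+ samples.some((mimeType) => MediaSource.isTypeSupported(mimeType));
+ }
+ return supports;
+}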
+
+export function estimateRecordingBitrate(recordings: Recording[]): number | undefined {
+ const explicit = recordings
+ .map((recording) => recording.bitrate)
+ .filter((value): value is number => typeof value === "number" && value > 0);
+
+ if (explicit.length > 0) {
+ return average(explicit);
+ }
+
+ const derived = recordings
+ .map((recording) => {
+ if (!recording.segment_size || !recording.duration) {
+ return undefined;
+ }
+
+ return (recording.segment_size * 1024 * 1024 * 8) / recording.duration;
+ })
+ .filter((value): value is number => typeof value === "number" && value > 0);
+
+ return average(derived);
+}
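+
+// Derivation note: segment_size is treated as MiB here, so a 10 s segment of
+// 2.5 MiB estimates to (2.5 * 1024 * 1024 * 8) / 10, roughly 2.1 Mbps.
+// Explicit per-recording bitrate values, when present, win over this estimate.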
+
+export function groupRecordingsByVariant(
+ recordings: Recording[],
+): Record<string, Recording[]> {
+ return recordings.reduce<Record<string, Recording[]>>((acc, recording) => {
+ const variant = recording.variant || "main";
+ if (!acc[variant]) {
+ acc[variant] = [];
+ }
+ acc[variant].push(recording);
+ return acc;
+ }, {});
+}
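+
+// Recordings tagged variant: "sub" land in the "sub" bucket, while recordings
+// without a variant field (all pre-existing footage) default to "main", so
+// older data flows through the new playback selection unchanged.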
+
+function canDirectPlayVariant(
+ capabilities: PlaybackCapabilities,
+ recordings: Recording[],
+): boolean {
+ const codecName = normalizeCodecName(recordings[0]?.codec_name);
+ if (!codecName) {
+ return false;
+ }
+
+ return capabilities.supports[codecName] === true;
+}
+
+function getDirectBaseUrl(apiHost: string): string {
+ return trimTrailingSlash(apiHost);
+}
+
+function getTranscodeBaseUrl(apiHost: string, config?: FrigateConfig): string | undefined {
+ if (!config?.transcode_proxy?.enabled) {
+ return undefined;
+ }
+
+ if (config.transcode_proxy.vod_proxy_url?.trim()) {
+ return trimTrailingSlash(config.transcode_proxy.vod_proxy_url);
+ }
+
+ return `${trimTrailingSlash(apiHost)}/vod-transcoded`;
+}
+
+function getTranscodeProfile(estimatedBandwidthBps?: number, saveData = false) {
+ if (saveData || (estimatedBandwidthBps && estimatedBandwidthBps <= 1_500_000)) {
+ return { bitrate: "512k", maxWidth: "640", maxHeight: "360" };
+ }
+
+ if (estimatedBandwidthBps && estimatedBandwidthBps <= 3_000_000) {
+ return { bitrate: "1200k", maxWidth: "960", maxHeight: "540" };
+ }
+
+ return { bitrate: "2500k", maxWidth: "1280", maxHeight: "720" };
+}
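+
+// The ladder above is intentionally conservative: 360p/512k under Save-Data
+// or links of ~1.5 Mbps and below, 540p/1200k up to ~3 Mbps, and 720p/2500k
+// otherwise, keeping the transcode target well under the measured bandwidth.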
+
+function buildDirectUrl(apiHost: string, vodPath: string, variant: string): string {
+ const baseUrl = `${getDirectBaseUrl(apiHost)}${vodPath}`;
+ return appendQuery(baseUrl, {
+ variant: variant !== "main" ? variant : undefined,
+ });
+}
+
+function buildTranscodeUrl(
+ apiHost: string,
+ config: FrigateConfig | undefined,
+ vodPath: string,
+ variant: string,
+ capabilities: PlaybackCapabilities,
+): string {
+ const transcodeBase = getTranscodeBaseUrl(apiHost, config);
+ if (!transcodeBase) {
+ return buildDirectUrl(apiHost, vodPath, variant);
+ }
+
+ const profile = getTranscodeProfile(
+ capabilities.estimatedBandwidthBps,
+ capabilities.saveData,
+ );
+
+ return appendQuery(`${transcodeBase}${vodPath}`, {
+ variant,
+ bitrate: profile.bitrate,
+ max_width: profile.maxWidth,
+ max_height: profile.maxHeight,
+ });
+}
+
+export function chooseRecordingPlayback({
+ apiHost,
+ config,
+ recordings,
+ preference,
+ vodPath,
+ capabilities,
+}: DecisionOptions): RecordingPlaybackDecision {
+ const recordingsByVariant = groupRecordingsByVariant(recordings);
+ const mainRecordings = recordingsByVariant.main ?? [];
+ const subRecordings = recordingsByVariant.sub ?? [];
+ const transcodeAvailable = !!getTranscodeBaseUrl(apiHost, config);
+ const estimatedBandwidthBps =
+ capabilities.estimatedBandwidthBps ?? (capabilities.saveData ? 1_000_000 : 6_000_000);
+
+ const candidates: Record<
+ "main" | "sub",
+ { recordings: Recording[]; playable: boolean; bitrate?: number }
+ > = {
+ main: {
+ recordings: mainRecordings,
+ playable: canDirectPlayVariant(capabilities, mainRecordings),
+ bitrate: estimateRecordingBitrate(mainRecordings),
+ },
+ sub: {
+ recordings: subRecordings,
+ playable: canDirectPlayVariant(capabilities, subRecordings),
+ bitrate: estimateRecordingBitrate(subRecordings),
+ },
+ };
+
+ const preferDirect = (variant: "main" | "sub") => {
+ const candidate = candidates[variant];
+ return (
+ candidate.recordings.length > 0 &&
+ candidate.playable &&
+ (!candidate.bitrate || candidate.bitrate <= estimatedBandwidthBps * 0.85)
+ );
+ };
+
+ if (preference === "main" && candidates.main.recordings.length > 0) {
+ return {
+ mode: "direct",
+ variant: "main",
+ url: buildDirectUrl(apiHost, vodPath, "main"),
+ reason: "manual-main",
+ };
+ }
+
+ if (preference === "sub" && candidates.sub.recordings.length > 0) {
+ if (candidates.sub.playable) {
+ return {
+ mode: "direct",
+ variant: "sub",
+ url: buildDirectUrl(apiHost, vodPath, "sub"),
+ reason: "manual-sub",
+ };
+ }
+
+ return {
+ mode: "transcoded",
+ variant: "sub",
+ url: buildTranscodeUrl(apiHost, config, vodPath, "sub", capabilities),
+ reason: "manual-sub-transcoded",
+ };
+ }
+
+ if (preference === "transcoded") {
+ const targetVariant = candidates.sub.recordings.length > 0 ? "sub" : "main";
+ if (!transcodeAvailable) {
+ return {
+ mode: "direct",
+ variant: targetVariant,
+ url: buildDirectUrl(apiHost, vodPath, targetVariant),
+ reason: "manual-transcoded-unavailable",
+ };
+ }
+
+ return {
+ mode: "transcoded",
+ variant: targetVariant,
+ url: buildTranscodeUrl(apiHost, config, vodPath, targetVariant, capabilities),
+ reason: "manual-transcoded",
+ };
+ }
+
+ if (preferDirect("main")) {
+ return {
+ mode: "direct",
+ variant: "main",
+ url: buildDirectUrl(apiHost, vodPath, "main"),
+ reason: "raw-main",
+ };
+ }
+
+ if (preferDirect("sub")) {
+ return {
+ mode: "direct",
+ variant: "sub",
+ url: buildDirectUrl(apiHost, vodPath, "sub"),
+ reason: "raw-sub",
+ };
+ }
+
+ const transcodeVariant = candidates.sub.recordings.length > 0 ? "sub" : "main";
+ if (!transcodeAvailable) {
+ return {
+ mode: "direct",
+ variant: transcodeVariant,
+ url: buildDirectUrl(apiHost, vodPath, transcodeVariant),
+ reason: "direct-fallback",
+ };
+ }
+
+ return {
+ mode: "transcoded",
+ variant: transcodeVariant,
+ url: buildTranscodeUrl(apiHost, config, vodPath, transcodeVariant, capabilities),
+ reason: "transcode-fallback",
+ };
+}
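+
+// Usage sketch (all values illustrative): with an "auto" preference, an
+// HEVC-only main stream on a browser without HEVC support falls through to a
+// playable sub variant, or to the transcode proxy when one is configured.
+//
+// const decision = chooseRecordingPlayback({
+// apiHost: "http://frigate.local:5000",
+// config,
+// recordings,
+// preference: "auto",
+// vodPath: "/vod/front/start/1700000000/end/1700000600/master.m3u8",
+// capabilities: { saveData: false, supports: { h264: true, hevc: false } },
+// });
+// decision.mode is then "direct" or "transcoded", and decision.url is ready
+// to hand to the player.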
diff --git a/web/src/views/live/DraggableGridLayout.tsx b/web/src/views/live/DraggableGridLayout.tsx
index 342865527..fd14c4eff 100644
--- a/web/src/views/live/DraggableGridLayout.tsx
+++ b/web/src/views/live/DraggableGridLayout.tsx
@@ -1,931 +1,938 @@
-import { useUserPersistence } from "@/hooks/use-user-persistence";
-import {
- AllGroupsStreamingSettings,
- BirdseyeConfig,
- CameraConfig,
- FrigateConfig,
-} from "@/types/frigateConfig";
-import React, {
- useCallback,
- useEffect,
- useLayoutEffect,
- useMemo,
- useRef,
- useState,
-} from "react";
-import {
- Layout,
- LayoutItem,
- ResponsiveGridLayout as Responsive,
-} from "react-grid-layout";
-import "react-grid-layout/css/styles.css";
-import "react-resizable/css/styles.css";
-import {
- AudioState,
- LivePlayerMode,
- LiveStreamMetadata,
- StatsState,
- VolumeState,
-} from "@/types/live";
-import { ASPECT_VERTICAL_LAYOUT, ASPECT_WIDE_LAYOUT } from "@/types/record";
-import { Skeleton } from "@/components/ui/skeleton";
-import { useResizeObserver } from "@/hooks/resize-observer";
-import { isEqual } from "lodash";
-import useSWR from "swr";
-import { isDesktop, isMobile } from "react-device-detect";
-import BirdseyeLivePlayer from "@/components/player/BirdseyeLivePlayer";
-import LivePlayer from "@/components/player/LivePlayer";
-import { IoClose } from "react-icons/io5";
-import { LuLayoutDashboard, LuPencil } from "react-icons/lu";
-import { cn } from "@/lib/utils";
-import { EditGroupDialog } from "@/components/filter/CameraGroupSelector";
-import { useUserPersistedOverlayState } from "@/hooks/use-overlay-state";
-import { FaCompress, FaExpand } from "react-icons/fa";
-import {
- Tooltip,
- TooltipTrigger,
- TooltipContent,
-} from "@/components/ui/tooltip";
-import { Toaster } from "@/components/ui/sonner";
-import LiveContextMenu from "@/components/menu/LiveContextMenu";
-import { useStreamingSettings } from "@/context/streaming-settings-provider";
-import { useTranslation } from "react-i18next";
-
-type DraggableGridLayoutProps = {
- cameras: CameraConfig[];
- cameraGroup: string;
- cameraRef: (node: HTMLElement | null) => void;
- containerRef: React.RefObject<HTMLDivElement>;
- includeBirdseye: boolean;
- onSelectCamera: (camera: string) => void;
- windowVisible: boolean;
- visibleCameras: string[];
- isEditMode: boolean;
- setIsEditMode: React.Dispatch<React.SetStateAction<boolean>>;
- fullscreen: boolean;
- toggleFullscreen: () => void;
- preferredLiveModes: { [key: string]: LivePlayerMode };
- setPreferredLiveModes: React.Dispatch<
- React.SetStateAction<{ [key: string]: LivePlayerMode }>
- >;
- resetPreferredLiveMode: (cameraName: string) => void;
- isRestreamedStates: { [key: string]: boolean };
- supportsAudioOutputStates: {
- [key: string]: { supportsAudio: boolean; cameraName: string };
- };
- streamMetadata: { [key: string]: LiveStreamMetadata };
-};
-export default function DraggableGridLayout({
- cameras,
- cameraGroup,
- containerRef,
- cameraRef,
- includeBirdseye,
- onSelectCamera,
- windowVisible,
- visibleCameras,
- isEditMode,
- setIsEditMode,
- fullscreen,
- toggleFullscreen,
- preferredLiveModes,
- setPreferredLiveModes,
- resetPreferredLiveMode,
- isRestreamedStates,
- supportsAudioOutputStates,
- streamMetadata,
-}: DraggableGridLayoutProps) {
- const { t } = useTranslation(["views/live"]);
- const { data: config } = useSWR("config");
- const birdseyeConfig = useMemo(() => config?.birdseye, [config]);
-
- // preferred live modes per camera
-
- const [globalAutoLive] = useUserPersistence("autoLiveView", true);
- const [displayCameraNames] = useUserPersistence("displayCameraNames", false);
-
- const { allGroupsStreamingSettings, setAllGroupsStreamingSettings } =
- useStreamingSettings();
-
- const currentGroupStreamingSettings = useMemo(() => {
- if (cameraGroup && cameraGroup != "default" && allGroupsStreamingSettings) {
- return allGroupsStreamingSettings[cameraGroup];
- }
- }, [allGroupsStreamingSettings, cameraGroup]);
-
- // grid layout
-
- const [gridLayout, setGridLayout, isGridLayoutLoaded] =
- useUserPersistence<Layout>(`${cameraGroup}-draggable-layout`);
-
- const [group] = useUserPersistedOverlayState(
- "cameraGroup",
- "default" as string,
- );
-
- const groups = useMemo(() => {
- if (!config) {
- return [];
- }
-
- return Object.entries(config.camera_groups).sort(
- (a, b) => a[1].order - b[1].order,
- );
- }, [config]);
-
- // editing
-
- const [editGroup, setEditGroup] = useState(false);
- const [showCircles, setShowCircles] = useState(true);
-
- useEffect(() => {
- setIsEditMode(false);
- setEditGroup(false);
- // Reset camera tracking state when group changes to prevent the camera-change
- // effect from incorrectly overwriting the loaded layout
- setCurrentCameras(undefined);
- setCurrentIncludeBirdseye(undefined);
- setCurrentGridLayout(undefined);
- }, [cameraGroup, setIsEditMode]);
-
- // camera state
-
- const [currentCameras, setCurrentCameras] = useState<CameraConfig[]>();
- const [currentIncludeBirdseye, setCurrentIncludeBirdseye] =
- useState<boolean>();
- const [currentGridLayout, setCurrentGridLayout] = useState<
- Layout | undefined
- >();
-
- const handleLayoutChange = useCallback(
- (currentLayout: Layout) => {
- if (!isGridLayoutLoaded || !isEqual(gridLayout, currentGridLayout)) {
- return;
- }
- // save layout to idb
- setGridLayout(currentLayout);
- setShowCircles(true);
- },
- [setGridLayout, isGridLayoutLoaded, gridLayout, currentGridLayout],
- );
-
- const generateLayout = useCallback(
- (baseLayout: Layout | undefined) => {
- if (!isGridLayoutLoaded) {
- return;
- }
-
- const cameraNames =
- includeBirdseye && birdseyeConfig?.enabled
- ? ["birdseye", ...cameras.map((camera) => camera?.name || "")]
- : cameras.map((camera) => camera?.name || "");
-
- const optionsMap: LayoutItem[] = baseLayout
- ? baseLayout.filter((layout) => cameraNames?.includes(layout.i))
- : [];
-
- cameraNames.forEach((cameraName, index) => {
- const existingLayout = optionsMap.find(
- (layout) => layout.i === cameraName,
- );
-
- // Skip if the camera already exists in the layout
- if (existingLayout) {
- return;
- }
-
- let aspectRatio;
- let col;
-
- // Handle "birdseye" camera as a special case
- if (cameraName === "birdseye") {
- aspectRatio =
- (birdseyeConfig?.width || 1) / (birdseyeConfig?.height || 1);
- col = 0; // Set birdseye camera in the first column
- } else {
- const camera = cameras.find((cam) => cam.name === cameraName);
- aspectRatio =
- (camera && camera?.detect.width / camera?.detect.height) || 16 / 9;
- col = index % 3; // Regular cameras distributed across columns
- }
-
- // Calculate layout options based on aspect ratio
- const columnsPerPlayer = 4;
- let height;
- let width;
-
- if (aspectRatio < 1) {
- // Portrait
- height = 2 * columnsPerPlayer;
- width = columnsPerPlayer;
- } else if (aspectRatio > 2) {
- // Wide
- height = 1 * columnsPerPlayer;
- width = 2 * columnsPerPlayer;
- } else {
- // Landscape
- height = 1 * columnsPerPlayer;
- width = columnsPerPlayer;
- }
-
- const options = {
- i: cameraName,
- x: col * width,
- y: 0, // don't set y, grid does automatically
- w: width,
- h: height,
- };
-
- optionsMap.push(options);
- });
-
- return optionsMap;
- },
- [cameras, isGridLayoutLoaded, includeBirdseye, birdseyeConfig],
- );
-
- useEffect(() => {
- if (isGridLayoutLoaded) {
- if (gridLayout) {
- // set current grid layout from loaded, possibly adding new cameras
- const updatedLayout = generateLayout(gridLayout);
- setCurrentGridLayout(updatedLayout);
- // Only save if cameras were added (layout changed)
- if (!isEqual(updatedLayout, gridLayout)) {
- setGridLayout(updatedLayout);
- }
- // Set camera tracking state so the camera-change effect has a baseline
- setCurrentCameras(cameras);
- setCurrentIncludeBirdseye(includeBirdseye);
- } else {
- // idb is empty, set it with an initial layout
- const newLayout = generateLayout(undefined);
- setCurrentGridLayout(newLayout);
- setGridLayout(newLayout);
- setCurrentCameras(cameras);
- setCurrentIncludeBirdseye(includeBirdseye);
- }
- }
- }, [
- gridLayout,
- setGridLayout,
- isGridLayoutLoaded,
- generateLayout,
- cameras,
- includeBirdseye,
- ]);
-
- useEffect(() => {
- // Only regenerate layout when cameras change WITHIN an already-loaded group
- // Skip if currentCameras is undefined (means we just switched groups and
- // the first useEffect hasn't run yet to set things up)
- if (!isGridLayoutLoaded || currentCameras === undefined) {
- return;
- }
-
- if (
- !isEqual(cameras, currentCameras) ||
- includeBirdseye !== currentIncludeBirdseye
- ) {
- setCurrentCameras(cameras);
- setCurrentIncludeBirdseye(includeBirdseye);
-
- // Regenerate layout based on current layout, adding any new cameras
- const updatedLayout = generateLayout(currentGridLayout);
- setCurrentGridLayout(updatedLayout);
- setGridLayout(updatedLayout);
- }
- }, [
- cameras,
- includeBirdseye,
- currentCameras,
- currentIncludeBirdseye,
- currentGridLayout,
- generateLayout,
- setGridLayout,
- isGridLayoutLoaded,
- ]);
-
- const [marginValue, setMarginValue] = useState(16);
-
- // calculate margin value for browsers that don't have default font size of 16px
- useLayoutEffect(() => {
- const calculateRemValue = () => {
- const htmlElement = document.documentElement;
- const fontSize = window.getComputedStyle(htmlElement).fontSize;
- setMarginValue(parseFloat(fontSize));
- };
-
- calculateRemValue();
- }, []);
-
- const gridContainerRef = useRef<HTMLDivElement>(null);
-
- const [{ width: containerWidth, height: containerHeight }] =
- useResizeObserver(gridContainerRef);
-
- const scrollBarWidth = useMemo(() => {
- if (containerWidth && containerHeight && containerRef.current) {
- return (
- containerRef.current.offsetWidth - containerRef.current.clientWidth
- );
- }
- return 0;
- }, [containerRef, containerHeight, containerWidth]);
-
- const availableWidth = useMemo(
- () => (scrollBarWidth ? containerWidth + scrollBarWidth : containerWidth),
- [containerWidth, scrollBarWidth],
- );
-
- const hasScrollbar = useMemo(() => {
- if (containerHeight && containerRef.current) {
- return (
- containerRef.current.offsetHeight < containerRef.current.scrollHeight
- );
- }
- }, [containerRef, containerHeight]);
-
- const cellHeight = useMemo(() => {
- const aspectRatio = 16 / 9;
- // subtract container margin, 1 camera takes up at least 4 rows
- // account for additional margin on bottom of each row
- return (
- ((availableWidth ?? window.innerWidth) - 2 * marginValue) /
- 12 /
- aspectRatio -
- marginValue +
- marginValue / 4
- );
- }, [availableWidth, marginValue]);
-
- const handleResize = (
- _layout: Layout,
- oldLayoutItem: LayoutItem | null,
- layoutItem: LayoutItem | null,
- placeholder: LayoutItem | null,
- ) => {
- if (!oldLayoutItem || !layoutItem || !placeholder) return;
-
- const heightDiff = layoutItem.h - oldLayoutItem.h;
- const widthDiff = layoutItem.w - oldLayoutItem.w;
- const changeCoef = oldLayoutItem.w / oldLayoutItem.h;
-
- let newWidth, newHeight;
-
- if (Math.abs(heightDiff) < Math.abs(widthDiff)) {
- newHeight = Math.round(layoutItem.w / changeCoef);
- newWidth = Math.round(newHeight * changeCoef);
- } else {
- newWidth = Math.round(layoutItem.h * changeCoef);
- newHeight = Math.round(newWidth / changeCoef);
- }
-
- // Ensure dimensions maintain aspect ratio and fit within the grid
- if (layoutItem.x + newWidth > 12) {
- newWidth = 12 - layoutItem.x;
- newHeight = Math.round(newWidth / changeCoef);
- }
-
- if (changeCoef == 0.5) {
- // portrait
- newHeight = Math.ceil(newHeight / 2) * 2;
- } else if (changeCoef == 2) {
- // pano/wide
- newHeight = Math.ceil(newHeight * 2) / 2;
- }
-
- newWidth = Math.round(newHeight * changeCoef);
-
- layoutItem.w = newWidth;
- layoutItem.h = newHeight;
- placeholder.w = layoutItem.w;
- placeholder.h = layoutItem.h;
- };
-
- // audio and stats states
-
- const [audioStates, setAudioStates] = useState<AudioState>({});
- const [volumeStates, setVolumeStates] = useState<VolumeState>({});
- const [statsStates, setStatsStates] = useState<StatsState>(() => {
- const initialStates: StatsState = {};
- cameras.forEach((camera) => {
- initialStates[camera.name] = false;
- });
- return initialStates;
- });
-
- const toggleStats = (cameraName: string): void => {
- setStatsStates((prev) => ({
- ...prev,
- [cameraName]: !prev[cameraName],
- }));
- };
-
- useEffect(() => {
- if (!allGroupsStreamingSettings) {
- return;
- }
-
- const initialAudioStates: AudioState = {};
- const initialVolumeStates: VolumeState = {};
-
- Object.entries(allGroupsStreamingSettings).forEach(([_, groupSettings]) => {
- if (groupSettings) {
- Object.entries(groupSettings).forEach(([camera, cameraSettings]) => {
- initialAudioStates[camera] = cameraSettings.playAudio ?? false;
- initialVolumeStates[camera] = cameraSettings.volume ?? 1;
- });
- }
- });
-
- setAudioStates(initialAudioStates);
- setVolumeStates(initialVolumeStates);
- }, [allGroupsStreamingSettings]);
-
- const toggleAudio = (cameraName: string) => {
- setAudioStates((prev) => ({
- ...prev,
- [cameraName]: !prev[cameraName],
- }));
- };
-
- const onSaveMuting = useCallback(
- (playAudio: boolean) => {
- if (!cameraGroup || !allGroupsStreamingSettings) {
- return;
- }
-
- const existingGroupSettings =
- allGroupsStreamingSettings[cameraGroup] || {};
-
- const updatedSettings: AllGroupsStreamingSettings = {
- ...Object.fromEntries(
- Object.entries(allGroupsStreamingSettings || {}).filter(
- ([key]) => key !== cameraGroup,
- ),
- ),
- [cameraGroup]: {
- ...existingGroupSettings,
- ...Object.fromEntries(
- Object.entries(existingGroupSettings).map(
- ([cameraName, settings]) => [
- cameraName,
- {
- ...settings,
- playAudio: playAudio,
- },
- ],
- ),
- ),
- },
- };
-
- setAllGroupsStreamingSettings?.(updatedSettings);
- },
- [cameraGroup, allGroupsStreamingSettings, setAllGroupsStreamingSettings],
- );
-
- const muteAll = () => {
- const updatedStates: AudioState = {};
- cameras.forEach((camera) => {
- updatedStates[camera.name] = false;
- });
- setAudioStates(updatedStates);
- onSaveMuting(false);
- };
-
- const unmuteAll = () => {
- const updatedStates: AudioState = {};
- cameras.forEach((camera) => {
- updatedStates[camera.name] = true;
- });
- setAudioStates(updatedStates);
- onSaveMuting(true);
- };
-
- return (
- <>
-
- {!isGridLayoutLoaded ||
- !currentGridLayout ||
- !isEqual(cameras, currentCameras) ||
- includeBirdseye !== currentIncludeBirdseye ? (
-
- {includeBirdseye && birdseyeConfig?.enabled && (
-
- )}
- {cameras.map((camera) => {
- return (
-
- );
- })}
-
- ) : (
-
-
-
setShowCircles(false)}
- onResizeStop={handleLayoutChange}
- >
- {includeBirdseye && birdseyeConfig?.enabled && (
- onSelectCamera("birdseye")}
- >
- {isEditMode && showCircles && }
-
- )}
- {cameras.map((camera) => {
- let grow;
- const aspectRatio = camera.detect.width / camera.detect.height;
- if (aspectRatio > ASPECT_WIDE_LAYOUT) {
- grow = `aspect-wide w-full`;
- } else if (aspectRatio < ASPECT_VERTICAL_LAYOUT) {
- grow = `aspect-tall h-full`;
- } else {
- grow = "aspect-video";
- }
- const availableStreams = camera.live.streams || {};
- const firstStreamEntry = Object.values(availableStreams)[0] || "";
-
- const streamNameFromSettings =
- currentGroupStreamingSettings?.[camera.name]?.streamName || "";
- const streamExists =
- streamNameFromSettings &&
- Object.values(availableStreams).includes(
- streamNameFromSettings,
- );
-
- const streamName = streamExists
- ? streamNameFromSettings
- : firstStreamEntry;
- const streamType =
- currentGroupStreamingSettings?.[camera.name]?.streamType;
- const autoLive =
- streamType !== undefined
- ? streamType !== "no-streaming"
- : undefined;
- const showStillWithoutActivity =
- currentGroupStreamingSettings?.[camera.name]?.streamType !==
- "continuous";
- const useWebGL =
- currentGroupStreamingSettings?.[camera.name]
- ?.compatibilityMode || false;
- return (
- toggleAudio(camera.name)}
- statsState={statsStates[camera.name]}
- toggleStats={() => toggleStats(camera.name)}
- volumeState={volumeStates[camera.name]}
- setVolumeState={(value) =>
- setVolumeStates((prev) => ({
- ...prev,
- [camera.name]: value,
- }))
- }
- muteAll={muteAll}
- unmuteAll={unmuteAll}
- resetPreferredLiveMode={() =>
- resetPreferredLiveMode(camera.name)
- }
- config={config}
- streamMetadata={streamMetadata}
- >
- {
- !isEditMode && onSelectCamera(camera.name);
- }}
- onError={(e) => {
- setPreferredLiveModes((prevModes) => {
- const newModes = { ...prevModes };
- if (e === "mse-decode") {
- newModes[camera.name] = "webrtc";
- } else {
- newModes[camera.name] = "jsmpeg";
- }
- return newModes;
- });
- }}
- onResetLiveMode={() => resetPreferredLiveMode(camera.name)}
- playAudio={audioStates[camera.name]}
- volume={volumeStates[camera.name]}
- />
- {isEditMode && showCircles && }
-
- );
- })}
-
- {isDesktop && (
-
-
-
-
- setIsEditMode((prevIsEditMode) => !prevIsEditMode)
- }
- >
- {isEditMode ? (
-
- ) : (
-
- )}
-
-
-
- {isEditMode
- ? t("editLayout.exitEdit")
- : t("editLayout.label")}
-
-
- {!isEditMode && (
- <>
- {!fullscreen && (
-
-
-
- setEditGroup((prevEditGroup) => !prevEditGroup)
- }
- >
-
-
-
-
- {isEditMode
- ? t("editLayout.exitEdit")
- : t("editLayout.group.label")}
-
-
- )}
-
-
-
- {fullscreen ? (
-
- ) : (
-
- )}
-
-
-
- {fullscreen
- ? t("button.exitFullscreen", { ns: "common" })
- : t("button.fullscreen", { ns: "common" })}
-
-
- >
- )}
-
- )}
-
- )}
- >
- );
-}
-
-function CornerCircles() {
- return (
- <>
-
-
-
-
- >
- );
-}
-
-type BirdseyeLivePlayerGridItemProps = {
- style?: React.CSSProperties;
- className?: string;
- onMouseDown?: React.MouseEventHandler;
- onMouseUp?: React.MouseEventHandler;
- onTouchEnd?: React.TouchEventHandler;
- children?: React.ReactNode;
- birdseyeConfig: BirdseyeConfig;
- liveMode: LivePlayerMode;
- onClick: () => void;
-};
-
-const BirdseyeLivePlayerGridItem = React.forwardRef<
- HTMLDivElement,
- BirdseyeLivePlayerGridItemProps
->(
- (
- {
- style,
- className,
- onMouseDown,
- onMouseUp,
- onTouchEnd,
- children,
- birdseyeConfig,
- liveMode,
- onClick,
- ...props
- },
- ref,
- ) => {
- return (
-
- }
- />
- {children}
-
- );
- },
-);
-
-type GridLiveContextMenuProps = {
- className?: string;
- style?: React.CSSProperties;
- onMouseDown?: React.MouseEventHandler;
- onMouseUp?: React.MouseEventHandler;
- onTouchEnd?: React.TouchEventHandler;
- children?: React.ReactNode;
- camera: string;
- streamName: string;
- cameraGroup: string;
- preferredLiveMode: string;
- isRestreamed: boolean;
- supportsAudio: boolean;
- audioState: boolean;
- toggleAudio: () => void;
- statsState: boolean;
- toggleStats: () => void;
- volumeState?: number;
- setVolumeState: (volumeState: number) => void;
- muteAll: () => void;
- unmuteAll: () => void;
- resetPreferredLiveMode: () => void;
- config?: FrigateConfig;
- streamMetadata?: { [key: string]: LiveStreamMetadata };
-};
-
-const GridLiveContextMenu = React.forwardRef<
- HTMLDivElement,
- GridLiveContextMenuProps
->(
- (
- {
- className,
- style,
- onMouseDown,
- onMouseUp,
- onTouchEnd,
- children,
- camera,
- streamName,
- cameraGroup,
- preferredLiveMode,
- isRestreamed,
- supportsAudio,
- audioState,
- toggleAudio,
- statsState,
- toggleStats,
- volumeState,
- setVolumeState,
- muteAll,
- unmuteAll,
- resetPreferredLiveMode,
- config,
- streamMetadata,
- ...props
- },
- ref,
- ) => {
- return (
-
-
- {children}
-
-
- );
- },
-);
+import { useUserPersistence } from "@/hooks/use-user-persistence";
+import {
+ AllGroupsStreamingSettings,
+ BirdseyeConfig,
+ CameraConfig,
+ FrigateConfig,
+} from "@/types/frigateConfig";
+import React, {
+ useCallback,
+ useEffect,
+ useLayoutEffect,
+ useMemo,
+ useRef,
+ useState,
+} from "react";
+import {
+ Layout,
+ LayoutItem,
+ ResponsiveGridLayout as Responsive,
+} from "react-grid-layout";
+import "react-grid-layout/css/styles.css";
+import "react-resizable/css/styles.css";
+import {
+ AudioState,
+ LivePlayerMode,
+ LiveStreamMetadata,
+ StatsState,
+ VolumeState,
+} from "@/types/live";
+import { ASPECT_VERTICAL_LAYOUT, ASPECT_WIDE_LAYOUT } from "@/types/record";
+import { Skeleton } from "@/components/ui/skeleton";
+import { useResizeObserver } from "@/hooks/resize-observer";
+import { isEqual } from "lodash";
+import useSWR from "swr";
+import { isDesktop, isMobile } from "react-device-detect";
+import BirdseyeLivePlayer from "@/components/player/BirdseyeLivePlayer";
+import LivePlayer from "@/components/player/LivePlayer";
+import { IoClose } from "react-icons/io5";
+import { LuLayoutDashboard, LuPencil } from "react-icons/lu";
+import { cn } from "@/lib/utils";
+import { EditGroupDialog } from "@/components/filter/CameraGroupSelector";
+import { useUserPersistedOverlayState } from "@/hooks/use-overlay-state";
+import { FaCompress, FaExpand } from "react-icons/fa";
+import {
+ Tooltip,
+ TooltipTrigger,
+ TooltipContent,
+} from "@/components/ui/tooltip";
+import { Toaster } from "@/components/ui/sonner";
+import LiveContextMenu from "@/components/menu/LiveContextMenu";
+import { useStreamingSettings } from "@/context/streaming-settings-provider";
+import { useTranslation } from "react-i18next";
+import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";
+import { chooseAutoLiveStream } from "@/utils/liveStreamSelection";
+
+type DraggableGridLayoutProps = {
+ cameras: CameraConfig[];
+ cameraGroup: string;
+ cameraRef: (node: HTMLElement | null) => void;
+ containerRef: React.RefObject<HTMLDivElement>;
+ includeBirdseye: boolean;
+ onSelectCamera: (camera: string) => void;
+ windowVisible: boolean;
+ visibleCameras: string[];
+ isEditMode: boolean;
+ setIsEditMode: React.Dispatch<React.SetStateAction<boolean>>;
+ fullscreen: boolean;
+ toggleFullscreen: () => void;
+ preferredLiveModes: { [key: string]: LivePlayerMode };
+ setPreferredLiveModes: React.Dispatch<
+ React.SetStateAction<{ [key: string]: LivePlayerMode }>
+ >;
+ resetPreferredLiveMode: (cameraName: string) => void;
+ isRestreamedStates: { [key: string]: boolean };
+ supportsAudioOutputStates: {
+ [key: string]: { supportsAudio: boolean; cameraName: string };
+ };
+ streamMetadata: { [key: string]: LiveStreamMetadata };
+};
+export default function DraggableGridLayout({
+ cameras,
+ cameraGroup,
+ containerRef,
+ cameraRef,
+ includeBirdseye,
+ onSelectCamera,
+ windowVisible,
+ visibleCameras,
+ isEditMode,
+ setIsEditMode,
+ fullscreen,
+ toggleFullscreen,
+ preferredLiveModes,
+ setPreferredLiveModes,
+ resetPreferredLiveMode,
+ isRestreamedStates,
+ supportsAudioOutputStates,
+ streamMetadata,
+}: DraggableGridLayoutProps) {
+ const { t } = useTranslation(["views/live"]);
+ const playbackCapabilities = usePlaybackCapabilities([]);
+ const { data: config } = useSWR("config");
+ const birdseyeConfig = useMemo(() => config?.birdseye, [config]);
+
+ // preferred live modes per camera
+
+ const [globalAutoLive] = useUserPersistence("autoLiveView", true);
+ const [displayCameraNames] = useUserPersistence("displayCameraNames", false);
+
+ const { allGroupsStreamingSettings, setAllGroupsStreamingSettings } =
+ useStreamingSettings();
+
+ const currentGroupStreamingSettings = useMemo(() => {
+ if (cameraGroup && cameraGroup != "default" && allGroupsStreamingSettings) {
+ return allGroupsStreamingSettings[cameraGroup];
+ }
+ }, [allGroupsStreamingSettings, cameraGroup]);
+
+ // grid layout
+
+ const [gridLayout, setGridLayout, isGridLayoutLoaded] =
+ useUserPersistence<Layout>(`${cameraGroup}-draggable-layout`);
+
+ const [group] = useUserPersistedOverlayState(
+ "cameraGroup",
+ "default" as string,
+ );
+
+ const groups = useMemo(() => {
+ if (!config) {
+ return [];
+ }
+
+ return Object.entries(config.camera_groups).sort(
+ (a, b) => a[1].order - b[1].order,
+ );
+ }, [config]);
+
+ // editing
+
+ const [editGroup, setEditGroup] = useState(false);
+ const [showCircles, setShowCircles] = useState(true);
+
+ useEffect(() => {
+ setIsEditMode(false);
+ setEditGroup(false);
+ // Reset camera tracking state when group changes to prevent the camera-change
+ // effect from incorrectly overwriting the loaded layout
+ setCurrentCameras(undefined);
+ setCurrentIncludeBirdseye(undefined);
+ setCurrentGridLayout(undefined);
+ }, [cameraGroup, setIsEditMode]);
+
+ // camera state
+
+ const [currentCameras, setCurrentCameras] = useState<CameraConfig[]>();
+ const [currentIncludeBirdseye, setCurrentIncludeBirdseye] =
+ useState<boolean>();
+ const [currentGridLayout, setCurrentGridLayout] = useState<
+ Layout | undefined
+ >();
+
+ const handleLayoutChange = useCallback(
+ (currentLayout: Layout) => {
+ if (!isGridLayoutLoaded || !isEqual(gridLayout, currentGridLayout)) {
+ return;
+ }
+ // save layout to idb
+ setGridLayout(currentLayout);
+ setShowCircles(true);
+ },
+ [setGridLayout, isGridLayoutLoaded, gridLayout, currentGridLayout],
+ );
+
+ const generateLayout = useCallback(
+ (baseLayout: Layout | undefined) => {
+ if (!isGridLayoutLoaded) {
+ return;
+ }
+
+ const cameraNames =
+ includeBirdseye && birdseyeConfig?.enabled
+ ? ["birdseye", ...cameras.map((camera) => camera?.name || "")]
+ : cameras.map((camera) => camera?.name || "");
+
+ const optionsMap: LayoutItem[] = baseLayout
+ ? baseLayout.filter((layout) => cameraNames?.includes(layout.i))
+ : [];
+
+ cameraNames.forEach((cameraName, index) => {
+ const existingLayout = optionsMap.find(
+ (layout) => layout.i === cameraName,
+ );
+
+ // Skip if the camera already exists in the layout
+ if (existingLayout) {
+ return;
+ }
+
+ let aspectRatio;
+ let col;
+
+ // Handle "birdseye" camera as a special case
+ if (cameraName === "birdseye") {
+ aspectRatio =
+ (birdseyeConfig?.width || 1) / (birdseyeConfig?.height || 1);
+ col = 0; // Set birdseye camera in the first column
+ } else {
+ const camera = cameras.find((cam) => cam.name === cameraName);
+ aspectRatio =
+ (camera && camera?.detect.width / camera?.detect.height) || 16 / 9;
+ col = index % 3; // Regular cameras distributed across columns
+ }
+
+ // Calculate layout options based on aspect ratio
+ const columnsPerPlayer = 4;
+ let height;
+ let width;
+
+ if (aspectRatio < 1) {
+ // Portrait
+ height = 2 * columnsPerPlayer;
+ width = columnsPerPlayer;
+ } else if (aspectRatio > 2) {
+ // Wide
+ height = 1 * columnsPerPlayer;
+ width = 2 * columnsPerPlayer;
+ } else {
+ // Landscape
+ height = 1 * columnsPerPlayer;
+ width = columnsPerPlayer;
+ }
+
+ const options = {
+ i: cameraName,
+ x: col * width,
+ y: 0, // don't set y, grid does automatically
+ w: width,
+ h: height,
+ };
+
+ optionsMap.push(options);
+ });
+
+ return optionsMap;
+ },
+ [cameras, isGridLayoutLoaded, includeBirdseye, birdseyeConfig],
+ );
+
+ useEffect(() => {
+ if (isGridLayoutLoaded) {
+ if (gridLayout) {
+ // set current grid layout from loaded, possibly adding new cameras
+ const updatedLayout = generateLayout(gridLayout);
+ setCurrentGridLayout(updatedLayout);
+ // Only save if cameras were added (layout changed)
+ if (!isEqual(updatedLayout, gridLayout)) {
+ setGridLayout(updatedLayout);
+ }
+ // Set camera tracking state so the camera-change effect has a baseline
+ setCurrentCameras(cameras);
+ setCurrentIncludeBirdseye(includeBirdseye);
+ } else {
+ // idb is empty, set it with an initial layout
+ const newLayout = generateLayout(undefined);
+ setCurrentGridLayout(newLayout);
+ setGridLayout(newLayout);
+ setCurrentCameras(cameras);
+ setCurrentIncludeBirdseye(includeBirdseye);
+ }
+ }
+ }, [
+ gridLayout,
+ setGridLayout,
+ isGridLayoutLoaded,
+ generateLayout,
+ cameras,
+ includeBirdseye,
+ ]);
+
+ useEffect(() => {
+ // Only regenerate layout when cameras change WITHIN an already-loaded group
+ // Skip if currentCameras is undefined (means we just switched groups and
+ // the first useEffect hasn't run yet to set things up)
+ if (!isGridLayoutLoaded || currentCameras === undefined) {
+ return;
+ }
+
+ if (
+ !isEqual(cameras, currentCameras) ||
+ includeBirdseye !== currentIncludeBirdseye
+ ) {
+ setCurrentCameras(cameras);
+ setCurrentIncludeBirdseye(includeBirdseye);
+
+ // Regenerate layout based on current layout, adding any new cameras
+ const updatedLayout = generateLayout(currentGridLayout);
+ setCurrentGridLayout(updatedLayout);
+ setGridLayout(updatedLayout);
+ }
+ }, [
+ cameras,
+ includeBirdseye,
+ currentCameras,
+ currentIncludeBirdseye,
+ currentGridLayout,
+ generateLayout,
+ setGridLayout,
+ isGridLayoutLoaded,
+ ]);
+
+ const [marginValue, setMarginValue] = useState(16);
+
+ // calculate margin value for browsers that don't have default font size of 16px
+ useLayoutEffect(() => {
+ const calculateRemValue = () => {
+ const htmlElement = document.documentElement;
+ const fontSize = window.getComputedStyle(htmlElement).fontSize;
+ setMarginValue(parseFloat(fontSize));
+ };
+
+ calculateRemValue();
+ }, []);
+
+ const gridContainerRef = useRef<HTMLDivElement>(null);
+
+ const [{ width: containerWidth, height: containerHeight }] =
+ useResizeObserver(gridContainerRef);
+
+ const scrollBarWidth = useMemo(() => {
+ if (containerWidth && containerHeight && containerRef.current) {
+ return (
+ containerRef.current.offsetWidth - containerRef.current.clientWidth
+ );
+ }
+ return 0;
+ }, [containerRef, containerHeight, containerWidth]);
+
+ const availableWidth = useMemo(
+ () => (scrollBarWidth ? containerWidth + scrollBarWidth : containerWidth),
+ [containerWidth, scrollBarWidth],
+ );
+
+ const hasScrollbar = useMemo(() => {
+ if (containerHeight && containerRef.current) {
+ return (
+ containerRef.current.offsetHeight < containerRef.current.scrollHeight
+ );
+ }
+ }, [containerRef, containerHeight]);
+
+ const cellHeight = useMemo(() => {
+ const aspectRatio = 16 / 9;
+ // subtract container margin, 1 camera takes up at least 4 rows
+ // account for additional margin on bottom of each row
+ return (
+ ((availableWidth ?? window.innerWidth) - 2 * marginValue) /
+ 12 /
+ aspectRatio -
+ marginValue +
+ marginValue / 4
+ );
+ }, [availableWidth, marginValue]);
+
+ const handleResize = (
+ _layout: Layout,
+ oldLayoutItem: LayoutItem | null,
+ layoutItem: LayoutItem | null,
+ placeholder: LayoutItem | null,
+ ) => {
+ if (!oldLayoutItem || !layoutItem || !placeholder) return;
+
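+ // preserve the camera's aspect ratio while resizing: follow whichever
+ // dimension the user changed more and derive the other from the
+ // original width/height ratio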
+ const heightDiff = layoutItem.h - oldLayoutItem.h;
+ const widthDiff = layoutItem.w - oldLayoutItem.w;
+ const changeCoef = oldLayoutItem.w / oldLayoutItem.h;
+
+ let newWidth, newHeight;
+
+ if (Math.abs(heightDiff) < Math.abs(widthDiff)) {
+ newHeight = Math.round(layoutItem.w / changeCoef);
+ newWidth = Math.round(newHeight * changeCoef);
+ } else {
+ newWidth = Math.round(layoutItem.h * changeCoef);
+ newHeight = Math.round(newWidth / changeCoef);
+ }
+
+ // Ensure dimensions maintain aspect ratio and fit within the grid
+ if (layoutItem.x + newWidth > 12) {
+ newWidth = 12 - layoutItem.x;
+ newHeight = Math.round(newWidth / changeCoef);
+ }
+
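+ // round heights so the derived width stays a whole number of columns:
+ // portrait (1:2) needs an even height, wide (2:1) a multiple of 0.5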
+ if (changeCoef == 0.5) {
+ // portrait
+ newHeight = Math.ceil(newHeight / 2) * 2;
+ } else if (changeCoef == 2) {
+ // pano/wide
+ newHeight = Math.ceil(newHeight * 2) / 2;
+ }
+
+ newWidth = Math.round(newHeight * changeCoef);
+
+ layoutItem.w = newWidth;
+ layoutItem.h = newHeight;
+ placeholder.w = layoutItem.w;
+ placeholder.h = layoutItem.h;
+ };
+
+ // audio and stats states
+
+ const [audioStates, setAudioStates] = useState({});
+ const [volumeStates, setVolumeStates] = useState({});
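+ // stats overlays start hidden for every camera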
+ const [statsStates, setStatsStates] = useState(() => {
+ const initialStates: StatsState = {};
+ cameras.forEach((camera) => {
+ initialStates[camera.name] = false;
+ });
+ return initialStates;
+ });
+
+ const toggleStats = (cameraName: string): void => {
+ setStatsStates((prev) => ({
+ ...prev,
+ [cameraName]: !prev[cameraName],
+ }));
+ };
+
+ useEffect(() => {
+ if (!allGroupsStreamingSettings) {
+ return;
+ }
+
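+ // seed per-camera audio and volume state from the persisted
+ // group streaming settings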
+ const initialAudioStates: AudioState = {};
+ const initialVolumeStates: VolumeState = {};
+
+ Object.values(allGroupsStreamingSettings).forEach((groupSettings) => {
+ if (groupSettings) {
+ Object.entries(groupSettings).forEach(([camera, cameraSettings]) => {
+ initialAudioStates[camera] = cameraSettings.playAudio ?? false;
+ initialVolumeStates[camera] = cameraSettings.volume ?? 1;
+ });
+ }
+ });
+
+ setAudioStates(initialAudioStates);
+ setVolumeStates(initialVolumeStates);
+ }, [allGroupsStreamingSettings]);
+
+ const toggleAudio = (cameraName: string) => {
+ setAudioStates((prev) => ({
+ ...prev,
+ [cameraName]: !prev[cameraName],
+ }));
+ };
+
+ const onSaveMuting = useCallback(
+ (playAudio: boolean) => {
+ if (!cameraGroup || !allGroupsStreamingSettings) {
+ return;
+ }
+
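+ // rebuild the settings map, applying the new playAudio value to every
+ // camera in the current group while leaving other groups untouched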
+ const existingGroupSettings =
+ allGroupsStreamingSettings[cameraGroup] || {};
+
+ const updatedSettings: AllGroupsStreamingSettings = {
+ ...Object.fromEntries(
+ Object.entries(allGroupsStreamingSettings || {}).filter(
+ ([key]) => key !== cameraGroup,
+ ),
+ ),
+ [cameraGroup]: {
+ ...existingGroupSettings,
+ ...Object.fromEntries(
+ Object.entries(existingGroupSettings).map(
+ ([cameraName, settings]) => [
+ cameraName,
+ {
+ ...settings,
+ playAudio: playAudio,
+ },
+ ],
+ ),
+ ),
+ },
+ };
+
+ setAllGroupsStreamingSettings?.(updatedSettings);
+ },
+ [cameraGroup, allGroupsStreamingSettings, setAllGroupsStreamingSettings],
+ );
+
+ const muteAll = () => {
+ const updatedStates: AudioState = {};
+ cameras.forEach((camera) => {
+ updatedStates[camera.name] = false;
+ });
+ setAudioStates(updatedStates);
+ onSaveMuting(false);
+ };
+
+ const unmuteAll = () => {
+ const updatedStates: AudioState = {};
+ cameras.forEach((camera) => {
+ updatedStates[camera.name] = true;
+ });
+ setAudioStates(updatedStates);
+ onSaveMuting(true);
+ };
+
+ return (
+ <>
+
+ {!isGridLayoutLoaded ||
+ !currentGridLayout ||
+ !isEqual(cameras, currentCameras) ||
+ includeBirdseye !== currentIncludeBirdseye ? (
+
+ {includeBirdseye && birdseyeConfig?.enabled && (
+
+ )}
+ {cameras.map((camera) => {
+ return (
+
+ );
+ })}
+
+ ) : (
+
+
+
setShowCircles(false)}
+ onResizeStop={handleLayoutChange}
+ >
+ {includeBirdseye && birdseyeConfig?.enabled && (
+ onSelectCamera("birdseye")}
+ >
+ {isEditMode && showCircles && }
+
+ )}
+ {cameras.map((camera) => {
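+ // pick sizing classes from the camera's detect aspect ratio
+ // (wide, tall, or standard 16:9)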
+ let grow;
+ const aspectRatio = camera.detect.width / camera.detect.height;
+ if (aspectRatio > ASPECT_WIDE_LAYOUT) {
+ grow = `aspect-wide w-full`;
+ } else if (aspectRatio < ASPECT_VERTICAL_LAYOUT) {
+ grow = `aspect-tall h-full`;
+ } else {
+ grow = "aspect-video";
+ }
+ const availableStreams = camera.live.streams || {};
+ const firstStreamEntry = chooseAutoLiveStream(
+ availableStreams,
+ playbackCapabilities.estimatedBandwidthBps,
+ playbackCapabilities.saveData,
+ );
+
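+ // prefer the stream configured for this camera if it still exists,
+ // otherwise fall back to the automatically chosen stream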
+ const streamNameFromSettings =
+ currentGroupStreamingSettings?.[camera.name]?.streamName || "";
+ const streamExists =
+ streamNameFromSettings &&
+ Object.values(availableStreams).includes(
+ streamNameFromSettings,
+ );
+
+ const streamName = streamExists
+ ? streamNameFromSettings
+ : firstStreamEntry;
+ const streamType =
+ currentGroupStreamingSettings?.[camera.name]?.streamType;
+ const autoLive =
+ streamType !== undefined
+ ? streamType !== "no-streaming"
+ : undefined;
+ const showStillWithoutActivity =
+ currentGroupStreamingSettings?.[camera.name]?.streamType !==
+ "continuous";
+ const useWebGL =
+ currentGroupStreamingSettings?.[camera.name]
+ ?.compatibilityMode || false;
+ return (
+ toggleAudio(camera.name)}
+ statsState={statsStates[camera.name]}
+ toggleStats={() => toggleStats(camera.name)}
+ volumeState={volumeStates[camera.name]}
+ setVolumeState={(value) =>
+ setVolumeStates((prev) => ({
+ ...prev,
+ [camera.name]: value,
+ }))
+ }
+ muteAll={muteAll}
+ unmuteAll={unmuteAll}
+ resetPreferredLiveMode={() =>
+ resetPreferredLiveMode(camera.name)
+ }
+ config={config}
+ streamMetadata={streamMetadata}
+ >
+ {
+ !isEditMode && onSelectCamera(camera.name);
+ }}
+ onError={(e) => {
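+ // downgrade the player on error: MSE decode failures retry with
+ // WebRTC, anything else drops to jsmpeg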
+ setPreferredLiveModes((prevModes) => {
+ const newModes = { ...prevModes };
+ if (e === "mse-decode") {
+ newModes[camera.name] = "webrtc";
+ } else {
+ newModes[camera.name] = "jsmpeg";
+ }
+ return newModes;
+ });
+ }}
+ onResetLiveMode={() => resetPreferredLiveMode(camera.name)}
+ playAudio={audioStates[camera.name]}
+ volume={volumeStates[camera.name]}
+ />
+ {isEditMode && showCircles && }
+
+ );
+ })}
+
+ {isDesktop && (
+
+
+
+
+ setIsEditMode((prevIsEditMode) => !prevIsEditMode)
+ }
+ >
+ {isEditMode ? (
+
+ ) : (
+
+ )}
+
+
+
+ {isEditMode
+ ? t("editLayout.exitEdit")
+ : t("editLayout.label")}
+
+
+ {!isEditMode && (
+ <>
+ {!fullscreen && (
+
+
+
+ setEditGroup((prevEditGroup) => !prevEditGroup)
+ }
+ >
+
+
+
+
+ {isEditMode
+ ? t("editLayout.exitEdit")
+ : t("editLayout.group.label")}
+
+
+ )}
+
+
+
+ {fullscreen ? (
+
+ ) : (
+
+ )}
+
+
+
+ {fullscreen
+ ? t("button.exitFullscreen", { ns: "common" })
+ : t("button.fullscreen", { ns: "common" })}
+
+
+ >
+ )}
+
+ )}
+
+ )}
+ >
+ );
+}
+
+function CornerCircles() {
+ return (
+ <>
+
+
+
+
+ >
+ );
+}
+
+type BirdseyeLivePlayerGridItemProps = {
+ style?: React.CSSProperties;
+ className?: string;
+ onMouseDown?: React.MouseEventHandler;
+ onMouseUp?: React.MouseEventHandler;
+ onTouchEnd?: React.TouchEventHandler;
+ children?: React.ReactNode;
+ birdseyeConfig: BirdseyeConfig;
+ liveMode: LivePlayerMode;
+ onClick: () => void;
+};
+
+const BirdseyeLivePlayerGridItem = React.forwardRef<
+ HTMLDivElement,
+ BirdseyeLivePlayerGridItemProps
+>(
+ (
+ {
+ style,
+ className,
+ onMouseDown,
+ onMouseUp,
+ onTouchEnd,
+ children,
+ birdseyeConfig,
+ liveMode,
+ onClick,
+ ...props
+ },
+ ref,
+ ) => {
+ return (
+
+ }
+ />
+ {children}
+
+ );
+ },
+);
+
+type GridLiveContextMenuProps = {
+ className?: string;
+ style?: React.CSSProperties;
+ onMouseDown?: React.MouseEventHandler;
+ onMouseUp?: React.MouseEventHandler;
+ onTouchEnd?: React.TouchEventHandler;
+ children?: React.ReactNode;
+ camera: string;
+ streamName: string;
+ cameraGroup: string;
+ preferredLiveMode: string;
+ isRestreamed: boolean;
+ supportsAudio: boolean;
+ audioState: boolean;
+ toggleAudio: () => void;
+ statsState: boolean;
+ toggleStats: () => void;
+ volumeState?: number;
+ setVolumeState: (volumeState: number) => void;
+ muteAll: () => void;
+ unmuteAll: () => void;
+ resetPreferredLiveMode: () => void;
+ config?: FrigateConfig;
+ streamMetadata?: { [key: string]: LiveStreamMetadata };
+};
+
+const GridLiveContextMenu = React.forwardRef<
+ HTMLDivElement,
+ GridLiveContextMenuProps
+>(
+ (
+ {
+ className,
+ style,
+ onMouseDown,
+ onMouseUp,
+ onTouchEnd,
+ children,
+ camera,
+ streamName,
+ cameraGroup,
+ preferredLiveMode,
+ isRestreamed,
+ supportsAudio,
+ audioState,
+ toggleAudio,
+ statsState,
+ toggleStats,
+ volumeState,
+ setVolumeState,
+ muteAll,
+ unmuteAll,
+ resetPreferredLiveMode,
+ config,
+ streamMetadata,
+ ...props
+ },
+ ref,
+ ) => {
+ return (
+
+
+ {children}
+
+
+ );
+ },
+);
diff --git a/web/src/views/live/LiveCameraView.tsx b/web/src/views/live/LiveCameraView.tsx
index 418c74068..fcda4d201 100644
--- a/web/src/views/live/LiveCameraView.tsx
+++ b/web/src/views/live/LiveCameraView.tsx
@@ -1,1709 +1,1721 @@
-import {
- useAudioLiveTranscription,
- useAudioState,
- useAudioTranscriptionState,
- useAutotrackingState,
- useDetectState,
- useEnabledState,
- usePtzCommand,
- useRecordingsState,
- useSnapshotsState,
-} from "@/api/ws";
-import CameraFeatureToggle from "@/components/dynamic/CameraFeatureToggle";
-import FilterSwitch from "@/components/filter/FilterSwitch";
-import LivePlayer from "@/components/player/LivePlayer";
-import { Button } from "@/components/ui/button";
-import { Drawer, DrawerContent, DrawerTrigger } from "@/components/ui/drawer";
-import {
- DropdownMenu,
- DropdownMenuContent,
- DropdownMenuTrigger,
-} from "@/components/ui/dropdown-menu";
-import {
- Popover,
- PopoverContent,
- PopoverTrigger,
-} from "@/components/ui/popover";
-import { useResizeObserver } from "@/hooks/resize-observer";
-import useKeyboardListener from "@/hooks/use-keyboard-listener";
-import { CameraConfig, FrigateConfig } from "@/types/frigateConfig";
-import {
- LivePlayerError,
- LiveStreamMetadata,
- VideoResolutionType,
-} from "@/types/live";
-import { RecordingStartingPoint } from "@/types/record";
-import React, {
- useCallback,
- useEffect,
- useMemo,
- useRef,
- useState,
-} from "react";
-import {
- isDesktop,
- isFirefox,
- isIOS,
- isMobile,
- isTablet,
- useMobileOrientation,
-} from "react-device-detect";
-import {
- FaCog,
- FaCompress,
- FaExpand,
- FaMicrophone,
- FaMicrophoneSlash,
-} from "react-icons/fa";
-import { GiSpeaker, GiSpeakerOff } from "react-icons/gi";
-import {
- TbCameraDown,
- TbRecordMail,
- TbRecordMailOff,
- TbViewfinder,
- TbViewfinderOff,
-} from "react-icons/tb";
-import { IoIosWarning, IoMdArrowRoundBack } from "react-icons/io";
-import {
- LuCheck,
- LuEar,
- LuEarOff,
- LuExternalLink,
- LuHistory,
- LuInfo,
- LuPictureInPicture,
- LuPower,
- LuPowerOff,
- LuVideo,
- LuVideoOff,
- LuX,
-} from "react-icons/lu";
-import {
- MdClosedCaption,
- MdClosedCaptionDisabled,
- MdNoPhotography,
- MdOutlineRestartAlt,
- MdPersonOff,
- MdPersonSearch,
- MdPhotoCamera,
-} from "react-icons/md";
-import { Link, useNavigate } from "react-router-dom";
-import { TransformWrapper, TransformComponent } from "react-zoom-pan-pinch";
-import useSWR from "swr";
-import { cn } from "@/lib/utils";
-import { useSessionPersistence } from "@/hooks/use-session-persistence";
-
-import {
- Select,
- SelectContent,
- SelectGroup,
- SelectItem,
- SelectTrigger,
- SelectValue,
-} from "@/components/ui/select";
-import { useUserPersistence } from "@/hooks/use-user-persistence";
-import { Label } from "@/components/ui/label";
-import { Switch } from "@/components/ui/switch";
-import axios from "axios";
-import { toast } from "sonner";
-import { Toaster } from "@/components/ui/sonner";
-import { useIsAdmin } from "@/hooks/use-is-admin";
-import { useTranslation } from "react-i18next";
-import { useDocDomain } from "@/hooks/use-doc-domain";
-import { detectCameraAudioFeatures } from "@/utils/cameraUtil";
-import PtzControlPanel from "@/components/overlay/PtzControlPanel";
-import ObjectSettingsView from "../settings/ObjectSettingsView";
-import { useSearchEffect } from "@/hooks/use-overlay-state";
-import {
- downloadSnapshot,
- fetchCameraSnapshot,
- generateSnapshotFilename,
- grabVideoSnapshot,
- SnapshotResult,
-} from "@/utils/snapshotUtil";
-import ActivityIndicator from "@/components/indicators/activity-indicator";
-
-type LiveCameraViewProps = {
- config?: FrigateConfig;
- camera: CameraConfig;
- supportsFullscreen: boolean;
- fullscreen: boolean;
- toggleFullscreen: () => void;
-};
-export default function LiveCameraView({
- config,
- camera,
- supportsFullscreen,
- fullscreen,
- toggleFullscreen,
-}: LiveCameraViewProps) {
- const { t } = useTranslation(["views/live", "components/dialog"]);
- const navigate = useNavigate();
- const { isPortrait } = useMobileOrientation();
- const mainRef = useRef(null);
- const containerRef = useRef(null);
- const [{ width: windowWidth, height: windowHeight }] =
- useResizeObserver(window);
-
- // supported features
-
- const [streamName, setStreamName, streamNameLoaded] =
- useUserPersistence(
- `${camera.name}-stream`,
- Object.values(camera.live.streams)[0],
- );
-
- const isRestreamed = useMemo(
- () =>
- config &&
- Object.keys(config.go2rtc.streams || {}).includes(streamName ?? ""),
- [config, streamName],
- );
-
- // validate stored stream name and reset if now invalid
-
- useEffect(() => {
- if (!streamNameLoaded) return;
-
- const available = Object.values(camera.live.streams || {});
- if (available.length === 0) return;
-
- if (streamName != null && !available.includes(streamName)) {
- setStreamName(available[0]);
- }
- }, [streamNameLoaded, camera.live.streams, streamName, setStreamName]);
-
- const { data: cameraMetadata } = useSWR(
- isRestreamed ? `go2rtc/streams/${streamName}` : null,
- {
- revalidateOnFocus: false,
- revalidateOnReconnect: false,
- revalidateIfStale: false,
- dedupingInterval: 60000,
- },
- );
-
- const { twoWayAudio: supports2WayTalk, audioOutput: supportsAudioOutput } =
- useMemo(() => detectCameraAudioFeatures(cameraMetadata), [cameraMetadata]);
-
- // camera enabled state
- const { payload: enabledState } = useEnabledState(camera.name);
- const cameraEnabled = enabledState === "ON";
-
- // for audio transcriptions
-
- const { payload: audioTranscriptionState, send: sendTranscription } =
- useAudioTranscriptionState(camera.name);
- const { payload: transcription } = useAudioLiveTranscription(camera.name);
- const transcriptionRef = useRef(null);
-
- useEffect(() => {
- if (transcription) {
- if (transcriptionRef.current) {
- transcriptionRef.current.scrollTop =
- transcriptionRef.current.scrollHeight;
- }
- }
- }, [transcription]);
-
- useEffect(() => {
- return () => {
- // disable transcriptions when unmounting
- if (audioTranscriptionState == "ON") sendTranscription("OFF");
- };
- }, [audioTranscriptionState, sendTranscription]);
-
- // click overlay for ptzs
-
- const [clickOverlay, setClickOverlay] = useState(false);
- const clickOverlayRef = useRef(null);
- const { send: sendPtz } = usePtzCommand(camera.name);
-
- const handleOverlayClick = useCallback(
- (
- e: React.MouseEvent | React.TouchEvent,
- ) => {
- if (!clickOverlay) {
- return;
- }
-
- let clientX;
- let clientY;
- if ("TouchEvent" in window && e.nativeEvent instanceof TouchEvent) {
- clientX = e.nativeEvent.touches[0].clientX;
- clientY = e.nativeEvent.touches[0].clientY;
- } else if (e.nativeEvent instanceof MouseEvent) {
- clientX = e.nativeEvent.clientX;
- clientY = e.nativeEvent.clientY;
- }
-
- if (clickOverlayRef.current && clientX && clientY) {
- const rect = clickOverlayRef.current.getBoundingClientRect();
-
- const normalizedX = (clientX - rect.left) / rect.width;
- const normalizedY = (clientY - rect.top) / rect.height;
-
- const pan = (normalizedX - 0.5) * 2;
- const tilt = (0.5 - normalizedY) * 2;
-
- sendPtz(`move_relative_${pan}_${tilt}`);
- }
- },
- [clickOverlayRef, clickOverlay, sendPtz],
- );
-
- // pip state
-
- useEffect(() => {
- setPip(document.pictureInPictureElement != null);
- // we know that these deps are correct
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [document.pictureInPictureElement]);
-
- // playback state
-
- const [audio, setAudio] = useSessionPersistence("liveAudio", false);
- const [mic, setMic] = useState(false);
- const [webRTC, setWebRTC] = useState(false);
- const [pip, setPip] = useState(false);
- const [lowBandwidth, setLowBandwidth] = useState(false);
-
- const [playInBackground, setPlayInBackground] = useUserPersistence(
- `${camera.name}-background-play`,
- false,
- );
-
- const [showStats, setShowStats] = useState(false);
- const [debug, setDebug] = useState(false);
-
- useSearchEffect("debug", (value: string) => {
- if (value === "true") {
- setDebug(true);
- }
-
- return true;
- });
-
- const [fullResolution, setFullResolution] = useState({
- width: 0,
- height: 0,
- });
-
- const preferredLiveMode = useMemo(() => {
- if (mic) {
- return "webrtc";
- }
-
- if (webRTC && isRestreamed) {
- return "webrtc";
- }
-
- if (webRTC && !isRestreamed) {
- return "jsmpeg";
- }
-
- if (lowBandwidth) {
- return "jsmpeg";
- }
-
- if (!("MediaSource" in window || "ManagedMediaSource" in window)) {
- return "webrtc";
- }
-
- if (!isRestreamed) {
- return "jsmpeg";
- }
-
- return "mse";
- }, [lowBandwidth, mic, webRTC, isRestreamed]);
-
- useKeyboardListener(["m"], (key, modifiers) => {
- if (!modifiers.down) {
- return true;
- }
-
- switch (key) {
- case "m":
- if (supportsAudioOutput) {
- setAudio(!audio);
- return true;
- }
- break;
- case "t":
- if (supports2WayTalk) {
- setMic(!mic);
- return true;
- }
- break;
- }
-
- return false;
- });
-
- // layout state
-
- const windowAspectRatio = useMemo(() => {
- return windowWidth / windowHeight;
- }, [windowWidth, windowHeight]);
-
- const containerAspectRatio = useMemo(() => {
- if (!containerRef.current) {
- return windowAspectRatio;
- }
-
- return containerRef.current.clientWidth / containerRef.current.clientHeight;
- }, [windowAspectRatio, containerRef]);
-
- const cameraAspectRatio = useMemo(() => {
- if (fullResolution.width && fullResolution.height) {
- return fullResolution.width / fullResolution.height;
- } else {
- return camera.detect.width / camera.detect.height;
- }
- }, [camera, fullResolution]);
-
- const constrainedAspectRatio = useMemo(() => {
- if (isMobile || fullscreen) {
- return cameraAspectRatio;
- } else {
- return containerAspectRatio < cameraAspectRatio
- ? containerAspectRatio
- : cameraAspectRatio;
- }
- }, [cameraAspectRatio, containerAspectRatio, fullscreen]);
-
- const growClassName = useMemo(() => {
- if (isMobile) {
- if (isPortrait) {
- return "absolute left-0.5 right-0.5 top-[50%] -translate-y-[50%]";
- } else {
- if (cameraAspectRatio > containerAspectRatio) {
- return "p-2 absolute left-0 top-[50%] -translate-y-[50%]";
- } else {
- return "p-2 absolute top-0.5 bottom-0.5 left-[50%] -translate-x-[50%]";
- }
- }
- }
-
- if (fullscreen) {
- if (cameraAspectRatio > containerAspectRatio) {
- return "absolute inset-x-2 top-[50%] -translate-y-[50%]";
- } else {
- return "absolute inset-y-2 left-[50%] -translate-x-[50%]";
- }
- } else {
- return "absolute top-0.5 bottom-0.5 left-[50%] -translate-x-[50%]";
- }
- }, [fullscreen, isPortrait, cameraAspectRatio, containerAspectRatio]);
-
- // On mobile devices that support it, try to orient screen
- // to best fit the camera feed in fullscreen mode
- useEffect(() => {
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- const screenOrientation = screen.orientation as any;
- if (!screenOrientation?.lock || !screenOrientation?.unlock) {
- // Browser does not support ScreenOrientation APIs that we need
- return;
- }
-
- if (fullscreen) {
- const orientationForBestFit =
- cameraAspectRatio > 1 ? "landscape" : "portrait";
-
- // If the current device doesn't support locking orientation,
- // this promise will reject with an error that we can ignore
- screenOrientation.lock(orientationForBestFit).catch(() => {});
- }
-
- return () => screenOrientation.unlock();
- }, [fullscreen, cameraAspectRatio]);
-
- const handleError = useCallback(
- (e: LivePlayerError) => {
- if (e) {
- if (
- !webRTC &&
- config &&
- config.go2rtc?.webrtc?.candidates?.length > 0
- ) {
- setWebRTC(true);
- } else {
- setWebRTC(false);
- setLowBandwidth(true);
- }
- }
- },
- [config, webRTC],
- );
-
- return (
-
-
-
-
- {!fullscreen ? (
-
-
navigate(-1)}
- >
-
- {isDesktop && (
-
- {t("button.back", { ns: "common" })}
-
- )}
-
-
{
- navigate("review", {
- state: {
- severity: "alert",
- recording: {
- camera: camera.name,
- startTime: Date.now() / 1000 - 30,
- severity: "alert",
- } as RecordingStartingPoint,
- },
- });
- }}
- >
-
- {isDesktop && (
-
- {t("button.history", { ns: "common" })}
-
- )}
-
-
- ) : (
-
- )}
-
- {fullscreen && (
-
navigate(-1)}
- >
-
- {isDesktop && (
-
- {t("button.back", { ns: "common" })}
-
- )}
-
- )}
- {supportsFullscreen && (
-
- )}
- {!isIOS && !isFirefox && preferredLiveMode != "jsmpeg" && (
-
{
- if (!pip) {
- setPip(true);
- } else {
- document.exitPictureInPicture();
- setPip(false);
- }
- }}
- disabled={!cameraEnabled || debug}
- />
- )}
- {supports2WayTalk && (
- {
- setMic(!mic);
- if (!mic && !audio) {
- setAudio(true);
- }
- }}
- disabled={!cameraEnabled || debug}
- />
- )}
- {supportsAudioOutput && preferredLiveMode != "jsmpeg" && (
- setAudio(!audio)}
- disabled={!cameraEnabled || debug}
- />
- )}
-
-
-
- {!debug ? (
-
-
-
-
-
-
- {camera?.audio?.enabled_in_config &&
- audioTranscriptionState == "ON" &&
- transcription != null && (
-
- {transcription}
-
- )}
-
- ) : (
-
-
-
- )}
-
- {camera.onvif.host != "" && (
-
- )}
-
- );
-}
-
-type FrigateCameraFeaturesProps = {
- camera: CameraConfig;
- recordingEnabled: boolean;
- audioDetectEnabled: boolean;
- autotrackingEnabled: boolean;
- transcriptionEnabled: boolean;
- fullscreen: boolean;
- streamName: string;
- setStreamName?: (value: string | undefined) => void;
- preferredLiveMode: string;
- playInBackground: boolean;
- setPlayInBackground: (value: boolean | undefined) => void;
- showStats: boolean;
- setShowStats: (value: boolean) => void;
- isRestreamed: boolean;
- setLowBandwidth: React.Dispatch>;
- supportsAudioOutput: boolean;
- supports2WayTalk: boolean;
- cameraEnabled: boolean;
- debug: boolean;
- setDebug: (debug: boolean) => void;
-};
-function FrigateCameraFeatures({
- camera,
- recordingEnabled,
- audioDetectEnabled,
- autotrackingEnabled,
- transcriptionEnabled,
- fullscreen,
- streamName,
- setStreamName,
- preferredLiveMode,
- playInBackground,
- setPlayInBackground,
- showStats,
- setShowStats,
- isRestreamed,
- setLowBandwidth,
- supportsAudioOutput,
- supports2WayTalk,
- cameraEnabled,
- debug,
- setDebug,
-}: FrigateCameraFeaturesProps) {
- const { t } = useTranslation(["views/live", "components/dialog"]);
- const { getLocaleDocUrl } = useDocDomain();
-
- const { payload: detectState, send: sendDetect } = useDetectState(
- camera.name,
- );
- const { payload: enabledState, send: sendEnabled } = useEnabledState(
- camera.name,
- );
- const { payload: recordState, send: sendRecord } = useRecordingsState(
- camera.name,
- );
- const { payload: snapshotState, send: sendSnapshot } = useSnapshotsState(
- camera.name,
- );
- const { payload: audioState, send: sendAudio } = useAudioState(camera.name);
- const { payload: autotrackingState, send: sendAutotracking } =
- useAutotrackingState(camera.name);
- const { payload: transcriptionState, send: sendTranscription } =
- useAudioTranscriptionState(camera.name);
-
- // roles
-
- const isAdmin = useIsAdmin();
-
- // manual event
-
- const recordingEventIdRef = useRef(null);
- const [isRecording, setIsRecording] = useState(false);
- const [activeToastId, setActiveToastId] = useState(
- null,
- );
-
- const createEvent = useCallback(async () => {
- try {
- const response = await axios.post(
- `events/${camera.name}/on_demand/create`,
- {
- include_recording: true,
- duration: null,
- },
- );
-
- if (response.data.success) {
- recordingEventIdRef.current = response.data.event_id;
- setIsRecording(true);
- const toastId = toast.success(
-
-
{t("manualRecording.started")}
- {!camera.record.enabled ||
- (camera.record.alerts.retain.days == 0 && (
-
{t("manualRecording.recordDisabledTips")}
- ))}
-
,
- {
- position: "top-center",
- duration: 10000,
- },
- );
- setActiveToastId(toastId);
- }
- } catch (error) {
- toast.error(t("manualRecording.failedToStart"), {
- position: "top-center",
- });
- }
- }, [camera, t]);
-
- const endEvent = useCallback(() => {
- if (activeToastId) {
- toast.dismiss(activeToastId);
- }
- try {
- if (recordingEventIdRef.current) {
- axios.put(`events/${recordingEventIdRef.current}/end`, {
- end_time: Math.ceil(Date.now() / 1000),
- });
- recordingEventIdRef.current = null;
- setIsRecording(false);
- toast.success(t("manualRecording.ended"), {
- position: "top-center",
- });
- }
- } catch (error) {
- toast.error(t("manualRecording.failedToEnd"), {
- position: "top-center",
- });
- }
- }, [activeToastId, t]);
-
- const endEventViaBeacon = useCallback(() => {
- if (!recordingEventIdRef.current) return;
-
- const url = `${window.location.origin}/api/events/${recordingEventIdRef.current}/end`;
- const payload = JSON.stringify({
- end_time: Math.ceil(Date.now() / 1000),
- });
-
- // this needs to be a synchronous XMLHttpRequest to guarantee the PUT
- // reaches the server before the browser kills the page
- const xhr = new XMLHttpRequest();
- try {
- xhr.open("PUT", url, false);
- xhr.setRequestHeader("Content-Type", "application/json");
- xhr.setRequestHeader("X-CSRF-TOKEN", "1");
- xhr.setRequestHeader("X-CACHE-BYPASS", "1");
- xhr.withCredentials = true;
- xhr.send(payload);
- } catch (e) {
- // Silently ignore errors during unload
- }
- }, []);
-
- const handleEventButtonClick = useCallback(() => {
- if (isRecording) {
- endEvent();
- } else {
- createEvent();
- }
- }, [createEvent, endEvent, isRecording]);
-
- const [isSnapshotLoading, setIsSnapshotLoading] = useState(false);
-
- const handleSnapshotClick = useCallback(async () => {
- setIsSnapshotLoading(true);
- try {
- let result: SnapshotResult;
-
- if (isRestreamed && preferredLiveMode !== "jsmpeg") {
- // For restreamed streams with video elements (MSE/WebRTC), grab directly from video element
- result = await grabVideoSnapshot();
- } else {
- // For detect stream or JSMpeg players, use the API endpoint
- result = await fetchCameraSnapshot(camera.name);
- }
-
- if (result.success) {
- const { dataUrl } = result.data;
- const filename = generateSnapshotFilename(camera.name);
- downloadSnapshot(dataUrl, filename);
- toast.success(t("snapshot.downloadStarted"));
- } else {
- toast.error(t("snapshot.captureFailed"));
- }
- } finally {
- setIsSnapshotLoading(false);
- }
- }, [camera.name, isRestreamed, preferredLiveMode, t]);
-
- useEffect(() => {
- // Handle page unload/close (browser close, tab close, refresh, navigation to external site)
- const handleBeforeUnload = () => {
- if (recordingEventIdRef.current) {
- endEventViaBeacon();
- }
- };
-
- window.addEventListener("beforeunload", handleBeforeUnload);
-
- // ensure manual event is stopped when component unmounts
- return () => {
- window.removeEventListener("beforeunload", handleBeforeUnload);
-
- if (recordingEventIdRef.current) {
- endEvent();
- }
- };
- // mount/unmount only
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, []);
-
- // desktop shows icons part of row
- if (isDesktop || isTablet) {
- return (
- <>
- {isAdmin && (
- <>
- sendEnabled(enabledState == "ON" ? "OFF" : "ON")}
- disabled={debug}
- />
- sendDetect(detectState == "ON" ? "OFF" : "ON")}
- disabled={!cameraEnabled}
- />
- sendRecord(recordState == "ON" ? "OFF" : "ON")}
- disabled={!cameraEnabled}
- />
- sendSnapshot(snapshotState == "ON" ? "OFF" : "ON")}
- disabled={!cameraEnabled}
- />
- {audioDetectEnabled && (
- sendAudio(audioState == "ON" ? "OFF" : "ON")}
- disabled={!cameraEnabled}
- />
- )}
- {audioDetectEnabled && transcriptionEnabled && (
-
- sendTranscription(transcriptionState == "ON" ? "OFF" : "ON")
- }
- disabled={!cameraEnabled || audioState == "OFF"}
- />
- )}
- {autotrackingEnabled && (
-
- sendAutotracking(autotrackingState == "ON" ? "OFF" : "ON")
- }
- disabled={!cameraEnabled}
- />
- )}
- >
- )}
-
-
- {!fullscreen && (
-
-
-
-
-
-
-
-
- {!isRestreamed && (
-
-
- {t("streaming.label", { ns: "components/dialog" })}
-
-
-
-
- {t("streaming.restreaming.disabled", {
- ns: "components/dialog",
- })}
-
-
-
-
-
-
- {t("button.info", { ns: "common" })}
-
-
-
-
- {t("streaming.restreaming.desc.title", {
- ns: "components/dialog",
- })}
-
-
- {t("readTheDocumentation", { ns: "common" })}
-
-
-
-
-
-
-
- )}
- {isRestreamed &&
- Object.values(camera.live.streams).length > 0 && (
-
-
- {t("stream.title")}
-
-
{
- setStreamName?.(value);
- }}
- >
-
-
- {Object.keys(camera.live.streams).find(
- (key) => camera.live.streams[key] === streamName,
- )}
-
-
-
-
-
- {Object.entries(camera.live.streams).map(
- ([stream, name]) => (
-
- {stream}
-
- ),
- )}
-
-
-
-
- {debug && (
-
- <>
-
-
{t("stream.debug.picker")}
- >
-
- )}
-
- {preferredLiveMode != "jsmpeg" &&
- !debug &&
- isRestreamed && (
-
- {supportsAudioOutput ? (
- <>
-
-
{t("stream.audio.available")}
- >
- ) : (
- <>
-
-
{t("stream.audio.unavailable")}
-
-
-
-
-
- {t("button.info", { ns: "common" })}
-
-
-
-
- {t("stream.audio.tips.title")}
-
-
- {t("readTheDocumentation", {
- ns: "common",
- })}
-
-
-
-
-
- >
- )}
-
- )}
- {preferredLiveMode != "jsmpeg" &&
- !debug &&
- isRestreamed &&
- supportsAudioOutput && (
-
- {supports2WayTalk ? (
- <>
-
-
{t("stream.twoWayTalk.available")}
- >
- ) : (
- <>
-
-
{t("stream.twoWayTalk.unavailable")}
-
-
-
-
-
- {t("button.info", { ns: "common" })}
-
-
-
-
- {t("stream.twoWayTalk.tips")}
-
-
- {t("readTheDocumentation", {
- ns: "common",
- })}
-
-
-
-
-
- >
- )}
-
- )}
-
- {preferredLiveMode == "jsmpeg" &&
- !debug &&
- isRestreamed && (
-
-
-
-
-
- {t("stream.lowBandwidth.tips")}
-
-
-
setLowBandwidth(false)}
- >
-
-
- {t("stream.lowBandwidth.resetStream")}
-
-
-
- )}
-
- )}
- {isRestreamed && (
-
-
-
- {t("stream.playInBackground.label")}
-
-
- setPlayInBackground(checked)
- }
- />
-
-
- {t("stream.playInBackground.tips")}
-
-
- )}
-
-
-
- {t("streaming.showStats.label", {
- ns: "components/dialog",
- })}
-
- setShowStats(checked)}
- />
-
-
- {t("streaming.showStats.desc", {
- ns: "components/dialog",
- })}
-
-
-
-
-
- {t("streaming.debugView", {
- ns: "components/dialog",
- })}
-
- setDebug(checked)}
- />
-
-
-
-
-
- )}
- >
- );
- }
-
- // mobile doesn't show settings in fullscreen view
- if (fullscreen) {
- return;
- }
-
- return (
-
-
-
-
-
-
- <>
- {isAdmin && (
- <>
-
- sendEnabled(enabledState == "ON" ? "OFF" : "ON")
- }
- />
-
- sendDetect(detectState == "ON" ? "OFF" : "ON")
- }
- />
- {recordingEnabled && (
-
- sendRecord(recordState == "ON" ? "OFF" : "ON")
- }
- />
- )}
-
- sendSnapshot(snapshotState == "ON" ? "OFF" : "ON")
- }
- />
- {audioDetectEnabled && (
-
- sendAudio(audioState == "ON" ? "OFF" : "ON")
- }
- />
- )}
- {audioDetectEnabled && transcriptionEnabled && (
-
- sendTranscription(
- transcriptionState == "ON" ? "OFF" : "ON",
- )
- }
- />
- )}
- {autotrackingEnabled && (
-
- sendAutotracking(autotrackingState == "ON" ? "OFF" : "ON")
- }
- />
- )}
- >
- )}
-
-
- {!isRestreamed && (
-
-
{t("stream.title")}
-
-
-
- {t("streaming.restreaming.disabled", {
- ns: "components/dialog",
- })}
-
-
-
-
-
-
- {t("button.info", { ns: "common" })}
-
-
-
-
- {t("streaming.restreaming.desc.title", {
- ns: "components/dialog",
- })}
-
-
- {t("readTheDocumentation", { ns: "common" })}
-
-
-
-
-
-
-
- )}
- {isRestreamed &&
- Object.values(camera.live.streams).length > 0 && (
-
-
{t("stream.title")}
-
{
- setStreamName?.(value);
- }}
- disabled={debug}
- >
-
-
- {Object.keys(camera.live.streams).find(
- (key) => camera.live.streams[key] === streamName,
- )}
-
-
-
-
-
- {Object.entries(camera.live.streams).map(
- ([stream, name]) => (
-
- {stream}
-
- ),
- )}
-
-
-
-
- {debug && (
-
- <>
-
-
{t("stream.debug.picker")}
- >
-
- )}
-
- {preferredLiveMode != "jsmpeg" &&
- !debug &&
- isRestreamed && (
-
- {supportsAudioOutput ? (
- <>
-
-
{t("stream.audio.available")}
- >
- ) : (
- <>
-
-
{t("stream.audio.unavailable")}
-
-
-
-
-
- {t("button.info", { ns: "common" })}
-
-
-
-
- {t("stream.audio.tips.title")}
-
-
- {t("readTheDocumentation", {
- ns: "common",
- })}
-
-
-
-
-
- >
- )}
-
- )}
- {preferredLiveMode != "jsmpeg" &&
- !debug &&
- isRestreamed &&
- supportsAudioOutput && (
-
- {supports2WayTalk ? (
- <>
-
-
{t("stream.twoWayTalk.available")}
- >
- ) : (
- <>
-
-
{t("stream.twoWayTalk.unavailable")}
-
-
-
-
-
- {t("button.info", { ns: "common" })}
-
-
-
-
- {t("stream.twoWayTalk.tips")}
-
-
- {t("readTheDocumentation", {
- ns: "common",
- })}
-
-
-
-
-
- >
- )}
-
- )}
- {preferredLiveMode == "jsmpeg" && isRestreamed && (
-
-
-
-
- {t("stream.lowBandwidth.tips")}
-
-
-
setLowBandwidth(false)}
- >
-
-
- {t("stream.lowBandwidth.resetStream")}
-
-
-
- )}
-
- )}
-
-
- {t("manualRecording.title")}
-
-
-
- {isSnapshotLoading && (
-
- )}
- {t("snapshot.takeSnapshot")}
-
-
- {t("manualRecording." + (isRecording ? "end" : "start"))}
-
-
-
- {t("manualRecording.tips")}
-
-
- {isRestreamed && (
- <>
-
-
{
- setPlayInBackground(checked);
- }}
- disabled={debug}
- />
-
- {t("manualRecording.playInBackground.desc")}
-
-
-
-
{
- setShowStats(checked);
- }}
- disabled={debug}
- />
-
- {t("manualRecording.showStats.desc")}
-
-
- >
- )}
-
- setDebug(checked)}
- />
-
-
- >
-
-
-
- );
-}
+import {
+ useAudioLiveTranscription,
+ useAudioState,
+ useAudioTranscriptionState,
+ useAutotrackingState,
+ useDetectState,
+ useEnabledState,
+ usePtzCommand,
+ useRecordingsState,
+ useSnapshotsState,
+} from "@/api/ws";
+import CameraFeatureToggle from "@/components/dynamic/CameraFeatureToggle";
+import FilterSwitch from "@/components/filter/FilterSwitch";
+import LivePlayer from "@/components/player/LivePlayer";
+import { Button } from "@/components/ui/button";
+import { Drawer, DrawerContent, DrawerTrigger } from "@/components/ui/drawer";
+import {
+ DropdownMenu,
+ DropdownMenuContent,
+ DropdownMenuTrigger,
+} from "@/components/ui/dropdown-menu";
+import {
+ Popover,
+ PopoverContent,
+ PopoverTrigger,
+} from "@/components/ui/popover";
+import { useResizeObserver } from "@/hooks/resize-observer";
+import useKeyboardListener from "@/hooks/use-keyboard-listener";
+import { CameraConfig, FrigateConfig } from "@/types/frigateConfig";
+import {
+ LivePlayerError,
+ LiveStreamMetadata,
+ VideoResolutionType,
+} from "@/types/live";
+import { RecordingStartingPoint } from "@/types/record";
+import React, {
+ useCallback,
+ useEffect,
+ useMemo,
+ useRef,
+ useState,
+} from "react";
+import {
+ isDesktop,
+ isFirefox,
+ isIOS,
+ isMobile,
+ isTablet,
+ useMobileOrientation,
+} from "react-device-detect";
+import {
+ FaCog,
+ FaCompress,
+ FaExpand,
+ FaMicrophone,
+ FaMicrophoneSlash,
+} from "react-icons/fa";
+import { GiSpeaker, GiSpeakerOff } from "react-icons/gi";
+import {
+ TbCameraDown,
+ TbRecordMail,
+ TbRecordMailOff,
+ TbViewfinder,
+ TbViewfinderOff,
+} from "react-icons/tb";
+import { IoIosWarning, IoMdArrowRoundBack } from "react-icons/io";
+import {
+ LuCheck,
+ LuEar,
+ LuEarOff,
+ LuExternalLink,
+ LuHistory,
+ LuInfo,
+ LuPictureInPicture,
+ LuPower,
+ LuPowerOff,
+ LuVideo,
+ LuVideoOff,
+ LuX,
+} from "react-icons/lu";
+import {
+ MdClosedCaption,
+ MdClosedCaptionDisabled,
+ MdNoPhotography,
+ MdOutlineRestartAlt,
+ MdPersonOff,
+ MdPersonSearch,
+ MdPhotoCamera,
+} from "react-icons/md";
+import { Link, useNavigate } from "react-router-dom";
+import { TransformWrapper, TransformComponent } from "react-zoom-pan-pinch";
+import useSWR from "swr";
+import { cn } from "@/lib/utils";
+import { useSessionPersistence } from "@/hooks/use-session-persistence";
+
+import {
+ Select,
+ SelectContent,
+ SelectGroup,
+ SelectItem,
+ SelectTrigger,
+ SelectValue,
+} from "@/components/ui/select";
+import { useUserPersistence } from "@/hooks/use-user-persistence";
+import { Label } from "@/components/ui/label";
+import { Switch } from "@/components/ui/switch";
+import axios from "axios";
+import { toast } from "sonner";
+import { Toaster } from "@/components/ui/sonner";
+import { useIsAdmin } from "@/hooks/use-is-admin";
+import { useTranslation } from "react-i18next";
+import { useDocDomain } from "@/hooks/use-doc-domain";
+import { detectCameraAudioFeatures } from "@/utils/cameraUtil";
+import PtzControlPanel from "@/components/overlay/PtzControlPanel";
+import ObjectSettingsView from "../settings/ObjectSettingsView";
+import { useSearchEffect } from "@/hooks/use-overlay-state";
+import {
+ downloadSnapshot,
+ fetchCameraSnapshot,
+ generateSnapshotFilename,
+ grabVideoSnapshot,
+ SnapshotResult,
+} from "@/utils/snapshotUtil";
+import ActivityIndicator from "@/components/indicators/activity-indicator";
+import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";
+import { chooseAutoLiveStream } from "@/utils/liveStreamSelection";
+
+type LiveCameraViewProps = {
+ config?: FrigateConfig;
+ camera: CameraConfig;
+ supportsFullscreen: boolean;
+ fullscreen: boolean;
+ toggleFullscreen: () => void;
+};
+export default function LiveCameraView({
+ config,
+ camera,
+ supportsFullscreen,
+ fullscreen,
+ toggleFullscreen,
+}: LiveCameraViewProps) {
+ const { t } = useTranslation(["views/live", "components/dialog"]);
+ const navigate = useNavigate();
+ const { isPortrait } = useMobileOrientation();
+ const mainRef = useRef(null);
+ const containerRef = useRef(null);
+ const [{ width: windowWidth, height: windowHeight }] =
+ useResizeObserver(window);
+ const playbackCapabilities = usePlaybackCapabilities([]);
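+ // choose a default stream for this camera from the estimated bandwidth
+ // and the browser's data-saver preference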
+ const autoStreamName = useMemo(
+ () =>
+ chooseAutoLiveStream(
+ camera.live.streams,
+ playbackCapabilities.estimatedBandwidthBps,
+ playbackCapabilities.saveData,
+ ),
+ [camera.live.streams, playbackCapabilities],
+ );
+
+ // supported features
+
+ const [streamName, setStreamName, streamNameLoaded] =
+ useUserPersistence(
+ `${camera.name}-stream`,
+ autoStreamName || Object.values(camera.live.streams)[0],
+ );
+
+ const isRestreamed = useMemo(
+ () =>
+ config &&
+ Object.keys(config.go2rtc.streams || {}).includes(streamName ?? ""),
+ [config, streamName],
+ );
+
+ // validate stored stream name and reset if now invalid
+
+ useEffect(() => {
+ if (!streamNameLoaded) return;
+
+ const available = Object.values(camera.live.streams || {});
+ if (available.length === 0) return;
+
+ if (streamName != null && !available.includes(streamName)) {
+ setStreamName(available[0]);
+ }
+ }, [streamNameLoaded, camera.live.streams, streamName, setStreamName]);
+
+ const { data: cameraMetadata } = useSWR(
+ isRestreamed ? `go2rtc/streams/${streamName}` : null,
+ {
+ revalidateOnFocus: false,
+ revalidateOnReconnect: false,
+ revalidateIfStale: false,
+ dedupingInterval: 60000,
+ },
+ );
+
+ const { twoWayAudio: supports2WayTalk, audioOutput: supportsAudioOutput } =
+ useMemo(() => detectCameraAudioFeatures(cameraMetadata), [cameraMetadata]);
+
+ // camera enabled state
+ const { payload: enabledState } = useEnabledState(camera.name);
+ const cameraEnabled = enabledState === "ON";
+
+ // for audio transcriptions
+
+ const { payload: audioTranscriptionState, send: sendTranscription } =
+ useAudioTranscriptionState(camera.name);
+ const { payload: transcription } = useAudioLiveTranscription(camera.name);
+ const transcriptionRef = useRef(null);
+
+ useEffect(() => {
+ if (transcription) {
+ if (transcriptionRef.current) {
+ transcriptionRef.current.scrollTop =
+ transcriptionRef.current.scrollHeight;
+ }
+ }
+ }, [transcription]);
+
+ useEffect(() => {
+ return () => {
+ // disable transcriptions when unmounting
+ if (audioTranscriptionState == "ON") sendTranscription("OFF");
+ };
+ }, [audioTranscriptionState, sendTranscription]);
+
+ // click overlay for ptzs
+
+ const [clickOverlay, setClickOverlay] = useState(false);
+ const clickOverlayRef = useRef(null);
+ const { send: sendPtz } = usePtzCommand(camera.name);
+
+ const handleOverlayClick = useCallback(
+ (
+ e: React.MouseEvent | React.TouchEvent,
+ ) => {
+ if (!clickOverlay) {
+ return;
+ }
+
+ let clientX;
+ let clientY;
+ if ("TouchEvent" in window && e.nativeEvent instanceof TouchEvent) {
+ clientX = e.nativeEvent.touches[0].clientX;
+ clientY = e.nativeEvent.touches[0].clientY;
+ } else if (e.nativeEvent instanceof MouseEvent) {
+ clientX = e.nativeEvent.clientX;
+ clientY = e.nativeEvent.clientY;
+ }
+
+ if (clickOverlayRef.current && clientX && clientY) {
+ const rect = clickOverlayRef.current.getBoundingClientRect();
+
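+ // map the click position to pan/tilt values in [-1, 1],
+ // with (0, 0) at the center of the frame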
+ const normalizedX = (clientX - rect.left) / rect.width;
+ const normalizedY = (clientY - rect.top) / rect.height;
+
+ const pan = (normalizedX - 0.5) * 2;
+ const tilt = (0.5 - normalizedY) * 2;
+
+ sendPtz(`move_relative_${pan}_${tilt}`);
+ }
+ },
+ [clickOverlayRef, clickOverlay, sendPtz],
+ );
+
+ // pip state
+
+ useEffect(() => {
+ setPip(document.pictureInPictureElement != null);
+ // we know that these deps are correct
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [document.pictureInPictureElement]);
+
+ // playback state
+
+ const [audio, setAudio] = useSessionPersistence("liveAudio", false);
+ const [mic, setMic] = useState(false);
+ const [webRTC, setWebRTC] = useState(false);
+ const [pip, setPip] = useState(false);
+ const [lowBandwidth, setLowBandwidth] = useState(false);
+
+ const [playInBackground, setPlayInBackground] = useUserPersistence(
+ `${camera.name}-background-play`,
+ false,
+ );
+
+ const [showStats, setShowStats] = useState(false);
+ const [debug, setDebug] = useState(false);
+
+ useSearchEffect("debug", (value: string) => {
+ if (value === "true") {
+ setDebug(true);
+ }
+
+ return true;
+ });
+
+ const [fullResolution, setFullResolution] = useState({
+ width: 0,
+ height: 0,
+ });
+
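+ // pick the live player: mic use or an explicit WebRTC preference uses
+ // webrtc, low bandwidth or non-restreamed cameras fall back to jsmpeg,
+ // browsers without MediaSource use webrtc, and everything else gets mse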
+ const preferredLiveMode = useMemo(() => {
+ if (mic) {
+ return "webrtc";
+ }
+
+ if (webRTC && isRestreamed) {
+ return "webrtc";
+ }
+
+ if (webRTC && !isRestreamed) {
+ return "jsmpeg";
+ }
+
+ if (lowBandwidth) {
+ return "jsmpeg";
+ }
+
+ if (!("MediaSource" in window || "ManagedMediaSource" in window)) {
+ return "webrtc";
+ }
+
+ if (!isRestreamed) {
+ return "jsmpeg";
+ }
+
+ return "mse";
+ }, [lowBandwidth, mic, webRTC, isRestreamed]);
+
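+ // keyboard shortcuts: "m" toggles audio output, "t" toggles two-way talk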
+ useKeyboardListener(["m"], (key, modifiers) => {
+ if (!modifiers.down) {
+ return true;
+ }
+
+ switch (key) {
+ case "m":
+ if (supportsAudioOutput) {
+ setAudio(!audio);
+ return true;
+ }
+ break;
+ case "t":
+ if (supports2WayTalk) {
+ setMic(!mic);
+ return true;
+ }
+ break;
+ }
+
+ return false;
+ });
+
+ // layout state
+
+ const windowAspectRatio = useMemo(() => {
+ return windowWidth / windowHeight;
+ }, [windowWidth, windowHeight]);
+
+ const containerAspectRatio = useMemo(() => {
+ if (!containerRef.current) {
+ return windowAspectRatio;
+ }
+
+ return containerRef.current.clientWidth / containerRef.current.clientHeight;
+ }, [windowAspectRatio, containerRef]);
+
+ const cameraAspectRatio = useMemo(() => {
+ if (fullResolution.width && fullResolution.height) {
+ return fullResolution.width / fullResolution.height;
+ } else {
+ return camera.detect.width / camera.detect.height;
+ }
+ }, [camera, fullResolution]);
+
+ const constrainedAspectRatio = useMemo(() => {
+ if (isMobile || fullscreen) {
+ return cameraAspectRatio;
+ } else {
+ return containerAspectRatio < cameraAspectRatio
+ ? containerAspectRatio
+ : cameraAspectRatio;
+ }
+ }, [cameraAspectRatio, containerAspectRatio, fullscreen]);
+
+ const growClassName = useMemo(() => {
+ if (isMobile) {
+ if (isPortrait) {
+ return "absolute left-0.5 right-0.5 top-[50%] -translate-y-[50%]";
+ } else {
+ if (cameraAspectRatio > containerAspectRatio) {
+ return "p-2 absolute left-0 top-[50%] -translate-y-[50%]";
+ } else {
+ return "p-2 absolute top-0.5 bottom-0.5 left-[50%] -translate-x-[50%]";
+ }
+ }
+ }
+
+ if (fullscreen) {
+ if (cameraAspectRatio > containerAspectRatio) {
+ return "absolute inset-x-2 top-[50%] -translate-y-[50%]";
+ } else {
+ return "absolute inset-y-2 left-[50%] -translate-x-[50%]";
+ }
+ } else {
+ return "absolute top-0.5 bottom-0.5 left-[50%] -translate-x-[50%]";
+ }
+ }, [fullscreen, isPortrait, cameraAspectRatio, containerAspectRatio]);
+
+ // On mobile devices that support it, try to orient screen
+ // to best fit the camera feed in fullscreen mode
+ useEffect(() => {
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const screenOrientation = screen.orientation as any;
+ if (!screenOrientation?.lock || !screenOrientation?.unlock) {
+ // Browser does not support the ScreenOrientation APIs that we need
+ return;
+ }
+
+ if (fullscreen) {
+ const orientationForBestFit =
+ cameraAspectRatio > 1 ? "landscape" : "portrait";
+
+ // If the current device doesn't support locking orientation,
+ // this promise will reject with an error that we can ignore
+ screenOrientation.lock(orientationForBestFit).catch(() => {});
+ }
+
+ return () => screenOrientation.unlock();
+ }, [fullscreen, cameraAspectRatio]);
+
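+ // on a player error, try WebRTC first when ICE candidates are
+ // configured, otherwise drop to the low-bandwidth jsmpeg player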
+ const handleError = useCallback(
+ (e: LivePlayerError) => {
+ if (e) {
+ if (
+ !webRTC &&
+ config &&
+ config.go2rtc?.webrtc?.candidates?.length > 0
+ ) {
+ setWebRTC(true);
+ } else {
+ setWebRTC(false);
+ setLowBandwidth(true);
+ }
+ }
+ },
+ [config, webRTC],
+ );
+
+ return (
+
+
+
+
+ {!fullscreen ? (
+
+
navigate(-1)}
+ >
+
+ {isDesktop && (
+
+ {t("button.back", { ns: "common" })}
+
+ )}
+
+
{
+ navigate("review", {
+ state: {
+ severity: "alert",
+ recording: {
+ camera: camera.name,
+ startTime: Date.now() / 1000 - 30,
+ severity: "alert",
+ } as RecordingStartingPoint,
+ },
+ });
+ }}
+ >
+
+ {isDesktop && (
+
+ {t("button.history", { ns: "common" })}
+
+ )}
+
+
+ ) : (
+
+ )}
+
+ {fullscreen && (
+
navigate(-1)}
+ >
+
+ {isDesktop && (
+
+ {t("button.back", { ns: "common" })}
+
+ )}
+
+ )}
+ {supportsFullscreen && (
+
+ )}
+ {!isIOS && !isFirefox && preferredLiveMode != "jsmpeg" && (
+
{
+ if (!pip) {
+ setPip(true);
+ } else {
+ document.exitPictureInPicture();
+ setPip(false);
+ }
+ }}
+ disabled={!cameraEnabled || debug}
+ />
+ )}
+ {supports2WayTalk && (
+ {
+ setMic(!mic);
+ if (!mic && !audio) {
+ setAudio(true);
+ }
+ }}
+ disabled={!cameraEnabled || debug}
+ />
+ )}
+ {supportsAudioOutput && preferredLiveMode != "jsmpeg" && (
+ setAudio(!audio)}
+ disabled={!cameraEnabled || debug}
+ />
+ )}
+
+
+
+ {!debug ? (
+
+
+
+
+
+
+ {camera?.audio?.enabled_in_config &&
+ audioTranscriptionState == "ON" &&
+ transcription != null && (
+
+ {transcription}
+
+ )}
+
+ ) : (
+
+
+
+ )}
+
+ {camera.onvif.host != "" && (
+
+ )}
+
+ );
+}
+
+type FrigateCameraFeaturesProps = {
+ camera: CameraConfig;
+ recordingEnabled: boolean;
+ audioDetectEnabled: boolean;
+ autotrackingEnabled: boolean;
+ transcriptionEnabled: boolean;
+ fullscreen: boolean;
+ streamName: string;
+ setStreamName?: (value: string | undefined) => void;
+ preferredLiveMode: string;
+ playInBackground: boolean;
+ setPlayInBackground: (value: boolean | undefined) => void;
+ showStats: boolean;
+ setShowStats: (value: boolean) => void;
+ isRestreamed: boolean;
+ setLowBandwidth: React.Dispatch>;
+ supportsAudioOutput: boolean;
+ supports2WayTalk: boolean;
+ cameraEnabled: boolean;
+ debug: boolean;
+ setDebug: (debug: boolean) => void;
+};
+function FrigateCameraFeatures({
+ camera,
+ recordingEnabled,
+ audioDetectEnabled,
+ autotrackingEnabled,
+ transcriptionEnabled,
+ fullscreen,
+ streamName,
+ setStreamName,
+ preferredLiveMode,
+ playInBackground,
+ setPlayInBackground,
+ showStats,
+ setShowStats,
+ isRestreamed,
+ setLowBandwidth,
+ supportsAudioOutput,
+ supports2WayTalk,
+ cameraEnabled,
+ debug,
+ setDebug,
+}: FrigateCameraFeaturesProps) {
+ const { t } = useTranslation(["views/live", "components/dialog"]);
+ const { getLocaleDocUrl } = useDocDomain();
+
+ const { payload: detectState, send: sendDetect } = useDetectState(
+ camera.name,
+ );
+ const { payload: enabledState, send: sendEnabled } = useEnabledState(
+ camera.name,
+ );
+ const { payload: recordState, send: sendRecord } = useRecordingsState(
+ camera.name,
+ );
+ const { payload: snapshotState, send: sendSnapshot } = useSnapshotsState(
+ camera.name,
+ );
+ const { payload: audioState, send: sendAudio } = useAudioState(camera.name);
+ const { payload: autotrackingState, send: sendAutotracking } =
+ useAutotrackingState(camera.name);
+ const { payload: transcriptionState, send: sendTranscription } =
+ useAudioTranscriptionState(camera.name);
+
+ // roles
+
+ const isAdmin = useIsAdmin();
+
+ // manual event
+
+ const recordingEventIdRef = useRef(null);
+ const [isRecording, setIsRecording] = useState(false);
+ const [activeToastId, setActiveToastId] = useState(
+ null,
+ );
+
+ const createEvent = useCallback(async () => {
+ try {
+ const response = await axios.post(
+ `events/${camera.name}/on_demand/create`,
+ {
+ include_recording: true,
+ duration: null,
+ },
+ );
+
+ if (response.data.success) {
+ recordingEventIdRef.current = response.data.event_id;
+ setIsRecording(true);
+ const toastId = toast.success(
+
+
{t("manualRecording.started")}
+ {(!camera.record.enabled ||
+ camera.record.alerts.retain.days == 0) && (
+
{t("manualRecording.recordDisabledTips")}
+ )}
+
,
+ {
+ position: "top-center",
+ duration: 10000,
+ },
+ );
+ setActiveToastId(toastId);
+ }
+ } catch (error) {
+ toast.error(t("manualRecording.failedToStart"), {
+ position: "top-center",
+ });
+ }
+ }, [camera, t]);
+
+ const endEvent = useCallback(() => {
+ if (activeToastId) {
+ toast.dismiss(activeToastId);
+ }
+ try {
+ if (recordingEventIdRef.current) {
+ axios.put(`events/${recordingEventIdRef.current}/end`, {
+ end_time: Math.ceil(Date.now() / 1000),
+ });
+ recordingEventIdRef.current = null;
+ setIsRecording(false);
+ toast.success(t("manualRecording.ended"), {
+ position: "top-center",
+ });
+ }
+ } catch (error) {
+ toast.error(t("manualRecording.failedToEnd"), {
+ position: "top-center",
+ });
+ }
+ }, [activeToastId, t]);
+
+ const endEventViaBeacon = useCallback(() => {
+ if (!recordingEventIdRef.current) return;
+
+ const url = `${window.location.origin}/api/events/${recordingEventIdRef.current}/end`;
+ const payload = JSON.stringify({
+ end_time: Math.ceil(Date.now() / 1000),
+ });
+
+ // this needs to be a synchronous XMLHttpRequest to guarantee the PUT
+ // reaches the server before the browser kills the page
+ const xhr = new XMLHttpRequest();
+ try {
+ xhr.open("PUT", url, false);
+ xhr.setRequestHeader("Content-Type", "application/json");
+ xhr.setRequestHeader("X-CSRF-TOKEN", "1");
+ xhr.setRequestHeader("X-CACHE-BYPASS", "1");
+ xhr.withCredentials = true;
+ xhr.send(payload);
+ } catch (e) {
+ // Silently ignore errors during unload
+ }
+ }, []);
+
+ const handleEventButtonClick = useCallback(() => {
+ if (isRecording) {
+ endEvent();
+ } else {
+ createEvent();
+ }
+ }, [createEvent, endEvent, isRecording]);
+
+ const [isSnapshotLoading, setIsSnapshotLoading] = useState(false);
+
+ const handleSnapshotClick = useCallback(async () => {
+ setIsSnapshotLoading(true);
+ try {
+ let result: SnapshotResult;
+
+ if (isRestreamed && preferredLiveMode !== "jsmpeg") {
+ // For restreamed streams with video elements (MSE/WebRTC), grab directly from the video element
+ result = await grabVideoSnapshot();
+ } else {
+ // For detect stream or JSMpeg players, use the API endpoint
+ result = await fetchCameraSnapshot(camera.name);
+ }
+
+ if (result.success) {
+ const { dataUrl } = result.data;
+ const filename = generateSnapshotFilename(camera.name);
+ downloadSnapshot(dataUrl, filename);
+ toast.success(t("snapshot.downloadStarted"));
+ } else {
+ toast.error(t("snapshot.captureFailed"));
+ }
+ } finally {
+ setIsSnapshotLoading(false);
+ }
+ }, [camera.name, isRestreamed, preferredLiveMode, t]);
+
+ useEffect(() => {
+ // Handle page unload/close (browser close, tab close, refresh, navigation to external site)
+ const handleBeforeUnload = () => {
+ if (recordingEventIdRef.current) {
+ endEventViaBeacon();
+ }
+ };
+
+ window.addEventListener("beforeunload", handleBeforeUnload);
+
+ // ensure manual event is stopped when component unmounts
+ return () => {
+ window.removeEventListener("beforeunload", handleBeforeUnload);
+
+ if (recordingEventIdRef.current) {
+ endEvent();
+ }
+ };
+ // mount/unmount only
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, []);
+
+ // desktop shows icons as part of the row
+ if (isDesktop || isTablet) {
+ return (
+ <>
+ {isAdmin && (
+ <>
+ sendEnabled(enabledState == "ON" ? "OFF" : "ON")}
+ disabled={debug}
+ />
+ sendDetect(detectState == "ON" ? "OFF" : "ON")}
+ disabled={!cameraEnabled}
+ />
+ sendRecord(recordState == "ON" ? "OFF" : "ON")}
+ disabled={!cameraEnabled}
+ />
+ sendSnapshot(snapshotState == "ON" ? "OFF" : "ON")}
+ disabled={!cameraEnabled}
+ />
+ {audioDetectEnabled && (
+ sendAudio(audioState == "ON" ? "OFF" : "ON")}
+ disabled={!cameraEnabled}
+ />
+ )}
+ {audioDetectEnabled && transcriptionEnabled && (
+
+ sendTranscription(transcriptionState == "ON" ? "OFF" : "ON")
+ }
+ disabled={!cameraEnabled || audioState == "OFF"}
+ />
+ )}
+ {autotrackingEnabled && (
+
+ sendAutotracking(autotrackingState == "ON" ? "OFF" : "ON")
+ }
+ disabled={!cameraEnabled}
+ />
+ )}
+ >
+ )}
+
+
+ {!fullscreen && (
+
+
+
+
+
+
+
+
+ {!isRestreamed && (
+
+
+ {t("streaming.label", { ns: "components/dialog" })}
+
+
+
+
+ {t("streaming.restreaming.disabled", {
+ ns: "components/dialog",
+ })}
+
+
+
+
+
+
+ {t("button.info", { ns: "common" })}
+
+
+
+
+ {t("streaming.restreaming.desc.title", {
+ ns: "components/dialog",
+ })}
+
+
+ {t("readTheDocumentation", { ns: "common" })}
+
+
+
+
+
+
+
+ )}
+ {isRestreamed &&
+ Object.values(camera.live.streams).length > 0 && (
+
+
+ {t("stream.title")}
+
+
{
+ setStreamName?.(value);
+ }}
+ >
+
+
+ {Object.keys(camera.live.streams).find(
+ (key) => camera.live.streams[key] === streamName,
+ )}
+
+
+
+
+
+ {Object.entries(camera.live.streams).map(
+ ([stream, name]) => (
+
+ {stream}
+
+ ),
+ )}
+
+
+
+
+ {debug && (
+
+ <>
+
+
{t("stream.debug.picker")}
+ >
+
+ )}
+
+ {preferredLiveMode != "jsmpeg" &&
+ !debug &&
+ isRestreamed && (
+
+ {supportsAudioOutput ? (
+ <>
+
+
{t("stream.audio.available")}
+ >
+ ) : (
+ <>
+
+
{t("stream.audio.unavailable")}
+
+
+
+
+
+ {t("button.info", { ns: "common" })}
+
+
+
+
+ {t("stream.audio.tips.title")}
+
+
+ {t("readTheDocumentation", {
+ ns: "common",
+ })}
+
+
+
+
+
+ >
+ )}
+
+ )}
+ {preferredLiveMode != "jsmpeg" &&
+ !debug &&
+ isRestreamed &&
+ supportsAudioOutput && (
+
+ {supports2WayTalk ? (
+ <>
+
+
{t("stream.twoWayTalk.available")}
+ >
+ ) : (
+ <>
+
+
{t("stream.twoWayTalk.unavailable")}
+
+
+
+
+
+ {t("button.info", { ns: "common" })}
+
+
+
+
+ {t("stream.twoWayTalk.tips")}
+
+
+ {t("readTheDocumentation", {
+ ns: "common",
+ })}
+
+
+
+
+
+ >
+ )}
+
+ )}
+
+ {preferredLiveMode == "jsmpeg" &&
+ !debug &&
+ isRestreamed && (
+
+
+
+
+
+ {t("stream.lowBandwidth.tips")}
+
+
+
setLowBandwidth(false)}
+ >
+
+
+ {t("stream.lowBandwidth.resetStream")}
+
+
+
+ )}
+
+ )}
+ {isRestreamed && (
+
+
+
+ {t("stream.playInBackground.label")}
+
+
+ setPlayInBackground(checked)
+ }
+ />
+
+
+ {t("stream.playInBackground.tips")}
+
+
+ )}
+
+
+
+ {t("streaming.showStats.label", {
+ ns: "components/dialog",
+ })}
+
+ setShowStats(checked)}
+ />
+
+
+ {t("streaming.showStats.desc", {
+ ns: "components/dialog",
+ })}
+
+
+
+
+
+ {t("streaming.debugView", {
+ ns: "components/dialog",
+ })}
+
+ setDebug(checked)}
+ />
+
+
+
+
+
+ )}
+ >
+ );
+ }
+
+ // mobile doesn't show settings in fullscreen view
+ if (fullscreen) {
+ return;
+ }
+
+ return (
+
+
+
+
+
+
+ <>
+ {isAdmin && (
+ <>
+
+ sendEnabled(enabledState == "ON" ? "OFF" : "ON")
+ }
+ />
+
+ sendDetect(detectState == "ON" ? "OFF" : "ON")
+ }
+ />
+ {recordingEnabled && (
+
+ sendRecord(recordState == "ON" ? "OFF" : "ON")
+ }
+ />
+ )}
+
+ sendSnapshot(snapshotState == "ON" ? "OFF" : "ON")
+ }
+ />
+ {audioDetectEnabled && (
+
+ sendAudio(audioState == "ON" ? "OFF" : "ON")
+ }
+ />
+ )}
+ {audioDetectEnabled && transcriptionEnabled && (
+
+ sendTranscription(
+ transcriptionState == "ON" ? "OFF" : "ON",
+ )
+ }
+ />
+ )}
+ {autotrackingEnabled && (
+
+ sendAutotracking(autotrackingState == "ON" ? "OFF" : "ON")
+ }
+ />
+ )}
+ >
+ )}
+
+
+ {!isRestreamed && (
+
+
{t("stream.title")}
+
+
+
+ {t("streaming.restreaming.disabled", {
+ ns: "components/dialog",
+ })}
+
+
+
+
+
+
+ {t("button.info", { ns: "common" })}
+
+
+
+
+ {t("streaming.restreaming.desc.title", {
+ ns: "components/dialog",
+ })}
+
+
+ {t("readTheDocumentation", { ns: "common" })}
+
+
+
+
+
+
+
+ )}
+ {isRestreamed &&
+ Object.values(camera.live.streams).length > 0 && (
+
+
{t("stream.title")}
+
{
+ setStreamName?.(value);
+ }}
+ disabled={debug}
+ >
+
+
+ {Object.keys(camera.live.streams).find(
+ (key) => camera.live.streams[key] === streamName,
+ )}
+
+
+
+
+
+ {Object.entries(camera.live.streams).map(
+ ([stream, name]) => (
+
+ {stream}
+
+ ),
+ )}
+
+
+
+
+ {debug && (
+
+ <>
+
+
{t("stream.debug.picker")}
+ >
+
+ )}
+
+ {preferredLiveMode != "jsmpeg" &&
+ !debug &&
+ isRestreamed && (
+
+ {supportsAudioOutput ? (
+ <>
+
+
{t("stream.audio.available")}
+ >
+ ) : (
+ <>
+
+
{t("stream.audio.unavailable")}
+
+
+
+
+
+ {t("button.info", { ns: "common" })}
+
+
+
+
+ {t("stream.audio.tips.title")}
+
+
+ {t("readTheDocumentation", {
+ ns: "common",
+ })}
+
+
+
+
+
+ >
+ )}
+
+ )}
+ {preferredLiveMode != "jsmpeg" &&
+ !debug &&
+ isRestreamed &&
+ supportsAudioOutput && (
+
+ {supports2WayTalk ? (
+ <>
+
+
{t("stream.twoWayTalk.available")}
+ >
+ ) : (
+ <>
+
+
{t("stream.twoWayTalk.unavailable")}
+
+
+
+
+
+ {t("button.info", { ns: "common" })}
+
+
+
+
+ {t("stream.twoWayTalk.tips")}
+
+
+ {t("readTheDocumentation", {
+ ns: "common",
+ })}
+
+
+
+
+
+ >
+ )}
+
+ )}
+ {preferredLiveMode == "jsmpeg" && isRestreamed && (
+
+
+
+
+ {t("stream.lowBandwidth.tips")}
+
+
+
+ setLowBandwidth(false)}
+ >
+
+
+ {t("stream.lowBandwidth.resetStream")}
+
+
+
+ )}
+
+ )}
+
+
+ {t("manualRecording.title")}
+
+
+
+ {isSnapshotLoading && (
+
+ )}
+ {t("snapshot.takeSnapshot")}
+
+
+ {t("manualRecording." + (isRecording ? "end" : "start"))}
+
+
+
+ {t("manualRecording.tips")}
+
+
+ {isRestreamed && (
+ <>
+
+
+ {
+ setPlayInBackground(checked);
+ }}
+ disabled={debug}
+ />
+
+ {t("manualRecording.playInBackground.desc")}
+
+
+
+
+ {
+ setShowStats(checked);
+ }}
+ disabled={debug}
+ />
+
+ {t("manualRecording.showStats.desc")}
+
+
+ >
+ )}
+
+ setDebug(checked)}
+ />
+
+
+ >
+
+
+
+ );
+}
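
(Note: every feature switch in the settings panel above publishes the flipped ws state — sendEnabled, sendDetect, sendRecord, sendSnapshot, sendAudio, sendTranscription, and sendAutotracking all follow the same `state == "ON" ? "OFF" : "ON"` pattern. A minimal sketch of that pattern follows; the `flip` helper is illustrative only and not part of this patch.)

```typescript
// Illustrative only: the ON/OFF flip used by each settings switch above.
// The send* functions come from the component's ws hooks; `flip` is a
// hypothetical helper, not something this patch adds.
type ToggleState = "ON" | "OFF";

const flip = (state: ToggleState): ToggleState =>
  state === "ON" ? "OFF" : "ON";

// e.g. onCheckedChange={() => sendDetect(flip(detectState))}
```
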
diff --git a/web/src/views/live/LiveDashboardView.tsx b/web/src/views/live/LiveDashboardView.tsx
index a25741f63..96ae0d359 100644
--- a/web/src/views/live/LiveDashboardView.tsx
+++ b/web/src/views/live/LiveDashboardView.tsx
@@ -1,712 +1,719 @@
-import { useFrigateReviews } from "@/api/ws";
-import Logo from "@/components/Logo";
-import { CameraGroupSelector } from "@/components/filter/CameraGroupSelector";
-import { LiveGridIcon, LiveListIcon } from "@/components/icons/LiveIcons";
-import { AnimatedEventCard } from "@/components/card/AnimatedEventCard";
-import BirdseyeLivePlayer from "@/components/player/BirdseyeLivePlayer";
-import LivePlayer from "@/components/player/LivePlayer";
-import { Button } from "@/components/ui/button";
-import { ScrollArea, ScrollBar } from "@/components/ui/scroll-area";
-import {
- Tooltip,
- TooltipContent,
- TooltipProvider,
- TooltipTrigger,
-} from "@/components/ui/tooltip";
-import { useUserPersistence } from "@/hooks/use-user-persistence";
-import {
- AllGroupsStreamingSettings,
- CameraConfig,
- FrigateConfig,
-} from "@/types/frigateConfig";
-import { ReviewSegment } from "@/types/review";
-import {
- useCallback,
- useContext,
- useEffect,
- useMemo,
- useRef,
- useState,
-} from "react";
-import {
- isDesktop,
- isMobile,
- isMobileOnly,
- isTablet,
-} from "react-device-detect";
-import useSWR from "swr";
-import DraggableGridLayout from "./DraggableGridLayout";
-import { IoClose } from "react-icons/io5";
-import { LuLayoutDashboard } from "react-icons/lu";
-import { cn } from "@/lib/utils";
-import {
- AudioState,
- LivePlayerError,
- StatsState,
- VolumeState,
-} from "@/types/live";
-import { FaCompress, FaExpand } from "react-icons/fa";
-import useCameraLiveMode from "@/hooks/use-camera-live-mode";
-import { useResizeObserver } from "@/hooks/resize-observer";
-import LiveContextMenu from "@/components/menu/LiveContextMenu";
-import { useStreamingSettings } from "@/context/streaming-settings-provider";
-import { useTranslation } from "react-i18next";
-import { EmptyCard } from "@/components/card/EmptyCard";
-import { BsFillCameraVideoOffFill } from "react-icons/bs";
-import { AuthContext } from "@/context/auth-context";
-import { useIsAdmin } from "@/hooks/use-is-admin";
-
-type LiveDashboardViewProps = {
- cameras: CameraConfig[];
- cameraGroup: string;
- includeBirdseye: boolean;
- onSelectCamera: (camera: string) => void;
- fullscreen: boolean;
- toggleFullscreen: () => void;
-};
-export default function LiveDashboardView({
- cameras,
- cameraGroup,
- includeBirdseye,
- onSelectCamera,
- fullscreen,
- toggleFullscreen,
-}: LiveDashboardViewProps) {
- const { t } = useTranslation(["views/live"]);
-
- const { data: config } = useSWR<FrigateConfig>("config");
-
- // layout
-
- const [mobileLayout, setMobileLayout] = useUserPersistence<"grid" | "list">(
- "live-layout",
- isDesktop ? "grid" : "list",
- );
-
- const [isEditMode, setIsEditMode] = useState(false);
- const containerRef = useRef<HTMLDivElement>(null);
- const birdseyeContainerRef = useRef<HTMLDivElement>(null);
-
- // recent events
-
- const eventUpdate = useFrigateReviews();
-
- const alertCameras = useMemo(() => {
- if (!config) {
- return null;
- }
-
- if (cameraGroup == "default") {
- return Object.values(config.cameras)
- .filter((cam) => cam.ui.dashboard)
- .map((cam) => cam.name)
- .join(",");
- }
-
- if (includeBirdseye && cameras.length == 0) {
- return Object.values(config.cameras)
- .filter((cam) => cam.birdseye.enabled)
- .map((cam) => cam.name)
- .join(",");
- }
-
- return cameras
- .map((cam) => cam.name)
- .filter((cam) => config.camera_groups[cameraGroup]?.cameras.includes(cam))
- .join(",");
- }, [cameras, cameraGroup, config, includeBirdseye]);
-
- const { data: allEvents, mutate: updateEvents } = useSWR<ReviewSegment[]>([
- "review",
- {
- limit: 10,
- severity: "alert",
- reviewed: 0,
- cameras: alertCameras,
- },
- ]);
-
- useEffect(() => {
- if (!eventUpdate) {
- return;
- }
-
- // if the event has ended and was saved, update the events list
- if (eventUpdate.after.severity == "alert") {
- if (
- eventUpdate.type == "end" ||
- eventUpdate.type == "new" ||
- eventUpdate.type == "genai"
- ) {
- setTimeout(
- () => updateEvents(),
- eventUpdate.type == "end" ? 1000 : 6000,
- );
- } else if (
- eventUpdate.before.data.objects.length <
- eventUpdate.after.data.objects.length
- ) {
- setTimeout(() => updateEvents(), 5000);
- }
-
- return;
- }
- }, [eventUpdate, updateEvents]);
-
- const events = useMemo(() => {
- if (!allEvents) {
- return [];
- }
-
- const date = new Date();
- date.setHours(date.getHours() - 1);
- const cutoff = date.getTime() / 1000;
- return allEvents.filter((event) => event.start_time > cutoff);
- }, [allEvents]);
-
- // camera live views
-
- const [{ height: containerHeight }] = useResizeObserver(containerRef);
-
- const hasScrollbar = useMemo(() => {
- if (containerHeight && containerRef.current) {
- return (
- containerRef.current.offsetHeight < containerRef.current.scrollHeight
- );
- }
- }, [containerRef, containerHeight]);
-
- const [windowVisible, setWindowVisible] = useState(true);
- const visibilityListener = useCallback(() => {
- setWindowVisible(document.visibilityState == "visible");
- }, []);
-
- useEffect(() => {
- addEventListener("visibilitychange", visibilityListener);
-
- return () => {
- removeEventListener("visibilitychange", visibilityListener);
- };
- }, [visibilityListener]);
-
- const [visibleCameras, setVisibleCameras] = useState<string[]>([]);
- const visibleCameraObserver = useRef<IntersectionObserver | null>(null);
- useEffect(() => {
- const visibleCameras = new Set<string>();
- visibleCameraObserver.current = new IntersectionObserver(
- (entries) => {
- entries.forEach((entry) => {
- const camera = (entry.target as HTMLElement).dataset.camera;
-
- if (!camera) {
- return;
- }
-
- if (entry.isIntersecting) {
- visibleCameras.add(camera);
- } else {
- visibleCameras.delete(camera);
- }
-
- setVisibleCameras([...visibleCameras]);
- });
- },
- { threshold: 0.5 },
- );
-
- return () => {
- visibleCameraObserver.current?.disconnect();
- };
- }, []);
-
- const [globalAutoLive] = useUserPersistence("autoLiveView", true);
- const [displayCameraNames] = useUserPersistence("displayCameraNames", false);
-
- const { allGroupsStreamingSettings, setAllGroupsStreamingSettings } =
- useStreamingSettings();
-
- const currentGroupStreamingSettings = useMemo(() => {
- if (cameraGroup && cameraGroup != "default" && allGroupsStreamingSettings) {
- return allGroupsStreamingSettings[cameraGroup];
- }
- }, [allGroupsStreamingSettings, cameraGroup]);
-
- const cameraRef = useCallback(
- (node: HTMLElement | null) => {
- if (!visibleCameraObserver.current) {
- return;
- }
-
- try {
- if (node) visibleCameraObserver.current.observe(node);
- } catch (e) {
- // no op
- }
- },
- // we need to re-create this callback when the ref's current value changes
- // eslint-disable-next-line react-hooks/exhaustive-deps
- [visibleCameraObserver.current],
- );
-
- const activeStreams = useMemo(() => {
- const streams: { [cameraName: string]: string } = {};
- cameras.forEach((camera) => {
- const availableStreams = camera.live.streams || {};
- const streamNameFromSettings =
- currentGroupStreamingSettings?.[camera.name]?.streamName || "";
- const streamExists =
- streamNameFromSettings &&
- Object.values(availableStreams).includes(streamNameFromSettings);
-
- const streamName = streamExists
- ? streamNameFromSettings
- : Object.values(availableStreams)[0] || "";
-
- streams[camera.name] = streamName;
- });
- return streams;
- }, [cameras, currentGroupStreamingSettings]);
-
- const {
- preferredLiveModes,
- setPreferredLiveModes,
- resetPreferredLiveMode,
- isRestreamedStates,
- supportsAudioOutputStates,
- streamMetadata,
- } = useCameraLiveMode(cameras, windowVisible, activeStreams);
-
- const birdseyeConfig = useMemo(() => config?.birdseye, [config]);
-
- const handleError = useCallback(
- (cameraName: string, error: LivePlayerError) => {
- setPreferredLiveModes((prevModes) => {
- const newModes = { ...prevModes };
- if (error === "mse-decode") {
- newModes[cameraName] = "webrtc";
- } else {
- newModes[cameraName] = "jsmpeg";
- }
- return newModes;
- });
- },
- [setPreferredLiveModes],
- );
-
- // audio states
-
- const [audioStates, setAudioStates] = useState<AudioState>({});
- const [volumeStates, setVolumeStates] = useState<VolumeState>({});
- const [statsStates, setStatsStates] = useState<StatsState>({});
-
- const toggleStats = (cameraName: string): void => {
- setStatsStates((prev) => ({
- ...prev,
- [cameraName]: !prev[cameraName],
- }));
- };
-
- useEffect(() => {
- if (!allGroupsStreamingSettings) {
- return;
- }
-
- const initialAudioStates: AudioState = {};
- const initialVolumeStates: VolumeState = {};
-
- Object.entries(allGroupsStreamingSettings).forEach(([_, groupSettings]) => {
- if (groupSettings) {
- Object.entries(groupSettings).forEach(([camera, cameraSettings]) => {
- initialAudioStates[camera] = cameraSettings.playAudio ?? false;
- initialVolumeStates[camera] = cameraSettings.volume ?? 1;
- });
- }
- });
-
- setAudioStates(initialAudioStates);
- setVolumeStates(initialVolumeStates);
- }, [allGroupsStreamingSettings]);
-
- const toggleAudio = (cameraName: string): void => {
- setAudioStates((prev) => ({
- ...prev,
- [cameraName]: !prev[cameraName],
- }));
- };
-
- const onSaveMuting = useCallback(
- (playAudio: boolean) => {
- if (
- !cameraGroup ||
- !allGroupsStreamingSettings ||
- cameraGroup == "default"
- ) {
- return;
- }
-
- const existingGroupSettings =
- allGroupsStreamingSettings[cameraGroup] || {};
-
- const updatedSettings: AllGroupsStreamingSettings = {
- ...Object.fromEntries(
- Object.entries(allGroupsStreamingSettings || {}).filter(
- ([key]) => key !== cameraGroup,
- ),
- ),
- [cameraGroup]: {
- ...existingGroupSettings,
- ...Object.fromEntries(
- Object.entries(existingGroupSettings).map(
- ([cameraName, settings]) => [
- cameraName,
- {
- ...settings,
- playAudio: playAudio,
- },
- ],
- ),
- ),
- },
- };
-
- setAllGroupsStreamingSettings?.(updatedSettings);
- },
- [cameraGroup, allGroupsStreamingSettings, setAllGroupsStreamingSettings],
- );
-
- const muteAll = (): void => {
- const updatedStates: Record<string, boolean> = {};
- visibleCameras.forEach((cameraName) => {
- updatedStates[cameraName] = false;
- });
- setAudioStates(updatedStates);
- onSaveMuting(false);
- };
-
- const unmuteAll = (): void => {
- const updatedStates: Record<string, boolean> = {};
- visibleCameras.forEach((cameraName) => {
- updatedStates[cameraName] = true;
- });
- setAudioStates(updatedStates);
- onSaveMuting(true);
- };
-
- return (
-
- {isMobile && (
-
-
-
-
-
- {(!cameraGroup || cameraGroup == "default" || isMobileOnly) && (
-
- setMobileLayout("grid")}
- >
-
-
- setMobileLayout("list")}
- >
-
-
-
- )}
- {cameraGroup && cameraGroup !== "default" && isTablet && (
-
-
- setIsEditMode((prevIsEditMode) => !prevIsEditMode)
- }
- >
- {isEditMode ? : }
-
-
- )}
-
- )}
-
- {cameras.length == 0 && !includeBirdseye ? (
-
- ) : (
- <>
- {!fullscreen && events && events.length > 0 && (
-
-
-
- {events.map((event) => {
- return (
-
- );
- })}
-
-
-
-
- )}
-
- {!cameraGroup || cameraGroup == "default" || isMobileOnly ? (
- <>
-
- {includeBirdseye && birdseyeConfig?.enabled && (
-
- {
- const aspectRatio =
- birdseyeConfig.width / birdseyeConfig.height;
- if (aspectRatio > 2) {
- return `${mobileLayout == "grid" && "col-span-2"} aspect-wide`;
- } else if (aspectRatio < 1) {
- return `${mobileLayout == "grid" && "row-span-2 h-full"} aspect-tall`;
- } else {
- return "aspect-video";
- }
- })()}
- ref={birdseyeContainerRef}
- >
- onSelectCamera("birdseye")}
- containerRef={birdseyeContainerRef}
- />
-
- )}
- {cameras.map((camera) => {
- let grow;
- const aspectRatio =
- camera.detect.width / camera.detect.height;
- if (aspectRatio > 2) {
- grow = `${mobileLayout == "grid" && "col-span-2"} aspect-wide`;
- } else if (aspectRatio < 1) {
- grow = `${mobileLayout == "grid" && "row-span-2 h-full"} aspect-tall`;
- } else {
- grow = "aspect-video";
- }
- const availableStreams = camera.live.streams || {};
- const firstStreamEntry =
- Object.values(availableStreams)[0] || "";
-
- const streamNameFromSettings =
- currentGroupStreamingSettings?.[camera.name]?.streamName ||
- "";
- const streamExists =
- streamNameFromSettings &&
- Object.values(availableStreams).includes(
- streamNameFromSettings,
- );
-
- const streamName = streamExists
- ? streamNameFromSettings
- : firstStreamEntry;
- const streamType =
- currentGroupStreamingSettings?.[camera.name]?.streamType;
- const autoLive =
- streamType !== undefined
- ? streamType !== "no-streaming"
- : undefined;
- const showStillWithoutActivity =
- currentGroupStreamingSettings?.[camera.name]?.streamType !==
- "continuous";
- const useWebGL =
- currentGroupStreamingSettings?.[camera.name]
- ?.compatibilityMode || false;
- return (
-
- toggleAudio(camera.name)}
- statsState={statsStates[camera.name]}
- toggleStats={() => toggleStats(camera.name)}
- volumeState={volumeStates[camera.name] ?? 1}
- setVolumeState={(value) =>
- setVolumeStates({
- [camera.name]: value,
- })
- }
- muteAll={muteAll}
- unmuteAll={unmuteAll}
- resetPreferredLiveMode={() =>
- resetPreferredLiveMode(camera.name)
- }
- config={config}
- >
- onSelectCamera(camera.name)}
- onError={(e) => handleError(camera.name, e)}
- onResetLiveMode={() =>
- resetPreferredLiveMode(camera.name)
- }
- playAudio={audioStates[camera.name] ?? false}
- volume={volumeStates[camera.name]}
- />
-
- );
- })}
-
- {isDesktop && (
-
-
-
-
- {fullscreen ? (
-
- ) : (
-
- )}
-
-
-
- {fullscreen
- ? t("button.exitFullscreen", { ns: "common" })
- : t("button.fullscreen", { ns: "common" })}
-
-
-
- )}
- >
- ) : (
-
- )}
- >
- )}
-
- );
-}
-
-function NoCameraView({ cameraGroup }: { cameraGroup?: string }) {
- const { t } = useTranslation(["views/live"]);
- const { auth } = useContext(AuthContext);
- const isAdmin = useIsAdmin();
-
- const isDefault = cameraGroup === "default";
- const isRestricted = !isAdmin && auth.isAuthenticated;
-
- let type: "default" | "group" | "restricted";
- if (isRestricted) {
- type = "restricted";
- } else if (isDefault) {
- type = "default";
- } else {
- type = "group";
- }
-
- return (
-
- }
- title={t(`noCameras.${type}.title`)}
- description={t(`noCameras.${type}.description`)}
- buttonText={
- type !== "restricted" && isDefault
- ? t(`noCameras.${type}.buttonText`)
- : undefined
- }
- link={
- type !== "restricted" && isDefault
- ? "/settings?page=cameraManagement"
- : undefined
- }
- />
-
- );
-}
+import { useFrigateReviews } from "@/api/ws";
+import Logo from "@/components/Logo";
+import { CameraGroupSelector } from "@/components/filter/CameraGroupSelector";
+import { LiveGridIcon, LiveListIcon } from "@/components/icons/LiveIcons";
+import { AnimatedEventCard } from "@/components/card/AnimatedEventCard";
+import BirdseyeLivePlayer from "@/components/player/BirdseyeLivePlayer";
+import LivePlayer from "@/components/player/LivePlayer";
+import { Button } from "@/components/ui/button";
+import { ScrollArea, ScrollBar } from "@/components/ui/scroll-area";
+import {
+ Tooltip,
+ TooltipContent,
+ TooltipProvider,
+ TooltipTrigger,
+} from "@/components/ui/tooltip";
+import { useUserPersistence } from "@/hooks/use-user-persistence";
+import {
+ AllGroupsStreamingSettings,
+ CameraConfig,
+ FrigateConfig,
+} from "@/types/frigateConfig";
+import { ReviewSegment } from "@/types/review";
+import {
+ useCallback,
+ useContext,
+ useEffect,
+ useMemo,
+ useRef,
+ useState,
+} from "react";
+import {
+ isDesktop,
+ isMobile,
+ isMobileOnly,
+ isTablet,
+} from "react-device-detect";
+import useSWR from "swr";
+import DraggableGridLayout from "./DraggableGridLayout";
+import { IoClose } from "react-icons/io5";
+import { LuLayoutDashboard } from "react-icons/lu";
+import { cn } from "@/lib/utils";
+import {
+ AudioState,
+ LivePlayerError,
+ StatsState,
+ VolumeState,
+} from "@/types/live";
+import { FaCompress, FaExpand } from "react-icons/fa";
+import useCameraLiveMode from "@/hooks/use-camera-live-mode";
+import { useResizeObserver } from "@/hooks/resize-observer";
+import LiveContextMenu from "@/components/menu/LiveContextMenu";
+import { useStreamingSettings } from "@/context/streaming-settings-provider";
+import { useTranslation } from "react-i18next";
+import { EmptyCard } from "@/components/card/EmptyCard";
+import { BsFillCameraVideoOffFill } from "react-icons/bs";
+import { AuthContext } from "@/context/auth-context";
+import { useIsAdmin } from "@/hooks/use-is-admin";
+import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";
+import { chooseAutoLiveStream } from "@/utils/liveStreamSelection";
+
+type LiveDashboardViewProps = {
+ cameras: CameraConfig[];
+ cameraGroup: string;
+ includeBirdseye: boolean;
+ onSelectCamera: (camera: string) => void;
+ fullscreen: boolean;
+ toggleFullscreen: () => void;
+};
+export default function LiveDashboardView({
+ cameras,
+ cameraGroup,
+ includeBirdseye,
+ onSelectCamera,
+ fullscreen,
+ toggleFullscreen,
+}: LiveDashboardViewProps) {
+ const { t } = useTranslation(["views/live"]);
+
+ const { data: config } = useSWR<FrigateConfig>("config");
+
+ // layout
+
+ const [mobileLayout, setMobileLayout] = useUserPersistence<"grid" | "list">(
+ "live-layout",
+ isDesktop ? "grid" : "list",
+ );
+
+ const [isEditMode, setIsEditMode] = useState(false);
+ const containerRef = useRef<HTMLDivElement>(null);
+ const birdseyeContainerRef = useRef<HTMLDivElement>(null);
+
+ // recent events
+
+ const eventUpdate = useFrigateReviews();
+
+ const alertCameras = useMemo(() => {
+ if (!config) {
+ return null;
+ }
+
+ if (cameraGroup == "default") {
+ return Object.values(config.cameras)
+ .filter((cam) => cam.ui.dashboard)
+ .map((cam) => cam.name)
+ .join(",");
+ }
+
+ if (includeBirdseye && cameras.length == 0) {
+ return Object.values(config.cameras)
+ .filter((cam) => cam.birdseye.enabled)
+ .map((cam) => cam.name)
+ .join(",");
+ }
+
+ return cameras
+ .map((cam) => cam.name)
+ .filter((cam) => config.camera_groups[cameraGroup]?.cameras.includes(cam))
+ .join(",");
+ }, [cameras, cameraGroup, config, includeBirdseye]);
+
+ const { data: allEvents, mutate: updateEvents } = useSWR<ReviewSegment[]>([
+ "review",
+ {
+ limit: 10,
+ severity: "alert",
+ reviewed: 0,
+ cameras: alertCameras,
+ },
+ ]);
+
+ useEffect(() => {
+ if (!eventUpdate) {
+ return;
+ }
+
+ // if the event has ended and was saved, update the events list
+ if (eventUpdate.after.severity == "alert") {
+ if (
+ eventUpdate.type == "end" ||
+ eventUpdate.type == "new" ||
+ eventUpdate.type == "genai"
+ ) {
+ setTimeout(
+ () => updateEvents(),
+ eventUpdate.type == "end" ? 1000 : 6000,
+ );
+ } else if (
+ eventUpdate.before.data.objects.length <
+ eventUpdate.after.data.objects.length
+ ) {
+ setTimeout(() => updateEvents(), 5000);
+ }
+
+ return;
+ }
+ }, [eventUpdate, updateEvents]);
+
+ const events = useMemo(() => {
+ if (!allEvents) {
+ return [];
+ }
+
+ const date = new Date();
+ date.setHours(date.getHours() - 1);
+ const cutoff = date.getTime() / 1000;
+ return allEvents.filter((event) => event.start_time > cutoff);
+ }, [allEvents]);
+
+ // camera live views
+
+ const [{ height: containerHeight }] = useResizeObserver(containerRef);
+
+ const hasScrollbar = useMemo(() => {
+ if (containerHeight && containerRef.current) {
+ return (
+ containerRef.current.offsetHeight < containerRef.current.scrollHeight
+ );
+ }
+ }, [containerRef, containerHeight]);
+
+ const [windowVisible, setWindowVisible] = useState(true);
+ const visibilityListener = useCallback(() => {
+ setWindowVisible(document.visibilityState == "visible");
+ }, []);
+
+ useEffect(() => {
+ addEventListener("visibilitychange", visibilityListener);
+
+ return () => {
+ removeEventListener("visibilitychange", visibilityListener);
+ };
+ }, [visibilityListener]);
+
+ const [visibleCameras, setVisibleCameras] = useState<string[]>([]);
+ const playbackCapabilities = usePlaybackCapabilities([]);
+ const visibleCameraObserver = useRef<IntersectionObserver | null>(null);
+ useEffect(() => {
+ const visibleCameras = new Set<string>();
+ visibleCameraObserver.current = new IntersectionObserver(
+ (entries) => {
+ entries.forEach((entry) => {
+ const camera = (entry.target as HTMLElement).dataset.camera;
+
+ if (!camera) {
+ return;
+ }
+
+ if (entry.isIntersecting) {
+ visibleCameras.add(camera);
+ } else {
+ visibleCameras.delete(camera);
+ }
+
+ setVisibleCameras([...visibleCameras]);
+ });
+ },
+ { threshold: 0.5 },
+ );
+
+ return () => {
+ visibleCameraObserver.current?.disconnect();
+ };
+ }, []);
+
+ const [globalAutoLive] = useUserPersistence("autoLiveView", true);
+ const [displayCameraNames] = useUserPersistence("displayCameraNames", false);
+
+ const { allGroupsStreamingSettings, setAllGroupsStreamingSettings } =
+ useStreamingSettings();
+
+ const currentGroupStreamingSettings = useMemo(() => {
+ if (cameraGroup && cameraGroup != "default" && allGroupsStreamingSettings) {
+ return allGroupsStreamingSettings[cameraGroup];
+ }
+ }, [allGroupsStreamingSettings, cameraGroup]);
+
+ const cameraRef = useCallback(
+ (node: HTMLElement | null) => {
+ if (!visibleCameraObserver.current) {
+ return;
+ }
+
+ try {
+ if (node) visibleCameraObserver.current.observe(node);
+ } catch (e) {
+ // no op
+ }
+ },
+ // we need to re-create this callback when the ref's current value changes
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ [visibleCameraObserver.current],
+ );
+
+ const activeStreams = useMemo(() => {
+ const streams: { [cameraName: string]: string } = {};
+ cameras.forEach((camera) => {
+ const availableStreams = camera.live.streams || {};
+ const streamNameFromSettings =
+ currentGroupStreamingSettings?.[camera.name]?.streamName || "";
+ const streamExists =
+ streamNameFromSettings &&
+ Object.values(availableStreams).includes(streamNameFromSettings);
+
+ const streamName = streamExists
+ ? streamNameFromSettings
+ : chooseAutoLiveStream(
+ availableStreams,
+ playbackCapabilities.estimatedBandwidthBps,
+ playbackCapabilities.saveData,
+ );
+
+ streams[camera.name] = streamName;
+ });
+ return streams;
+ }, [cameras, currentGroupStreamingSettings, playbackCapabilities]);
+
+ const {
+ preferredLiveModes,
+ setPreferredLiveModes,
+ resetPreferredLiveMode,
+ isRestreamedStates,
+ supportsAudioOutputStates,
+ streamMetadata,
+ } = useCameraLiveMode(cameras, windowVisible, activeStreams);
+
+ const birdseyeConfig = useMemo(() => config?.birdseye, [config]);
+
+ const handleError = useCallback(
+ (cameraName: string, error: LivePlayerError) => {
+ setPreferredLiveModes((prevModes) => {
+ const newModes = { ...prevModes };
+ if (error === "mse-decode") {
+ newModes[cameraName] = "webrtc";
+ } else {
+ newModes[cameraName] = "jsmpeg";
+ }
+ return newModes;
+ });
+ },
+ [setPreferredLiveModes],
+ );
+
+ // audio states
+
+ const [audioStates, setAudioStates] = useState<AudioState>({});
+ const [volumeStates, setVolumeStates] = useState<VolumeState>({});
+ const [statsStates, setStatsStates] = useState<StatsState>({});
+
+ const toggleStats = (cameraName: string): void => {
+ setStatsStates((prev) => ({
+ ...prev,
+ [cameraName]: !prev[cameraName],
+ }));
+ };
+
+ useEffect(() => {
+ if (!allGroupsStreamingSettings) {
+ return;
+ }
+
+ const initialAudioStates: AudioState = {};
+ const initialVolumeStates: VolumeState = {};
+
+ Object.entries(allGroupsStreamingSettings).forEach(([_, groupSettings]) => {
+ if (groupSettings) {
+ Object.entries(groupSettings).forEach(([camera, cameraSettings]) => {
+ initialAudioStates[camera] = cameraSettings.playAudio ?? false;
+ initialVolumeStates[camera] = cameraSettings.volume ?? 1;
+ });
+ }
+ });
+
+ setAudioStates(initialAudioStates);
+ setVolumeStates(initialVolumeStates);
+ }, [allGroupsStreamingSettings]);
+
+ const toggleAudio = (cameraName: string): void => {
+ setAudioStates((prev) => ({
+ ...prev,
+ [cameraName]: !prev[cameraName],
+ }));
+ };
+
+ const onSaveMuting = useCallback(
+ (playAudio: boolean) => {
+ if (
+ !cameraGroup ||
+ !allGroupsStreamingSettings ||
+ cameraGroup == "default"
+ ) {
+ return;
+ }
+
+ const existingGroupSettings =
+ allGroupsStreamingSettings[cameraGroup] || {};
+
+ const updatedSettings: AllGroupsStreamingSettings = {
+ ...Object.fromEntries(
+ Object.entries(allGroupsStreamingSettings || {}).filter(
+ ([key]) => key !== cameraGroup,
+ ),
+ ),
+ [cameraGroup]: {
+ ...existingGroupSettings,
+ ...Object.fromEntries(
+ Object.entries(existingGroupSettings).map(
+ ([cameraName, settings]) => [
+ cameraName,
+ {
+ ...settings,
+ playAudio: playAudio,
+ },
+ ],
+ ),
+ ),
+ },
+ };
+
+ setAllGroupsStreamingSettings?.(updatedSettings);
+ },
+ [cameraGroup, allGroupsStreamingSettings, setAllGroupsStreamingSettings],
+ );
+
+ const muteAll = (): void => {
+ const updatedStates: Record<string, boolean> = {};
+ visibleCameras.forEach((cameraName) => {
+ updatedStates[cameraName] = false;
+ });
+ setAudioStates(updatedStates);
+ onSaveMuting(false);
+ };
+
+ const unmuteAll = (): void => {
+ const updatedStates: Record<string, boolean> = {};
+ visibleCameras.forEach((cameraName) => {
+ updatedStates[cameraName] = true;
+ });
+ setAudioStates(updatedStates);
+ onSaveMuting(true);
+ };
+
+ return (
+
+ {isMobile && (
+
+
+
+
+
+ {(!cameraGroup || cameraGroup == "default" || isMobileOnly) && (
+
+ setMobileLayout("grid")}
+ >
+
+
+ setMobileLayout("list")}
+ >
+
+
+
+ )}
+ {cameraGroup && cameraGroup !== "default" && isTablet && (
+
+
+ setIsEditMode((prevIsEditMode) => !prevIsEditMode)
+ }
+ >
+ {isEditMode ? : }
+
+
+ )}
+
+ )}
+
+ {cameras.length == 0 && !includeBirdseye ? (
+
+ ) : (
+ <>
+ {!fullscreen && events && events.length > 0 && (
+
+
+
+ {events.map((event) => {
+ return (
+
+ );
+ })}
+
+
+
+
+ )}
+
+ {!cameraGroup || cameraGroup == "default" || isMobileOnly ? (
+ <>
+
+ {includeBirdseye && birdseyeConfig?.enabled && (
+
+ {
+ const aspectRatio =
+ birdseyeConfig.width / birdseyeConfig.height;
+ if (aspectRatio > 2) {
+ return `${mobileLayout == "grid" && "col-span-2"} aspect-wide`;
+ } else if (aspectRatio < 1) {
+ return `${mobileLayout == "grid" && "row-span-2 h-full"} aspect-tall`;
+ } else {
+ return "aspect-video";
+ }
+ })()}
+ ref={birdseyeContainerRef}
+ >
+ onSelectCamera("birdseye")}
+ containerRef={birdseyeContainerRef}
+ />
+
+ )}
+ {cameras.map((camera) => {
+ let grow;
+ const aspectRatio =
+ camera.detect.width / camera.detect.height;
+ if (aspectRatio > 2) {
+ grow = `${mobileLayout == "grid" && "col-span-2"} aspect-wide`;
+ } else if (aspectRatio < 1) {
+ grow = `${mobileLayout == "grid" && "row-span-2 h-full"} aspect-tall`;
+ } else {
+ grow = "aspect-video";
+ }
+ const availableStreams = camera.live.streams || {};
+ const firstStreamEntry =
+ Object.values(availableStreams)[0] || "";
+
+ const streamNameFromSettings =
+ currentGroupStreamingSettings?.[camera.name]?.streamName ||
+ "";
+ const streamExists =
+ streamNameFromSettings &&
+ Object.values(availableStreams).includes(
+ streamNameFromSettings,
+ );
+
+ const streamName = streamExists
+ ? streamNameFromSettings
+ : firstStreamEntry;
+ const streamType =
+ currentGroupStreamingSettings?.[camera.name]?.streamType;
+ const autoLive =
+ streamType !== undefined
+ ? streamType !== "no-streaming"
+ : undefined;
+ const showStillWithoutActivity =
+ currentGroupStreamingSettings?.[camera.name]?.streamType !==
+ "continuous";
+ const useWebGL =
+ currentGroupStreamingSettings?.[camera.name]
+ ?.compatibilityMode || false;
+ return (
+
+ toggleAudio(camera.name)}
+ statsState={statsStates[camera.name]}
+ toggleStats={() => toggleStats(camera.name)}
+ volumeState={volumeStates[camera.name] ?? 1}
+ setVolumeState={(value) =>
+ setVolumeStates({
+ [camera.name]: value,
+ })
+ }
+ muteAll={muteAll}
+ unmuteAll={unmuteAll}
+ resetPreferredLiveMode={() =>
+ resetPreferredLiveMode(camera.name)
+ }
+ config={config}
+ >
+ onSelectCamera(camera.name)}
+ onError={(e) => handleError(camera.name, e)}
+ onResetLiveMode={() =>
+ resetPreferredLiveMode(camera.name)
+ }
+ playAudio={audioStates[camera.name] ?? false}
+ volume={volumeStates[camera.name]}
+ />
+
+ );
+ })}
+
+ {isDesktop && (
+
+
+
+
+ {fullscreen ? (
+
+ ) : (
+
+ )}
+
+
+
+ {fullscreen
+ ? t("button.exitFullscreen", { ns: "common" })
+ : t("button.fullscreen", { ns: "common" })}
+
+
+
+ )}
+ >
+ ) : (
+
+ )}
+ >
+ )}
+
+ );
+}
+
+function NoCameraView({ cameraGroup }: { cameraGroup?: string }) {
+ const { t } = useTranslation(["views/live"]);
+ const { auth } = useContext(AuthContext);
+ const isAdmin = useIsAdmin();
+
+ const isDefault = cameraGroup === "default";
+ const isRestricted = !isAdmin && auth.isAuthenticated;
+
+ let type: "default" | "group" | "restricted";
+ if (isRestricted) {
+ type = "restricted";
+ } else if (isDefault) {
+ type = "default";
+ } else {
+ type = "group";
+ }
+
+ return (
+
+ }
+ title={t(`noCameras.${type}.title`)}
+ description={t(`noCameras.${type}.description`)}
+ buttonText={
+ type !== "restricted" && isDefault
+ ? t(`noCameras.${type}.buttonText`)
+ : undefined
+ }
+ link={
+ type !== "restricted" && isDefault
+ ? "/settings?page=cameraManagement"
+ : undefined
+ }
+ />
+
+ );
+}
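
The behavioral core of this dashboard change is stream auto-selection: previously the fallback was always the first configured stream (`Object.values(availableStreams)[0] || ""`), while the new code defers to `chooseAutoLiveStream`, feeding it the client's estimated bandwidth and Save-Data flag from `usePlaybackCapabilities`. Neither implementation is included in this diff, so the sketch below is only one plausible shape inferred from the call sites; the Network Information API usage, the 2 Mbps threshold, and the assumption that streams are ordered best-quality-first are all guesses, not the project's actual logic.

```typescript
// Hypothetical sketches of the two new imports; signatures are inferred
// from the call sites in LiveDashboardView, everything else is assumed.
import { useMemo } from "react";

type NetworkInformationLike = {
  downlink?: number; // Mbps
  saveData?: boolean;
};

export type PlaybackCapabilities = {
  estimatedBandwidthBps: number | undefined;
  saveData: boolean;
};

// Reads the (Chromium-only) Network Information API when available.
// The array parameter mirrors the call site usePlaybackCapabilities([]),
// though its real purpose is not visible in this diff.
export default function usePlaybackCapabilities(
  _deps: unknown[],
): PlaybackCapabilities {
  return useMemo(() => {
    const connection = (
      navigator as Navigator & { connection?: NetworkInformationLike }
    ).connection;
    return {
      // downlink is reported in Mbps; convert to bits per second
      estimatedBandwidthBps:
        connection?.downlink !== undefined
          ? connection.downlink * 1_000_000
          : undefined,
      saveData: connection?.saveData ?? false,
    };
  }, []);
}

// Picks a stream name for auto-live viewing. Assumes the configured
// streams are ordered best-first, so a constrained client gets the
// last (lowest-bandwidth) entry instead of the first.
export function chooseAutoLiveStream(
  streams: { [key: string]: string },
  estimatedBandwidthBps: number | undefined,
  saveData: boolean,
): string {
  const names = Object.values(streams);
  if (names.length === 0) {
    return "";
  }
  const constrained =
    saveData ||
    (estimatedBandwidthBps !== undefined && estimatedBandwidthBps < 2_000_000);
  return constrained ? names[names.length - 1] : names[0];
}
```

Whatever the real heuristic is, keeping the result a plain stream-name string means `useCameraLiveMode` and the downstream player components are untouched by how the choice is made.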