Made-with:

This commit is contained in:
ericj 2026-03-17 10:46:42 -07:00
parent 687fefb343
commit 5560af611a
52 changed files with 17247 additions and 14837 deletions

View File

@ -1,356 +1,377 @@
# syntax=docker/dockerfile:1.6
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
# Globally set pip break-system-packages option to avoid having to specify it every time
ARG PIP_BREAK_SYSTEM_PACKAGES=1
ARG BASE_IMAGE=debian:12
ARG SLIM_BASE=debian:12-slim
# A hook that allows us to inject commands right after the base images
ARG BASE_HOOK=
FROM ${BASE_IMAGE} AS base
ARG PIP_BREAK_SYSTEM_PACKAGES
ARG BASE_HOOK
RUN sh -c "$BASE_HOOK"
FROM --platform=${BUILDPLATFORM} debian:12 AS base_host
ARG PIP_BREAK_SYSTEM_PACKAGES
FROM ${SLIM_BASE} AS slim-base
ARG PIP_BREAK_SYSTEM_PACKAGES
ARG BASE_HOOK
RUN sh -c "$BASE_HOOK"
FROM slim-base AS wget
ARG DEBIAN_FRONTEND
RUN apt-get update \
&& apt-get install -y wget xz-utils \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /rootfs
FROM base AS nginx
ARG DEBIAN_FRONTEND
# key=value form — the legacy space-separated ENV syntax is deprecated
ENV CCACHE_DIR=/root/.ccache
ENV CCACHE_MAXSIZE=2G
# Build nginx from a bind-mounted script so the script never enters a layer
RUN --mount=type=bind,source=docker/main/build_nginx.sh,target=/deps/build_nginx.sh \
    /deps/build_nginx.sh
FROM wget AS sqlite-vec
ARG DEBIAN_FRONTEND
# Build sqlite_vec from source
COPY docker/main/build_sqlite_vec.sh /deps/build_sqlite_vec.sh
RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
--mount=type=bind,source=docker/main/build_sqlite_vec.sh,target=/deps/build_sqlite_vec.sh \
--mount=type=cache,target=/root/.ccache \
/deps/build_sqlite_vec.sh
FROM scratch AS go2rtc
ARG TARGETARCH
WORKDIR /rootfs/usr/local/go2rtc/bin
ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.13/go2rtc_linux_${TARGETARCH}" go2rtc
FROM wget AS tempio
ARG TARGETARCH
RUN --mount=type=bind,source=docker/main/install_tempio.sh,target=/deps/install_tempio.sh \
/deps/install_tempio.sh
####
#
# OpenVino Support
#
# 1. Download and convert a model from Intel's Public Open Model Zoo
#
####
# Download and Convert OpenVino model
FROM base_host AS ov-converter
ARG DEBIAN_FRONTEND
# Install OpenVino Runtime and Dev library
COPY docker/main/requirements-ov.txt /requirements-ov.txt
RUN apt-get -qq update \
&& apt-get -qq install -y wget python3 python3-dev python3-distutils gcc pkg-config libhdf5-dev \
&& wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
&& python3 get-pip.py "pip" \
&& pip3 install -r /requirements-ov.txt
# Get OpenVino Model
RUN --mount=type=bind,source=docker/main/build_ov_model.py,target=/build_ov_model.py \
mkdir /models && cd /models \
&& wget http://download.tensorflow.org/models/object_detection/ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz \
&& tar -xvf ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz \
&& python3 /build_ov_model.py
####
#
# Coral Compatibility
#
# Builds libusb without udev. Needed for synology and other devices with USB coral
####
# libUSB - No Udev
# Stage keywords uppercased (AS) and ENV in key=value form — the
# space-separated ENV syntax is deprecated.
FROM wget AS libusb-build
ARG TARGETARCH
ARG DEBIAN_FRONTEND
ENV CCACHE_DIR=/root/.ccache
ENV CCACHE_MAXSIZE=2G
# Build libUSB without udev. Needed for Openvino NCS2 support
WORKDIR /opt
RUN apt-get update && apt-get install -y unzip build-essential automake libtool ccache pkg-config
RUN --mount=type=cache,target=/root/.ccache wget -q https://github.com/libusb/libusb/archive/v1.0.26.zip -O v1.0.26.zip && \
unzip v1.0.26.zip && cd libusb-1.0.26 && \
./bootstrap.sh && \
./configure CC='ccache gcc' CCX='ccache g++' --disable-udev --enable-shared && \
make -j $(nproc --all)
RUN apt-get update && \
apt-get install -y --no-install-recommends libusb-1.0-0-dev && \
rm -rf /var/lib/apt/lists/*
WORKDIR /opt/libusb-1.0.26/libusb
RUN /bin/mkdir -p '/usr/local/lib' && \
/bin/bash ../libtool --mode=install /usr/bin/install -c libusb-1.0.la '/usr/local/lib' && \
/bin/mkdir -p '/usr/local/include/libusb-1.0' && \
/usr/bin/install -c -m 644 libusb.h '/usr/local/include/libusb-1.0' && \
/bin/mkdir -p '/usr/local/lib/pkgconfig' && \
cd /opt/libusb-1.0.26/ && \
/usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \
ldconfig
FROM wget AS models
# Get model and labels
RUN wget -qO edgetpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite
RUN wget -qO cpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite
COPY labelmap.txt .
# Copy OpenVino model
COPY --from=ov-converter /models/ssdlite_mobilenet_v2.xml openvino-model/
COPY --from=ov-converter /models/ssdlite_mobilenet_v2.bin openvino-model/
RUN wget -q https://github.com/openvinotoolkit/open_model_zoo/raw/master/data/dataset_classes/coco_91cl_bkgr.txt -O openvino-model/coco_91cl_bkgr.txt && \
sed -i 's/truck/car/g' openvino-model/coco_91cl_bkgr.txt
# Get Audio Model and labels
RUN wget -qO - https://www.kaggle.com/api/v1/models/google/yamnet/tfLite/classification-tflite/1/download | tar xvz && mv 1.tflite cpu_audio_model.tflite
COPY audio-labelmap.txt .
FROM wget AS s6-overlay
ARG TARGETARCH
RUN --mount=type=bind,source=docker/main/install_s6_overlay.sh,target=/deps/install_s6_overlay.sh \
/deps/install_s6_overlay.sh
FROM base AS wheels
ARG DEBIAN_FRONTEND
ARG TARGETARCH
ARG DEBUG=false
# Use a separate container to build wheels to prevent build dependencies in final image
RUN apt-get -qq update \
&& apt-get -qq install -y \
apt-transport-https wget unzip \
&& apt-get -qq update \
&& apt-get -qq install -y \
python3.11 \
python3.11-dev \
# opencv dependencies
build-essential cmake git pkg-config libgtk-3-dev \
libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \
libxvidcore-dev libx264-dev libjpeg-dev libpng-dev libtiff-dev \
gfortran openexr libatlas-base-dev libssl-dev\
libtbbmalloc2 libtbb-dev libdc1394-dev libopenexr-dev \
libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \
# sqlite3 dependencies
tclsh \
# scipy dependencies
gcc gfortran libopenblas-dev liblapack-dev && \
rm -rf /var/lib/apt/lists/*
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1
RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
&& python3 get-pip.py "pip"
COPY docker/main/requirements.txt /requirements.txt
COPY docker/main/requirements-dev.txt /requirements-dev.txt
RUN pip3 install -r /requirements.txt
# Build pysqlite3 from source
COPY docker/main/build_pysqlite3.sh /build_pysqlite3.sh
RUN /build_pysqlite3.sh
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt && \
if [ "$DEBUG" = "true" ]; then \
pip3 wheel --wheel-dir=/wheels -r /requirements-dev.txt; \
fi
# Install HailoRT & Wheels
RUN --mount=type=bind,source=docker/main/install_hailort.sh,target=/deps/install_hailort.sh \
/deps/install_hailort.sh
# Collect deps in a single layer
FROM scratch AS deps-rootfs
COPY --from=nginx /usr/local/nginx/ /usr/local/nginx/
COPY --from=sqlite-vec /usr/local/lib/ /usr/local/lib/
COPY --from=go2rtc /rootfs/ /
COPY --from=libusb-build /usr/local/lib /usr/local/lib
COPY --from=tempio /rootfs/ /
COPY --from=s6-overlay /rootfs/ /
COPY --from=models /rootfs/ /
COPY --from=wheels /rootfs/ /
COPY docker/main/rootfs/ /
# Frigate deps (ffmpeg, python, nginx, go2rtc, s6-overlay, etc)
FROM slim-base AS deps
ARG TARGETARCH
ARG BASE_IMAGE
ARG DEBIAN_FRONTEND
# http://stackoverflow.com/questions/48162574/ddg#49462622
ARG APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn
# https://github.com/NVIDIA/nvidia-docker/wiki/Installation-(Native-GPU-Support)
ENV NVIDIA_VISIBLE_DEVICES=all
ENV NVIDIA_DRIVER_CAPABILITIES="compute,video,utility"
# Disable tokenizer parallelism warning
# https://stackoverflow.com/questions/62691279/how-to-disable-tokenizers-parallelism-true-false-warning/72926996#72926996
ENV TOKENIZERS_PARALLELISM=true
# https://github.com/huggingface/transformers/issues/27214
ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1
# Set OpenCV ffmpeg loglevel to fatal: https://ffmpeg.org/doxygen/trunk/log_8h.html
ENV OPENCV_FFMPEG_LOGLEVEL=8
# Set NumPy to ignore getlimits warning
ENV PYTHONWARNINGS="ignore:::numpy.core.getlimits"
# Set HailoRT to disable logging
ENV HAILORT_LOGGER_PATH=NONE
# TensorFlow C++ logging suppression (must be set before import)
# TF_CPP_MIN_LOG_LEVEL: 0=all, 1=INFO+, 2=WARNING+, 3=ERROR+ (we use 3 for errors only)
ENV TF_CPP_MIN_LOG_LEVEL=3
# Suppress verbose logging from TensorFlow C++ code
ENV TF_CPP_MIN_VLOG_LEVEL=3
# Disable oneDNN optimization messages ("optimized with oneDNN...")
ENV TF_ENABLE_ONEDNN_OPTS=0
# Suppress AutoGraph verbosity during conversion
ENV AUTOGRAPH_VERBOSITY=0
# Google Logging (GLOG) suppression for TensorFlow components
ENV GLOG_minloglevel=3
ENV GLOG_logtostderr=0
ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
# Install dependencies
RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \
/deps/install_deps.sh
ENV DEFAULT_FFMPEG_VERSION="7.0"
ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:5.0"
RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
&& python3 get-pip.py "pip"
RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
pip3 install -U /deps/wheels/*.whl
# Install Axera Engine
RUN pip3 install https://github.com/AXERA-TECH/pyaxengine/releases/download/0.1.3-frigate/axengine-0.1.3-py3-none-any.whl
ENV PATH="${PATH}:/usr/bin/axcl"
ENV LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/usr/lib/axcl"
# Install MemryX runtime (requires libgomp (OpenMP) in the final docker image)
RUN --mount=type=bind,source=docker/main/install_memryx.sh,target=/deps/install_memryx.sh \
bash -c "bash /deps/install_memryx.sh"
COPY --from=deps-rootfs / /
RUN ldconfig
EXPOSE 5000
EXPOSE 8554
EXPOSE 8555/tcp 8555/udp
# Configure logging to prepend timestamps, log to stdout, keep 0 archives and rotate on 10MB
ENV S6_LOGGING_SCRIPT="T 1 n0 s10000000 T"
# Do not fail on long-running download scripts
ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
ENTRYPOINT ["/init"]
CMD []
HEALTHCHECK --start-period=300s --start-interval=5s --interval=15s --timeout=5s --retries=3 \
CMD test -f /dev/shm/.frigate-is-stopping && exit 0; curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1
# Frigate deps with Node.js and NPM for devcontainer
FROM deps AS devcontainer
# Do not start the actual Frigate service on devcontainer as it will be started by VS Code
# But start a fake service for simulating the logs
COPY docker/main/fake_frigate_run /etc/s6-overlay/s6-rc.d/frigate/run
# Create symbolic link to the frigate source code, as go2rtc's create_config.sh uses it
RUN mkdir -p /opt/frigate \
&& ln -svf /workspace/frigate/frigate /opt/frigate/frigate
# Install Node 20
RUN curl -SLO https://deb.nodesource.com/nsolid_setup_deb.sh && \
chmod 500 nsolid_setup_deb.sh && \
./nsolid_setup_deb.sh 20 && \
apt-get install nodejs -y \
&& rm -rf /var/lib/apt/lists/* \
&& npm install -g npm@10
WORKDIR /workspace/frigate
RUN apt-get update \
&& apt-get install make -y \
&& rm -rf /var/lib/apt/lists/*
RUN --mount=type=bind,source=./docker/main/requirements-dev.txt,target=/workspace/frigate/requirements-dev.txt \
pip3 install -r requirements-dev.txt
HEALTHCHECK NONE
CMD ["sleep", "infinity"]
# Frigate web build
# This should be architecture agnostic, so speed up the build on multiarch by not using QEMU.
FROM --platform=$BUILDPLATFORM node:20 AS web-build
WORKDIR /work
# Lockfiles are copied first so the dependency layer stays cached until they change
COPY web/package.json web/package-lock.json ./
# npm ci: clean, reproducible install strictly from package-lock.json
# (npm install may silently rewrite the lockfile)
RUN npm ci
COPY web/ ./
RUN npm run build \
    && mv dist/BASE_PATH/monacoeditorwork/* dist/assets/ \
    && rm -rf dist/BASE_PATH
# Collect final files in a single layer
FROM scratch AS rootfs
WORKDIR /opt/frigate/
COPY frigate frigate/
COPY migrations migrations/
COPY --from=web-build /work/dist/ web/
# Frigate final container
FROM deps AS frigate
WORKDIR /opt/frigate/
COPY --from=rootfs / /
# syntax=docker/dockerfile:1.6
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
# Globally set pip break-system-packages option to avoid having to specify it every time
ARG PIP_BREAK_SYSTEM_PACKAGES=1
ARG BASE_IMAGE=debian:12
ARG SLIM_BASE=debian:12-slim
# A hook that allows us to inject commands right after the base images
ARG BASE_HOOK=
FROM ${BASE_IMAGE} AS base
# ARGs declared before FROM are only visible in FROM lines; redeclare per stage.
ARG PIP_BREAK_SYSTEM_PACKAGES
ARG BASE_HOOK
# Run the injected hook (if any). Carriage returns are stripped first so a
# hook authored on Windows (CRLF) still executes cleanly under sh.
RUN if [ -n "$BASE_HOOK" ]; then \
printf '%s\n' "$BASE_HOOK" | tr -d '\r' >/tmp/base_hook.sh && sh /tmp/base_hook.sh && rm -f /tmp/base_hook.sh; \
fi
# Host-arch stage (build platform) used for work that need not run under QEMU.
FROM --platform=${BUILDPLATFORM} debian:12 AS base_host
ARG PIP_BREAK_SYSTEM_PACKAGES
FROM ${SLIM_BASE} AS slim-base
ARG PIP_BREAK_SYSTEM_PACKAGES
ARG BASE_HOOK
# Same CR-safe hook execution as the full base image above.
RUN if [ -n "$BASE_HOOK" ]; then \
printf '%s\n' "$BASE_HOOK" | tr -d '\r' >/tmp/base_hook.sh && sh /tmp/base_hook.sh && rm -f /tmp/base_hook.sh; \
fi
FROM slim-base AS wget
ARG DEBIAN_FRONTEND
RUN apt-get update \
&& apt-get install -y wget xz-utils \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /rootfs
FROM base AS nginx
ARG DEBIAN_FRONTEND
# key=value form — the legacy space-separated ENV syntax is deprecated
ENV CCACHE_DIR=/root/.ccache
ENV CCACHE_MAXSIZE=2G
# Build nginx from a bind-mounted, CR-stripped copy of the build script so
# neither the script nor a CRLF shebang problem ends up in the image.
RUN --mount=type=bind,source=docker/main/build_nginx.sh,target=/deps/build_nginx.sh \
    tr -d '\r' </deps/build_nginx.sh >/tmp/build_nginx.sh \
    && bash /tmp/build_nginx.sh
FROM wget AS sqlite-vec
ARG DEBIAN_FRONTEND
# Build sqlite_vec from source. The build script is bind-mounted into the RUN
# below, so the previous COPY of the same file to the same path was redundant
# (the bind mount shadowed it). tmpfs mounts keep /tmp and the apt cache out
# of the layer; the CR-strip makes the script safe to run if checked out CRLF.
RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
    --mount=type=bind,source=docker/main/build_sqlite_vec.sh,target=/deps/build_sqlite_vec.sh \
    --mount=type=cache,target=/root/.ccache \
    tr -d '\r' </deps/build_sqlite_vec.sh >/tmp/build_sqlite_vec.sh \
    && bash /tmp/build_sqlite_vec.sh
FROM scratch AS go2rtc
ARG TARGETARCH
WORKDIR /rootfs/usr/local/go2rtc/bin
ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.13/go2rtc_linux_${TARGETARCH}" go2rtc
FROM wget AS tempio
ARG TARGETARCH
RUN --mount=type=bind,source=docker/main/install_tempio.sh,target=/deps/install_tempio.sh \
tr -d '\r' </deps/install_tempio.sh >/tmp/install_tempio.sh \
&& bash /tmp/install_tempio.sh
####
#
# OpenVino Support
#
# 1. Download and convert a model from Intel's Public Open Model Zoo
#
####
# Download and Convert OpenVino model
FROM base_host AS ov-converter
ARG DEBIAN_FRONTEND
# Install OpenVino Runtime and Dev library
COPY docker/main/requirements-ov.txt /requirements-ov.txt
# The sed pins setuptools==77.0.3 inside get-pip.py — presumably to dodge a
# breaking later setuptools release; confirm before unpinning.
RUN apt-get -qq update \
    && apt-get -qq install -y wget python3 python3-dev python3-distutils gcc pkg-config libhdf5-dev \
    && wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
    && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
    && python3 get-pip.py "pip" \
    && pip3 install -r /requirements-ov.txt
# Get OpenVino Model. Fetch over HTTPS — the original plain-http URL allowed
# the model archive to be tampered with in transit.
RUN --mount=type=bind,source=docker/main/build_ov_model.py,target=/build_ov_model.py \
    mkdir /models && cd /models \
    && wget https://download.tensorflow.org/models/object_detection/ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz \
    && tar -xvf ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz \
    && python3 /build_ov_model.py
####
#
# Coral Compatibility
#
# Builds libusb without udev. Needed for synology and other devices with USB coral
####
# libUSB - No Udev
# Uppercase AS, key=value ENV (space-separated form is deprecated)
FROM wget AS libusb-build
ARG TARGETARCH
ARG DEBIAN_FRONTEND
ENV CCACHE_DIR=/root/.ccache
ENV CCACHE_MAXSIZE=2G
# Build libUSB without udev. Needed for Openvino NCS2 support
WORKDIR /opt
RUN apt-get update \
    && apt-get install -y --no-install-recommends unzip build-essential automake libtool ccache pkg-config \
    && rm -rf /var/lib/apt/lists/*
# CXX (not the original's CCX typo) is the autoconf variable for the C++
# compiler; libusb is C-only so the typo was silently ignored.
RUN --mount=type=cache,target=/root/.ccache wget -q https://github.com/libusb/libusb/archive/v1.0.26.zip -O v1.0.26.zip && \
    unzip v1.0.26.zip && cd libusb-1.0.26 && \
    ./bootstrap.sh && \
    ./configure CC='ccache gcc' CXX='ccache g++' --disable-udev --enable-shared && \
    make -j $(nproc --all)
RUN apt-get update && \
    apt-get install -y --no-install-recommends libusb-1.0-0-dev && \
    rm -rf /var/lib/apt/lists/*
WORKDIR /opt/libusb-1.0.26/libusb
# Manually install the freshly-built library, header, and pkg-config file
# into /usr/local — roughly the steps `make install` would run, done by hand
# (presumably to control exactly what lands in the stage; confirm).
RUN /bin/mkdir -p '/usr/local/lib' && \
/bin/bash ../libtool --mode=install /usr/bin/install -c libusb-1.0.la '/usr/local/lib' && \
/bin/mkdir -p '/usr/local/include/libusb-1.0' && \
/usr/bin/install -c -m 644 libusb.h '/usr/local/include/libusb-1.0' && \
/bin/mkdir -p '/usr/local/lib/pkgconfig' && \
cd /opt/libusb-1.0.26/ && \
/usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \
ldconfig
FROM wget AS models
# Get model and labels (EdgeTPU-compiled and plain-CPU variants of the same detector)
RUN wget -qO edgetpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite
RUN wget -qO cpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite
COPY labelmap.txt .
# Copy OpenVino model
COPY --from=ov-converter /models/ssdlite_mobilenet_v2.xml openvino-model/
COPY --from=ov-converter /models/ssdlite_mobilenet_v2.bin openvino-model/
# The sed rewrites every "truck" label to "car" — presumably to merge the two
# classes for detection purposes; confirm against Frigate's label handling.
RUN wget -q https://github.com/openvinotoolkit/open_model_zoo/raw/master/data/dataset_classes/coco_91cl_bkgr.txt -O openvino-model/coco_91cl_bkgr.txt && \
sed -i 's/truck/car/g' openvino-model/coco_91cl_bkgr.txt
# Get Audio Model and labels (YAMNet; archive contains a single 1.tflite)
RUN wget -qO - https://www.kaggle.com/api/v1/models/google/yamnet/tfLite/classification-tflite/1/download | tar xvz && mv 1.tflite cpu_audio_model.tflite
COPY audio-labelmap.txt .
FROM wget AS s6-overlay
ARG TARGETARCH
RUN --mount=type=bind,source=docker/main/install_s6_overlay.sh,target=/deps/install_s6_overlay.sh \
tr -d '\r' </deps/install_s6_overlay.sh >/tmp/install_s6_overlay.sh \
&& bash /tmp/install_s6_overlay.sh
FROM base AS wheels
ARG DEBIAN_FRONTEND
ARG TARGETARCH
ARG DEBUG=false
# Use a separate container to build wheels to prevent build dependencies in final image.
# Note: the original had `libssl-dev\` with no space before the continuation —
# it only worked because backslash-newline joins with the next line's leading
# whitespace; one reflow away from fusing two package names. Fixed here.
RUN apt-get -qq update \
    && apt-get -qq install -y \
    apt-transport-https wget unzip \
    && apt-get -qq update \
    && apt-get -qq install -y \
    python3.11 \
    python3.11-dev \
    # opencv dependencies
    build-essential cmake git pkg-config libgtk-3-dev \
    libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \
    libxvidcore-dev libx264-dev libjpeg-dev libpng-dev libtiff-dev \
    gfortran openexr libatlas-base-dev libssl-dev \
    libtbbmalloc2 libtbb-dev libdc1394-dev libopenexr-dev \
    libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \
    # sqlite3 dependencies
    tclsh \
    # scipy dependencies
    gcc gfortran libopenblas-dev liblapack-dev && \
    rm -rf /var/lib/apt/lists/*
# Make python3.11 the default python3 for the remainder of this stage
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1
# Bootstrap pip; the sed pins setuptools==77.0.3 inside get-pip.py —
# presumably to avoid a breaking later setuptools release (confirm before unpinning)
RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
&& python3 get-pip.py "pip"
COPY docker/main/requirements.txt /requirements.txt
COPY docker/main/requirements-dev.txt /requirements-dev.txt
RUN pip3 install -r /requirements.txt
# Build pysqlite3 from source (CR-stripped copy run via bash for CRLF safety)
COPY docker/main/build_pysqlite3.sh /build_pysqlite3.sh
RUN tr -d '\r' </build_pysqlite3.sh >/tmp/build_pysqlite3.sh \
&& bash /tmp/build_pysqlite3.sh
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
# Build all runtime wheels; dev wheels only when DEBUG=true is passed at build time
RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt && \
if [ "$DEBUG" = "true" ]; then \
pip3 wheel --wheel-dir=/wheels -r /requirements-dev.txt; \
fi
# Install HailoRT & Wheels
RUN --mount=type=bind,source=docker/main/install_hailort.sh,target=/deps/install_hailort.sh \
tr -d '\r' </deps/install_hailort.sh >/tmp/install_hailort.sh \
&& bash /tmp/install_hailort.sh
# Collect deps in a single layer
# Aggregate every build stage's output into one scratch stage so the final
# image picks it all up with a single COPY (one layer).
FROM scratch AS deps-rootfs
COPY --from=nginx /usr/local/nginx/ /usr/local/nginx/
COPY --from=sqlite-vec /usr/local/lib/ /usr/local/lib/
COPY --from=go2rtc /rootfs/ /
COPY --from=libusb-build /usr/local/lib /usr/local/lib
COPY --from=tempio /rootfs/ /
COPY --from=s6-overlay /rootfs/ /
COPY --from=models /rootfs/ /
COPY --from=wheels /rootfs/ /
COPY docker/main/rootfs/ /
# Frigate deps (ffmpeg, python, nginx, go2rtc, s6-overlay, etc)
FROM slim-base AS deps
ARG TARGETARCH
ARG BASE_IMAGE
ARG DEBIAN_FRONTEND
# http://stackoverflow.com/questions/48162574/ddg#49462622
ARG APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn
# https://github.com/NVIDIA/nvidia-docker/wiki/Installation-(Native-GPU-Support)
ENV NVIDIA_VISIBLE_DEVICES=all
ENV NVIDIA_DRIVER_CAPABILITIES="compute,video,utility"
# Disable tokenizer parallelism warning
# https://stackoverflow.com/questions/62691279/how-to-disable-tokenizers-parallelism-true-false-warning/72926996#72926996
ENV TOKENIZERS_PARALLELISM=true
# https://github.com/huggingface/transformers/issues/27214
ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1
# Set OpenCV ffmpeg loglevel to fatal: https://ffmpeg.org/doxygen/trunk/log_8h.html
ENV OPENCV_FFMPEG_LOGLEVEL=8
# Set NumPy to ignore getlimits warning
ENV PYTHONWARNINGS="ignore:::numpy.core.getlimits"
# Set HailoRT to disable logging
ENV HAILORT_LOGGER_PATH=NONE
# TensorFlow C++ logging suppression (must be set before import)
# TF_CPP_MIN_LOG_LEVEL: 0=all, 1=INFO+, 2=WARNING+, 3=ERROR+ (we use 3 for errors only)
ENV TF_CPP_MIN_LOG_LEVEL=3
# Suppress verbose logging from TensorFlow C++ code
ENV TF_CPP_MIN_VLOG_LEVEL=3
# Disable oneDNN optimization messages ("optimized with oneDNN...")
ENV TF_ENABLE_ONEDNN_OPTS=0
# Suppress AutoGraph verbosity during conversion
ENV AUTOGRAPH_VERBOSITY=0
# Google Logging (GLOG) suppression for TensorFlow components
ENV GLOG_minloglevel=3
ENV GLOG_logtostderr=0
ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
# Install dependencies
RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \
tr -d '\r' </deps/install_deps.sh >/tmp/install_deps.sh \
&& bash /tmp/install_deps.sh
ENV DEFAULT_FFMPEG_VERSION="7.0"
ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:5.0"
RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
&& python3 get-pip.py "pip"
# Install the prebuilt Frigate wheels. --no-cache-dir keeps pip's download
# cache out of this final-image layer (hadolint DL3042).
RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
    pip3 install --no-cache-dir -U /deps/wheels/*.whl
# Install Axera Engine
RUN pip3 install --no-cache-dir https://github.com/AXERA-TECH/pyaxengine/releases/download/0.1.3-frigate/axengine-0.1.3-py3-none-any.whl
# Axera AXCL tooling and runtime library locations
ENV PATH="${PATH}:/usr/bin/axcl"
ENV LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/usr/lib/axcl"
# Install MemryX runtime (requires libgomp (OpenMP) in the final docker image)
RUN --mount=type=bind,source=docker/main/install_memryx.sh,target=/deps/install_memryx.sh \
tr -d '\r' </deps/install_memryx.sh >/tmp/install_memryx.sh \
&& bash /tmp/install_memryx.sh
COPY --from=deps-rootfs / /
# Strip Windows line endings from every s6 service file and make run/up
# scripts executable — CRLF in a shebang line would otherwise break execution
# (presumably the reason these fixups were added; confirm).
RUN find /etc/s6-overlay/s6-rc.d -type f -exec sed -i 's/\r$//' {} +
RUN find /etc/s6-overlay/s6-rc.d -type f \
\( -name run -o -name up \) \
-exec chmod +x {} +
# Refresh the linker cache so the libraries copied in above are resolvable
RUN ldconfig
EXPOSE 5000
EXPOSE 5010
EXPOSE 8554
EXPOSE 8555/tcp 8555/udp
# Configure logging to prepend timestamps, log to stdout, keep 0 archives and rotate on 10MB
ENV S6_LOGGING_SCRIPT="T 1 n0 s10000000 T"
# Do not fail on long-running download scripts
ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
ENTRYPOINT ["/init"]
CMD []
HEALTHCHECK --start-period=300s --start-interval=5s --interval=15s --timeout=5s --retries=3 \
CMD test -f /dev/shm/.frigate-is-stopping && exit 0; curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1
# Frigate deps with Node.js and NPM for devcontainer
FROM deps AS devcontainer
# Do not start the actual Frigate service on devcontainer as it will be started by VS Code
# But start a fake service for simulating the logs
COPY docker/main/fake_frigate_run /etc/s6-overlay/s6-rc.d/frigate/run
# Create symbolic link to the frigate source code, as go2rtc's create_config.sh uses it
RUN mkdir -p /opt/frigate \
&& ln -svf /workspace/frigate/frigate /opt/frigate/frigate
# Install Node 20
# NOTE(review): the installer script is fetched unpinned and without curl -f
# or a checksum, so an HTML error page could be executed as a script — consider
# curl -fSLO plus a checksum verification.
RUN curl -SLO https://deb.nodesource.com/nsolid_setup_deb.sh && \
chmod 500 nsolid_setup_deb.sh && \
./nsolid_setup_deb.sh 20 && \
apt-get install nodejs -y \
&& rm -rf /var/lib/apt/lists/* \
&& npm install -g npm@10
WORKDIR /workspace/frigate
RUN apt-get update \
&& apt-get install make -y \
&& rm -rf /var/lib/apt/lists/*
# Dev requirements are bind-mounted so they never persist in a layer
RUN --mount=type=bind,source=./docker/main/requirements-dev.txt,target=/workspace/frigate/requirements-dev.txt \
pip3 install -r requirements-dev.txt
# Devcontainer is driven by VS Code, so no healthcheck and just idle
HEALTHCHECK NONE
CMD ["sleep", "infinity"]
# Frigate web build
# This should be architecture agnostic, so speed up the build on multiarch by not using QEMU.
FROM --platform=$BUILDPLATFORM node:20 AS web-build
WORKDIR /work
# Lockfiles are copied first so the dependency layer stays cached until they change
COPY web/package.json web/package-lock.json ./
# npm ci: clean, reproducible install strictly from package-lock.json
# (npm install may silently rewrite the lockfile)
RUN npm ci
COPY web/ ./
RUN npm run build \
    && mv dist/BASE_PATH/monacoeditorwork/* dist/assets/ \
    && rm -rf dist/BASE_PATH
# Collect final files in a single layer
FROM scratch AS rootfs
WORKDIR /opt/frigate/
COPY frigate frigate/
COPY migrations migrations/
COPY transcode_proxy transcode_proxy/
COPY --from=web-build /work/dist/ web/
# Frigate final container
FROM deps AS frigate
WORKDIR /opt/frigate/
COPY --from=rootfs / /
# transcode_proxy's deps are installed into the final image; --no-cache-dir
# keeps pip's cache out of the layer
RUN pip3 install --no-cache-dir -r /opt/frigate/transcode_proxy/requirements.txt

View File

@ -1,33 +1,56 @@
#!/command/with-contenv bash
# shellcheck shell=bash
# Start the Frigate service
set -o errexit -o nounset -o pipefail
# opt out of openvino telemetry
if [ -e /usr/local/bin/opt_in_out ]; then
/usr/local/bin/opt_in_out --opt_out > /dev/null 2>&1
fi
# Logs should be sent to stdout so that s6 can collect them
# Tell S6-Overlay not to restart this service
s6-svc -O .
function set_libva_version() {
local ffmpeg_path
ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
LIBAVFORMAT_VERSION_MAJOR=$("$ffmpeg_path" -version | grep -Po "libavformat\W+\K\d+")
export LIBAVFORMAT_VERSION_MAJOR
}
echo "[INFO] Preparing Frigate..."
set_libva_version
echo "[INFO] Starting Frigate..."
cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate"
# Replace the bash process with the Frigate process, redirecting stderr to stdout
exec 2>&1
exec python3 -u -m frigate
#!/command/with-contenv bash
# shellcheck shell=bash
# Start the Frigate service
set -o errexit -o nounset -o pipefail
# opt out of openvino telemetry
if [ -e /usr/local/bin/opt_in_out ]; then
/usr/local/bin/opt_in_out --opt_out > /dev/null 2>&1
fi
# Logs should be sent to stdout so that s6 can collect them
# Tell S6-Overlay not to restart this service
s6-svc -O .
# Resolve the bundled ffmpeg's libavformat major version and export it for
# the Frigate process started below.
function set_libva_version() {
local ffmpeg_path
ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
LIBAVFORMAT_VERSION_MAJOR=$("$ffmpeg_path" -version | grep -Po "libavformat\W+\K\d+")
export LIBAVFORMAT_VERSION_MAJOR
}
# Launch the transcode proxy in a backgrounded subshell once the upstream
# API answers /api/version.
# NOTE(review): a dedicated s6 longrun service elsewhere in this image appears
# to start the same uvicorn app on the same default port (5010); if both are
# enabled the second bind will fail — confirm only one mechanism is active.
function start_transcode_proxy() {
(
export TRANSCODE_PROXY_UPSTREAM="${TRANSCODE_PROXY_UPSTREAM:-http://127.0.0.1:5000}"
export PYTHONPATH="/opt/frigate:${PYTHONPATH:-}"
if [[ -z "${TRANSCODE_PROXY_FFMPEG:-}" ]]; then
TRANSCODE_PROXY_FFMPEG=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
export TRANSCODE_PROXY_FFMPEG
fi
# Busy-wait (1s interval) until the upstream API is reachable
until curl -sf -o /dev/null "${TRANSCODE_PROXY_UPSTREAM}/api/version"; do
sleep 1
done
echo "[INFO] Starting transcode proxy..."
exec python3 -m uvicorn transcode_proxy.main:app \
--host "${TRANSCODE_PROXY_HOST:-0.0.0.0}" \
--port "${TRANSCODE_PROXY_PORT:-5010}"
) &
}
echo "[INFO] Preparing Frigate..."
set_libva_version
start_transcode_proxy
echo "[INFO] Starting Frigate..."
cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate"
# Replace the bash process with the Frigate process, redirecting stderr to stdout
exec 2>&1
exec python3 -u -m frigate

View File

@ -1,11 +1,11 @@
#!/command/with-contenv bash
# shellcheck shell=bash
# Prepare the logs folder for s6-log
set -o errexit -o nounset -o pipefail
dirs=(/dev/shm/logs/frigate /dev/shm/logs/go2rtc /dev/shm/logs/nginx /dev/shm/logs/certsync)
mkdir -p "${dirs[@]}"
chown nobody:nogroup "${dirs[@]}"
chmod 02755 "${dirs[@]}"
#!/command/with-contenv bash
# shellcheck shell=bash
# Create the per-service log directories consumed by s6-log, owned by the
# unprivileged log user and group-writable with the setgid bit (02755).
set -o errexit -o nounset -o pipefail
log_root=/dev/shm/logs
for svc in frigate go2rtc nginx certsync transcode-proxy; do
    dir="${log_root}/${svc}"
    mkdir -p "$dir"
    chown nobody:nogroup "$dir"
    chmod 02755 "$dir"
done

View File

@ -0,0 +1 @@
transcode-proxy

View File

@ -0,0 +1 @@
transcode-proxy-pipeline

View File

@ -0,0 +1,4 @@
#!/command/with-contenv bash
# shellcheck shell=bash
# s6 log consumer: feed the transcode-proxy service's output into its log dir.
exec logutil-service /dev/shm/logs/transcode-proxy

View File

@ -0,0 +1 @@
longrun

View File

@ -0,0 +1 @@
transcode-proxy-log

View File

@ -0,0 +1,32 @@
#!/command/with-contenv bash
# shellcheck shell=bash
# Start the transcode proxy (in-process with Frigate container)
set -o errexit -o nounset -o pipefail
# Logs should be sent to stdout so that s6 can collect them
echo "[INFO] Starting transcode proxy..."
# Default upstream to nginx internal port when not set
export TRANSCODE_PROXY_UPSTREAM="${TRANSCODE_PROXY_UPSTREAM:-http://127.0.0.1:5000}"
# Use Frigate's FFmpeg when not set.
# Assign before exporting (SC2155): `export VAR="$(cmd)"` masks the command's
# exit status, so a failing get_ffmpeg_path.py would slip past errexit.
if [ -z "${TRANSCODE_PROXY_FFMPEG:-}" ]; then
    TRANSCODE_PROXY_FFMPEG="$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)"
    export TRANSCODE_PROXY_FFMPEG
fi
# Wait for nginx/API to be ready so proxy can reach upstream
until curl -sf -o /dev/null "${TRANSCODE_PROXY_UPSTREAM}/api/version"; do
    echo "[INFO] Waiting for upstream ${TRANSCODE_PROXY_UPSTREAM}..."
    sleep 1
done
echo "[INFO] Upstream ready, starting transcode proxy on port ${TRANSCODE_PROXY_PORT:-5010}"
export PYTHONPATH="/opt/frigate:${PYTHONPATH:-}"
# Replace the shell so uvicorn becomes the supervised process (signals work)
exec 2>&1
exec python3 -m uvicorn transcode_proxy.main:app \
    --host "${TRANSCODE_PROXY_HOST:-0.0.0.0}" \
    --port "${TRANSCODE_PROXY_PORT:-5010}"

View File

@ -0,0 +1 @@
longrun

View File

@ -1,365 +1,375 @@
daemon off;
user root;
worker_processes auto;
error_log /dev/stdout warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
map_hash_bucket_size 256;
include mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for" '
'request_time="$request_time" upstream_response_time="$upstream_response_time"';
access_log /dev/stdout main;
# send headers in one piece, it is better than sending them one by one
tcp_nopush on;
sendfile on;
keepalive_timeout 65;
gzip on;
gzip_comp_level 6;
gzip_types text/plain text/css application/json application/x-javascript application/javascript text/javascript image/svg+xml image/x-icon image/bmp;
gzip_proxied no-cache no-store private expired auth;
gzip_vary on;
proxy_cache_path /dev/shm/nginx_cache levels=1:2 keys_zone=api_cache:10m max_size=10m inactive=1m use_temp_path=off;
map $sent_http_content_type $should_not_cache {
'application/json' 0;
default 1;
}
upstream frigate_api {
server 127.0.0.1:5001;
keepalive 1024;
}
upstream mqtt_ws {
server 127.0.0.1:5002;
keepalive 1024;
}
upstream jsmpeg {
server 127.0.0.1:8082;
keepalive 1024;
}
include go2rtc_upstream.conf;
server {
include listen.conf;
# enable HTTP/2 for TLS connections to eliminate browser 6-connection limit
http2 on;
# vod settings
vod_base_url '';
vod_segments_base_url '';
vod_mode mapped;
vod_max_mapping_response_size 1m;
vod_upstream_location /api;
vod_align_segments_to_key_frames on;
vod_manifest_segment_durations_mode accurate;
vod_ignore_edit_list on;
vod_segment_duration 10000;
# MPEG-TS settings (not used when fMP4 is enabled, kept for reference)
vod_hls_mpegts_align_frames off;
vod_hls_mpegts_interleave_frames on;
# file handle caching / aio
open_file_cache max=1000 inactive=5m;
open_file_cache_valid 2m;
open_file_cache_min_uses 1;
open_file_cache_errors on;
aio on;
# file upload size
client_max_body_size 20M;
# https://github.com/kaltura/nginx-vod-module#vod_open_file_thread_pool
vod_open_file_thread_pool default;
# vod caches
vod_metadata_cache metadata_cache 512m;
vod_mapping_cache mapping_cache 5m 10m;
# gzip manifests
gzip on;
gzip_types application/vnd.apple.mpegurl;
include auth_location.conf;
include base_path.conf;
location /vod/ {
include auth_request.conf;
aio threads;
vod hls;
# Use fMP4 (fragmented MP4) instead of MPEG-TS for better performance
# Smaller segments, faster generation, better browser compatibility
vod_hls_container_format fmp4;
secure_token $args;
secure_token_types application/vnd.apple.mpegurl;
add_header Cache-Control "no-store";
expires off;
keepalive_disable safari;
# vod module returns 502 for non-existent media
# https://github.com/kaltura/nginx-vod-module/issues/468
error_page 502 =404 /vod-not-found;
}
location = /vod-not-found {
return 404;
}
location /stream/ {
include auth_request.conf;
add_header Cache-Control "no-store";
expires off;
types {
application/dash+xml mpd;
application/vnd.apple.mpegurl m3u8;
video/mp2t ts;
image/jpeg jpg;
}
root /tmp;
}
location /clips/ {
include auth_request.conf;
types {
video/mp4 mp4;
image/jpeg jpg;
}
expires 7d;
add_header Cache-Control "public";
autoindex on;
root /media/frigate;
}
location /cache/ {
internal; # This tells nginx it's not accessible from the outside
alias /tmp/cache/;
}
location /recordings/ {
include auth_request.conf;
types {
video/mp4 mp4;
}
autoindex on;
autoindex_format json;
root /media/frigate;
}
location /exports/ {
include auth_request.conf;
types {
video/mp4 mp4;
}
autoindex on;
autoindex_format json;
root /media/frigate;
}
location /ws {
include auth_request.conf;
proxy_pass http://mqtt_ws/;
include proxy.conf;
}
location /live/jsmpeg/ {
include auth_request.conf;
proxy_pass http://jsmpeg/;
include proxy.conf;
}
# frigate lovelace card uses this path
location /live/mse/api/ws {
include auth_request.conf;
limit_except GET {
deny all;
}
proxy_pass http://go2rtc/api/ws;
include proxy.conf;
}
location /live/webrtc/api/ws {
include auth_request.conf;
limit_except GET {
deny all;
}
proxy_pass http://go2rtc/api/ws;
include proxy.conf;
}
# pass through go2rtc player
location /live/webrtc/webrtc.html {
include auth_request.conf;
limit_except GET {
deny all;
}
proxy_pass http://go2rtc/webrtc.html;
include proxy.conf;
}
# frontend uses this to fetch the version
location /api/go2rtc/api {
include auth_request.conf;
limit_except GET {
deny all;
}
proxy_pass http://go2rtc/api;
include proxy.conf;
}
# integration uses this to add webrtc candidate
location /api/go2rtc/webrtc {
include auth_request.conf;
limit_except POST {
deny all;
}
proxy_pass http://go2rtc/api/webrtc;
include proxy.conf;
}
location ~* /api/.*\.(jpg|jpeg|png|webp|gif)$ {
include auth_request.conf;
rewrite ^/api/(.*)$ /$1 break;
proxy_pass http://frigate_api;
include proxy.conf;
}
location /api/ {
include auth_request.conf;
add_header Cache-Control "no-store";
expires off;
proxy_pass http://frigate_api/;
include proxy.conf;
proxy_cache api_cache;
proxy_cache_lock on;
proxy_cache_use_stale updating;
proxy_cache_valid 200 5s;
proxy_cache_bypass $http_x_cache_bypass;
proxy_no_cache $should_not_cache;
add_header X-Cache-Status $upstream_cache_status;
location /api/vod/ {
include auth_request.conf;
proxy_pass http://frigate_api/vod/;
include proxy.conf;
proxy_cache off;
}
location /api/login {
auth_request off;
rewrite ^/api(/.*)$ $1 break;
proxy_pass http://frigate_api;
include proxy.conf;
}
# Allow unauthenticated access to the first_time_login endpoint
# so the login page can load help text before authentication.
location /api/auth/first_time_login {
auth_request off;
limit_except GET {
deny all;
}
rewrite ^/api(/.*)$ $1 break;
proxy_pass http://frigate_api;
include proxy.conf;
}
location /api/stats {
include auth_request.conf;
access_log off;
rewrite ^/api(/.*)$ $1 break;
proxy_pass http://frigate_api;
include proxy.conf;
}
location /api/version {
include auth_request.conf;
access_log off;
rewrite ^/api(/.*)$ $1 break;
proxy_pass http://frigate_api;
include proxy.conf;
}
}
location / {
# do not require auth for static assets
add_header Cache-Control "no-store";
expires off;
location /assets/ {
access_log off;
expires 1y;
add_header Cache-Control "public";
}
location /fonts/ {
access_log off;
expires 1y;
add_header Cache-Control "public";
}
location /locales/ {
access_log off;
add_header Cache-Control "public";
}
location ~ ^/.*-([A-Za-z0-9]+)\.webmanifest$ {
access_log off;
expires 1y;
add_header Cache-Control "public";
default_type application/json;
proxy_set_header Accept-Encoding "";
sub_filter_once off;
sub_filter_types application/json;
sub_filter '"start_url": "/BASE_PATH/"' '"start_url" : "$http_x_ingress_path/"';
sub_filter '"src": "/BASE_PATH/' '"src": "$http_x_ingress_path/';
}
sub_filter 'href="/BASE_PATH/' 'href="$http_x_ingress_path/';
sub_filter 'url(/BASE_PATH/' 'url($http_x_ingress_path/';
sub_filter '"/BASE_PATH/dist/' '"$http_x_ingress_path/dist/';
sub_filter '"/BASE_PATH/js/' '"$http_x_ingress_path/js/';
sub_filter '"/BASE_PATH/assets/' '"$http_x_ingress_path/assets/';
sub_filter '"/BASE_PATH/locales/' '"$http_x_ingress_path/locales/';
sub_filter '"/BASE_PATH/monacoeditorwork/' '"$http_x_ingress_path/assets/';
sub_filter 'return"/BASE_PATH/"' 'return window.baseUrl';
sub_filter '<body>' '<body><script>window.baseUrl="$http_x_ingress_path/";</script>';
sub_filter_types text/css application/javascript;
sub_filter_once off;
root /opt/frigate/web;
try_files $uri $uri.html $uri/ /index.html;
}
}
}
daemon off;
user root;
worker_processes auto;
error_log /dev/stdout warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
map_hash_bucket_size 256;
include mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for" '
'request_time="$request_time" upstream_response_time="$upstream_response_time"';
access_log /dev/stdout main;
# send headers in one piece, it is better than sending them one by one
tcp_nopush on;
sendfile on;
keepalive_timeout 65;
gzip on;
gzip_comp_level 6;
gzip_types text/plain text/css application/json application/x-javascript application/javascript text/javascript image/svg+xml image/x-icon image/bmp;
gzip_proxied no-cache no-store private expired auth;
gzip_vary on;
proxy_cache_path /dev/shm/nginx_cache levels=1:2 keys_zone=api_cache:10m max_size=10m inactive=1m use_temp_path=off;
map $sent_http_content_type $should_not_cache {
'application/json' 0;
default 1;
}
upstream frigate_api {
server 127.0.0.1:5001;
keepalive 1024;
}
upstream mqtt_ws {
server 127.0.0.1:5002;
keepalive 1024;
}
upstream jsmpeg {
server 127.0.0.1:8082;
keepalive 1024;
}
include go2rtc_upstream.conf;
server {
include listen.conf;
# enable HTTP/2 for TLS connections to eliminate browser 6-connection limit
http2 on;
# vod settings
vod_base_url '';
vod_segments_base_url '';
vod_mode mapped;
vod_max_mapping_response_size 1m;
vod_upstream_location /api;
vod_align_segments_to_key_frames on;
vod_manifest_segment_durations_mode accurate;
vod_ignore_edit_list on;
vod_segment_duration 10000;
# MPEG-TS settings (not used when fMP4 is enabled, kept for reference)
vod_hls_mpegts_align_frames off;
vod_hls_mpegts_interleave_frames on;
# file handle caching / aio
open_file_cache max=1000 inactive=5m;
open_file_cache_valid 2m;
open_file_cache_min_uses 1;
open_file_cache_errors on;
aio on;
# file upload size
client_max_body_size 20M;
# https://github.com/kaltura/nginx-vod-module#vod_open_file_thread_pool
vod_open_file_thread_pool default;
# vod caches
vod_metadata_cache metadata_cache 512m;
vod_mapping_cache mapping_cache 5m 10m;
# gzip manifests
gzip on;
gzip_types application/vnd.apple.mpegurl;
include auth_location.conf;
include base_path.conf;
# Redirect the bare path to the trailing-slash form so relative URLs served
# by the transcode proxy resolve under /vod-transcoded/.
location = /vod-transcoded {
return 302 /vod-transcoded/;
}
# Authenticated pass-through to the in-container transcode proxy
# (uvicorn, default 127.0.0.1:5010 — must match TRANSCODE_PROXY_PORT).
location /vod-transcoded/ {
include auth_request.conf;
proxy_pass http://127.0.0.1:5010;
include proxy.conf;
}
location /vod/ {
include auth_request.conf;
aio threads;
vod hls;
# Use fMP4 (fragmented MP4) instead of MPEG-TS for better performance
# Smaller segments, faster generation, better browser compatibility
vod_hls_container_format fmp4;
secure_token $args;
secure_token_types application/vnd.apple.mpegurl;
add_header Cache-Control "no-store";
expires off;
keepalive_disable safari;
# vod module returns 502 for non-existent media
# https://github.com/kaltura/nginx-vod-module/issues/468
error_page 502 =404 /vod-not-found;
}
location = /vod-not-found {
return 404;
}
location /stream/ {
include auth_request.conf;
add_header Cache-Control "no-store";
expires off;
types {
application/dash+xml mpd;
application/vnd.apple.mpegurl m3u8;
video/mp2t ts;
image/jpeg jpg;
}
root /tmp;
}
location /clips/ {
include auth_request.conf;
types {
video/mp4 mp4;
image/jpeg jpg;
}
expires 7d;
add_header Cache-Control "public";
autoindex on;
root /media/frigate;
}
location /cache/ {
internal; # This tells nginx it's not accessible from the outside
alias /tmp/cache/;
}
location /recordings/ {
include auth_request.conf;
types {
video/mp4 mp4;
}
autoindex on;
autoindex_format json;
root /media/frigate;
}
location /exports/ {
include auth_request.conf;
types {
video/mp4 mp4;
}
autoindex on;
autoindex_format json;
root /media/frigate;
}
location /ws {
include auth_request.conf;
proxy_pass http://mqtt_ws/;
include proxy.conf;
}
location /live/jsmpeg/ {
include auth_request.conf;
proxy_pass http://jsmpeg/;
include proxy.conf;
}
# frigate lovelace card uses this path
location /live/mse/api/ws {
include auth_request.conf;
limit_except GET {
deny all;
}
proxy_pass http://go2rtc/api/ws;
include proxy.conf;
}
location /live/webrtc/api/ws {
include auth_request.conf;
limit_except GET {
deny all;
}
proxy_pass http://go2rtc/api/ws;
include proxy.conf;
}
# pass through go2rtc player
location /live/webrtc/webrtc.html {
include auth_request.conf;
limit_except GET {
deny all;
}
proxy_pass http://go2rtc/webrtc.html;
include proxy.conf;
}
# frontend uses this to fetch the version
location /api/go2rtc/api {
include auth_request.conf;
limit_except GET {
deny all;
}
proxy_pass http://go2rtc/api;
include proxy.conf;
}
# integration uses this to add webrtc candidate
location /api/go2rtc/webrtc {
include auth_request.conf;
limit_except POST {
deny all;
}
proxy_pass http://go2rtc/api/webrtc;
include proxy.conf;
}
location ~* /api/.*\.(jpg|jpeg|png|webp|gif)$ {
include auth_request.conf;
rewrite ^/api/(.*)$ /$1 break;
proxy_pass http://frigate_api;
include proxy.conf;
}
location /api/ {
include auth_request.conf;
add_header Cache-Control "no-store";
expires off;
proxy_pass http://frigate_api/;
include proxy.conf;
proxy_cache api_cache;
proxy_cache_lock on;
proxy_cache_use_stale updating;
proxy_cache_valid 200 5s;
proxy_cache_bypass $http_x_cache_bypass;
proxy_no_cache $should_not_cache;
add_header X-Cache-Status $upstream_cache_status;
location /api/vod/ {
include auth_request.conf;
proxy_pass http://frigate_api/vod/;
include proxy.conf;
proxy_cache off;
}
location /api/login {
auth_request off;
rewrite ^/api(/.*)$ $1 break;
proxy_pass http://frigate_api;
include proxy.conf;
}
# Allow unauthenticated access to the first_time_login endpoint
# so the login page can load help text before authentication.
location /api/auth/first_time_login {
auth_request off;
limit_except GET {
deny all;
}
rewrite ^/api(/.*)$ $1 break;
proxy_pass http://frigate_api;
include proxy.conf;
}
location /api/stats {
include auth_request.conf;
access_log off;
rewrite ^/api(/.*)$ $1 break;
proxy_pass http://frigate_api;
include proxy.conf;
}
location /api/version {
include auth_request.conf;
access_log off;
rewrite ^/api(/.*)$ $1 break;
proxy_pass http://frigate_api;
include proxy.conf;
}
}
location / {
# do not require auth for static assets
add_header Cache-Control "no-store";
expires off;
location /assets/ {
access_log off;
expires 1y;
add_header Cache-Control "public";
}
location /fonts/ {
access_log off;
expires 1y;
add_header Cache-Control "public";
}
location /locales/ {
access_log off;
add_header Cache-Control "public";
}
location ~ ^/.*-([A-Za-z0-9]+)\.webmanifest$ {
access_log off;
expires 1y;
add_header Cache-Control "public";
default_type application/json;
proxy_set_header Accept-Encoding "";
sub_filter_once off;
sub_filter_types application/json;
sub_filter '"start_url": "/BASE_PATH/"' '"start_url" : "$http_x_ingress_path/"';
sub_filter '"src": "/BASE_PATH/' '"src": "$http_x_ingress_path/';
}
sub_filter 'href="/BASE_PATH/' 'href="$http_x_ingress_path/';
sub_filter 'url(/BASE_PATH/' 'url($http_x_ingress_path/';
sub_filter '"/BASE_PATH/dist/' '"$http_x_ingress_path/dist/';
sub_filter '"/BASE_PATH/js/' '"$http_x_ingress_path/js/';
sub_filter '"/BASE_PATH/assets/' '"$http_x_ingress_path/assets/';
sub_filter '"/BASE_PATH/locales/' '"$http_x_ingress_path/locales/';
sub_filter '"/BASE_PATH/monacoeditorwork/' '"$http_x_ingress_path/assets/';
sub_filter 'return"/BASE_PATH/"' 'return window.baseUrl';
sub_filter '<body>' '<body><script>window.baseUrl="$http_x_ingress_path/";</script>';
sub_filter_types text/css application/javascript;
sub_filter_once off;
root /opt/frigate/web;
try_files $uri $uri.html $uri/ /index.html;
}
}
}

View File

@ -1,37 +1,38 @@
# syntax=docker/dockerfile:1.4
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
# Globally set pip break-system-packages option to avoid having to specify it every time
ARG PIP_BREAK_SYSTEM_PACKAGES=1
FROM wheels AS trt-wheels
ARG PIP_BREAK_SYSTEM_PACKAGES
# Install TensorRT wheels
COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
# remove dependencies from the requirements that have type constraints
RUN sed -i '/\[.*\]/d' /requirements-wheels.txt \
&& pip3 wheel --wheel-dir=/trt-wheels -c /requirements-wheels.txt -r /requirements-tensorrt.txt
FROM deps AS frigate-tensorrt
ARG PIP_BREAK_SYSTEM_PACKAGES
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
pip3 uninstall -y onnxruntime \
&& pip3 install -U /deps/trt-wheels/*.whl
COPY --from=rootfs / /
COPY docker/tensorrt/detector/rootfs/etc/ld.so.conf.d /etc/ld.so.conf.d
RUN ldconfig
WORKDIR /opt/frigate/
# Dev Container w/ TRT
FROM devcontainer AS devcontainer-trt
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
pip3 install -U /deps/trt-wheels/*.whl
# syntax=docker/dockerfile:1.4
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
# Globally set pip break-system-packages option to avoid having to specify it every time
ARG PIP_BREAK_SYSTEM_PACKAGES=1
# Build stage: produce TensorRT Python wheels, constrained by the main
# requirements so versions stay consistent with the base image.
FROM wheels AS trt-wheels
ARG PIP_BREAK_SYSTEM_PACKAGES
# Install TensorRT wheels
COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
# remove dependencies from the requirements that have type constraints
RUN sed -i '/\[.*\]/d' /requirements-wheels.txt \
&& pip3 wheel --wheel-dir=/trt-wheels -c /requirements-wheels.txt -r /requirements-tensorrt.txt
# Runtime stage: Frigate with TensorRT support. onnxruntime is replaced by
# the TensorRT-enabled wheels built above.
FROM deps AS frigate-tensorrt
ARG PIP_BREAK_SYSTEM_PACKAGES
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
pip3 uninstall -y onnxruntime \
&& pip3 install -U /deps/trt-wheels/*.whl
COPY --from=rootfs / /
# Transcode proxy runtime deps; assumes the requirements file is placed at
# /opt/frigate/transcode_proxy by the rootfs stage — TODO confirm.
RUN pip3 install --no-cache-dir -r /opt/frigate/transcode_proxy/requirements.txt
COPY docker/tensorrt/detector/rootfs/etc/ld.so.conf.d /etc/ld.so.conf.d
# Refresh the linker cache so the TensorRT libraries listed above are found.
RUN ldconfig
WORKDIR /opt/frigate/
# Dev Container w/ TRT
FROM devcontainer AS devcontainer-trt
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
pip3 install -U /deps/trt-wheels/*.whl

View File

@ -1,105 +1,105 @@
variable "ARCH" {
default = "amd64"
}
variable "BASE_IMAGE" {
default = null
}
variable "SLIM_BASE" {
default = null
}
variable "TRT_BASE" {
default = null
}
variable "COMPUTE_LEVEL" {
default = ""
}
variable "BASE_HOOK" {
# Ensure an up-to-date python 3.11 is available in jetson images
default = <<EOT
if grep -iq \"ubuntu\" /etc/os-release; then
. /etc/os-release
# Add the deadsnakes PPA repository
echo "deb https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu $VERSION_CODENAME main" >> /etc/apt/sources.list.d/deadsnakes.list
echo "deb-src https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu $VERSION_CODENAME main" >> /etc/apt/sources.list.d/deadsnakes.list
# Add deadsnakes signing key
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F23C5A6CF475977595C89F51BA6932366A755776
fi
EOT
}
target "_build_args" {
args = {
BASE_IMAGE = BASE_IMAGE,
SLIM_BASE = SLIM_BASE,
TRT_BASE = TRT_BASE,
COMPUTE_LEVEL = COMPUTE_LEVEL,
BASE_HOOK = BASE_HOOK
}
platforms = ["linux/${ARCH}"]
}
target wget {
dockerfile = "docker/main/Dockerfile"
target = "wget"
inherits = ["_build_args"]
}
target deps {
dockerfile = "docker/main/Dockerfile"
target = "deps"
inherits = ["_build_args"]
}
target rootfs {
dockerfile = "docker/main/Dockerfile"
target = "rootfs"
inherits = ["_build_args"]
}
target wheels {
dockerfile = "docker/main/Dockerfile"
target = "wheels"
inherits = ["_build_args"]
}
target devcontainer {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/amd64"]
target = "devcontainer"
}
target "trt-deps" {
dockerfile = "docker/tensorrt/Dockerfile.base"
context = "."
contexts = {
deps = "target:deps",
}
inherits = ["_build_args"]
}
target "tensorrt" {
dockerfile = "docker/tensorrt/Dockerfile.${ARCH}"
context = "."
contexts = {
wget = "target:wget",
wheels = "target:wheels",
deps = "target:deps",
rootfs = "target:rootfs"
}
target = "frigate-tensorrt"
inherits = ["_build_args"]
}
target "devcontainer-trt" {
dockerfile = "docker/tensorrt/Dockerfile.amd64"
context = "."
contexts = {
wheels = "target:wheels",
trt-deps = "target:trt-deps",
devcontainer = "target:devcontainer"
}
platforms = ["linux/amd64"]
target = "devcontainer-trt"
}
variable "ARCH" {
default = "amd64"
}
variable "BASE_IMAGE" {
default = null
}
variable "SLIM_BASE" {
default = null
}
variable "TRT_BASE" {
default = null
}
variable "COMPUTE_LEVEL" {
default = ""
}
variable "BASE_HOOK" {
# Ensure an up-to-date python 3.11 is available in jetson images
default = <<EOT
if grep -iq "ubuntu" /etc/os-release; then
. /etc/os-release
# Add the deadsnakes PPA repository
echo "deb https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu $VERSION_CODENAME main" >> /etc/apt/sources.list.d/deadsnakes.list
echo "deb-src https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu $VERSION_CODENAME main" >> /etc/apt/sources.list.d/deadsnakes.list
# Add deadsnakes signing key
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F23C5A6CF475977595C89F51BA6932366A755776
fi
EOT
}
target "_build_args" {
args = {
BASE_IMAGE = BASE_IMAGE,
SLIM_BASE = SLIM_BASE,
TRT_BASE = TRT_BASE,
COMPUTE_LEVEL = COMPUTE_LEVEL,
BASE_HOOK = BASE_HOOK
}
platforms = ["linux/${ARCH}"]
}
target wget {
dockerfile = "docker/main/Dockerfile"
target = "wget"
inherits = ["_build_args"]
}
target deps {
dockerfile = "docker/main/Dockerfile"
target = "deps"
inherits = ["_build_args"]
}
target rootfs {
dockerfile = "docker/main/Dockerfile"
target = "rootfs"
inherits = ["_build_args"]
}
target wheels {
dockerfile = "docker/main/Dockerfile"
target = "wheels"
inherits = ["_build_args"]
}
target devcontainer {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/amd64"]
target = "devcontainer"
}
target "trt-deps" {
dockerfile = "docker/tensorrt/Dockerfile.base"
context = "."
contexts = {
deps = "target:deps",
}
inherits = ["_build_args"]
}
target "tensorrt" {
dockerfile = "docker/tensorrt/Dockerfile.${ARCH}"
context = "."
contexts = {
wget = "target:wget",
wheels = "target:wheels",
deps = "target:deps",
rootfs = "target:rootfs"
}
target = "frigate-tensorrt"
inherits = ["_build_args"]
}
target "devcontainer-trt" {
dockerfile = "docker/tensorrt/Dockerfile.amd64"
context = "."
contexts = {
wheels = "target:wheels",
trt-deps = "target:trt-deps",
devcontainer = "target:devcontainer"
}
platforms = ["linux/amd64"]
target = "devcontainer-trt"
}

File diff suppressed because it is too large Load Diff

View File

@ -1,458 +1,468 @@
"""Recording APIs."""
import datetime as dt
import logging
from datetime import datetime, timedelta
from functools import reduce
from pathlib import Path
from typing import List
from urllib.parse import unquote
from fastapi import APIRouter, Depends, Request
from fastapi import Path as PathParam
from fastapi.responses import JSONResponse
from peewee import fn, operator
from frigate.api.auth import (
allow_any_authenticated,
get_allowed_cameras_for_filter,
require_camera_access,
require_role,
)
from frigate.api.defs.query.recordings_query_parameters import (
MediaRecordingsAvailabilityQueryParams,
MediaRecordingsSummaryQueryParams,
RecordingsDeleteQueryParams,
)
from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags
from frigate.const import RECORD_DIR
from frigate.models import Event, Recordings
from frigate.util.time import get_dst_transitions
logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.recordings])
@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
def get_recordings_storage_usage(request: Request):
    """Report per-camera recording storage usage, adding a percent-of-total field."""
    storage_stats = request.app.stats_emitter.get_latest_stats()["service"]["storage"][
        RECORD_DIR
    ]
    if not storage_stats:
        return JSONResponse({})

    total_mb = storage_stats["total"]
    camera_usages: dict[str, dict] = (
        request.app.storage_maintainer.calculate_camera_usages()
    )
    for info in camera_usages.values():
        used_mb = info.get("usage")
        if used_mb:
            # Percentage of the total recordings volume used by this camera.
            info["usage_percent"] = used_mb / total_mb * 100
    return JSONResponse(content=camera_usages)
@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())])
def all_recordings_summary(
    request: Request,
    params: MediaRecordingsSummaryQueryParams = Depends(),
    allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
    """Returns true/false by day indicating if recordings exist

    Days are keyed by ISO date (YYYY-MM-DD) in the requested timezone, and
    day boundaries are adjusted per DST period so dates stay correct across
    offset changes.
    """
    cameras = params.cameras
    if cameras != "all":
        # Restrict the requested (comma-separated, URL-encoded) camera list
        # to cameras this user is allowed to access.
        requested = set(unquote(cameras).split(","))
        filtered = requested.intersection(allowed_cameras)
        if not filtered:
            return JSONResponse(content={})
        camera_list = list(filtered)
    else:
        camera_list = allowed_cameras
    # Overall time range of recordings for the cameras in scope.
    time_range_query = (
        Recordings.select(
            fn.MIN(Recordings.start_time).alias("min_time"),
            fn.MAX(Recordings.start_time).alias("max_time"),
        )
        .where(Recordings.camera << camera_list)
        .dicts()
        .get()
    )
    min_time = time_range_query.get("min_time")
    max_time = time_range_query.get("max_time")
    if min_time is None or max_time is None:
        # No recordings at all for these cameras.
        return JSONResponse(content={})
    # Split the range into periods with a constant UTC offset so that the
    # day-index arithmetic below is valid within each period.
    dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
    days: dict[str, bool] = {}
    for period_start, period_end, period_offset in dst_periods:
        # Integer day index since the Unix epoch, shifted by this period's offset.
        day_expr = ((Recordings.start_time + period_offset) / 86400).cast("int")
        period_query = (
            Recordings.select(day_expr.alias("day_idx"))
            .where(
                (Recordings.camera << camera_list)
                & (Recordings.end_time >= period_start)
                & (Recordings.start_time <= period_end)
            )
            .distinct()
            .namedtuples()
        )
        for g in period_query:
            # Convert the day index back to an ISO date string.
            day_str = (dt.date(1970, 1, 1) + dt.timedelta(days=g.day_idx)).isoformat()
            days[day_str] = True
    return JSONResponse(content=dict(sorted(days.items())))
@router.get(
    "/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)]
)
async def recordings_summary(camera_name: str, timezone: str = "utc"):
    """Returns hourly summary for recordings of given camera

    Each day entry carries the day's event count and a list of per-hour
    aggregates (duration, motion, objects, events), with hours bucketed in
    the requested timezone.
    """
    # Overall time range of recordings for this camera.
    time_range_query = (
        Recordings.select(
            fn.MIN(Recordings.start_time).alias("min_time"),
            fn.MAX(Recordings.start_time).alias("max_time"),
        )
        .where(Recordings.camera == camera_name)
        .dicts()
        .get()
    )
    min_time = time_range_query.get("min_time")
    max_time = time_range_query.get("max_time")
    days: dict[str, dict] = {}
    if min_time is None or max_time is None:
        # No recordings at all for this camera.
        return JSONResponse(content=list(days.values()))
    # Split the range into periods with a constant UTC offset so hour
    # bucketing stays correct across DST transitions.
    dst_periods = get_dst_transitions(timezone, min_time, max_time)
    for period_start, period_end, period_offset in dst_periods:
        # Express this period's offset as SQLite datetime() modifiers.
        hours_offset = int(period_offset / 60 / 60)
        minutes_offset = int(period_offset / 60 - hours_offset * 60)
        period_hour_modifier = f"{hours_offset} hour"
        period_minute_modifier = f"{minutes_offset} minute"
        # Per-hour aggregates of recording duration/motion/objects.
        recording_groups = (
            Recordings.select(
                fn.strftime(
                    "%Y-%m-%d %H",
                    fn.datetime(
                        Recordings.start_time,
                        "unixepoch",
                        period_hour_modifier,
                        period_minute_modifier,
                    ),
                ).alias("hour"),
                fn.SUM(Recordings.duration).alias("duration"),
                fn.SUM(Recordings.motion).alias("motion"),
                fn.SUM(Recordings.objects).alias("objects"),
            )
            .where(
                (Recordings.camera == camera_name)
                & (Recordings.end_time >= period_start)
                & (Recordings.start_time <= period_end)
            )
            .group_by((Recordings.start_time + period_offset).cast("int") / 3600)
            .order_by(Recordings.start_time.desc())
            .namedtuples()
        )
        # Per-hour counts of events that have clips, keyed by the same
        # "%Y-%m-%d %H" string as the recording groups.
        event_groups = (
            Event.select(
                fn.strftime(
                    "%Y-%m-%d %H",
                    fn.datetime(
                        Event.start_time,
                        "unixepoch",
                        period_hour_modifier,
                        period_minute_modifier,
                    ),
                ).alias("hour"),
                fn.COUNT(Event.id).alias("count"),
            )
            .where(Event.camera == camera_name, Event.has_clip)
            .where(
                (Event.start_time >= period_start) & (Event.start_time <= period_end)
            )
            .group_by((Event.start_time + period_offset).cast("int") / 3600)
            .namedtuples()
        )
        event_map = {g.hour: g.count for g in event_groups}
        for recording_group in recording_groups:
            # "YYYY-MM-DD HH" -> day and hour components.
            parts = recording_group.hour.split()
            hour = parts[1]
            day = parts[0]
            events_count = event_map.get(recording_group.hour, 0)
            hour_data = {
                "hour": hour,
                "events": events_count,
                "motion": recording_group.motion,
                "objects": recording_group.objects,
                "duration": round(recording_group.duration),
            }
            if day in days:
                # merge counts if already present (edge-case at DST boundary)
                days[day]["events"] += events_count or 0
                days[day]["hours"].append(hour_data)
            else:
                days[day] = {
                    "events": events_count or 0,
                    "hours": [hour_data],
                    "day": day,
                }
    return JSONResponse(content=list(days.values()))
@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)])
async def recordings(
    camera_name: str,
    after: float | None = None,
    before: float | None = None,
):
    """Return specific camera recordings between the given 'after'/'before' times.

    If not provided the last hour will be used. Defaults are computed inside
    the handler: the previous signature-level defaults were evaluated once at
    import time, freezing "now" at process start.
    """
    if before is None:
        before = datetime.now().timestamp()
    if after is None:
        after = (datetime.now() - timedelta(hours=1)).timestamp()
    recordings = (
        Recordings.select(
            Recordings.id,
            Recordings.start_time,
            Recordings.end_time,
            Recordings.segment_size,
            Recordings.motion,
            Recordings.objects,
            Recordings.motion_heatmap,
            Recordings.duration,
        )
        .where(
            # Overlap test: segment ends after the window starts and starts
            # before the window ends.
            Recordings.camera == camera_name,
            Recordings.end_time >= after,
            Recordings.start_time <= before,
        )
        .order_by(Recordings.start_time)
        .dicts()
        .iterator()
    )
    return JSONResponse(content=list(recordings))
@router.get(
    "/recordings/unavailable",
    response_model=list[dict],
    dependencies=[Depends(allow_any_authenticated())],
)
async def no_recordings(
    request: Request,
    params: MediaRecordingsAvailabilityQueryParams = Depends(),
    allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
    """Get time ranges with no recordings.

    Walks [after, before] in `params.scale`-second steps and returns
    {"start_time", "end_time"} dicts for contiguous runs of steps where no
    recording overlaps. Defaults to the last hour when no range is given.
    """
    cameras = params.cameras
    if cameras != "all":
        requested = set(unquote(cameras).split(","))
        filtered = requested.intersection(allowed_cameras)
        if not filtered:
            return JSONResponse(content=[])
        camera_list = list(filtered)
    else:
        # Previously `cameras` was rebound to this list and later fed to
        # str.split(), raising AttributeError; also the camera filter was
        # never applied to the query in the "all" path.
        camera_list = allowed_cameras
    # NOTE: this module imports the datetime *class* (and `dt` as the module
    # alias); the old `datetime.datetime.now()` spelling raised
    # AttributeError whenever before/after were omitted.
    before = params.before or datetime.now().timestamp()
    after = params.after or (datetime.now() - timedelta(hours=1)).timestamp()
    scale = params.scale
    clauses = [
        (Recordings.end_time >= after) & (Recordings.start_time <= before),
        (Recordings.camera << camera_list),
    ]
    # Get recording start/end times for every overlapping segment.
    data: list[Recordings] = (
        Recordings.select(Recordings.start_time, Recordings.end_time)
        .where(reduce(operator.and_, clauses))
        .order_by(Recordings.start_time.asc())
        .dicts()
        .iterator()
    )
    # Convert recordings to list of (start, end) tuples
    recordings = [(r["start_time"], r["end_time"]) for r in data]
    # Iterate through time segments, coalescing consecutive empty segments
    # into a single gap.
    no_recording_segments = []
    current = after
    current_gap_start = None
    while current < before:
        segment_end = min(current + scale, before)
        # Overlap test against every recording in the window.
        has_recording = any(
            rec_start < segment_end and rec_end > current
            for rec_start, rec_end in recordings
        )
        if not has_recording:
            if current_gap_start is None:
                current_gap_start = current  # Start a new gap
        else:
            if current_gap_start is not None:
                # End the current gap and append it
                no_recording_segments.append(
                    {"start_time": int(current_gap_start), "end_time": int(current)}
                )
                current_gap_start = None
        current = segment_end
    # Append the last gap if it reaches the end of the window.
    if current_gap_start is not None:
        no_recording_segments.append(
            {"start_time": int(current_gap_start), "end_time": int(before)}
        )
    return JSONResponse(content=no_recording_segments)
@router.delete(
    "/recordings/start/{start}/end/{end}",
    response_model=GenericResponse,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Delete recordings",
    description="""Deletes recordings within the specified time range.
Recordings can be filtered by cameras and kept based on motion, objects, or audio attributes.
""",
)
async def delete_recordings(
    start: float = PathParam(..., description="Start timestamp (unix)"),
    end: float = PathParam(..., description="End timestamp (unix)"),
    params: RecordingsDeleteQueryParams = Depends(),
    allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
    """Delete recordings in the specified time range.

    Removes both the segment files on disk and their database rows. The
    optional ``keep`` query parameter (comma-separated: motion, object,
    audio) excludes segments having those attributes from deletion.
    Admin-only (see route dependencies).
    """
    if start >= end:
        return JSONResponse(
            content={
                "success": False,
                "message": "Start time must be less than end time.",
            },
            status_code=400,
        )

    cameras = params.cameras
    if cameras != "all":
        # Restrict the request to cameras the user is allowed to access.
        requested = set(cameras.split(","))
        filtered = requested.intersection(allowed_cameras)
        if not filtered:
            return JSONResponse(
                content={
                    "success": False,
                    "message": "No valid cameras found in the request.",
                },
                status_code=400,
            )
        camera_list = list(filtered)
    else:
        camera_list = allowed_cameras

    # Parse the comma-separated "keep" attribute list, if provided.
    keep_set = set()
    if params.keep:
        keep_set = set(params.keep.split(","))

    # Select recordings overlapping [start, end]: either endpoint falls in the
    # range, or the recording fully spans it.
    clauses = [
        (
            Recordings.start_time.between(start, end)
            | Recordings.end_time.between(start, end)
            | ((start > Recordings.start_time) & (end < Recordings.end_time))
        ),
        (Recordings.camera << camera_list),
    ]

    # Build exclusion clauses for segments the caller wants to keep.
    keep_clauses = []
    if "motion" in keep_set:
        keep_clauses.append(Recordings.motion.is_null(False) & (Recordings.motion > 0))
    if "object" in keep_set:
        keep_clauses.append(
            Recordings.objects.is_null(False) & (Recordings.objects > 0)
        )
    if "audio" in keep_set:
        keep_clauses.append(Recordings.dBFS.is_null(False))

    if keep_clauses:
        keep_condition = reduce(operator.or_, keep_clauses)
        clauses.append(~keep_condition)

    recordings_to_delete = (
        Recordings.select(Recordings.id, Recordings.path)
        .where(reduce(operator.and_, clauses))
        .dicts()
        .iterator()
    )

    recording_ids = []
    deleted_count = 0
    error_count = 0
    for recording in recordings_to_delete:
        recording_ids.append(recording["id"])
        try:
            # missing_ok=True: a file already gone still counts as deleted.
            Path(recording["path"]).unlink(missing_ok=True)
            deleted_count += 1
        except Exception as e:
            logger.error(f"Failed to delete recording file {recording['path']}: {e}")
            error_count += 1

    if recording_ids:
        # Chunk the DB delete so a huge IN-list stays within SQLite's
        # bound-variable limits. (recording_ids is already a list; the
        # previous redundant list() copy was removed.)
        max_deletes = 100000
        for i in range(0, len(recording_ids), max_deletes):
            Recordings.delete().where(
                Recordings.id << recording_ids[i : i + max_deletes]
            ).execute()

    message = f"Successfully deleted {deleted_count} recording(s)."
    if error_count > 0:
        message += f" {error_count} file deletion error(s) occurred."

    return JSONResponse(
        content={"success": True, "message": message},
        status_code=200,
    )
"""Recording APIs."""
import datetime as dt
import logging
from datetime import datetime, timedelta
from functools import reduce
from pathlib import Path
from typing import List, Optional
from urllib.parse import unquote

from fastapi import APIRouter, Depends, Request
from fastapi import Path as PathParam
from fastapi.responses import JSONResponse
from peewee import fn, operator
from frigate.api.auth import (
allow_any_authenticated,
get_allowed_cameras_for_filter,
require_camera_access,
require_role,
)
from frigate.api.defs.query.recordings_query_parameters import (
MediaRecordingsAvailabilityQueryParams,
MediaRecordingsSummaryQueryParams,
RecordingsDeleteQueryParams,
)
from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags
from frigate.const import RECORD_DIR
from frigate.models import Event, Recordings
from frigate.util.time import get_dst_transitions
# Module-level logger and the FastAPI router all recording endpoints attach to.
logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.recordings])
@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
def get_recordings_storage_usage(request: Request):
    """Return per-camera recordings storage usage.

    Reads the latest stats snapshot for the recordings directory and annotates
    each camera entry with "usage_percent" relative to total storage.
    Returns an empty object when no storage stats are available.
    """
    recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
        "storage"
    ][RECORD_DIR]

    if not recording_stats:
        return JSONResponse({})

    total_mb = recording_stats["total"]

    camera_usages: dict[str, dict] = (
        request.app.storage_maintainer.calculate_camera_usages()
    )

    for camera_name in camera_usages.keys():
        # Guard total_mb as well: a zero total would previously raise
        # ZeroDivisionError here.
        if total_mb and camera_usages.get(camera_name, {}).get("usage"):
            camera_usages[camera_name]["usage_percent"] = (
                camera_usages.get(camera_name, {}).get("usage", 0) / total_mb
            ) * 100

    return JSONResponse(content=camera_usages)
@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())])
def all_recordings_summary(
    request: Request,
    params: MediaRecordingsSummaryQueryParams = Depends(),
    allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
    """Returns true/false by day indicating if recordings exist"""
    cameras = params.cameras
    if cameras != "all":
        # Restrict the requested cameras to those the user may access.
        requested = set(unquote(cameras).split(","))
        filtered = requested.intersection(allowed_cameras)
        if not filtered:
            return JSONResponse(content={})
        camera_list = list(filtered)
    else:
        camera_list = allowed_cameras
    # Overall recording time range; used to enumerate DST periods below.
    time_range_query = (
        Recordings.select(
            fn.MIN(Recordings.start_time).alias("min_time"),
            fn.MAX(Recordings.start_time).alias("max_time"),
        )
        .where(Recordings.camera << camera_list)
        .dicts()
        .get()
    )
    min_time = time_range_query.get("min_time")
    max_time = time_range_query.get("max_time")
    if min_time is None or max_time is None:
        # No recordings at all for the selected cameras.
        return JSONResponse(content={})
    # Split the range into periods of constant UTC offset so day boundaries
    # use the correct offset on each side of a DST transition.
    dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
    days: dict[str, bool] = {}
    for period_start, period_end, period_offset in dst_periods:
        # Integer local-time day index (days since the Unix epoch).
        day_expr = ((Recordings.start_time + period_offset) / 86400).cast("int")
        period_query = (
            Recordings.select(day_expr.alias("day_idx"))
            .where(
                (Recordings.camera << camera_list)
                & (Recordings.end_time >= period_start)
                & (Recordings.start_time <= period_end)
            )
            .distinct()
            .namedtuples()
        )
        for g in period_query:
            # Convert the epoch-day index back to an ISO date string key.
            day_str = (dt.date(1970, 1, 1) + dt.timedelta(days=g.day_idx)).isoformat()
            days[day_str] = True
    return JSONResponse(content=dict(sorted(days.items())))
@router.get(
    "/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)]
)
async def recordings_summary(camera_name: str, timezone: str = "utc"):
    """Returns hourly summary for recordings of given camera"""
    # Overall recording time range for this camera; drives DST-period split.
    time_range_query = (
        Recordings.select(
            fn.MIN(Recordings.start_time).alias("min_time"),
            fn.MAX(Recordings.start_time).alias("max_time"),
        )
        .where(Recordings.camera == camera_name)
        .dicts()
        .get()
    )
    min_time = time_range_query.get("min_time")
    max_time = time_range_query.get("max_time")
    days: dict[str, dict] = {}
    if min_time is None or max_time is None:
        # No recordings exist for this camera.
        return JSONResponse(content=list(days.values()))
    # One query pass per constant-UTC-offset period so hour buckets stay
    # correct across DST transitions.
    dst_periods = get_dst_transitions(timezone, min_time, max_time)
    for period_start, period_end, period_offset in dst_periods:
        # Decompose the UTC offset (seconds) into SQLite datetime modifiers.
        hours_offset = int(period_offset / 60 / 60)
        minutes_offset = int(period_offset / 60 - hours_offset * 60)
        period_hour_modifier = f"{hours_offset} hour"
        period_minute_modifier = f"{minutes_offset} minute"
        # Aggregate recording duration/motion/objects per local-time hour.
        recording_groups = (
            Recordings.select(
                fn.strftime(
                    "%Y-%m-%d %H",
                    fn.datetime(
                        Recordings.start_time,
                        "unixepoch",
                        period_hour_modifier,
                        period_minute_modifier,
                    ),
                ).alias("hour"),
                fn.SUM(Recordings.duration).alias("duration"),
                fn.SUM(Recordings.motion).alias("motion"),
                fn.SUM(Recordings.objects).alias("objects"),
            )
            .where(
                (Recordings.camera == camera_name)
                & (Recordings.end_time >= period_start)
                & (Recordings.start_time <= period_end)
            )
            .group_by((Recordings.start_time + period_offset).cast("int") / 3600)
            .order_by(Recordings.start_time.desc())
            .namedtuples()
        )
        # Count events with clips per local-time hour for the same period.
        event_groups = (
            Event.select(
                fn.strftime(
                    "%Y-%m-%d %H",
                    fn.datetime(
                        Event.start_time,
                        "unixepoch",
                        period_hour_modifier,
                        period_minute_modifier,
                    ),
                ).alias("hour"),
                fn.COUNT(Event.id).alias("count"),
            )
            .where(Event.camera == camera_name, Event.has_clip)
            .where(
                (Event.start_time >= period_start) & (Event.start_time <= period_end)
            )
            .group_by((Event.start_time + period_offset).cast("int") / 3600)
            .namedtuples()
        )
        event_map = {g.hour: g.count for g in event_groups}
        for recording_group in recording_groups:
            # "hour" is formatted "YYYY-MM-DD HH"; split into day and hour.
            parts = recording_group.hour.split()
            hour = parts[1]
            day = parts[0]
            events_count = event_map.get(recording_group.hour, 0)
            hour_data = {
                "hour": hour,
                "events": events_count,
                "motion": recording_group.motion,
                "objects": recording_group.objects,
                "duration": round(recording_group.duration),
            }
            if day in days:
                # merge counts if already present (edge-case at DST boundary)
                days[day]["events"] += events_count or 0
                days[day]["hours"].append(hour_data)
            else:
                days[day] = {
                    "events": events_count or 0,
                    "hours": [hour_data],
                    "day": day,
                }
    return JSONResponse(content=list(days.values()))
@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)])
async def recordings(
    camera_name: str,
    after: Optional[float] = None,
    before: Optional[float] = None,
    variant: str = "main",
):
    """Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used"""
    # Compute the defaults at request time. The previous defaults
    # (datetime.now() expressions in the signature) were evaluated once at
    # import, so "the last hour" was frozen to process start time.
    if after is None:
        after = (datetime.now() - timedelta(hours=1)).timestamp()
    if before is None:
        before = datetime.now().timestamp()
    query = (
        Recordings.select(
            Recordings.id,
            Recordings.camera,
            Recordings.start_time,
            Recordings.end_time,
            Recordings.path,
            Recordings.variant,
            Recordings.segment_size,
            Recordings.motion,
            Recordings.objects,
            Recordings.motion_heatmap,
            Recordings.duration,
            Recordings.codec_name,
            Recordings.width,
            Recordings.height,
            Recordings.bitrate,
        )
        .where(
            Recordings.camera == camera_name,
            Recordings.end_time >= after,
            Recordings.start_time <= before,
        )
    )
    # "all" skips the variant filter; any other value selects that variant.
    if variant != "all":
        query = query.where(Recordings.variant == variant)
    recordings = query.order_by(Recordings.start_time).dicts().iterator()
    return JSONResponse(content=list(recordings))
@router.get(
    "/recordings/unavailable",
    response_model=list[dict],
    dependencies=[Depends(allow_any_authenticated())],
)
async def no_recordings(
    request: Request,
    params: MediaRecordingsAvailabilityQueryParams = Depends(),
    allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
    """Get time ranges with no recordings.

    Scans [after, before] in steps of `scale` seconds and returns the merged
    gaps where no recording segment overlaps the step, as a list of
    {"start_time", "end_time"} dicts (unix seconds).
    """
    cameras = params.cameras
    if cameras != "all":
        # Restrict the requested cameras to those the user may access.
        requested = set(unquote(cameras).split(","))
        filtered = requested.intersection(allowed_cameras)
        if not filtered:
            return JSONResponse(content=[])
        camera_list = list(filtered)
    else:
        # Fix: the old code stored the allowed-camera *list* back into
        # `cameras` and later called .split(",") on it (AttributeError), and
        # never applied a camera filter in the "all" case.
        camera_list = allowed_cameras

    # Fix: `datetime`/`timedelta` are the imported class/function names
    # (`from datetime import ...`); the old `datetime.datetime.now()` /
    # `datetime.timedelta(...)` raised AttributeError at request time.
    before = params.before or datetime.now().timestamp()
    after = params.after or (datetime.now() - timedelta(hours=1)).timestamp()
    scale = params.scale

    # Fetch all segments overlapping the window, ordered by start.
    clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)]
    clauses.append(Recordings.camera << camera_list)
    data: list[Recordings] = (
        Recordings.select(Recordings.start_time, Recordings.end_time)
        .where(reduce(operator.and_, clauses))
        .order_by(Recordings.start_time.asc())
        .dicts()
        .iterator()
    )

    # Convert recordings to list of (start, end) tuples
    recordings = [(r["start_time"], r["end_time"]) for r in data]

    # Walk the window in `scale`-second steps, merging consecutive
    # recording-free steps into a single gap.
    no_recording_segments = []
    current = after
    current_gap_start = None
    while current < before:
        segment_end = min(current + scale, before)
        # Check if this segment overlaps with any recording
        has_recording = any(
            rec_start < segment_end and rec_end > current
            for rec_start, rec_end in recordings
        )
        if not has_recording:
            # This segment has no recordings
            if current_gap_start is None:
                current_gap_start = current  # Start a new gap
        else:
            # This segment has recordings
            if current_gap_start is not None:
                # End the current gap and append it
                no_recording_segments.append(
                    {"start_time": int(current_gap_start), "end_time": int(current)}
                )
                current_gap_start = None
        current = segment_end

    # Append the last gap if it exists
    if current_gap_start is not None:
        no_recording_segments.append(
            {"start_time": int(current_gap_start), "end_time": int(before)}
        )

    return JSONResponse(content=no_recording_segments)
@router.delete(
    "/recordings/start/{start}/end/{end}",
    response_model=GenericResponse,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Delete recordings",
    description="""Deletes recordings within the specified time range.
Recordings can be filtered by cameras and kept based on motion, objects, or audio attributes.
""",
)
async def delete_recordings(
    start: float = PathParam(..., description="Start timestamp (unix)"),
    end: float = PathParam(..., description="End timestamp (unix)"),
    params: RecordingsDeleteQueryParams = Depends(),
    allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
    """Delete recordings in the specified time range.

    Removes both segment files on disk and their database rows. The optional
    ``keep`` parameter (comma-separated: motion, object, audio) excludes
    segments having those attributes from deletion. Admin-only.
    """
    if start >= end:
        return JSONResponse(
            content={
                "success": False,
                "message": "Start time must be less than end time.",
            },
            status_code=400,
        )
    cameras = params.cameras
    if cameras != "all":
        # Restrict the request to cameras the user is allowed to access.
        requested = set(cameras.split(","))
        filtered = requested.intersection(allowed_cameras)
        if not filtered:
            return JSONResponse(
                content={
                    "success": False,
                    "message": "No valid cameras found in the request.",
                },
                status_code=400,
            )
        camera_list = list(filtered)
    else:
        camera_list = allowed_cameras
    # Parse keep parameter
    keep_set = set()
    if params.keep:
        keep_set = set(params.keep.split(","))
    # Build query to find overlapping recordings: either endpoint falls in
    # [start, end], or the recording fully spans the range.
    clauses = [
        (
            Recordings.start_time.between(start, end)
            | Recordings.end_time.between(start, end)
            | ((start > Recordings.start_time) & (end < Recordings.end_time))
        ),
        (Recordings.camera << camera_list),
    ]
    # Exclusion clauses for segments the caller wants to keep.
    keep_clauses = []
    if "motion" in keep_set:
        keep_clauses.append(Recordings.motion.is_null(False) & (Recordings.motion > 0))
    if "object" in keep_set:
        keep_clauses.append(
            Recordings.objects.is_null(False) & (Recordings.objects > 0)
        )
    if "audio" in keep_set:
        keep_clauses.append(Recordings.dBFS.is_null(False))
    if keep_clauses:
        keep_condition = reduce(operator.or_, keep_clauses)
        clauses.append(~keep_condition)
    recordings_to_delete = (
        Recordings.select(Recordings.id, Recordings.path)
        .where(reduce(operator.and_, clauses))
        .dicts()
        .iterator()
    )
    recording_ids = []
    deleted_count = 0
    error_count = 0
    for recording in recordings_to_delete:
        recording_ids.append(recording["id"])
        try:
            # missing_ok=True: a file already gone still counts as deleted.
            Path(recording["path"]).unlink(missing_ok=True)
            deleted_count += 1
        except Exception as e:
            logger.error(f"Failed to delete recording file {recording['path']}: {e}")
            error_count += 1
    if recording_ids:
        # Chunk the DB delete so the IN-list stays within SQLite limits.
        max_deletes = 100000
        recording_ids_list = list(recording_ids)
        for i in range(0, len(recording_ids_list), max_deletes):
            Recordings.delete().where(
                Recordings.id << recording_ids_list[i : i + max_deletes]
            ).execute()
    message = f"Successfully deleted {deleted_count} recording(s)."
    if error_count > 0:
        message += f" {error_count} file deletion error(s) occurred."
    return JSONResponse(
        content={"success": True, "message": message},
        status_code=200,
    )

View File

@ -1,337 +1,346 @@
import os
from enum import Enum
from typing import Optional
from pydantic import Field, PrivateAttr, model_validator
from frigate.const import CACHE_DIR, CACHE_SEGMENT_FORMAT, REGEX_CAMERA_NAME
from frigate.ffmpeg_presets import (
parse_preset_hardware_acceleration_decode,
parse_preset_hardware_acceleration_scale,
parse_preset_input,
parse_preset_output_record,
)
from frigate.util.builtin import (
escape_special_characters,
generate_color_palette,
get_ffmpeg_arg_list,
)
from ..base import FrigateBaseModel
from ..classification import (
CameraAudioTranscriptionConfig,
CameraFaceRecognitionConfig,
CameraLicensePlateRecognitionConfig,
CameraSemanticSearchConfig,
)
from .audio import AudioConfig
from .birdseye import BirdseyeCameraConfig
from .detect import DetectConfig
from .ffmpeg import CameraFfmpegConfig, CameraInput
from .live import CameraLiveConfig
from .motion import MotionConfig
from .mqtt import CameraMqttConfig
from .notification import NotificationConfig
from .objects import ObjectConfig
from .onvif import OnvifConfig
from .record import RecordConfig
from .review import ReviewConfig
from .snapshots import SnapshotsConfig
from .timestamp import TimestampStyleConfig
from .ui import CameraUiConfig
from .zone import ZoneConfig
__all__ = ["CameraConfig"]
class CameraTypeEnum(str, Enum):
    # Camera type selector; "lpr" presumably enables license-plate-specific
    # behavior elsewhere — confirm against consumers of CameraConfig.type.
    generic = "generic"
    lpr = "lpr"
class CameraConfig(FrigateBaseModel):
    """Per-camera configuration model.

    Aggregates all per-camera settings (detection, recording, review, UI,
    etc.) and builds the ffmpeg command lines used to consume the camera's
    streams (see create_ffmpeg_cmds / _get_ffmpeg_cmd).
    """

    name: Optional[str] = Field(
        None,
        title="Camera name",
        description="Camera name is required",
        pattern=REGEX_CAMERA_NAME,
    )
    friendly_name: Optional[str] = Field(
        None,
        title="Friendly name",
        description="Camera friendly name used in the Frigate UI",
    )

    # NOTE(review): this validator is currently a no-op — it inspects
    # "friendly_name" and returns values unchanged.
    @model_validator(mode="before")
    @classmethod
    def handle_friendly_name(cls, values):
        if isinstance(values, dict) and "friendly_name" in values:
            pass
        return values

    enabled: bool = Field(default=True, title="Enabled", description="Enabled")
    # Options with global fallback
    audio: AudioConfig = Field(
        default_factory=AudioConfig,
        title="Audio events",
        description="Settings for audio-based event detection for this camera.",
    )
    audio_transcription: CameraAudioTranscriptionConfig = Field(
        default_factory=CameraAudioTranscriptionConfig,
        title="Audio transcription",
        description="Settings for live and speech audio transcription used for events and live captions.",
    )
    birdseye: BirdseyeCameraConfig = Field(
        default_factory=BirdseyeCameraConfig,
        title="Birdseye",
        description="Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.",
    )
    detect: DetectConfig = Field(
        default_factory=DetectConfig,
        title="Object Detection",
        description="Settings for the detection/detect role used to run object detection and initialize trackers.",
    )
    face_recognition: CameraFaceRecognitionConfig = Field(
        default_factory=CameraFaceRecognitionConfig,
        title="Face recognition",
        description="Settings for face detection and recognition for this camera.",
    )
    ffmpeg: CameraFfmpegConfig = Field(
        title="FFmpeg",
        description="FFmpeg settings including binary path, args, hwaccel options, and per-role output args.",
    )
    live: CameraLiveConfig = Field(
        default_factory=CameraLiveConfig,
        title="Live playback",
        description="Settings used by the Web UI to control live stream selection, resolution and quality.",
    )
    lpr: CameraLicensePlateRecognitionConfig = Field(
        default_factory=CameraLicensePlateRecognitionConfig,
        title="License Plate Recognition",
        description="License plate recognition settings including detection thresholds, formatting, and known plates.",
    )
    # NOTE(review): default is None although the annotation is not Optional;
    # presumably a global-fallback merge populates it — confirm.
    motion: MotionConfig = Field(
        None,
        title="Motion detection",
        description="Default motion detection settings for this camera.",
    )
    objects: ObjectConfig = Field(
        default_factory=ObjectConfig,
        title="Objects",
        description="Object tracking defaults including which labels to track and per-object filters.",
    )
    record: RecordConfig = Field(
        default_factory=RecordConfig,
        title="Recording",
        description="Recording and retention settings for this camera.",
    )
    review: ReviewConfig = Field(
        default_factory=ReviewConfig,
        title="Review",
        description="Settings that control alerts, detections, and GenAI review summaries used by the UI and storage for this camera.",
    )
    semantic_search: CameraSemanticSearchConfig = Field(
        default_factory=CameraSemanticSearchConfig,
        title="Semantic Search",
        description="Settings for semantic search which builds and queries object embeddings to find similar items.",
    )
    snapshots: SnapshotsConfig = Field(
        default_factory=SnapshotsConfig,
        title="Snapshots",
        description="Settings for saved JPEG snapshots of tracked objects for this camera.",
    )
    timestamp_style: TimestampStyleConfig = Field(
        default_factory=TimestampStyleConfig,
        title="Timestamp style",
        description="Styling options for in-feed timestamps applied to recordings and snapshots.",
    )
    # Options without global fallback
    best_image_timeout: int = Field(
        default=60,
        title="Best image timeout",
        description="How long to wait for the image with the highest confidence score.",
    )
    mqtt: CameraMqttConfig = Field(
        default_factory=CameraMqttConfig,
        title="MQTT",
        description="MQTT image publishing settings.",
    )
    notifications: NotificationConfig = Field(
        default_factory=NotificationConfig,
        title="Notifications",
        description="Settings to enable and control notifications for this camera.",
    )
    onvif: OnvifConfig = Field(
        default_factory=OnvifConfig,
        title="ONVIF",
        description="ONVIF connection and PTZ autotracking settings for this camera.",
    )
    type: CameraTypeEnum = Field(
        default=CameraTypeEnum.generic,
        title="Camera type",
        description="Camera Type",
    )
    ui: CameraUiConfig = Field(
        default_factory=CameraUiConfig,
        title="Camera UI",
        description="Display ordering and visibility for this camera in the UI. Ordering affects the default dashboard. For more granular control, use camera groups.",
    )
    webui_url: Optional[str] = Field(
        None,
        title="Camera URL",
        description="URL to visit the camera directly from system page",
    )
    zones: dict[str, ZoneConfig] = Field(
        default_factory=dict,
        title="Zones",
        description="Zones allow you to define a specific area of the frame so you can determine whether or not an object is within a particular area.",
    )
    enabled_in_config: Optional[bool] = Field(
        default=None,
        title="Original camera state",
        description="Keep track of original state of camera.",
    )
    # Built ffmpeg commands (one entry per input); populated by
    # create_ffmpeg_cmds(), not by pydantic validation.
    _ffmpeg_cmds: list[dict[str, list[str]]] = PrivateAttr()

    def __init__(self, **config):
        # Set zone colors: assign one palette color per configured zone
        # before validation so ZoneConfig receives a "color" value.
        if "zones" in config:
            colors = generate_color_palette(len(config["zones"]))
            config["zones"] = {
                name: {**z, "color": color}
                for (name, z), color in zip(config["zones"].items(), colors)
            }
        # add roles to the input if there is only one
        # NOTE(review): assumes config["ffmpeg"]["inputs"] exists; a config
        # without ffmpeg inputs would raise KeyError here — confirm upstream
        # validation guarantees it.
        if len(config["ffmpeg"]["inputs"]) == 1:
            has_audio = "audio" in config["ffmpeg"]["inputs"][0].get("roles", [])

            config["ffmpeg"]["inputs"][0]["roles"] = [
                "record",
                "detect",
            ]

            if has_audio:
                config["ffmpeg"]["inputs"][0]["roles"].append("audio")

        super().__init__(**config)

    @property
    def frame_shape(self) -> tuple[int, int]:
        # (height, width) of the detect stream.
        return self.detect.height, self.detect.width

    @property
    def frame_shape_yuv(self) -> tuple[int, int]:
        # YUV420 layout stacks chroma planes: height * 3/2 rows.
        return self.detect.height * 3 // 2, self.detect.width

    @property
    def ffmpeg_cmds(self) -> list[dict[str, list[str]]]:
        return self._ffmpeg_cmds

    def get_formatted_name(self) -> str:
        """Return the friendly name if set, otherwise return a formatted version of the camera name."""
        if self.friendly_name:
            return self.friendly_name
        return self.name.replace("_", " ").title() if self.name else ""

    def create_ffmpeg_cmds(self):
        # Build commands only once.
        # NOTE(review): membership test (`in`) on a pydantic model —
        # presumably relies on FrigateBaseModel providing __contains__;
        # confirm, otherwise this would raise TypeError.
        if "_ffmpeg_cmds" in self:
            return
        self._build_ffmpeg_cmds()

    def recreate_ffmpeg_cmds(self):
        """Force regeneration of ffmpeg commands from current config."""
        self._build_ffmpeg_cmds()

    def _build_ffmpeg_cmds(self):
        """Build ffmpeg commands from the current ffmpeg config."""
        ffmpeg_cmds = []
        for ffmpeg_input in self.ffmpeg.inputs:
            ffmpeg_cmd = self._get_ffmpeg_cmd(ffmpeg_input)
            # Inputs with no enabled outputs are skipped entirely.
            if ffmpeg_cmd is None:
                continue

            ffmpeg_cmds.append({"roles": ffmpeg_input.roles, "cmd": ffmpeg_cmd})
        self._ffmpeg_cmds = ffmpeg_cmds

    def _get_ffmpeg_cmd(self, ffmpeg_input: CameraInput):
        # Assemble output args per role. Returns None when the input has no
        # active outputs (e.g. record role with recording disabled).
        ffmpeg_output_args = []
        if "detect" in ffmpeg_input.roles:
            detect_args = get_ffmpeg_arg_list(self.ffmpeg.output_args.detect)
            scale_detect_args = parse_preset_hardware_acceleration_scale(
                ffmpeg_input.hwaccel_args or self.ffmpeg.hwaccel_args,
                detect_args,
                self.detect.fps,
                self.detect.width,
                self.detect.height,
            )

            # Detect output is raw frames written to stdout ("pipe:").
            ffmpeg_output_args = scale_detect_args + ffmpeg_output_args + ["pipe:"]

        if "record" in ffmpeg_input.roles and self.record.enabled:
            record_args = get_ffmpeg_arg_list(
                parse_preset_output_record(
                    self.ffmpeg.output_args.record,
                    self.ffmpeg.apple_compatibility,
                )
                or self.ffmpeg.output_args.record
            )

            # Record output writes cache segments named per camera.
            ffmpeg_output_args = (
                record_args
                + [f"{os.path.join(CACHE_DIR, self.name)}@{CACHE_SEGMENT_FORMAT}.mp4"]
                + ffmpeg_output_args
            )

        # if there aren't any outputs enabled for this input
        if len(ffmpeg_output_args) == 0:
            return None

        global_args = get_ffmpeg_arg_list(
            ffmpeg_input.global_args or self.ffmpeg.global_args
        )
        # "auto" is a preset sentinel, not literal ffmpeg args.
        camera_arg = (
            self.ffmpeg.hwaccel_args if self.ffmpeg.hwaccel_args != "auto" else None
        )
        # Precedence: input-level preset > raw input args > camera-level
        # preset > raw camera args > none.
        hwaccel_args = get_ffmpeg_arg_list(
            parse_preset_hardware_acceleration_decode(
                ffmpeg_input.hwaccel_args,
                self.detect.fps,
                self.detect.width,
                self.detect.height,
                self.ffmpeg.gpu,
            )
            or ffmpeg_input.hwaccel_args
            or parse_preset_hardware_acceleration_decode(
                camera_arg,
                self.detect.fps,
                self.detect.width,
                self.detect.height,
                self.ffmpeg.gpu,
            )
            or camera_arg
            or []
        )
        # Same precedence pattern for input args: input preset > raw input
        # args > camera preset > raw camera args.
        input_args = get_ffmpeg_arg_list(
            parse_preset_input(ffmpeg_input.input_args, self.detect.fps)
            or ffmpeg_input.input_args
            or parse_preset_input(self.ffmpeg.input_args, self.detect.fps)
            or self.ffmpeg.input_args
        )

        cmd = (
            [self.ffmpeg.ffmpeg_path]
            + global_args
            # hwaccel decode args only apply when this input feeds detect.
            + (hwaccel_args if "detect" in ffmpeg_input.roles else [])
            + input_args
            + ["-i", escape_special_characters(ffmpeg_input.path)]
            + ffmpeg_output_args
        )

        # Drop empty-string tokens left over from arg-list parsing.
        return [part for part in cmd if part != ""]
import os
from enum import Enum
from typing import Optional
from pydantic import Field, PrivateAttr, model_validator
from frigate.const import CACHE_DIR, CACHE_SEGMENT_FORMAT, REGEX_CAMERA_NAME
from frigate.ffmpeg_presets import (
parse_preset_hardware_acceleration_decode,
parse_preset_hardware_acceleration_scale,
parse_preset_input,
parse_preset_output_record,
)
from frigate.util.builtin import (
escape_special_characters,
generate_color_palette,
get_ffmpeg_arg_list,
)
from ..base import FrigateBaseModel
from ..classification import (
CameraAudioTranscriptionConfig,
CameraFaceRecognitionConfig,
CameraLicensePlateRecognitionConfig,
CameraSemanticSearchConfig,
)
from .audio import AudioConfig
from .birdseye import BirdseyeCameraConfig
from .detect import DetectConfig
from .ffmpeg import CameraFfmpegConfig, CameraInput
from .live import CameraLiveConfig
from .motion import MotionConfig
from .mqtt import CameraMqttConfig
from .notification import NotificationConfig
from .objects import ObjectConfig
from .onvif import OnvifConfig
from .record import RecordConfig
from .review import ReviewConfig
from .snapshots import SnapshotsConfig
from .timestamp import TimestampStyleConfig
from .ui import CameraUiConfig
from .zone import ZoneConfig
__all__ = ["CameraConfig"]
class CameraTypeEnum(str, Enum):
    # Camera type selector; "lpr" presumably enables license-plate-specific
    # behavior elsewhere — confirm against consumers of CameraConfig.type.
    generic = "generic"
    lpr = "lpr"
class CameraConfig(FrigateBaseModel):
name: Optional[str] = Field(
None,
title="Camera name",
description="Camera name is required",
pattern=REGEX_CAMERA_NAME,
)
friendly_name: Optional[str] = Field(
None,
title="Friendly name",
description="Camera friendly name used in the Frigate UI",
)
@model_validator(mode="before")
@classmethod
def handle_friendly_name(cls, values):
if isinstance(values, dict) and "friendly_name" in values:
pass
return values
enabled: bool = Field(default=True, title="Enabled", description="Enabled")
# Options with global fallback
audio: AudioConfig = Field(
default_factory=AudioConfig,
title="Audio events",
description="Settings for audio-based event detection for this camera.",
)
audio_transcription: CameraAudioTranscriptionConfig = Field(
default_factory=CameraAudioTranscriptionConfig,
title="Audio transcription",
description="Settings for live and speech audio transcription used for events and live captions.",
)
birdseye: BirdseyeCameraConfig = Field(
default_factory=BirdseyeCameraConfig,
title="Birdseye",
description="Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.",
)
detect: DetectConfig = Field(
default_factory=DetectConfig,
title="Object Detection",
description="Settings for the detection/detect role used to run object detection and initialize trackers.",
)
face_recognition: CameraFaceRecognitionConfig = Field(
default_factory=CameraFaceRecognitionConfig,
title="Face recognition",
description="Settings for face detection and recognition for this camera.",
)
ffmpeg: CameraFfmpegConfig = Field(
title="FFmpeg",
description="FFmpeg settings including binary path, args, hwaccel options, and per-role output args.",
)
live: CameraLiveConfig = Field(
default_factory=CameraLiveConfig,
title="Live playback",
description="Settings used by the Web UI to control live stream selection, resolution and quality.",
)
lpr: CameraLicensePlateRecognitionConfig = Field(
default_factory=CameraLicensePlateRecognitionConfig,
title="License Plate Recognition",
description="License plate recognition settings including detection thresholds, formatting, and known plates.",
)
motion: MotionConfig = Field(
None,
title="Motion detection",
description="Default motion detection settings for this camera.",
)
objects: ObjectConfig = Field(
default_factory=ObjectConfig,
title="Objects",
description="Object tracking defaults including which labels to track and per-object filters.",
)
record: RecordConfig = Field(
default_factory=RecordConfig,
title="Recording",
description="Recording and retention settings for this camera.",
)
review: ReviewConfig = Field(
default_factory=ReviewConfig,
title="Review",
description="Settings that control alerts, detections, and GenAI review summaries used by the UI and storage for this camera.",
)
semantic_search: CameraSemanticSearchConfig = Field(
default_factory=CameraSemanticSearchConfig,
title="Semantic Search",
description="Settings for semantic search which builds and queries object embeddings to find similar items.",
)
snapshots: SnapshotsConfig = Field(
default_factory=SnapshotsConfig,
title="Snapshots",
description="Settings for saved JPEG snapshots of tracked objects for this camera.",
)
timestamp_style: TimestampStyleConfig = Field(
default_factory=TimestampStyleConfig,
title="Timestamp style",
description="Styling options for in-feed timestamps applied to recordings and snapshots.",
)
# Options without global fallback
best_image_timeout: int = Field(
default=60,
title="Best image timeout",
description="How long to wait for the image with the highest confidence score.",
)
mqtt: CameraMqttConfig = Field(
default_factory=CameraMqttConfig,
title="MQTT",
description="MQTT image publishing settings.",
)
notifications: NotificationConfig = Field(
default_factory=NotificationConfig,
title="Notifications",
description="Settings to enable and control notifications for this camera.",
)
onvif: OnvifConfig = Field(
default_factory=OnvifConfig,
title="ONVIF",
description="ONVIF connection and PTZ autotracking settings for this camera.",
)
type: CameraTypeEnum = Field(
default=CameraTypeEnum.generic,
title="Camera type",
description="Camera Type",
)
ui: CameraUiConfig = Field(
default_factory=CameraUiConfig,
title="Camera UI",
description="Display ordering and visibility for this camera in the UI. Ordering affects the default dashboard. For more granular control, use camera groups.",
)
webui_url: Optional[str] = Field(
None,
title="Camera URL",
description="URL to visit the camera directly from system page",
)
zones: dict[str, ZoneConfig] = Field(
default_factory=dict,
title="Zones",
description="Zones allow you to define a specific area of the frame so you can determine whether or not an object is within a particular area.",
)
enabled_in_config: Optional[bool] = Field(
default=None,
title="Original camera state",
description="Keep track of original state of camera.",
)
_ffmpeg_cmds: list[dict[str, list[str]]] = PrivateAttr()
def __init__(self, **config):
# Set zone colors
if "zones" in config:
colors = generate_color_palette(len(config["zones"]))
config["zones"] = {
name: {**z, "color": color}
for (name, z), color in zip(config["zones"].items(), colors)
}
# add roles to the input if there is only one
if len(config["ffmpeg"]["inputs"]) == 1:
has_audio = "audio" in config["ffmpeg"]["inputs"][0].get("roles", [])
config["ffmpeg"]["inputs"][0]["roles"] = [
"record",
"detect",
]
if has_audio:
config["ffmpeg"]["inputs"][0]["roles"].append("audio")
super().__init__(**config)
@property
def frame_shape(self) -> tuple[int, int]:
return self.detect.height, self.detect.width
@property
def frame_shape_yuv(self) -> tuple[int, int]:
return self.detect.height * 3 // 2, self.detect.width
@property
def ffmpeg_cmds(self) -> list[dict[str, list[str]]]:
return self._ffmpeg_cmds
def get_formatted_name(self) -> str:
"""Return the friendly name if set, otherwise return a formatted version of the camera name."""
if self.friendly_name:
return self.friendly_name
return self.name.replace("_", " ").title() if self.name else ""
def create_ffmpeg_cmds(self):
if "_ffmpeg_cmds" in self:
return
self._build_ffmpeg_cmds()
def recreate_ffmpeg_cmds(self):
"""Force regeneration of ffmpeg commands from current config."""
self._build_ffmpeg_cmds()
def _build_ffmpeg_cmds(self):
    """Build ffmpeg commands from the current ffmpeg config."""
    commands = []
    for stream in self.ffmpeg.inputs:
        cmd = self._get_ffmpeg_cmd(stream)
        if cmd is None:
            # No enabled outputs for this input; nothing to launch.
            continue
        commands.append(
            {
                "roles": stream.roles,
                "cmd": cmd,
                "record_variant": stream.record_variant,
            }
        )
    self._ffmpeg_cmds = commands
def _get_ffmpeg_cmd(self, ffmpeg_input: CameraInput):
    """Assemble the full ffmpeg command list for a single input stream.

    Returns None when no outputs end up enabled for this input (for
    example, a record-only input while recording is disabled).
    """
    ffmpeg_output_args = []
    if "detect" in ffmpeg_input.roles:
        detect_args = get_ffmpeg_arg_list(self.ffmpeg.output_args.detect)
        # Apply (possibly hardware-accelerated) scaling to the configured
        # detect fps/resolution.
        scale_detect_args = parse_preset_hardware_acceleration_scale(
            ffmpeg_input.hwaccel_args or self.ffmpeg.hwaccel_args,
            detect_args,
            self.detect.fps,
            self.detect.width,
            self.detect.height,
        )
        # Raw detect frames are streamed to stdout ("pipe:").
        ffmpeg_output_args = scale_detect_args + ffmpeg_output_args + ["pipe:"]
    if "record" in ffmpeg_input.roles and self.record.enabled:
        record_args = get_ffmpeg_arg_list(
            parse_preset_output_record(
                self.ffmpeg.output_args.record,
                self.ffmpeg.apple_compatibility,
            )
            or self.ffmpeg.output_args.record
        )
        # Segment files go to the cache dir, tagged with the record
        # variant ("main" by default) so multiple record streams coexist.
        record_variant = ffmpeg_input.record_variant or "main"
        cache_prefix = os.path.join(CACHE_DIR, self.name)
        cache_path = f"{cache_prefix}@{record_variant}@{CACHE_SEGMENT_FORMAT}.mp4"
        ffmpeg_output_args = (
            record_args
            + [cache_path]
            + ffmpeg_output_args
        )
    # if there aren't any outputs enabled for this input
    if len(ffmpeg_output_args) == 0:
        return None
    global_args = get_ffmpeg_arg_list(
        ffmpeg_input.global_args or self.ffmpeg.global_args
    )
    # "auto" is a placeholder, not a literal ffmpeg argument; treat as unset.
    camera_arg = (
        self.ffmpeg.hwaccel_args if self.ffmpeg.hwaccel_args != "auto" else None
    )
    # Precedence: input-level preset > input-level raw args >
    # camera-level preset > camera-level raw args > none.
    hwaccel_args = get_ffmpeg_arg_list(
        parse_preset_hardware_acceleration_decode(
            ffmpeg_input.hwaccel_args,
            self.detect.fps,
            self.detect.width,
            self.detect.height,
            self.ffmpeg.gpu,
        )
        or ffmpeg_input.hwaccel_args
        or parse_preset_hardware_acceleration_decode(
            camera_arg,
            self.detect.fps,
            self.detect.width,
            self.detect.height,
            self.ffmpeg.gpu,
        )
        or camera_arg
        or []
    )
    # Same precedence scheme for input args.
    input_args = get_ffmpeg_arg_list(
        parse_preset_input(ffmpeg_input.input_args, self.detect.fps)
        or ffmpeg_input.input_args
        or parse_preset_input(self.ffmpeg.input_args, self.detect.fps)
        or self.ffmpeg.input_args
    )
    cmd = (
        [self.ffmpeg.ffmpeg_path]
        + global_args
        # Decode acceleration only matters when we consume decoded frames.
        + (hwaccel_args if "detect" in ffmpeg_input.roles else [])
        + input_args
        + ["-i", escape_special_characters(ffmpeg_input.path)]
        + ffmpeg_output_args
    )
    # Drop empty strings left over from arg-list parsing.
    return [part for part in cmd if part != ""]

View File

@ -1,159 +1,192 @@
from enum import Enum
from typing import Union
from pydantic import Field, field_validator
from frigate.const import DEFAULT_FFMPEG_VERSION, INCLUDED_FFMPEG_VERSIONS
from ..base import FrigateBaseModel
from ..env import EnvString
__all__ = [
"CameraFfmpegConfig",
"CameraInput",
"CameraRoleEnum",
"FfmpegConfig",
"FfmpegOutputArgsConfig",
]
# Note: Setting threads to less than 2 caused several issues with recording segments
# https://github.com/blakeblackshear/frigate/issues/5659
FFMPEG_GLOBAL_ARGS_DEFAULT = ["-hide_banner", "-loglevel", "warning", "-threads", "2"]
FFMPEG_INPUT_ARGS_DEFAULT = "preset-rtsp-generic"
RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = "preset-record-generic-audio-aac"
DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = [
"-threads",
"2",
"-f",
"rawvideo",
"-pix_fmt",
"yuv420p",
]
class FfmpegOutputArgsConfig(FrigateBaseModel):
    """Default ffmpeg output arguments, keyed by stream role."""

    # Either a single preset/argument string or a pre-split argument list.
    detect: Union[str, list[str]] = Field(
        default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT,
        title="Detect output arguments",
        description="Default output arguments for detect role streams.",
    )
    record: Union[str, list[str]] = Field(
        default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT,
        title="Record output arguments",
        description="Default output arguments for record role streams.",
    )
class FfmpegConfig(FrigateBaseModel):
    """FFmpeg process configuration: binary location, argument defaults,
    hardware acceleration, and retry behavior."""

    path: str = Field(
        default="default",
        title="FFmpeg path",
        description='Path to the FFmpeg binary to use or a version alias ("5.0" or "7.0").',
    )
    global_args: Union[str, list[str]] = Field(
        default=FFMPEG_GLOBAL_ARGS_DEFAULT,
        title="FFmpeg global arguments",
        description="Global arguments passed to FFmpeg processes.",
    )
    hwaccel_args: Union[str, list[str]] = Field(
        default="auto",
        title="Hardware acceleration arguments",
        description="Hardware acceleration arguments for FFmpeg. Provider-specific presets are recommended.",
    )
    input_args: Union[str, list[str]] = Field(
        default=FFMPEG_INPUT_ARGS_DEFAULT,
        title="Input arguments",
        description="Input arguments applied to FFmpeg input streams.",
    )
    output_args: FfmpegOutputArgsConfig = Field(
        default_factory=FfmpegOutputArgsConfig,
        title="Output arguments",
        description="Default output arguments used for different FFmpeg roles such as detect and record.",
    )
    retry_interval: float = Field(
        default=10.0,
        title="FFmpeg retry time",
        description="Seconds to wait before attempting to reconnect a camera stream after failure. Default is 10.",
        gt=0.0,
    )
    apple_compatibility: bool = Field(
        default=False,
        title="Apple compatibility",
        description="Enable HEVC tagging for better Apple player compatibility when recording H.265.",
    )
    gpu: int = Field(
        default=0,
        title="GPU index",
        description="Default GPU index used for hardware acceleration if available.",
    )

    def _binary_path(self, binary: str) -> str:
        """Resolve the absolute path of an ffmpeg-suite binary.

        Deduplicates the identical branching previously repeated in
        ffmpeg_path and ffprobe_path:
        - "default" -> the bundled DEFAULT_FFMPEG_VERSION install
        - a known included version alias -> that bundled install
        - anything else -> treated as a custom install prefix
        """
        if self.path == "default":
            return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/{binary}"
        if self.path in INCLUDED_FFMPEG_VERSIONS:
            return f"/usr/lib/ffmpeg/{self.path}/bin/{binary}"
        return f"{self.path}/bin/{binary}"

    @property
    def ffmpeg_path(self) -> str:
        """Absolute path to the ffmpeg binary."""
        return self._binary_path("ffmpeg")

    @property
    def ffprobe_path(self) -> str:
        """Absolute path to the ffprobe binary."""
        return self._binary_path("ffprobe")
class CameraRoleEnum(str, Enum):
    """Roles an input stream can serve (str-valued for YAML comparison)."""

    audio = "audio"
    record = "record"
    detect = "detect"
class CameraInput(FrigateBaseModel):
    """One camera input stream: its source path and the roles it serves."""

    # EnvString — presumably resolves environment-variable placeholders in
    # the path at load time; confirm against the env module.
    path: EnvString = Field(
        title="Input path",
        description="Camera input stream URL or path.",
    )
    roles: list[CameraRoleEnum] = Field(
        title="Input roles",
        description="Roles for this input stream.",
    )
    # Per-input overrides; empty values fall back to camera-level defaults
    # when the command is assembled.
    global_args: Union[str, list[str]] = Field(
        default_factory=list,
        title="FFmpeg global arguments",
        description="FFmpeg global arguments for this input stream.",
    )
    hwaccel_args: Union[str, list[str]] = Field(
        default_factory=list,
        title="Hardware acceleration arguments",
        description="Hardware acceleration arguments for this input stream.",
    )
    input_args: Union[str, list[str]] = Field(
        default_factory=list,
        title="Input arguments",
        description="Input arguments specific to this stream.",
    )
class CameraFfmpegConfig(FfmpegConfig):
    """Per-camera ffmpeg config: inherits defaults, adds the input list."""

    inputs: list[CameraInput] = Field(
        title="Camera inputs",
        description="List of input stream definitions (paths and roles) for this camera.",
    )

    @field_validator("inputs")
    @classmethod
    def validate_roles(cls, v):
        """Ensure no role is assigned twice and that detect is present."""
        # Flatten all roles across inputs; any duplicate means a role was
        # assigned to more than one input (or twice on the same input).
        roles = [role for input in v for role in input.roles]
        if len(roles) != len(set(roles)):
            raise ValueError("Each input role may only be used once.")
        if "detect" not in roles:
            raise ValueError("The detect role is required.")
        return v
from enum import Enum
from typing import Union
from pydantic import Field, field_validator, model_validator
from frigate.const import DEFAULT_FFMPEG_VERSION, INCLUDED_FFMPEG_VERSIONS, REGEX_CAMERA_NAME
from ..base import FrigateBaseModel
from ..env import EnvString
__all__ = [
"CameraFfmpegConfig",
"CameraInput",
"CameraRoleEnum",
"FfmpegConfig",
"FfmpegOutputArgsConfig",
]
# Note: Setting threads to less than 2 caused several issues with recording segments
# https://github.com/blakeblackshear/frigate/issues/5659
FFMPEG_GLOBAL_ARGS_DEFAULT = ["-hide_banner", "-loglevel", "warning", "-threads", "2"]
FFMPEG_INPUT_ARGS_DEFAULT = "preset-rtsp-generic"
RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = "preset-record-generic-audio-aac"
DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = [
"-threads",
"2",
"-f",
"rawvideo",
"-pix_fmt",
"yuv420p",
]
class FfmpegOutputArgsConfig(FrigateBaseModel):
    """Role-specific default output arguments for ffmpeg."""

    # A preset/argument string or an already-split argument list.
    detect: Union[str, list[str]] = Field(
        default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT,
        title="Detect output arguments",
        description="Default output arguments for detect role streams.",
    )
    record: Union[str, list[str]] = Field(
        default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT,
        title="Record output arguments",
        description="Default output arguments for record role streams.",
    )
class FfmpegConfig(FrigateBaseModel):
    """FFmpeg process configuration: binary location, argument defaults,
    hardware acceleration, and retry behavior."""

    path: str = Field(
        default="default",
        title="FFmpeg path",
        description='Path to the FFmpeg binary to use or a version alias ("5.0" or "7.0").',
    )
    global_args: Union[str, list[str]] = Field(
        default=FFMPEG_GLOBAL_ARGS_DEFAULT,
        title="FFmpeg global arguments",
        description="Global arguments passed to FFmpeg processes.",
    )
    hwaccel_args: Union[str, list[str]] = Field(
        default="auto",
        title="Hardware acceleration arguments",
        description="Hardware acceleration arguments for FFmpeg. Provider-specific presets are recommended.",
    )
    input_args: Union[str, list[str]] = Field(
        default=FFMPEG_INPUT_ARGS_DEFAULT,
        title="Input arguments",
        description="Input arguments applied to FFmpeg input streams.",
    )
    output_args: FfmpegOutputArgsConfig = Field(
        default_factory=FfmpegOutputArgsConfig,
        title="Output arguments",
        description="Default output arguments used for different FFmpeg roles such as detect and record.",
    )
    retry_interval: float = Field(
        default=10.0,
        title="FFmpeg retry time",
        description="Seconds to wait before attempting to reconnect a camera stream after failure. Default is 10.",
        gt=0.0,
    )
    apple_compatibility: bool = Field(
        default=False,
        title="Apple compatibility",
        description="Enable HEVC tagging for better Apple player compatibility when recording H.265.",
    )
    gpu: int = Field(
        default=0,
        title="GPU index",
        description="Default GPU index used for hardware acceleration if available.",
    )

    def _binary_path(self, binary: str) -> str:
        """Resolve the absolute path of an ffmpeg-suite binary.

        Deduplicates the identical branching previously repeated in
        ffmpeg_path and ffprobe_path:
        - "default" -> the bundled DEFAULT_FFMPEG_VERSION install
        - a known included version alias -> that bundled install
        - anything else -> treated as a custom install prefix
        """
        if self.path == "default":
            return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/{binary}"
        if self.path in INCLUDED_FFMPEG_VERSIONS:
            return f"/usr/lib/ffmpeg/{self.path}/bin/{binary}"
        return f"{self.path}/bin/{binary}"

    @property
    def ffmpeg_path(self) -> str:
        """Absolute path to the ffmpeg binary."""
        return self._binary_path("ffmpeg")

    @property
    def ffprobe_path(self) -> str:
        """Absolute path to the ffprobe binary."""
        return self._binary_path("ffprobe")
class CameraRoleEnum(str, Enum):
    """Roles an input stream can serve (str-valued for YAML comparison)."""

    audio = "audio"
    record = "record"
    detect = "detect"
class CameraInput(FrigateBaseModel):
    """One camera input stream: its source path, roles, and optional
    recording variant label."""

    # EnvString — presumably resolves environment-variable placeholders in
    # the path at load time; confirm against the env module.
    path: EnvString = Field(
        title="Input path",
        description="Camera input stream URL or path.",
    )
    roles: list[CameraRoleEnum] = Field(
        title="Input roles",
        description="Roles for this input stream.",
    )
    # Per-input overrides; empty values fall back to camera-level defaults
    # when the command is assembled.
    global_args: Union[str, list[str]] = Field(
        default_factory=list,
        title="FFmpeg global arguments",
        description="FFmpeg global arguments for this input stream.",
    )
    hwaccel_args: Union[str, list[str]] = Field(
        default_factory=list,
        title="Hardware acceleration arguments",
        description="Hardware acceleration arguments for this input stream.",
    )
    input_args: Union[str, list[str]] = Field(
        default_factory=list,
        title="Input arguments",
        description="Input arguments specific to this stream.",
    )
    # Must match the camera-name character set since it is embedded in
    # cache segment file names.
    record_variant: str | None = Field(
        default=None,
        title="Recording variant",
        description="Optional recording variant label for record role inputs such as main or sub.",
        pattern=REGEX_CAMERA_NAME,
    )

    @model_validator(mode="after")
    def validate_record_variant(self):
        """Normalize record_variant after validation: default record inputs
        to "main" and clear the label on non-record inputs."""
        if CameraRoleEnum.record in self.roles:
            if not self.record_variant:
                self.record_variant = "main"
        else:
            # A variant label is meaningless without the record role.
            self.record_variant = None
        return self
class CameraFfmpegConfig(FfmpegConfig):
    """Per-camera ffmpeg config: inherits defaults, adds the input list."""

    inputs: list[CameraInput] = Field(
        title="Camera inputs",
        description="List of input stream definitions (paths and roles) for this camera.",
    )

    @field_validator("inputs")
    @classmethod
    def validate_roles(cls, v):
        """Validate role assignments across all inputs.

        Rules:
        - exactly one input must carry the detect role
        - at most one input may carry the audio role
        - record variants must be unique among record inputs
        """
        detect_inputs = 0
        audio_inputs = 0
        record_variants: set[str] = set()
        for camera_input in v:
            if CameraRoleEnum.detect in camera_input.roles:
                detect_inputs += 1
            if CameraRoleEnum.audio in camera_input.roles:
                audio_inputs += 1
            if CameraRoleEnum.record in camera_input.roles:
                # validate_record_variant defaults this to "main"; fall back
                # defensively in case validator ordering changes.
                record_variant = camera_input.record_variant or "main"
                if record_variant in record_variants:
                    raise ValueError(
                        f"Record variant '{record_variant}' may only be used once."
                    )
                record_variants.add(record_variant)
        if detect_inputs == 0:
            raise ValueError("The detect role is required.")
        if detect_inputs > 1:
            # Bug fix: this case previously raised "The detect role is
            # required.", which is misleading when detect is duplicated.
            raise ValueError("The detect role may only be used once.")
        if audio_inputs > 1:
            raise ValueError("Each input role may only be used once.")
        return v

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,21 @@
"""Configuration for the VOD transcode proxy (optional playback transcoding)."""
from pydantic import Field
from .base import FrigateBaseModel
__all__ = ["TranscodeProxyConfig"]
class TranscodeProxyConfig(FrigateBaseModel):
    """Settings for the optional transcode proxy used for recording playback."""

    # When False, playback uses the untranscoded recordings directly.
    enabled: bool = Field(
        default=False,
        title="Transcode proxy enabled",
        description="When enabled, the UI uses the transcode proxy URL for VOD playback so recordings are transcoded to H.264 on the fly (e.g. for HEVC compatibility or lower bitrate).",
    )
    # Empty string means the proxy is expected to be reachable on the same
    # origin rather than at an absolute base URL.
    vod_proxy_url: str = Field(
        default="",
        title="VOD proxy base URL",
        description="Base URL for the transcode proxy (e.g. http://host:5010). When enabled, recording playback requests go to this URL + /vod/... Leave empty if the proxy is mounted at the same host (e.g. /vod-transcoded/ under the same origin).",
    )

View File

@ -1,179 +1,184 @@
from peewee import (
BlobField,
BooleanField,
CharField,
CompositeKey,
DateTimeField,
FloatField,
ForeignKeyField,
IntegerField,
Model,
TextField,
)
from playhouse.sqlite_ext import JSONField
class Event(Model):
id = CharField(null=False, primary_key=True, max_length=30)
label = CharField(index=True, max_length=20)
sub_label = CharField(max_length=100, null=True)
camera = CharField(index=True, max_length=20)
start_time = DateTimeField()
end_time = DateTimeField()
top_score = (
FloatField()
) # TODO remove when columns can be dropped without rebuilding table
score = (
FloatField()
) # TODO remove when columns can be dropped without rebuilding table
false_positive = BooleanField()
zones = JSONField()
thumbnail = TextField()
has_clip = BooleanField(default=True)
has_snapshot = BooleanField(default=True)
region = (
JSONField()
) # TODO remove when columns can be dropped without rebuilding table
box = (
JSONField()
) # TODO remove when columns can be dropped without rebuilding table
area = (
IntegerField()
) # TODO remove when columns can be dropped without rebuilding table
retain_indefinitely = BooleanField(default=False)
ratio = FloatField(
default=1.0
) # TODO remove when columns can be dropped without rebuilding table
plus_id = CharField(max_length=30)
model_hash = CharField(max_length=32)
detector_type = CharField(max_length=32)
model_type = CharField(max_length=32)
data = JSONField() # ex: tracked object box, region, etc.
class Timeline(Model):
timestamp = DateTimeField()
camera = CharField(index=True, max_length=20)
source = CharField(index=True, max_length=20) # ex: tracked object, audio, external
source_id = CharField(index=True, max_length=30)
class_type = CharField(max_length=50) # ex: entered_zone, audio_heard
data = JSONField() # ex: tracked object id, region, box, etc.
class Regions(Model):
camera = CharField(null=False, primary_key=True, max_length=20)
grid = JSONField() # json blob of grid
last_update = DateTimeField()
class Recordings(Model):
    """One recorded segment file on disk plus per-segment activity stats."""

    id = CharField(null=False, primary_key=True, max_length=30)
    camera = CharField(index=True, max_length=20)
    path = CharField(unique=True)
    start_time = DateTimeField()
    end_time = DateTimeField()
    duration = FloatField()
    # Activity metrics for the segment; null when not computed.
    motion = IntegerField(null=True)
    objects = IntegerField(null=True)
    dBFS = IntegerField(null=True)
    segment_size = FloatField(default=0)  # this should be stored as MB
    regions = IntegerField(null=True)
    motion_heatmap = JSONField(null=True)  # 16x16 grid, 256 values (0-255)
class ExportCase(Model):
id = CharField(null=False, primary_key=True, max_length=30)
name = CharField(index=True, max_length=100)
description = TextField(null=True)
created_at = DateTimeField()
updated_at = DateTimeField()
class Export(Model):
id = CharField(null=False, primary_key=True, max_length=30)
camera = CharField(index=True, max_length=20)
name = CharField(index=True, max_length=100)
date = DateTimeField()
video_path = CharField(unique=True)
thumb_path = CharField(unique=True)
in_progress = BooleanField()
export_case = ForeignKeyField(
ExportCase,
null=True,
backref="exports",
column_name="export_case_id",
)
class ReviewSegment(Model):
id = CharField(null=False, primary_key=True, max_length=30)
camera = CharField(index=True, max_length=20)
start_time = DateTimeField()
end_time = DateTimeField()
severity = CharField(max_length=30) # alert, detection
thumb_path = CharField(unique=True)
data = JSONField() # additional data about detection like list of labels, zone, areas of significant motion
class UserReviewStatus(Model):
user_id = CharField(max_length=30)
review_segment = ForeignKeyField(ReviewSegment, backref="user_reviews")
has_been_reviewed = BooleanField(default=False)
class Meta:
indexes = ((("user_id", "review_segment"), True),)
class Previews(Model):
id = CharField(null=False, primary_key=True, max_length=30)
camera = CharField(index=True, max_length=20)
path = CharField(unique=True)
start_time = DateTimeField()
end_time = DateTimeField()
duration = FloatField()
# Used for temporary table in record/cleanup.py
class RecordingsToDelete(Model):
id = CharField(null=False, primary_key=False, max_length=30)
class Meta:
temporary = True
class User(Model):
    """UI user account with role-based camera access resolution."""

    username = CharField(null=False, primary_key=True, max_length=30)
    role = CharField(
        max_length=20,
        default="admin",
    )
    password_hash = CharField(null=False, max_length=120)
    password_changed_at = DateTimeField(null=True)
    notification_tokens = JSONField()

    @classmethod
    def get_allowed_cameras(
        cls, role: str, roles_dict: dict[str, list[str]], all_camera_names: set[str]
    ) -> list[str]:
        """Resolve which camera names the given role may access."""
        if role not in roles_dict:
            # Unknown roles grant no access at all.
            return []
        configured = roles_dict[role]
        if not configured:
            # An empty list is shorthand for access to every camera.
            return list(all_camera_names)
        # Keep only configured cameras that actually exist.
        return [camera for camera in configured if camera in all_camera_names]
class Trigger(Model):
camera = CharField(max_length=20)
name = CharField()
type = CharField(max_length=10)
data = TextField()
threshold = FloatField()
model = CharField(max_length=30)
embedding = BlobField()
triggering_event_id = CharField(max_length=30)
last_triggered = DateTimeField()
class Meta:
primary_key = CompositeKey("camera", "name")
from peewee import (
BlobField,
BooleanField,
CharField,
CompositeKey,
DateTimeField,
FloatField,
ForeignKeyField,
IntegerField,
Model,
TextField,
)
from playhouse.sqlite_ext import JSONField
class Event(Model):
id = CharField(null=False, primary_key=True, max_length=30)
label = CharField(index=True, max_length=20)
sub_label = CharField(max_length=100, null=True)
camera = CharField(index=True, max_length=20)
start_time = DateTimeField()
end_time = DateTimeField()
top_score = (
FloatField()
) # TODO remove when columns can be dropped without rebuilding table
score = (
FloatField()
) # TODO remove when columns can be dropped without rebuilding table
false_positive = BooleanField()
zones = JSONField()
thumbnail = TextField()
has_clip = BooleanField(default=True)
has_snapshot = BooleanField(default=True)
region = (
JSONField()
) # TODO remove when columns can be dropped without rebuilding table
box = (
JSONField()
) # TODO remove when columns can be dropped without rebuilding table
area = (
IntegerField()
) # TODO remove when columns can be dropped without rebuilding table
retain_indefinitely = BooleanField(default=False)
ratio = FloatField(
default=1.0
) # TODO remove when columns can be dropped without rebuilding table
plus_id = CharField(max_length=30)
model_hash = CharField(max_length=32)
detector_type = CharField(max_length=32)
model_type = CharField(max_length=32)
data = JSONField() # ex: tracked object box, region, etc.
class Timeline(Model):
timestamp = DateTimeField()
camera = CharField(index=True, max_length=20)
source = CharField(index=True, max_length=20) # ex: tracked object, audio, external
source_id = CharField(index=True, max_length=30)
class_type = CharField(max_length=50) # ex: entered_zone, audio_heard
data = JSONField() # ex: tracked object id, region, box, etc.
class Regions(Model):
camera = CharField(null=False, primary_key=True, max_length=20)
grid = JSONField() # json blob of grid
last_update = DateTimeField()
class Recordings(Model):
    """One recorded segment file on disk plus per-segment activity stats
    and probed stream properties."""

    id = CharField(null=False, primary_key=True, max_length=30)
    camera = CharField(index=True, max_length=20)
    path = CharField(unique=True)
    # Which record stream produced this segment (e.g. "main" or "sub").
    variant = CharField(default="main", index=True, max_length=20)
    start_time = DateTimeField()
    end_time = DateTimeField()
    duration = FloatField()
    # Activity metrics for the segment; null when not computed.
    motion = IntegerField(null=True)
    objects = IntegerField(null=True)
    dBFS = IntegerField(null=True)
    segment_size = FloatField(default=0)  # this should be stored as MB
    # Stream properties of the segment; null when not yet probed.
    codec_name = CharField(null=True, max_length=32)
    width = IntegerField(null=True)
    height = IntegerField(null=True)
    bitrate = IntegerField(null=True)
    regions = IntegerField(null=True)
    motion_heatmap = JSONField(null=True)  # 16x16 grid, 256 values (0-255)
class ExportCase(Model):
id = CharField(null=False, primary_key=True, max_length=30)
name = CharField(index=True, max_length=100)
description = TextField(null=True)
created_at = DateTimeField()
updated_at = DateTimeField()
class Export(Model):
id = CharField(null=False, primary_key=True, max_length=30)
camera = CharField(index=True, max_length=20)
name = CharField(index=True, max_length=100)
date = DateTimeField()
video_path = CharField(unique=True)
thumb_path = CharField(unique=True)
in_progress = BooleanField()
export_case = ForeignKeyField(
ExportCase,
null=True,
backref="exports",
column_name="export_case_id",
)
class ReviewSegment(Model):
id = CharField(null=False, primary_key=True, max_length=30)
camera = CharField(index=True, max_length=20)
start_time = DateTimeField()
end_time = DateTimeField()
severity = CharField(max_length=30) # alert, detection
thumb_path = CharField(unique=True)
data = JSONField() # additional data about detection like list of labels, zone, areas of significant motion
class UserReviewStatus(Model):
user_id = CharField(max_length=30)
review_segment = ForeignKeyField(ReviewSegment, backref="user_reviews")
has_been_reviewed = BooleanField(default=False)
class Meta:
indexes = ((("user_id", "review_segment"), True),)
class Previews(Model):
id = CharField(null=False, primary_key=True, max_length=30)
camera = CharField(index=True, max_length=20)
path = CharField(unique=True)
start_time = DateTimeField()
end_time = DateTimeField()
duration = FloatField()
# Used for temporary table in record/cleanup.py
class RecordingsToDelete(Model):
id = CharField(null=False, primary_key=False, max_length=30)
class Meta:
temporary = True
class User(Model):
    """UI user account with role-based camera access resolution."""

    username = CharField(null=False, primary_key=True, max_length=30)
    role = CharField(
        max_length=20,
        default="admin",
    )
    password_hash = CharField(null=False, max_length=120)
    password_changed_at = DateTimeField(null=True)
    notification_tokens = JSONField()

    @classmethod
    def get_allowed_cameras(
        cls, role: str, roles_dict: dict[str, list[str]], all_camera_names: set[str]
    ) -> list[str]:
        """Resolve which camera names the given role may access."""
        if role not in roles_dict:
            # Unknown roles grant no access at all.
            return []
        configured = roles_dict[role]
        if not configured:
            # An empty list is shorthand for access to every camera.
            return list(all_camera_names)
        # Keep only configured cameras that actually exist.
        return [camera for camera in configured if camera in all_camera_names]
class Trigger(Model):
    """A per-camera semantic trigger, keyed by (camera, name)."""

    camera = CharField(max_length=20)
    name = CharField()
    type = CharField(max_length=10)
    data = TextField()
    # Similarity threshold above which the trigger fires.
    threshold = FloatField()
    model = CharField(max_length=30)
    # Stored embedding the trigger compares against.
    embedding = BlobField()
    triggering_event_id = CharField(max_length=30)
    last_triggered = DateTimeField()

    class Meta:
        primary_key = CompositeKey("camera", "name")

File diff suppressed because it is too large Load Diff

View File

@ -1,405 +1,458 @@
"""Unit tests for recordings/media API endpoints."""
from datetime import datetime, timezone
import pytz
from fastapi import Request
from frigate.api.auth import get_allowed_cameras_for_filter, get_current_user
from frigate.models import Recordings
from frigate.test.http_api.base_http_test import AuthTestClient, BaseTestHttp
class TestHttpMedia(BaseTestHttp):
"""Test media API endpoints, particularly recordings with DST handling."""
def setUp(self):
    """Set up test fixtures."""
    super().setUp([Recordings])
    self.app = super().create_app()

    # Mock get_current_user for all tests: trust the remote-user/remote-role
    # headers and reject requests that lack either one with a 401.
    async def mock_get_current_user(request: Request):
        username = request.headers.get("remote-user")
        role = request.headers.get("remote-role")
        if not username or not role:
            from fastapi.responses import JSONResponse

            return JSONResponse(
                content={"message": "No authorization headers."}, status_code=401
            )
        return {"username": username, "role": role}

    self.app.dependency_overrides[get_current_user] = mock_get_current_user

    # Default camera filter allows only front_door; individual tests
    # override this when they need additional cameras.
    async def mock_get_allowed_cameras_for_filter(request: Request):
        return ["front_door"]

    self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
        mock_get_allowed_cameras_for_filter
    )
def tearDown(self):
    """Clean up after tests."""
    # Remove dependency overrides so they do not leak between tests.
    self.app.dependency_overrides.clear()
    super().tearDown()
def test_recordings_summary_across_dst_spring_forward(self):
"""
Test recordings summary across spring DST transition (spring forward).
In 2024, DST in America/New_York transitions on March 10, 2024 at 2:00 AM
Clocks spring forward from 2:00 AM to 3:00 AM (EST to EDT)
"""
tz = pytz.timezone("America/New_York")
# March 9, 2024 at 12:00 PM EST (before DST)
march_9_noon = tz.localize(datetime(2024, 3, 9, 12, 0, 0)).timestamp()
# March 10, 2024 at 12:00 PM EDT (after DST transition)
march_10_noon = tz.localize(datetime(2024, 3, 10, 12, 0, 0)).timestamp()
# March 11, 2024 at 12:00 PM EDT (after DST)
march_11_noon = tz.localize(datetime(2024, 3, 11, 12, 0, 0)).timestamp()
with AuthTestClient(self.app) as client:
# Insert recordings for each day
Recordings.insert(
id="recording_march_9",
path="/media/recordings/march_9.mp4",
camera="front_door",
start_time=march_9_noon,
end_time=march_9_noon + 3600, # 1 hour recording
duration=3600,
motion=100,
objects=5,
).execute()
Recordings.insert(
id="recording_march_10",
path="/media/recordings/march_10.mp4",
camera="front_door",
start_time=march_10_noon,
end_time=march_10_noon + 3600,
duration=3600,
motion=150,
objects=8,
).execute()
Recordings.insert(
id="recording_march_11",
path="/media/recordings/march_11.mp4",
camera="front_door",
start_time=march_11_noon,
end_time=march_11_noon + 3600,
duration=3600,
motion=200,
objects=10,
).execute()
# Test recordings summary with America/New_York timezone
response = client.get(
"/recordings/summary",
params={"timezone": "America/New_York", "cameras": "all"},
)
assert response.status_code == 200
summary = response.json()
# Verify we get exactly 3 days
assert len(summary) == 3, f"Expected 3 days, got {len(summary)}"
# Verify the correct dates are returned (API returns dict with True values)
assert "2024-03-09" in summary, f"Expected 2024-03-09 in {summary}"
assert "2024-03-10" in summary, f"Expected 2024-03-10 in {summary}"
assert "2024-03-11" in summary, f"Expected 2024-03-11 in {summary}"
assert summary["2024-03-09"] is True
assert summary["2024-03-10"] is True
assert summary["2024-03-11"] is True
def test_recordings_summary_across_dst_fall_back(self):
"""
Test recordings summary across fall DST transition (fall back).
In 2024, DST in America/New_York transitions on November 3, 2024 at 2:00 AM
Clocks fall back from 2:00 AM to 1:00 AM (EDT to EST)
"""
tz = pytz.timezone("America/New_York")
# November 2, 2024 at 12:00 PM EDT (before DST transition)
nov_2_noon = tz.localize(datetime(2024, 11, 2, 12, 0, 0)).timestamp()
# November 3, 2024 at 12:00 PM EST (after DST transition)
# Need to specify is_dst=False to get the time after fall back
nov_3_noon = tz.localize(
datetime(2024, 11, 3, 12, 0, 0), is_dst=False
).timestamp()
# November 4, 2024 at 12:00 PM EST (after DST)
nov_4_noon = tz.localize(datetime(2024, 11, 4, 12, 0, 0)).timestamp()
with AuthTestClient(self.app) as client:
# Insert recordings for each day
Recordings.insert(
id="recording_nov_2",
path="/media/recordings/nov_2.mp4",
camera="front_door",
start_time=nov_2_noon,
end_time=nov_2_noon + 3600,
duration=3600,
motion=100,
objects=5,
).execute()
Recordings.insert(
id="recording_nov_3",
path="/media/recordings/nov_3.mp4",
camera="front_door",
start_time=nov_3_noon,
end_time=nov_3_noon + 3600,
duration=3600,
motion=150,
objects=8,
).execute()
Recordings.insert(
id="recording_nov_4",
path="/media/recordings/nov_4.mp4",
camera="front_door",
start_time=nov_4_noon,
end_time=nov_4_noon + 3600,
duration=3600,
motion=200,
objects=10,
).execute()
# Test recordings summary with America/New_York timezone
response = client.get(
"/recordings/summary",
params={"timezone": "America/New_York", "cameras": "all"},
)
assert response.status_code == 200
summary = response.json()
# Verify we get exactly 3 days
assert len(summary) == 3, f"Expected 3 days, got {len(summary)}"
# Verify the correct dates are returned (API returns dict with True values)
assert "2024-11-02" in summary, f"Expected 2024-11-02 in {summary}"
assert "2024-11-03" in summary, f"Expected 2024-11-03 in {summary}"
assert "2024-11-04" in summary, f"Expected 2024-11-04 in {summary}"
assert summary["2024-11-02"] is True
assert summary["2024-11-03"] is True
assert summary["2024-11-04"] is True
def test_recordings_summary_multiple_cameras_across_dst(self):
"""
Test recordings summary with multiple cameras across DST boundary.
"""
tz = pytz.timezone("America/New_York")
# March 9, 2024 at 10:00 AM EST (before DST)
march_9_morning = tz.localize(datetime(2024, 3, 9, 10, 0, 0)).timestamp()
# March 10, 2024 at 3:00 PM EDT (after DST transition)
march_10_afternoon = tz.localize(datetime(2024, 3, 10, 15, 0, 0)).timestamp()
with AuthTestClient(self.app) as client:
# Override allowed cameras for this test to include both
async def mock_get_allowed_cameras_for_filter(_request: Request):
return ["front_door", "back_door"]
self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
mock_get_allowed_cameras_for_filter
)
# Insert recordings for front_door on March 9
Recordings.insert(
id="front_march_9",
path="/media/recordings/front_march_9.mp4",
camera="front_door",
start_time=march_9_morning,
end_time=march_9_morning + 3600,
duration=3600,
motion=100,
objects=5,
).execute()
# Insert recordings for back_door on March 10
Recordings.insert(
id="back_march_10",
path="/media/recordings/back_march_10.mp4",
camera="back_door",
start_time=march_10_afternoon,
end_time=march_10_afternoon + 3600,
duration=3600,
motion=150,
objects=8,
).execute()
# Test with all cameras
response = client.get(
"/recordings/summary",
params={"timezone": "America/New_York", "cameras": "all"},
)
assert response.status_code == 200
summary = response.json()
# Verify we get both days
assert len(summary) == 2, f"Expected 2 days, got {len(summary)}"
assert "2024-03-09" in summary
assert "2024-03-10" in summary
assert summary["2024-03-09"] is True
assert summary["2024-03-10"] is True
# Reset dependency override back to default single camera for other tests
async def reset_allowed_cameras(_request: Request):
return ["front_door"]
self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
reset_allowed_cameras
)
def test_recordings_summary_at_dst_transition_time(self):
"""
Test recordings that span the exact DST transition time.
"""
tz = pytz.timezone("America/New_York")
# March 10, 2024 at 1:00 AM EST (1 hour before DST transition)
# At 2:00 AM, clocks jump to 3:00 AM
before_transition = tz.localize(datetime(2024, 3, 10, 1, 0, 0)).timestamp()
# Recording that spans the transition (1:00 AM to 3:30 AM EDT)
# This is 1.5 hours of actual time but spans the "missing" hour
after_transition = tz.localize(datetime(2024, 3, 10, 3, 30, 0)).timestamp()
with AuthTestClient(self.app) as client:
Recordings.insert(
id="recording_during_transition",
path="/media/recordings/transition.mp4",
camera="front_door",
start_time=before_transition,
end_time=after_transition,
duration=after_transition - before_transition,
motion=100,
objects=5,
).execute()
response = client.get(
"/recordings/summary",
params={"timezone": "America/New_York", "cameras": "all"},
)
assert response.status_code == 200
summary = response.json()
# The recording should appear on March 10
assert len(summary) == 1
assert "2024-03-10" in summary
assert summary["2024-03-10"] is True
def test_recordings_summary_utc_timezone(self):
    """
    Test recordings summary with UTC timezone (no DST).
    """
    # Use UTC timestamps directly
    march_9_utc = datetime(2024, 3, 9, 17, 0, 0, tzinfo=timezone.utc).timestamp()
    march_10_utc = datetime(2024, 3, 10, 17, 0, 0, tzinfo=timezone.utc).timestamp()
    with AuthTestClient(self.app) as client:
        # One-hour recording on March 9 at 17:00 UTC.
        Recordings.insert(
            id="recording_march_9_utc",
            path="/media/recordings/march_9_utc.mp4",
            camera="front_door",
            start_time=march_9_utc,
            end_time=march_9_utc + 3600,
            duration=3600,
            motion=100,
            objects=5,
        ).execute()
        # One-hour recording on March 10 at 17:00 UTC.
        Recordings.insert(
            id="recording_march_10_utc",
            path="/media/recordings/march_10_utc.mp4",
            camera="front_door",
            start_time=march_10_utc,
            end_time=march_10_utc + 3600,
            duration=3600,
            motion=150,
            objects=8,
        ).execute()
        # Test with UTC timezone
        response = client.get(
            "/recordings/summary", params={"timezone": "utc", "cameras": "all"}
        )
        assert response.status_code == 200
        summary = response.json()
        # Verify we get both days
        assert len(summary) == 2
        assert "2024-03-09" in summary
        assert "2024-03-10" in summary
        assert summary["2024-03-09"] is True
        assert summary["2024-03-10"] is True
def test_recordings_summary_no_recordings(self):
    """
    Test recordings summary when no recordings exist.
    """
    with AuthTestClient(self.app) as client:
        response = client.get(
            "/recordings/summary",
            params={"timezone": "America/New_York", "cameras": "all"},
        )
        # An empty database yields an empty summary, not an error.
        assert response.status_code == 200
        body = response.json()
        assert len(body) == 0
def test_recordings_summary_single_camera_filter(self):
    """
    Test recordings summary filtered to a single camera.
    """
    tz = pytz.timezone("America/New_York")
    march_10_noon = tz.localize(datetime(2024, 3, 10, 12, 0, 0)).timestamp()
    with AuthTestClient(self.app) as client:
        # Insert recordings for both cameras
        Recordings.insert(
            id="front_recording",
            path="/media/recordings/front.mp4",
            camera="front_door",
            start_time=march_10_noon,
            end_time=march_10_noon + 3600,
            duration=3600,
            motion=100,
            objects=5,
        ).execute()
        Recordings.insert(
            id="back_recording",
            path="/media/recordings/back.mp4",
            camera="back_door",
            start_time=march_10_noon,
            end_time=march_10_noon + 3600,
            duration=3600,
            motion=150,
            objects=8,
        ).execute()
        # Test with only front_door camera
        response = client.get(
            "/recordings/summary",
            params={"timezone": "America/New_York", "cameras": "front_door"},
        )
        assert response.status_code == 200
        summary = response.json()
        # Only the filtered camera's day is present; back_door is excluded.
        assert len(summary) == 1
        assert "2024-03-10" in summary
        assert summary["2024-03-10"] is True
"""Unit tests for recordings/media API endpoints."""
from datetime import datetime, timezone
import pytz
from fastapi import Request
from frigate.api.auth import get_allowed_cameras_for_filter, get_current_user
from frigate.models import Recordings
from frigate.test.http_api.base_http_test import AuthTestClient, BaseTestHttp
class TestHttpMedia(BaseTestHttp):
    """Test media API endpoints, particularly recordings with DST handling."""

    def setUp(self):
        """Set up test fixtures."""
        super().setUp([Recordings])
        self.app = super().create_app()

        # Mock get_current_user for all tests
        async def mock_get_current_user(request: Request):
            username = request.headers.get("remote-user")
            role = request.headers.get("remote-role")
            if not username or not role:
                from fastapi.responses import JSONResponse

                return JSONResponse(
                    content={"message": "No authorization headers."}, status_code=401
                )
            return {"username": username, "role": role}

        self.app.dependency_overrides[get_current_user] = mock_get_current_user

        # Default camera ACL for tests: only front_door is visible. Individual
        # tests override this when they need multiple cameras.
        async def mock_get_allowed_cameras_for_filter(request: Request):
            return ["front_door"]

        self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
            mock_get_allowed_cameras_for_filter
        )

    def tearDown(self):
        """Clean up after tests."""
        self.app.dependency_overrides.clear()
        super().tearDown()

    def test_camera_recordings_variant_filter(self):
        """Recordings list defaults to the 'main' variant; variant=all returns every variant."""
        start_ts = datetime(2024, 3, 9, 12, 0, 0, tzinfo=timezone.utc).timestamp()
        end_ts = start_ts + 3600
        with AuthTestClient(self.app) as client:
            # High-resolution main stream segment.
            Recordings.insert(
                id="recording_main",
                path="/media/recordings/front/main.mp4",
                camera="front_door",
                variant="main",
                start_time=start_ts,
                end_time=end_ts,
                duration=3600,
                motion=100,
                objects=5,
                codec_name="h264",
                width=1920,
                height=1080,
                bitrate=4_000_000,
            ).execute()
            # Low-resolution sub stream segment covering the same window.
            Recordings.insert(
                id="recording_sub",
                path="/media/recordings/front/sub.mp4",
                camera="front_door",
                variant="sub",
                start_time=start_ts,
                end_time=end_ts,
                duration=3600,
                motion=100,
                objects=5,
                codec_name="h264",
                width=640,
                height=360,
                bitrate=512_000,
            ).execute()
            # Without a variant parameter, only the main variant is returned.
            default_response = client.get(
                "/front_door/recordings",
                params={"after": start_ts, "before": end_ts},
            )
            assert default_response.status_code == 200
            default_recordings = default_response.json()
            assert len(default_recordings) == 1
            assert default_recordings[0]["variant"] == "main"
            # variant=all returns both variants.
            all_response = client.get(
                "/front_door/recordings",
                params={"after": start_ts, "before": end_ts, "variant": "all"},
            )
            assert all_response.status_code == 200
            variants = {recording["variant"] for recording in all_response.json()}
            assert variants == {"main", "sub"}

    def test_recordings_summary_across_dst_spring_forward(self):
        """
        Test recordings summary across spring DST transition (spring forward).
        In 2024, DST in America/New_York transitions on March 10, 2024 at 2:00 AM
        Clocks spring forward from 2:00 AM to 3:00 AM (EST to EDT)
        """
        tz = pytz.timezone("America/New_York")
        # March 9, 2024 at 12:00 PM EST (before DST)
        march_9_noon = tz.localize(datetime(2024, 3, 9, 12, 0, 0)).timestamp()
        # March 10, 2024 at 12:00 PM EDT (after DST transition)
        march_10_noon = tz.localize(datetime(2024, 3, 10, 12, 0, 0)).timestamp()
        # March 11, 2024 at 12:00 PM EDT (after DST)
        march_11_noon = tz.localize(datetime(2024, 3, 11, 12, 0, 0)).timestamp()
        with AuthTestClient(self.app) as client:
            # Insert recordings for each day
            Recordings.insert(
                id="recording_march_9",
                path="/media/recordings/march_9.mp4",
                camera="front_door",
                start_time=march_9_noon,
                end_time=march_9_noon + 3600,  # 1 hour recording
                duration=3600,
                motion=100,
                objects=5,
            ).execute()
            Recordings.insert(
                id="recording_march_10",
                path="/media/recordings/march_10.mp4",
                camera="front_door",
                start_time=march_10_noon,
                end_time=march_10_noon + 3600,
                duration=3600,
                motion=150,
                objects=8,
            ).execute()
            Recordings.insert(
                id="recording_march_11",
                path="/media/recordings/march_11.mp4",
                camera="front_door",
                start_time=march_11_noon,
                end_time=march_11_noon + 3600,
                duration=3600,
                motion=200,
                objects=10,
            ).execute()
            # Test recordings summary with America/New_York timezone
            response = client.get(
                "/recordings/summary",
                params={"timezone": "America/New_York", "cameras": "all"},
            )
            assert response.status_code == 200
            summary = response.json()
            # Verify we get exactly 3 days
            assert len(summary) == 3, f"Expected 3 days, got {len(summary)}"
            # Verify the correct dates are returned (API returns dict with True values)
            assert "2024-03-09" in summary, f"Expected 2024-03-09 in {summary}"
            assert "2024-03-10" in summary, f"Expected 2024-03-10 in {summary}"
            assert "2024-03-11" in summary, f"Expected 2024-03-11 in {summary}"
            assert summary["2024-03-09"] is True
            assert summary["2024-03-10"] is True
            assert summary["2024-03-11"] is True

    def test_recordings_summary_across_dst_fall_back(self):
        """
        Test recordings summary across fall DST transition (fall back).
        In 2024, DST in America/New_York transitions on November 3, 2024 at 2:00 AM
        Clocks fall back from 2:00 AM to 1:00 AM (EDT to EST)
        """
        tz = pytz.timezone("America/New_York")
        # November 2, 2024 at 12:00 PM EDT (before DST transition)
        nov_2_noon = tz.localize(datetime(2024, 11, 2, 12, 0, 0)).timestamp()
        # November 3, 2024 at 12:00 PM EST (after DST transition)
        # Need to specify is_dst=False to get the time after fall back
        nov_3_noon = tz.localize(
            datetime(2024, 11, 3, 12, 0, 0), is_dst=False
        ).timestamp()
        # November 4, 2024 at 12:00 PM EST (after DST)
        nov_4_noon = tz.localize(datetime(2024, 11, 4, 12, 0, 0)).timestamp()
        with AuthTestClient(self.app) as client:
            # Insert recordings for each day
            Recordings.insert(
                id="recording_nov_2",
                path="/media/recordings/nov_2.mp4",
                camera="front_door",
                start_time=nov_2_noon,
                end_time=nov_2_noon + 3600,
                duration=3600,
                motion=100,
                objects=5,
            ).execute()
            Recordings.insert(
                id="recording_nov_3",
                path="/media/recordings/nov_3.mp4",
                camera="front_door",
                start_time=nov_3_noon,
                end_time=nov_3_noon + 3600,
                duration=3600,
                motion=150,
                objects=8,
            ).execute()
            Recordings.insert(
                id="recording_nov_4",
                path="/media/recordings/nov_4.mp4",
                camera="front_door",
                start_time=nov_4_noon,
                end_time=nov_4_noon + 3600,
                duration=3600,
                motion=200,
                objects=10,
            ).execute()
            # Test recordings summary with America/New_York timezone
            response = client.get(
                "/recordings/summary",
                params={"timezone": "America/New_York", "cameras": "all"},
            )
            assert response.status_code == 200
            summary = response.json()
            # Verify we get exactly 3 days
            assert len(summary) == 3, f"Expected 3 days, got {len(summary)}"
            # Verify the correct dates are returned (API returns dict with True values)
            assert "2024-11-02" in summary, f"Expected 2024-11-02 in {summary}"
            assert "2024-11-03" in summary, f"Expected 2024-11-03 in {summary}"
            assert "2024-11-04" in summary, f"Expected 2024-11-04 in {summary}"
            assert summary["2024-11-02"] is True
            assert summary["2024-11-03"] is True
            assert summary["2024-11-04"] is True

    def test_recordings_summary_multiple_cameras_across_dst(self):
        """
        Test recordings summary with multiple cameras across DST boundary.
        """
        tz = pytz.timezone("America/New_York")
        # March 9, 2024 at 10:00 AM EST (before DST)
        march_9_morning = tz.localize(datetime(2024, 3, 9, 10, 0, 0)).timestamp()
        # March 10, 2024 at 3:00 PM EDT (after DST transition)
        march_10_afternoon = tz.localize(datetime(2024, 3, 10, 15, 0, 0)).timestamp()
        with AuthTestClient(self.app) as client:
            # Override allowed cameras for this test to include both
            async def mock_get_allowed_cameras_for_filter(_request: Request):
                return ["front_door", "back_door"]

            self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
                mock_get_allowed_cameras_for_filter
            )
            # Insert recordings for front_door on March 9
            Recordings.insert(
                id="front_march_9",
                path="/media/recordings/front_march_9.mp4",
                camera="front_door",
                start_time=march_9_morning,
                end_time=march_9_morning + 3600,
                duration=3600,
                motion=100,
                objects=5,
            ).execute()
            # Insert recordings for back_door on March 10
            Recordings.insert(
                id="back_march_10",
                path="/media/recordings/back_march_10.mp4",
                camera="back_door",
                start_time=march_10_afternoon,
                end_time=march_10_afternoon + 3600,
                duration=3600,
                motion=150,
                objects=8,
            ).execute()
            # Test with all cameras
            response = client.get(
                "/recordings/summary",
                params={"timezone": "America/New_York", "cameras": "all"},
            )
            assert response.status_code == 200
            summary = response.json()
            # Verify we get both days
            assert len(summary) == 2, f"Expected 2 days, got {len(summary)}"
            assert "2024-03-09" in summary
            assert "2024-03-10" in summary
            assert summary["2024-03-09"] is True
            assert summary["2024-03-10"] is True

            # Reset dependency override back to default single camera for other tests
            async def reset_allowed_cameras(_request: Request):
                return ["front_door"]

            self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
                reset_allowed_cameras
            )

    def test_recordings_summary_at_dst_transition_time(self):
        """
        Test recordings that span the exact DST transition time.
        """
        tz = pytz.timezone("America/New_York")
        # March 10, 2024 at 1:00 AM EST (1 hour before DST transition)
        # At 2:00 AM, clocks jump to 3:00 AM
        before_transition = tz.localize(datetime(2024, 3, 10, 1, 0, 0)).timestamp()
        # Recording that spans the transition (1:00 AM to 3:30 AM EDT)
        # This is 1.5 hours of actual time but spans the "missing" hour
        after_transition = tz.localize(datetime(2024, 3, 10, 3, 30, 0)).timestamp()
        with AuthTestClient(self.app) as client:
            Recordings.insert(
                id="recording_during_transition",
                path="/media/recordings/transition.mp4",
                camera="front_door",
                start_time=before_transition,
                end_time=after_transition,
                duration=after_transition - before_transition,
                motion=100,
                objects=5,
            ).execute()
            response = client.get(
                "/recordings/summary",
                params={"timezone": "America/New_York", "cameras": "all"},
            )
            assert response.status_code == 200
            summary = response.json()
            # The recording should appear on March 10
            assert len(summary) == 1
            assert "2024-03-10" in summary
            assert summary["2024-03-10"] is True

    def test_recordings_summary_utc_timezone(self):
        """
        Test recordings summary with UTC timezone (no DST).
        """
        # Use UTC timestamps directly
        march_9_utc = datetime(2024, 3, 9, 17, 0, 0, tzinfo=timezone.utc).timestamp()
        march_10_utc = datetime(2024, 3, 10, 17, 0, 0, tzinfo=timezone.utc).timestamp()
        with AuthTestClient(self.app) as client:
            Recordings.insert(
                id="recording_march_9_utc",
                path="/media/recordings/march_9_utc.mp4",
                camera="front_door",
                start_time=march_9_utc,
                end_time=march_9_utc + 3600,
                duration=3600,
                motion=100,
                objects=5,
            ).execute()
            Recordings.insert(
                id="recording_march_10_utc",
                path="/media/recordings/march_10_utc.mp4",
                camera="front_door",
                start_time=march_10_utc,
                end_time=march_10_utc + 3600,
                duration=3600,
                motion=150,
                objects=8,
            ).execute()
            # Test with UTC timezone
            response = client.get(
                "/recordings/summary", params={"timezone": "utc", "cameras": "all"}
            )
            assert response.status_code == 200
            summary = response.json()
            # Verify we get both days
            assert len(summary) == 2
            assert "2024-03-09" in summary
            assert "2024-03-10" in summary
            assert summary["2024-03-09"] is True
            assert summary["2024-03-10"] is True

    def test_recordings_summary_no_recordings(self):
        """
        Test recordings summary when no recordings exist.
        """
        with AuthTestClient(self.app) as client:
            response = client.get(
                "/recordings/summary",
                params={"timezone": "America/New_York", "cameras": "all"},
            )
            assert response.status_code == 200
            summary = response.json()
            assert len(summary) == 0

    def test_recordings_summary_single_camera_filter(self):
        """
        Test recordings summary filtered to a single camera.
        """
        tz = pytz.timezone("America/New_York")
        march_10_noon = tz.localize(datetime(2024, 3, 10, 12, 0, 0)).timestamp()
        with AuthTestClient(self.app) as client:
            # Insert recordings for both cameras
            Recordings.insert(
                id="front_recording",
                path="/media/recordings/front.mp4",
                camera="front_door",
                start_time=march_10_noon,
                end_time=march_10_noon + 3600,
                duration=3600,
                motion=100,
                objects=5,
            ).execute()
            Recordings.insert(
                id="back_recording",
                path="/media/recordings/back.mp4",
                camera="back_door",
                start_time=march_10_noon,
                end_time=march_10_noon + 3600,
                duration=3600,
                motion=150,
                objects=8,
            ).execute()
            # Test with only front_door camera
            response = client.get(
                "/recordings/summary",
                params={"timezone": "America/New_York", "cameras": "front_door"},
            )
            assert response.status_code == 200
            summary = response.json()
            # back_door's recordings are filtered out entirely.
            assert len(summary) == 1
            assert "2024-03-10" in summary
            assert summary["2024-03-10"] is True

View File

@ -1,66 +1,78 @@
import sys
import unittest
from unittest.mock import MagicMock, patch
# Mock complex imports before importing maintainer
sys.modules["frigate.comms.inter_process"] = MagicMock()
sys.modules["frigate.comms.detections_updater"] = MagicMock()
sys.modules["frigate.comms.recordings_updater"] = MagicMock()
sys.modules["frigate.config.camera.updater"] = MagicMock()
# Now import the class under test
from frigate.config import FrigateConfig # noqa: E402
from frigate.record.maintainer import RecordingMaintainer # noqa: E402
class TestMaintainer(unittest.IsolatedAsyncioTestCase):
    """Tests for RecordingMaintainer's cache-file handling."""

    async def test_move_files_survives_bad_filename(self):
        # Minimal config: no cameras configured, so move_files only scans cache.
        config = MagicMock(spec=FrigateConfig)
        config.cameras = {}
        stop_event = MagicMock()
        maintainer = RecordingMaintainer(config, stop_event)
        # We need to mock end_time_cache to avoid key errors if logic proceeds
        maintainer.end_time_cache = {}
        # Mock filesystem
        # One bad file, one good file
        files = ["bad_filename.mp4", "camera@20210101000000+0000.mp4"]
        with patch("os.listdir", return_value=files):
            with patch("os.path.isfile", return_value=True):
                with patch(
                    "frigate.record.maintainer.psutil.process_iter", return_value=[]
                ):
                    with patch("frigate.record.maintainer.logger.warning") as warn:
                        # Mock validate_and_move_segment to avoid further logic
                        maintainer.validate_and_move_segment = MagicMock()
                        try:
                            await maintainer.move_files()
                        except ValueError as e:
                            # The specific crash this test guards against.
                            if "not enough values to unpack" in str(e):
                                self.fail("move_files() crashed on bad filename!")
                            raise e
                        except Exception:
                            # Ignore other errors (like DB connection) as we only care about the unpack crash
                            pass
                        # The bad filename is encountered in multiple loops, but should only warn once.
                        matching = [
                            c
                            for c in warn.call_args_list
                            if c.args
                            and isinstance(c.args[0], str)
                            and "Skipping unexpected files in cache" in c.args[0]
                        ]
                        self.assertEqual(
                            1,
                            len(matching),
                            f"Expected a single warning for unexpected files, got {len(matching)}",
                        )
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
import sys
import unittest
from unittest.mock import MagicMock, patch
# Mock complex imports before importing maintainer
sys.modules["frigate.comms.inter_process"] = MagicMock()
sys.modules["frigate.comms.detections_updater"] = MagicMock()
sys.modules["frigate.comms.recordings_updater"] = MagicMock()
sys.modules["frigate.config.camera.updater"] = MagicMock()
# Now import the class under test
from frigate.config import FrigateConfig # noqa: E402
from frigate.record.maintainer import RecordingMaintainer # noqa: E402
class TestMaintainer(unittest.IsolatedAsyncioTestCase):
    """Tests for RecordingMaintainer's cache-file parsing and handling."""

    async def test_parse_cache_segment_supports_variant(self):
        # Minimal config: no cameras configured.
        config = MagicMock(spec=FrigateConfig)
        config.cameras = {}
        stop_event = MagicMock()
        maintainer = RecordingMaintainer(config, stop_event)
        # Filenames of the form "<camera>@<variant>@<timestamp>.mp4" carry a variant.
        parsed = maintainer._parse_cache_segment("front@sub@20210101000000+0000.mp4")
        self.assertIsNotNone(parsed)
        self.assertEqual("front", parsed["camera"])
        self.assertEqual("sub", parsed["variant"])

    async def test_move_files_survives_bad_filename(self):
        config = MagicMock(spec=FrigateConfig)
        config.cameras = {}
        stop_event = MagicMock()
        maintainer = RecordingMaintainer(config, stop_event)
        # We need to mock end_time_cache to avoid key errors if logic proceeds
        maintainer.end_time_cache = {}
        # Mock filesystem
        # One bad file, one good file
        files = ["bad_filename.mp4", "camera@20210101000000+0000.mp4"]
        with patch("os.listdir", return_value=files):
            with patch("os.path.isfile", return_value=True):
                with patch(
                    "frigate.record.maintainer.psutil.process_iter", return_value=[]
                ):
                    with patch("frigate.record.maintainer.logger.warning") as warn:
                        # Mock validate_and_move_segment to avoid further logic
                        maintainer.validate_and_move_segment = MagicMock()
                        try:
                            await maintainer.move_files()
                        except ValueError as e:
                            # The specific crash this test guards against.
                            if "not enough values to unpack" in str(e):
                                self.fail("move_files() crashed on bad filename!")
                            raise e
                        except Exception:
                            # Ignore other errors (like DB connection) as we only care about the unpack crash
                            pass
                        # The bad filename is encountered in multiple loops, but should only warn once.
                        matching = [
                            c
                            for c in warn.call_args_list
                            if c.args
                            and isinstance(c.args[0], str)
                            and "Skipping unexpected files in cache" in c.args[0]
                        ]
                        self.assertEqual(
                            1,
                            len(matching),
                            f"Expected a single warning for unexpected files, got {len(matching)}",
                        )
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,38 @@
"""Peewee migrations -- 036_add_recording_variants.py."""
import peewee as pw
from frigate.models import Recordings
SQL = pw.SQL
def migrate(migrator, database, fake=False, **kwargs):
    """Add per-variant recording metadata columns and a covering index.

    Idempotent: consults PRAGMA table_info so columns that already exist
    (e.g. from a partially-applied migration) are not re-added, and the
    index is created with IF NOT EXISTS.
    """
    # Column names currently present on the recordings table.
    current_columns = {
        row[1]
        for row in database.execute_sql('PRAGMA table_info("recordings")').fetchall()
    }

    # Every column this migration introduces, in the order they should be added.
    wanted = [
        ("variant", pw.CharField(default="main", max_length=20)),
        ("codec_name", pw.CharField(null=True, max_length=32)),
        ("width", pw.IntegerField(null=True)),
        ("height", pw.IntegerField(null=True)),
        ("bitrate", pw.IntegerField(null=True)),
    ]
    missing = {name: field for name, field in wanted if name not in current_columns}
    if missing:
        migrator.add_fields(Recordings, **missing)

    migrator.sql(
        'CREATE INDEX IF NOT EXISTS "recordings_camera_variant_start_time_end_time" ON "recordings" ("camera", "variant", "start_time" DESC, "end_time" DESC)'
    )
def rollback(migrator, database, fake=False, **kwargs):
    """Drop every recording-variant column added by migrate()."""
    added_columns = ["variant", "codec_name", "width", "height", "bitrate"]
    migrator.remove_fields(Recordings, added_columns)

83
scripts/README.md Normal file
View File

@ -0,0 +1,83 @@
# Scripts
## Transcode benchmarks
Proof-of-concept benchmarks for **real-time VOD transcoding**: transcode a video file with FFmpeg (optionally with hardware acceleration) and measure time and throughput. Used to de-risk the real-time VOD transcoding feature (segment-level transcode + cache): we need ~10s segments to transcode in well under 10s (ideally &lt;2s) so timeline scrubbing stays responsive.
### Python (recommended)
From the repo root:
```bash
# Full file, CPU
python scripts/transcode_benchmark.py path/to/recording.mp4
# First 10 seconds only (simulates one HLS segment)
python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10
# 10s segment with NVIDIA HW accel
python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10 --hwaccel nvidia
# Simulate scrubbing: start 60s in, transcode 10s (VAAPI)
python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10 --seek 60 --hwaccel vaapi
# Intel QSV H.265 (preset-intel-qsv-h265)
python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10 --hwaccel qsv-h265
# Custom FFmpeg binary (e.g. Frigate container)
python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10 --ffmpeg /usr/lib/ffmpeg/7/bin/ffmpeg
```
Options:
- `--duration SEC` Transcode only this many seconds (default: full file). Use 10 to simulate one HLS segment.
- `--seek SEC` Start at this position (fast seek before `-i`). Simulates scrubbing into the file.
- `--hwaccel cpu|nvidia|vaapi|qsv-h265` Matches Frigate presets: libx264, h264_nvenc, h264_vaapi, preset-intel-qsv-h265 (hevc_qsv).
- `--vaapi-device` VAAPI device (default: `/dev/dri/renderD128`).
- `--qsv-device` Intel QSV device: on Linux defaults to `/dev/dri/renderD129` if present (else `renderD128`, else `0`). With two GPUs, the second node is often the Intel iGPU. Override if you get “No VA display found” (e.g. try the other node).
- `--output PATH` Write output here (default: temp file, deleted).
- `--keep-output` Keep the temp output file.
Output: real time, speed (× realtime), output size. The script suggests whether the speed is good for ~10s segment transcode.
### Shell
Quick one-liners without Python:
```bash
chmod +x scripts/transcode_benchmark.sh
./scripts/transcode_benchmark.sh path/to/recording.mp4
./scripts/transcode_benchmark.sh path/to/recording.mp4 10
./scripts/transcode_benchmark.sh path/to/recording.mp4 10 nvidia
```
Arguments: `INPUT [DURATION_SEC] [cpu|nvidia|vaapi|qsv-h265]`. Optional env: `FFMPEG`, `FFPROBE`, `VAAPI_DEVICE`, `QSV_DEVICE`.
### Interpreting results
- **Speed ≥ 5× realtime** — A 10s segment transcodes in ~2s or less; good for on-demand segment transcode with cache.
- **Speed 1–5×** — Marginal; segment may take several seconds; transcode-ahead or caching helps.
- **Speed &lt; 1×** — Too slow for real-time; consider stronger HW or lower resolution/bitrate.
Run with a real Frigate recording (or any H.264/HEVC MP4) and try both `--duration 10` and full file to see segment vs full transcode cost.
### Troubleshooting `qsv-h265` (“No VA display found”)
Intel QSV (`qsv-h265`) only works on **Intel GPUs** with a working **Intel VA-API** stack. If both `/dev/dri/renderD128` and `renderD129` fail with “No VA display found” or “Device creation failed: -22”, then:
1. **Check which GPUs you have** With two cards, both may be non-Intel (e.g. NVIDIA + AMD). QSV is Intel-only. Use `lspci -k | grep -A3 VGA` to see adapters and drivers.
2. **Check VA-API** — Run `vainfo` or `vainfo --display drm --device /dev/dri/renderD128` (then `renderD129`). If it errors or shows no Intel driver, QSV won't work. On Intel you typically need `intel-media-driver` (newer) or `intel-vaapi-driver` (i965, older).
3. **Permissions** Ensure your user is in the `render` (and often `video`) group: `groups`; add with `sudo usermod -aG render $USER` and log in again.
4. **Use another HW accel** If you have an **AMD** GPU, use `vaapi` (H.264). If you have **NVIDIA**, use `nvidia`. Otherwise use `cpu`.
5. **Frigate Docker uses QSV but host benchmark fails** The container has the Intel VA/QSV stack and device access; the host may not. Run the benchmark **inside the same environment** (e.g. inside the Frigate container):
```bash
# Copy script and a sample recording into the container (adjust container name)
docker cp scripts/transcode_benchmark.sh frigate:/tmp/
docker cp /path/to/59.24.mp4 frigate:/tmp/
docker exec -it frigate bash -c 'chmod +x /tmp/transcode_benchmark.sh && /tmp/transcode_benchmark.sh /tmp/59.24.mp4 10 qsv-h265'
```
The script auto-detects FFmpeg under `/usr/lib/ffmpeg/*/bin` when `ffmpeg` isn't on PATH (Frigate container). If it doesn't, set `FFMPEG` and `FFPROBE` explicitly, e.g. `docker exec ... env FFMPEG=/usr/lib/ffmpeg/7.0/bin/ffmpeg FFPROBE=/usr/lib/ffmpeg/7.0/bin/ffprobe /tmp/transcode_benchmark.sh ...`.

View File

@ -0,0 +1,289 @@
#!/usr/bin/env python3
"""
Proof-of-concept benchmark: transcode a video file with FFmpeg (optionally with
hardware acceleration) and report timing and throughput.
Used to de-risk real-time VOD transcoding: we need ~10s segments to transcode
in well under 10s (ideally <2s) so scrubbing stays responsive.
Usage:
python scripts/transcode_benchmark.py path/to/video.mp4
python scripts/transcode_benchmark.py path/to/video.mp4 --duration 10 --hwaccel nvidia
python scripts/transcode_benchmark.py path/to/video.mp4 --duration 10 --seek 60 --hwaccel vaapi
Output: real time, speed (x realtime), output size. Aligns with Frigate export/timelapse
HW presets (preset-nvidia, preset-vaapi, libx264 default).
"""
import argparse
import os
import subprocess
import sys
import tempfile
import time
from pathlib import Path
from typing import Optional
def get_ffmpeg_command(
    ffmpeg_path: str,
    input_path: str,
    output_path: str,
    *,
    duration_sec: Optional[float] = None,
    seek_sec: float = 0,
    hwaccel: str = "cpu",
    gpu_device: str = "/dev/dri/renderD128",
    qsv_device: str = "0",
) -> list[str]:
    """Build argv for an FFmpeg transcode (H.264 or HEVC, audio stripped).

    Mirrors Frigate's timelapse-style encode presets:
    cpu -> libx264, nvidia -> h264_nvenc, vaapi -> h264_vaapi,
    qsv-h265 -> hevc_qsv (preset-intel-qsv-h265).
    """
    # Decode-side (pre -i) flags per acceleration mode.
    decode_flags = {
        "nvidia": [
            "-hwaccel",
            "cuda",
            "-hwaccel_output_format",
            "cuda",
            "-extra_hw_frames",
            "8",
        ],
        "vaapi": [
            "-hwaccel",
            "vaapi",
            "-hwaccel_device",
            gpu_device,
            "-hwaccel_output_format",
            "vaapi",
        ],
        # preset-intel-qsv-h265: load_plugin for HEVC decode, QSV device for decode+encode
        "qsv-h265": [
            "-load_plugin",
            "hevc_hw",
            "-hwaccel",
            "qsv",
            "-qsv_device",
            qsv_device,
            "-hwaccel_output_format",
            "qsv",
        ],
    }
    # Encode-side codec selection per acceleration mode.
    encode_flags = {
        "nvidia": ["-c:v", "h264_nvenc"],
        # VAAPI encode needs frames in vaapi format; decoder outputs vaapi when hwaccel_output_format vaapi
        "vaapi": ["-c:v", "h264_vaapi"],
        # Use CQP explicitly; profile/level can be unsupported on some QSV runtimes
        "qsv-h265": ["-c:v", "hevc_qsv", "-global_quality", "23"],
    }

    argv = [ffmpeg_path, "-hide_banner", "-y", "-loglevel", "warning", "-stats"]
    # Optional seek: -ss before -i for fast seek (keyframe then decode)
    if seek_sec > 0:
        argv += ["-ss", str(seek_sec)]
    argv += decode_flags.get(hwaccel, [])
    argv += ["-i", input_path]
    if duration_sec is not None and duration_sec > 0:
        argv += ["-t", str(duration_sec)]
    argv.append("-an")
    argv += encode_flags.get(
        hwaccel,
        ["-c:v", "libx264", "-preset:v", "ultrafast", "-tune:v", "zerolatency"],
    )
    argv += ["-f", "mp4", "-movflags", "+faststart", output_path]
    return argv
def get_video_duration_sec(ffprobe_path: str, input_path: str) -> Optional[float]:
    """Return the container duration of input_path in seconds, or None on failure.

    All failure modes return None so callers can fall back to "report real
    time only": missing or non-executable ffprobe, probe timeout, non-zero
    exit, unparseable output, and non-positive durations.

    Fixes vs. previous version: catch OSError (superset of FileNotFoundError,
    also covers PermissionError / NotADirectoryError when ffprobe_path is
    bad), and reject zero/negative/NaN durations which would otherwise
    produce a nonsensical speed calculation upstream.
    """
    try:
        out = subprocess.run(
            [
                ffprobe_path,
                "-v",
                "error",
                "-show_entries",
                "format=duration",
                "-of",
                "default=noprint_wrappers=1:nokey=1",
                input_path,
            ],
            capture_output=True,
            text=True,
            timeout=10,
        )
        if out.returncode == 0 and out.stdout.strip():
            duration = float(out.stdout.strip())
            # NaN fails this comparison too, so only sane durations pass.
            if duration > 0:
                return duration
    except (subprocess.TimeoutExpired, ValueError, OSError):
        # ValueError: float() on unexpected output; OSError: bad/unrunnable binary.
        pass
    return None
def main() -> int:
    """CLI entry point: parse args, run one FFmpeg transcode, report timing.

    Returns a process exit code: 0 on success, 1 on missing input, bad seek,
    FFmpeg failure, or timeout.
    """
    parser = argparse.ArgumentParser(
        description="Benchmark FFmpeg transcode (H.264) with optional HW accel."
    )
    parser.add_argument(
        "input",
        type=Path,
        help="Input video file (e.g. recording segment)",
    )
    parser.add_argument(
        "--duration",
        type=float,
        default=None,
        metavar="SEC",
        help="Transcode only this many seconds (default: full file). Simulates segment length.",
    )
    parser.add_argument(
        "--seek",
        type=float,
        default=0,
        metavar="SEC",
        help="Start at this position (before -i for fast seek). Simulates scrubbing into file.",
    )
    parser.add_argument(
        "--hwaccel",
        choices=("cpu", "nvidia", "vaapi", "qsv-h265"),
        default="cpu",
        help="HW accel: cpu (libx264), nvidia (h264_nvenc), vaapi (h264_vaapi), qsv-h265 (preset-intel-qsv-h265, hevc_qsv).",
    )
    parser.add_argument(
        "--vaapi-device",
        default="/dev/dri/renderD128",
        help="VAAPI device (default: /dev/dri/renderD128).",
    )
    # Default QSV device is chosen at import time: prefer renderD129 when it
    # exists (second node is often the Intel iGPU on dual-GPU hosts).
    parser.add_argument(
        "--qsv-device",
        default=(
            "/dev/dri/renderD129"
            if os.path.exists("/dev/dri/renderD129")
            else "/dev/dri/renderD128"
            if os.path.exists("/dev/dri/renderD128")
            else "0"
        ),
        help="Intel QSV device: path (e.g. /dev/dri/renderD129 or renderD128 on Linux) or 0 (Windows). With two GPUs, try renderD129 if renderD128 fails. Used for --hwaccel qsv-h265.",
    )
    parser.add_argument(
        "--ffmpeg",
        default="ffmpeg",
        metavar="PATH",
        help="FFmpeg binary (default: ffmpeg in PATH).",
    )
    parser.add_argument(
        "--ffprobe",
        default="ffprobe",
        metavar="PATH",
        help="FFprobe binary (default: ffprobe in PATH).",
    )
    parser.add_argument(
        "--output",
        type=Path,
        default=None,
        help="Output file (default: temp file, deleted after).",
    )
    parser.add_argument(
        "--keep-output",
        action="store_true",
        help="Keep output file when using default temp path.",
    )
    args = parser.parse_args()
    input_path = args.input.resolve()
    if not input_path.is_file():
        print(f"Error: input file not found: {input_path}", file=sys.stderr)
        return 1
    # Media duration actually transcoded; used for the speed (x realtime) figure.
    # When --duration is not given, probe the file and subtract the seek offset.
    effective_duration = args.duration
    if effective_duration is None:
        duration_from_probe = get_video_duration_sec(str(args.ffprobe), str(input_path))
        if duration_from_probe is not None:
            effective_duration = duration_from_probe - args.seek
            if effective_duration <= 0:
                print("Error: seek >= file duration", file=sys.stderr)
                return 1
        else:
            print("Warning: could not probe duration; reporting real time only.", file=sys.stderr)
    use_temp = args.output is None
    if use_temp:
        # mkstemp reserves a unique path; close the fd so FFmpeg can write it.
        fd, out_path = tempfile.mkstemp(suffix=".mp4")
        os.close(fd)
        output_path = Path(out_path)
    else:
        output_path = args.output.resolve()
    cmd = get_ffmpeg_command(
        args.ffmpeg,
        str(input_path),
        str(output_path),
        duration_sec=args.duration,
        seek_sec=args.seek,
        hwaccel=args.hwaccel,
        gpu_device=args.vaapi_device,
        qsv_device=args.qsv_device,
    )
    print(f"Input: {input_path}")
    print(f"Output: {output_path}")
    print(f"HW: {args.hwaccel}")
    if args.duration is not None:
        print(f"Limit: {args.duration}s")
    if args.seek > 0:
        print(f"Seek: {args.seek}s")
    print(f"Run: {' '.join(cmd)}")
    print()
    # Wall-clock timing of the FFmpeg run itself.
    start = time.perf_counter()
    try:
        subprocess.run(cmd, check=True, timeout=3600)
    except subprocess.CalledProcessError as e:
        print(f"FFmpeg failed: {e}", file=sys.stderr)
        if use_temp and output_path.exists():
            output_path.unlink()
        return 1
    except subprocess.TimeoutExpired:
        print("FFmpeg timed out.", file=sys.stderr)
        if use_temp and output_path.exists():
            output_path.unlink()
        return 1
    elapsed = time.perf_counter() - start
    size_bytes = output_path.stat().st_size if output_path.exists() else 0
    print("--- Results ---")
    print(f"Real time: {elapsed:.2f}s")
    if effective_duration is not None and effective_duration > 0:
        speed = effective_duration / elapsed
        print(f"Video duration: {effective_duration:.2f}s")
        print(f"Speed: {speed:.2f}x realtime")
        # Only interpret the result for segment-sized runs (~10s).
        if args.duration and args.duration <= 15:
            if speed >= 5:
                print("(Good for ~10s segment transcode: well under 2s.)")
            elif speed >= 1:
                print("(Marginal: segment may take several seconds.)")
            else:
                print("(Slow: segment transcode would exceed segment length.)")
    print(f"Output size: {size_bytes / (1024*1024):.2f} MiB")
    if use_temp:
        if args.keep_output:
            print(f"(Output kept: {output_path})")
        else:
            output_path.unlink(missing_ok=True)
    return 0
# Propagate main()'s return value as the process exit code.
if __name__ == "__main__":
    sys.exit(main())

View File

@ -0,0 +1,101 @@
#!/usr/bin/env bash
# Proof-of-concept: run FFmpeg transcode and report real time.
# Usage:
# ./scripts/transcode_benchmark.sh path/to/video.mp4
# ./scripts/transcode_benchmark.sh path/to/video.mp4 10 # first 10 seconds only
# ./scripts/transcode_benchmark.sh path/to/video.mp4 10 nvidia
#
# Optional: DURATION (seconds), HWACCEL (cpu|nvidia|vaapi|qsv-h265). Default: full file, cpu.
# Env overrides: FFMPEG, FFPROBE, QSV_DEVICE, VAAPI_DEVICE.
# Requires: ffmpeg, ffprobe. Output: temp file, then deleted. Reports real time and speed.
set -e
INPUT="${1:?Usage: $0 <input.mp4> [duration_sec] [cpu|nvidia|vaapi|qsv-h265]}"
DURATION="${2:-}"
HWACCEL="${3:-cpu}"
# On Linux, QSV needs a DRM render node. With two GPUs, renderD128 is often non-Intel and renderD129 the Intel iGPU; prefer 129 when both exist so QSV finds VA.
if [[ -z "${QSV_DEVICE:-}" ]]; then
  if [[ -e /dev/dri/renderD129 ]]; then
    QSV_DEVICE="/dev/dri/renderD129"
  elif [[ -e /dev/dri/renderD128 ]]; then
    QSV_DEVICE="/dev/dri/renderD128"
  else
    QSV_DEVICE="0"
  fi
fi
# Frigate container has ffmpeg under /usr/lib/ffmpeg/<ver>/bin, not on PATH;
# search there when no ffmpeg is found on PATH and FFMPEG isn't set.
if [[ -z "${FFMPEG:-}" ]]; then
  if command -v ffmpeg &>/dev/null; then
    FFMPEG="ffmpeg"
  elif [[ -d /usr/lib/ffmpeg ]] && FFMPEG_CANDIDATE=$(find /usr/lib/ffmpeg -path '*/bin/ffmpeg' -type f 2>/dev/null | head -1); [[ -n "${FFMPEG_CANDIDATE:-}" ]]; then
    FFMPEG="$FFMPEG_CANDIDATE"
  else
    FFMPEG="ffmpeg"
  fi
fi
# Prefer the ffprobe that sits next to the chosen ffmpeg; fall back to PATH.
FFPROBE="${FFPROBE:-$(dirname "$FFMPEG")/ffprobe}"
if [[ ! -x "$FFPROBE" ]]; then
  FFPROBE="ffprobe"
fi
# NOTE(review): mktemp -u is race-prone in general; acceptable for a local benchmark.
OUTPUT=$(mktemp -u).mp4
cleanup() { rm -f "$OUTPUT"; }
trap cleanup EXIT
# Build base decode/input args
INPUT_ARGS=(-hide_banner -y -loglevel warning -stats -i "$INPUT")
# Only apply -t when DURATION is a plain (possibly fractional) number.
if [[ -n "$DURATION" && "$DURATION" =~ ^[0-9]+\.?[0-9]*$ ]]; then
  INPUT_ARGS+=(-t "$DURATION")
fi
# Select hwaccel-specific decode (PRE) and encode (ENC) argument sets.
case "$HWACCEL" in
  nvidia)
    PRE=( -hwaccel cuda -hwaccel_output_format cuda -extra_hw_frames 8 )
    ENC=(-c:v h264_nvenc)
    ;;
  vaapi)
    PRE=( -hwaccel vaapi -hwaccel_device "${VAAPI_DEVICE:-/dev/dri/renderD128}" -hwaccel_output_format vaapi )
    ENC=(-c:v h264_vaapi)
    ;;
  qsv-h265)
    PRE=( -load_plugin hevc_hw -hwaccel qsv -qsv_device "$QSV_DEVICE" -hwaccel_output_format qsv )
    # Use CQP explicitly; -profile:v/-level can be unsupported on some QSV runtimes
    ENC=(-c:v hevc_qsv -global_quality 23)
    ;;
  *)
    # Default: CPU software encode tuned for speed, not quality.
    PRE=()
    ENC=(-c:v libx264 -preset:v ultrafast -tune:v zerolatency)
    ;;
esac
echo "Input: $INPUT"
echo "Output: $OUTPUT (temp)"
echo "HW: $HWACCEL"
[[ -n "$DURATION" ]] && echo "Limit: ${DURATION}s"
# QSV is Intel-only and needs a working Intel VA-API stack; if you see 'No VA display found', see scripts/README.md troubleshooting.
[[ "$HWACCEL" = "qsv-h265" ]] && echo "QSV device: $QSV_DEVICE"
echo ""
# Get duration for speed calculation (if not limiting, use full file length)
if [[ -n "$DURATION" ]]; then
  DUR_SEC="$DURATION"
else
  DUR_SEC=$("${FFPROBE:-ffprobe}" -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 "$INPUT" 2>/dev/null || true)
fi
# Use $SECONDS (bash) so we don't rely on date %N or bc in minimal containers
START=$SECONDS
# -an drops audio so only the video encode path is benchmarked.
"$FFMPEG" "${PRE[@]}" "${INPUT_ARGS[@]}" -an "${ENC[@]}" -f mp4 -movflags +faststart "$OUTPUT"
ELAPSED=$((SECONDS - START))
# Guard against divide-by-zero in the speed calculation below.
[[ "$ELAPSED" -eq 0 ]] && ELAPSED=1
# stat -c is GNU/Linux, stat -f is BSD/macOS.
SIZE=$(stat -c%s "$OUTPUT" 2>/dev/null || stat -f%z "$OUTPUT" 2>/dev/null || echo 0)
SIZE_MB=$(awk "BEGIN {printf \"%.2f\", $SIZE/1048576}" 2>/dev/null || echo "$((SIZE / 1048576))")
echo "--- Results ---"
echo "Real time: ${ELAPSED}s"
if [[ -n "$DUR_SEC" && "$DUR_SEC" =~ ^[0-9]+\.?[0-9]*$ ]]; then
  SPEED=$(awk "BEGIN {printf \"%.2f\", $DUR_SEC/$ELAPSED}" 2>/dev/null || echo "?")
  echo "Duration: ${DUR_SEC}s"
  echo "Speed: ${SPEED}x realtime"
fi
echo "Output size: ${SIZE_MB} MiB"

View File

@ -0,0 +1,69 @@
# Dev workflow: frigate-dev (single image with transcode proxy)
Use **frigate-dev** so your working Docker setup keeps using the stable image. You switch between stable and dev by changing the image in compose and restarting. The transcode proxy runs **inside** the Frigate container; there is no separate proxy image.
## Image names
- **frigate-dev** — Frigate image built from this repo (includes the transcode proxy, plus config and UI support for transcode_proxy).
- Your normal setup keeps using **ghcr.io/blakeblackshear/frigate:stable-tensorrt** (or whatever you use today).
## Start / stop (switch between stable and dev)
You can't run both stacks at once (same ports). Use one compose file and swap the image.
**Stop everything:**
```bash
cd ~/docker-compose # or wherever your compose file is
docker compose down
```
**Run dev stack (Frigate with in-container transcode proxy):**
- In `docker-compose.yml`, set the frigate service to `image: frigate-dev` and publish port 5010 if you use transcode_proxy.
```bash
docker compose up -d
```
**Switch back to stable:**
- Stop: `docker compose down`
- In `docker-compose.yml`, set frigate back to `image: ghcr.io/blakeblackshear/frigate:stable-tensorrt`.
```bash
docker compose up -d
```
**Useful commands:**
- `docker compose down` — stop and remove containers.
- `docker compose up -d` — start in the background.
- `docker compose ps` — see what's running.
- `docker compose logs -f frigate` — follow Frigate logs.
## Building (Ubuntu server recommended)
Frigate's image **is not** “just Python” — it has a **compile phase** (nginx, sqlite-vec, etc.). Building is done with Docker and can take a while.
**Where to build:** On the **Ubuntu server** where you run Frigate. That way you get the right architecture and avoid Windows/Linux cross-build issues. Sync the repo from your Windows machine via git (clone or push from Windows to a repo and pull on the server, or copy the repo onto the server).
**On the Ubuntu server:**
1. Clone (or pull) the Frigate repo with this code.
2. **Build Frigate (TensorRT variant, same as stable-tensorrt):**
```bash
cd /path/to/frigate
make version
make local-trt
docker tag frigate:latest-tensorrt frigate-dev
```
(`make local-trt` uses buildx; first time may be slow.) The resulting image includes the transcode proxy; no separate proxy image is built.
**If you prefer to build on Windows:** You can use Docker buildx to build for `linux/amd64` and push to a registry, then pull `frigate-dev` on the Ubuntu server. The Frigate build is heavy and may be slower or more fragile on Windows; building on the server is simpler.
## One-time setup on the server
```bash
# Clone or copy the repo, then:
cd /path/to/frigate
make version
make local-trt
docker tag frigate:latest-tensorrt frigate-dev
```
Then in your compose use `image: frigate-dev`, publish port 5010 if you use the transcode proxy, and set `transcode_proxy` in Frigate config as in the main README.

55
transcode_proxy/README.md Normal file
View File

@ -0,0 +1,55 @@
# Frigate VOD Transcode Proxy
Optional proxy that runs **inside the Frigate container** and rewrites VOD HLS playback to an H.264 transport-stream rendition on the fly. Use it when recordings are HEVC (or high bitrate) and you want compatible or lower-bitrate playback.
## How it works
- **Manifest requests** (e.g. `.../master.m3u8` and `.../index-v1.m3u8`): Fetched from upstream and rewritten so the browser sees a proxy-owned H.264 HLS rendition.
- **Segment requests**: The rewritten media playlist points to proxy-owned `.transcoded.ts` segment URLs. Those requests fetch the upstream source segment, transcode it to H.264 MPEG-TS with FFmpeg, cache it in memory (LRU, configurable size), then serve it.
- **Init fragments**: The rewritten media playlist removes upstream `#EXT-X-MAP` usage, so the browser no longer depends on upstream fragmented MP4 init files for transcoded playback.
The proxy is an s6-managed service in the same Docker image as Frigate. It binds to port **5010** inside the container and starts after nginx is ready.
## Configuration
Environment variables (optional; defaults work when running in the same container):
| Variable | Default | Description |
|----------|---------|-------------|
| `TRANSCODE_PROXY_UPSTREAM` | `http://127.0.0.1:5000` | Upstream Frigate VOD base URL (nginx internal port when in-container). |
| `TRANSCODE_PROXY_PATH_PREFIX` | (empty) | If the proxy is mounted at a path (e.g. `/vod-transcoded`), set this so the proxy strips it when forwarding. |
| `TRANSCODE_PROXY_HOST` | `0.0.0.0` | Bind host. |
| `TRANSCODE_PROXY_PORT` | `5010` | Bind port. |
| `TRANSCODE_PROXY_CACHE_MB` | `500` | Max in-memory cache size (MB). |
| `TRANSCODE_PROXY_FFMPEG` | (system) | FFmpeg binary path; uses Frigate's FFmpeg when not set. |
| `TRANSCODE_PROXY_H264_BITRATE` | `128k` | H.264 bitrate for transcoded segments. |
| `TRANSCODE_PROXY_MAX_WIDTH` | `640` | Max output width for transcoded playback; aspect ratio is preserved and smaller sources are not upscaled. |
| `TRANSCODE_PROXY_MAX_HEIGHT` | `480` | Max output height for transcoded playback; aspect ratio is preserved and smaller sources are not upscaled. |
## Enabling in Frigate
1. Build Frigate from this repo (e.g. `frigate-dev`) so the image includes the proxy and config/UI support.
2. Expose the proxy either internally through Frigate nginx (recommended, e.g. `/vod-transcoded`) or by publishing port **5010** for direct access.
3. In Frigate config (YAML), add:
```yaml
transcode_proxy:
enabled: true
vod_proxy_url: "http://YOUR_FRIGATE_HOST:5010" # same host as Frigate, port 5010
```
4. Restart Frigate. The UI will use the proxy for recording playback when enabled.
If Frigate is behind a reverse proxy and you expose the transcode service at a path (e.g. `https://frigate.example.com/vod-transcoded`), set `TRANSCODE_PROXY_PATH_PREFIX=/vod-transcoded` in the container environment and use that full URL as `vod_proxy_url`.
## Running (single container)
The proxy runs automatically inside the Frigate container. No separate container or image is needed. For same-origin playback, keep the service internal and route it through Frigate nginx on the normal UI origin.
See **transcode_proxy/DEV_WORKFLOW.md** for building the dev image (e.g. `frigate-dev`) and switching between stable and dev.
## Endpoints
- `GET /vod/.../master.m3u8` Rewritten HLS master playlist for the transcoded rendition.
- `GET /vod/.../index*.m3u8` Rewritten HLS media playlist that points at proxy-owned transcoded transport-stream segments.
- `GET /vod/.../*.transcoded.ts` Transcoded H.264 MPEG-TS segments.
- `GET /cache` Cache stats (size, entry count).
- `GET /health` Health check.

View File

@ -0,0 +1 @@
"""Transcode proxy: sits in front of Frigate VOD and transcodes segments on the fly to H.264."""

View File

@ -0,0 +1,5 @@
"""Run the transcode proxy: python -m transcode_proxy."""
from transcode_proxy.main import run
if __name__ == "__main__":
run()

47
transcode_proxy/cache.py Normal file
View File

@ -0,0 +1,47 @@
"""In-memory LRU cache for transcoded segments (byte-size limited)."""
import logging
import threading
from collections import OrderedDict
from typing import Optional
logger = logging.getLogger(__name__)
class ByteLRUCache:
"""LRU cache that evicts by total byte size."""
def __init__(self, max_bytes: int):
self._max_bytes = max_bytes
self._current_bytes = 0
self._order: OrderedDict[str, bytes] = OrderedDict()
self._lock = threading.Lock()
def get(self, key: str) -> Optional[bytes]:
with self._lock:
data = self._order.pop(key, None)
if data is not None:
self._order[key] = data # move to end (most recent)
return data
return None
def set(self, key: str, value: bytes) -> None:
size = len(value)
if size > self._max_bytes:
logger.warning("Segment larger than cache max (%s bytes), not caching", size)
return
with self._lock:
while self._current_bytes + size > self._max_bytes and self._order:
evicted_key = next(iter(self._order))
evicted = self._order.pop(evicted_key)
self._current_bytes -= len(evicted)
logger.debug("Evicted %s from transcode cache", evicted_key)
self._order[key] = value
self._current_bytes += size
def size_bytes(self) -> int:
with self._lock:
return self._current_bytes
def count(self) -> int:
with self._lock:
return len(self._order)

44
transcode_proxy/config.py Normal file
View File

@ -0,0 +1,44 @@
"""Configuration from environment."""
import os
from dataclasses import dataclass, field
@dataclass
class Config:
    """Proxy configuration, read from environment variables at instantiation."""

    # Upstream Frigate VOD base URL (e.g. http://nginx:80 or http://127.0.0.1:5001)
    # NOTE(review): the README documents this default as http://127.0.0.1:5000,
    # but the code defaults to :80 — confirm which is correct.
    upstream_base: str = field(
        default_factory=lambda: os.environ.get("TRANSCODE_PROXY_UPSTREAM", "http://127.0.0.1:80")
    )
    # Optional path prefix the proxy is mounted at (e.g. /vod-transcoded); strip when forwarding.
    # Trailing slash is normalized away so prefix matching is consistent.
    path_prefix: str = field(
        default_factory=lambda: os.environ.get("TRANSCODE_PROXY_PATH_PREFIX", "").rstrip("/")
    )
    # Host/port the HTTP server binds to.
    host: str = field(default_factory=lambda: os.environ.get("TRANSCODE_PROXY_HOST", "0.0.0.0"))
    port: int = field(
        default_factory=lambda: int(os.environ.get("TRANSCODE_PROXY_PORT", "5010"))
    )
    # In-memory cache max size in bytes (TRANSCODE_PROXY_CACHE_MB, converted from MB).
    cache_max_bytes: int = field(
        default_factory=lambda: int(os.environ.get("TRANSCODE_PROXY_CACHE_MB", "500")) * 1024 * 1024
    )
    # FFmpeg binary; falls back to whatever "ffmpeg" resolves to on PATH.
    ffmpeg_path: str = field(
        default_factory=lambda: os.environ.get("TRANSCODE_PROXY_FFMPEG", "ffmpeg")
    )
    # H.264 bitrate for transcoded segments (FFmpeg bitrate syntax, e.g. "128k").
    h264_bitrate: str = field(
        default_factory=lambda: os.environ.get("TRANSCODE_PROXY_H264_BITRATE", "128k")
    )
    # Max output size for transcoded playback; preserves aspect ratio and will not upscale
    max_width: int = field(
        default_factory=lambda: int(os.environ.get("TRANSCODE_PROXY_MAX_WIDTH", "640"))
    )
    max_height: int = field(
        default_factory=lambda: int(os.environ.get("TRANSCODE_PROXY_MAX_HEIGHT", "480"))
    )


# Module-level singleton used throughout the proxy.
config = Config()

View File

@ -0,0 +1,24 @@
# Example: Frigate with in-container transcode proxy (single image).
#
# 1. Build Frigate from this repo (on Ubuntu recommended):
# make version && make local-trt && docker tag frigate:latest-tensorrt frigate-dev
#
# 2. Use image: frigate-dev and publish port 5010 for the transcode proxy.
# 3. In Frigate config (config.yml), set:
# transcode_proxy:
# enabled: true
# vod_proxy_url: "http://YOUR_HOST:5010"
services:
frigate:
container_name: frigate
restart: unless-stopped
image: frigate-dev
# ... your existing frigate config (gpus, shm_size, devices, volumes) ...
ports:
- "5000:5000" # or 8971:8971 depending on your setup
- "5010:5010" # transcode proxy (only needed if transcode_proxy.enabled is true)
# Optional: override proxy defaults
# environment:
# TRANSCODE_PROXY_PORT: "5010"
# TRANSCODE_PROXY_CACHE_MB: "500"

419
transcode_proxy/main.py Normal file
View File

@ -0,0 +1,419 @@
"""FastAPI app: proxy VOD requests, transcode segments on the fly."""
import logging
import re
from collections.abc import AsyncIterator
from typing import Optional
import httpx
from fastapi import FastAPI, Request, Response
from fastapi.responses import StreamingResponse
from transcode_proxy.cache import ByteLRUCache
from transcode_proxy.config import config
from transcode_proxy.transcode import (
TranscodeError,
stream_transcode_segment_to_h264_ts,
)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
app = FastAPI(title="Frigate VOD Transcode Proxy", version="0.1.0")
# Shared in-memory LRU cache for transcoded segments and source init fragments.
cache = ByteLRUCache(config.cache_max_bytes)
# Segment extensions that the upstream VOD may expose.
SEGMENT_EXTENSIONS = (".m4s", ".mp4", ".ts")
# Only these request headers are forwarded upstream (auth/session context).
FORWARD_HEADERS = ("cookie", "authorization", "referer")
# Suffix marking proxy-owned transcoded segment URIs.
TRANSCODED_SEGMENT_SUFFIX = ".transcoded.ts"
# Codec string advertised in rewritten master playlists (H.264 High, level 3.1,
# matching the libx264 -profile:v high -level:v 3.1 settings in transcode.py).
H264_CODEC = "avc1.64001f"
# Query params consumed by the proxy itself; stripped before forwarding upstream.
LOCAL_QUERY_KEYS = {"bitrate", "max_width", "max_height"}
def _upstream_path(path: str) -> Optional[str]:
    """Map a request path onto an upstream /vod path, or None when it is
    not a VOD path. The configured mount prefix is stripped first."""
    candidate = path.lstrip("/")
    if config.path_prefix:
        mount = config.path_prefix.strip("/") + "/"
        if candidate.startswith(mount):
            candidate = candidate[len(mount):]
    if candidate == "vod-transcoded":
        return "/vod"
    if candidate.startswith("vod-transcoded/"):
        return "/" + candidate.removeprefix("vod-transcoded/")
    if candidate == "vod" or candidate.startswith("vod/"):
        return "/" + candidate
    return None
def _is_segment(path: str) -> bool:
    """True when the path looks like a media segment, proxy-owned or upstream."""
    trimmed = path.rstrip("/")
    if trimmed.endswith(TRANSCODED_SEGMENT_SUFFIX):
        return True
    return trimmed.endswith(SEGMENT_EXTENSIONS)
def _is_init_path(path: str) -> bool:
return bool(re.search(r"/init.*\.mp4$", path))
def _is_master_playlist(path: str) -> bool:
return path.endswith("/master.m3u8") or path.endswith("master.m3u8")
def _init_upstream_path(segment_path: str) -> Optional[str]:
"""Infer the matching init fragment for an fMP4 media fragment path."""
match = re.search(r"/seg-\d+(?P<suffix>.*)\.m4s$", segment_path)
if not match:
return None
suffix = match.group("suffix")
return re.sub(r"/seg-\d+.*\.m4s$", f"/init{suffix}.mp4", segment_path)
async def _fetch_upstream_bytes(
    client: httpx.AsyncClient, url: str, headers: dict[str, str]
) -> Optional[bytes]:
    """GET ``url`` and return the response body, or None on any failure.

    Failures (connection errors, non-2xx statuses) are logged and swallowed;
    callers treat None as "upstream unavailable".
    """
    try:
        upstream_resp = await client.get(url, headers=headers)
        upstream_resp.raise_for_status()
        return upstream_resp.content
    except Exception as e:
        logger.warning("Upstream fetch failed %s: %s", url, e)
        return None
async def _fetch_source_init_bytes(
    client: httpx.AsyncClient,
    init_path: str,
    query: str,
    headers: dict[str, str],
) -> Optional[bytes]:
    """Fetch an fMP4 init fragment from upstream, via the shared cache.

    The query string (already serialized) is appended when present. Fetched
    bytes are cached under a "source-init:" key so repeated segment requests
    for the same rendition reuse one init fetch. Returns None on fetch failure.
    """
    init_url = f"{config.upstream_base.rstrip('/')}{init_path}"
    if query:
        init_url += f"?{query}"
    cache_key = f"source-init:{init_url}"
    cached = cache.get(cache_key)
    if cached is not None:
        return cached
    init_bytes = await _fetch_upstream_bytes(client, init_url, headers)
    if init_bytes is not None:
        cache.set(cache_key, init_bytes)
    return init_bytes
async def _stream_source_segment_bytes(
    source_url: str,
    headers: dict[str, str],
    init_bytes: Optional[bytes] = None,
) -> AsyncIterator[bytes]:
    """Yield the upstream segment's bytes, optionally prefixed by init bytes.

    Prepending ``init_bytes`` turns a bare fMP4 media fragment into a
    decodable stream for FFmpeg's stdin. Upstream HTTP errors raise from
    raise_for_status and propagate to the consumer.
    """
    if init_bytes is not None:
        yield init_bytes
    async with httpx.AsyncClient(timeout=60.0) as client:
        async with client.stream("GET", source_url, headers=headers) as upstream_resp:
            upstream_resp.raise_for_status()
            async for chunk in upstream_resp.aiter_bytes():
                if chunk:
                    yield chunk
def _proxy_segment_uri(entry: str) -> str:
    """Map an upstream playlist entry to its proxy-owned transcoded URI."""
    return entry + TRANSCODED_SEGMENT_SUFFIX
def _source_segment_path(path: str) -> str:
    """Inverse of _proxy_segment_uri: drop the transcoded suffix when present."""
    return path.removesuffix(TRANSCODED_SEGMENT_SUFFIX)
def _resolution_for_transcode(
width: int, height: int, max_width: int, max_height: int
) -> tuple[int, int]:
if width <= 0 or height <= 0:
return (max_width, max_height)
max_width = max(max_width, 2)
max_height = max(max_height, 2)
scale = min(max_width / width, max_height / height, 1.0)
out_width = max(2, int(width * scale))
out_height = max(2, int(height * scale))
if out_width % 2:
out_width -= 1
if out_height % 2:
out_height -= 1
return (max(out_width, 2), max(out_height, 2))
def _bandwidth_bits(bitrate: str) -> int:
match = re.fullmatch(r"(?P<value>\d+(?:\.\d+)?)(?P<suffix>[kKmMgG]?)", bitrate.strip())
if not match:
return 2_000_000
value = float(match.group("value"))
suffix = match.group("suffix").upper()
multiplier = {
"": 1,
"K": 1_000,
"M": 1_000_000,
"G": 1_000_000_000,
}[suffix]
return int(value * multiplier)
def _transcode_request_profile(request: Request) -> tuple[str, int, int, str]:
    """Split the request's query string into the transcode profile and the
    query to forward upstream.

    Returns (bitrate, max_width, max_height, upstream_query): the proxy-local
    knobs (with config defaults) plus all remaining query params re-joined
    for forwarding.
    """
    bitrate = request.query_params.get("bitrate", config.h264_bitrate)
    max_width = int(request.query_params.get("max_width", config.max_width))
    max_height = int(request.query_params.get("max_height", config.max_height))
    # NOTE(review): values are re-joined without URL re-encoding — confirm the
    # upstream tolerates already-decoded characters here.
    upstream_query = "&".join(
        f"{key}={value}"
        for key, value in request.query_params.multi_items()
        if key not in LOCAL_QUERY_KEYS
    )
    return bitrate, max_width, max_height, upstream_query
def _rewrite_master_playlist(
    upstream_bytes: bytes, bitrate: str, max_width: int, max_height: int
) -> bytes:
    """Rewrite an HLS master playlist to advertise a single H.264 rendition.

    Finds the first #EXT-X-STREAM-INF variant and its child URI, then emits a
    new master containing only that child, re-labelled with the transcode
    profile's BANDWIDTH and CODECS (plus a RESOLUTION scaled via
    _resolution_for_transcode when the upstream advertised one). On parse
    failure the upstream manifest is returned unchanged.
    """
    playlist = upstream_bytes.decode("utf-8", errors="replace")
    lines = [line.strip() for line in playlist.splitlines() if line.strip()]
    child_uri: Optional[str] = None
    stream_inf_line: Optional[str] = None
    for idx, line in enumerate(lines):
        if line.startswith("#EXT-X-STREAM-INF:"):
            stream_inf_line = line
            # The variant URI is the first non-comment line after STREAM-INF.
            for child_line in lines[idx + 1 :]:
                if child_line and not child_line.startswith("#"):
                    child_uri = child_line
                    break
            break
    if child_uri is None or stream_inf_line is None:
        logger.warning("Unable to parse master playlist, returning upstream manifest")
        return upstream_bytes
    attrs = [
        f'BANDWIDTH={max(_bandwidth_bits(bitrate), 1)}',
        f'CODECS="{H264_CODEC}"',
    ]
    resolution_match = re.search(r"RESOLUTION=(\d+)x(\d+)", stream_inf_line)
    if resolution_match:
        width = int(resolution_match.group(1))
        height = int(resolution_match.group(2))
        out_width, out_height = _resolution_for_transcode(
            width, height, max_width, max_height
        )
        # Insert between BANDWIDTH and CODECS.
        attrs.insert(1, f"RESOLUTION={out_width}x{out_height}")
    rewritten = [
        "#EXTM3U",
        "#EXT-X-STREAM-INF:" + ",".join(attrs),
        child_uri,
        "",
    ]
    return "\n".join(rewritten).encode()
def _rewrite_media_playlist(upstream_bytes: bytes) -> bytes:
    """Rewrite an HLS media playlist to point at proxy-owned transcoded URIs.

    - Drops #EXT-X-MAP lines (transcoded TS segments need no fMP4 init).
    - Rewrites every segment URI via _proxy_segment_uri.
    - Inserts #EXT-X-DISCONTINUITY before every segment after the first,
      since each segment is transcoded independently.
    """
    playlist = upstream_bytes.decode("utf-8", errors="replace")
    output_lines: list[str] = []
    segment_index = 0
    for line in playlist.splitlines():
        stripped = line.strip()
        if stripped.startswith("#EXT-X-MAP:"):
            continue
        if stripped.startswith("#EXTINF:") and segment_index > 0:
            output_lines.append("#EXT-X-DISCONTINUITY")
        if stripped and not stripped.startswith("#"):
            output_lines.append(_proxy_segment_uri(stripped))
            segment_index += 1
            continue
        output_lines.append(line)
    # Ensure the encoded playlist ends with a newline.
    if output_lines and output_lines[-1] != "":
        output_lines.append("")
    return "\n".join(output_lines).encode()
async def _proxy_upstream_response(
    client: httpx.AsyncClient, url: str, headers: dict[str, str]
) -> Optional[httpx.Response]:
    """GET ``url`` and return the full response, or None on any failure.

    Like _fetch_upstream_bytes but returns the response object (callers need
    the content-type header). Failures are logged and swallowed.
    """
    try:
        upstream_resp = await client.get(url, headers=headers)
        upstream_resp.raise_for_status()
        return upstream_resp
    except Exception as e:
        logger.warning("Upstream fetch failed %s: %s", url, e)
        return None
async def _transcoded_segment_response(
    source_url: str,
    cache_key: str,
    headers: dict[str, str],
    init_bytes: Optional[bytes] = None,
    bitrate: Optional[str] = None,
    max_width: Optional[int] = None,
    max_height: Optional[int] = None,
) -> Response:
    """Stream a transcoded H.264 TS segment for ``source_url``.

    Starts an FFmpeg stream fed by the upstream segment (prefixed with
    ``init_bytes`` for fMP4 sources). The first output chunk is awaited up
    front so a transcode failure can still produce an HTTP 502; after that,
    chunks are streamed to the client and, on success, the accumulated output
    is stored in the cache under ``cache_key``.
    """
    stream = await stream_transcode_segment_to_h264_ts(
        _stream_source_segment_bytes(source_url, headers, init_bytes),
        config.ffmpeg_path,
        bitrate or config.h264_bitrate,
        max_width or config.max_width,
        max_height or config.max_height,
    )
    try:
        first_chunk = await stream.first_chunk()
    except TranscodeError as e:
        await stream.aclose()
        logger.warning("Transcode stream failed %s: %s", source_url, e)
        return Response(status_code=502, content=b"Transcode failed")
    async def body() -> AsyncIterator[bytes]:
        try:
            async for chunk in stream.iter_chunks(first_chunk):
                yield chunk
        except TranscodeError as e:
            # Headers are already sent by now; the client sees a truncated
            # body rather than an error status.
            logger.warning("Transcode stream failed %s: %s", source_url, e)
            raise
        else:
            # Cache only after the full stream completed without error.
            cache.set(cache_key, stream.output_bytes)
    return StreamingResponse(
        body(),
        media_type="video/mp2t",
        headers={"Cache-Control": "private, max-age=300"},
    )
@app.get("/cache")
async def cache_info() -> dict:
"""Return cache size and entry count (for debugging)."""
return {
"size_bytes": cache.size_bytes(),
"size_mb": round(cache.size_bytes() / (1024 * 1024), 2),
"entries": cache.count(),
}
@app.get("/health")
async def health() -> dict:
return {"status": "ok"}
@app.get("/{full_path:path}")
async def vod_proxy(request: Request, full_path: str) -> Response:
"""Handle /vod/... or /vod-transcoded/... (when path_prefix is set)."""
path = "/" + full_path.lstrip("/")
upstream_path = _upstream_path(path)
if upstream_path is None or not (
upstream_path == "/vod" or upstream_path.startswith("/vod/")
):
return Response(status_code=404, content=b"Not found")
bitrate, max_width, max_height, upstream_query = _transcode_request_profile(request)
upstream_url = f"{config.upstream_base.rstrip('/')}{upstream_path}"
if upstream_query:
upstream_url += f"?{upstream_query}"
headers = {
k: v for k, v in request.headers.items() if k.lower() in FORWARD_HEADERS
}
if upstream_path.endswith(TRANSCODED_SEGMENT_SUFFIX):
cache_key = f"{upstream_url}|{bitrate}|{max_width}x{max_height}"
cached = cache.get(cache_key)
if cached is not None:
return Response(
content=cached,
media_type="video/mp2t",
headers={"Cache-Control": "private, max-age=300"},
)
source_path = _source_segment_path(upstream_path)
source_url = f"{config.upstream_base.rstrip('/')}{source_path}"
if upstream_query:
source_url += f"?{upstream_query}"
init_bytes: Optional[bytes] = None
if source_path.endswith(".m4s"):
init_path = _init_upstream_path(source_path)
if init_path is None:
return Response(status_code=502, content=b"Init segment inference failed")
async with httpx.AsyncClient(timeout=30.0) as client:
init_bytes = await _fetch_source_init_bytes(
client, init_path, upstream_query, headers
)
if init_bytes is None:
return Response(status_code=502, content=b"Init segment fetch failed")
return await _transcoded_segment_response(
source_url=source_url,
cache_key=cache_key,
headers=headers,
init_bytes=init_bytes,
bitrate=bitrate,
max_width=max_width,
max_height=max_height,
)
async with httpx.AsyncClient(timeout=30.0) as client:
if _is_master_playlist(upstream_path):
upstream_resp = await _proxy_upstream_response(client, upstream_url, headers)
if upstream_resp is None:
return Response(status_code=502, content=b"Upstream fetch failed")
return Response(
content=_rewrite_master_playlist(
upstream_resp.content, bitrate, max_width, max_height
),
media_type="application/vnd.apple.mpegurl",
headers={"Cache-Control": "no-store"},
)
if upstream_path.endswith(".m3u8"):
upstream_resp = await _proxy_upstream_response(client, upstream_url, headers)
if upstream_resp is None:
return Response(status_code=502, content=b"Upstream fetch failed")
return Response(
content=_rewrite_media_playlist(upstream_resp.content),
media_type="application/vnd.apple.mpegurl",
headers={"Cache-Control": "no-store"},
)
upstream_resp = await _proxy_upstream_response(client, upstream_url, headers)
if upstream_resp is None:
return Response(status_code=502, content=b"Upstream fetch failed")
return Response(
content=upstream_resp.content,
media_type=upstream_resp.headers.get("content-type", "application/octet-stream"),
headers={"Cache-Control": "no-store"},
)
def run() -> None:
    """Start the proxy with uvicorn on the configured host/port."""
    # Function-scope import: uvicorn is only required when actually serving.
    import uvicorn
    uvicorn.run(
        "transcode_proxy.main:app",
        host=config.host,
        port=config.port,
        log_level="info",
    )
if __name__ == "__main__":
run()

View File

@ -0,0 +1,5 @@
# Dependencies for running the transcode proxy standalone (e.g. in a separate container).
# Frigate's main container may already have these; the proxy can share the same env.
fastapi>=0.100.0
uvicorn>=0.22.0
httpx>=0.24.0

View File

@ -0,0 +1,256 @@
"""Transcode media segments to H.264 transport stream bytes using FFmpeg."""
import asyncio
import logging
import subprocess
from collections.abc import AsyncIterable, AsyncIterator
from typing import Optional
logger = logging.getLogger(__name__)
class TranscodeError(RuntimeError):
    """Raised when FFmpeg cannot produce a valid transcoded segment.

    The message carries FFmpeg's stderr output or the upstream source error
    (see H264TSStream._error_message).
    """
def _build_scale_filter(max_width: int, max_height: int) -> Optional[str]:
if max_width <= 0 or max_height <= 0:
return None
return (
f"scale=w={max_width}:h={max_height}:"
"force_original_aspect_ratio=decrease:"
"force_divisible_by=2"
)
def _build_ffmpeg_cmd(
ffmpeg_path: str,
bitrate: str,
max_width: int,
max_height: int,
) -> list[str]:
cmd = [
ffmpeg_path,
"-hide_banner",
"-loglevel",
"error",
"-i",
"pipe:0",
"-an",
"-pix_fmt",
"yuv420p",
"-c:v",
"libx264",
"-preset",
"fast",
"-profile:v",
"high",
"-level:v",
"3.1",
"-b:v",
bitrate,
"-maxrate",
bitrate,
"-bufsize",
bitrate,
"-muxdelay",
"0",
"-muxpreload",
"0",
"-f",
"mpegts",
"-mpegts_flags",
"+initial_discontinuity",
"pipe:1",
]
scale_filter = _build_scale_filter(max_width, max_height)
if scale_filter:
cmd[7:7] = ["-vf", scale_filter]
return cmd
class H264TSStream:
    """Manage a streaming FFmpeg transcode process.

    Lifecycle: ``start()`` launches FFmpeg plus two background tasks (one
    feeding stdin from the source chunks, one draining stderr). Callers then
    await ``first_chunk()`` and iterate ``iter_chunks()``. All stdout read so
    far is also accumulated in ``output_bytes`` so the caller can cache the
    full segment after a successful stream.
    """
    def __init__(self, process: asyncio.subprocess.Process):
        self._process = process
        # FFmpeg stderr, retained for error reporting.
        self._stderr = bytearray()
        # Accumulated stdout (the transcoded TS bytes).
        self._output = bytearray()
        # Failure raised while feeding stdin, surfaced via _error_message().
        self._input_error: Exception | None = None
        self._closed = False
        self._stdin_task: asyncio.Task[None] | None = None
        self._stderr_task: asyncio.Task[None] | None = None
    @classmethod
    async def start(
        cls,
        source_chunks: AsyncIterable[bytes],
        ffmpeg_path: str,
        bitrate: str = "2M",
        max_width: int = 640,
        max_height: int = 480,
    ) -> "H264TSStream":
        """Spawn FFmpeg and begin feeding it ``source_chunks`` in the background."""
        process = await asyncio.create_subprocess_exec(
            *_build_ffmpeg_cmd(ffmpeg_path, bitrate, max_width, max_height),
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        stream = cls(process)
        stream._stdin_task = asyncio.create_task(stream._feed_stdin(source_chunks))
        stream._stderr_task = asyncio.create_task(stream._drain_stderr())
        return stream
    async def _feed_stdin(self, source_chunks: AsyncIterable[bytes]) -> None:
        """Pump source chunks into FFmpeg's stdin; record (don't raise) errors."""
        assert self._process.stdin is not None
        try:
            async for chunk in source_chunks:
                if not chunk:
                    continue
                self._process.stdin.write(chunk)
                await self._process.stdin.drain()
        except (BrokenPipeError, ConnectionResetError) as exc:
            self._input_error = exc
        except Exception as exc:  # pragma: no cover - depends on upstream/network failures
            self._input_error = exc
        finally:
            # Always close stdin so FFmpeg sees EOF and can finish/flush.
            stdin = self._process.stdin
            if stdin is not None and not stdin.is_closing():
                stdin.close()
                try:
                    await stdin.wait_closed()
                except Exception:
                    pass
    async def _drain_stderr(self) -> None:
        """Collect stderr so the pipe never blocks and errors can be reported."""
        assert self._process.stderr is not None
        while True:
            chunk = await self._process.stderr.read(8192)
            if not chunk:
                break
            self._stderr.extend(chunk)
    async def _read_stdout_chunk(self) -> bytes:
        """Read one stdout chunk, mirroring it into the output accumulator."""
        assert self._process.stdout is not None
        chunk = await self._process.stdout.read(65536)
        if chunk:
            self._output.extend(chunk)
        return chunk
    def _error_message(self) -> str:
        """Best available failure description: input error, stderr, or generic."""
        if self._input_error is not None:
            return f"Source stream failed: {self._input_error}"
        if self._stderr:
            return self._stderr.decode(errors="replace")
        return "unknown FFmpeg error"
    async def _ensure_success(self) -> bytes:
        """Await task/process completion; raise TranscodeError on nonzero exit."""
        if self._stdin_task is not None:
            await self._stdin_task
        if self._stderr_task is not None:
            await self._stderr_task
        returncode = await self._process.wait()
        if returncode != 0:
            raise TranscodeError(self._error_message())
        return bytes(self._output)
    async def first_chunk(self) -> bytes:
        """Await the first output chunk; raise TranscodeError if FFmpeg fails
        before producing any output (lets callers still return an error status)."""
        chunk = await self._read_stdout_chunk()
        if chunk:
            return chunk
        try:
            await self._ensure_success()
        finally:
            self._closed = True
        raise TranscodeError("FFmpeg produced no output")
    async def iter_chunks(self, first_chunk: bytes) -> AsyncIterator[bytes]:
        """Yield ``first_chunk`` then the remaining output; verify exit status.

        Always closes the stream (killing FFmpeg if needed) on exit.
        """
        try:
            yield first_chunk
            while True:
                chunk = await self._read_stdout_chunk()
                if not chunk:
                    break
                yield chunk
            await self._ensure_success()
        finally:
            await self.aclose()
    async def aclose(self) -> None:
        """Idempotently kill FFmpeg (if running) and cancel background tasks."""
        if self._closed:
            return
        self._closed = True
        if self._process.returncode is None:
            self._process.kill()
            await self._process.wait()
        for task in (self._stdin_task, self._stderr_task):
            if task is None or task.done():
                continue
            task.cancel()
            try:
                await task
            except asyncio.CancelledError:
                pass
    @property
    def output_bytes(self) -> bytes:
        """All stdout read so far (the complete segment after a full stream)."""
        return bytes(self._output)
async def stream_transcode_segment_to_h264_ts(
    source_chunks: AsyncIterable[bytes],
    ffmpeg_path: str,
    bitrate: str = "2M",
    max_width: int = 640,
    max_height: int = 480,
) -> H264TSStream:
    """Start an FFmpeg process that streams H.264 MPEG-TS output.

    Thin convenience wrapper around H264TSStream.start.
    """
    stream = await H264TSStream.start(
        source_chunks,
        ffmpeg_path,
        bitrate=bitrate,
        max_width=max_width,
        max_height=max_height,
    )
    return stream
def transcode_segment_to_h264_ts(
    segment_bytes: bytes,
    ffmpeg_path: str,
    bitrate: str = "2M",
    max_width: int = 640,
    max_height: int = 480,
) -> Optional[bytes]:
    """Decode a segment and re-encode it as H.264 MPEG-TS bytes.

    Synchronous whole-buffer variant of the streaming API: feeds the entire
    segment to FFmpeg over stdin and returns its stdout, or None on any
    failure (non-zero exit, 60s timeout, or launch error). Failures are
    logged, never raised.
    """
    try:
        result = subprocess.run(
            _build_ffmpeg_cmd(ffmpeg_path, bitrate, max_width, max_height),
            input=segment_bytes,
            capture_output=True,
            timeout=60,
        )
        if result.returncode != 0:
            logger.warning(
                "FFmpeg transcode failed: %s",
                result.stderr.decode(errors="replace") if result.stderr else "unknown",
            )
            return None
        return result.stdout
    except subprocess.TimeoutExpired:
        logger.warning("FFmpeg transcode timed out")
        return None
    except Exception as e:
        logger.warning("FFmpeg transcode error: %s", e)
        return None

View File

@ -1,457 +1,469 @@
import { useCallback, useState } from "react";
import {
Dialog,
DialogContent,
DialogDescription,
DialogFooter,
DialogHeader,
DialogTitle,
DialogTrigger,
} from "../ui/dialog";
import { Label } from "../ui/label";
import { RadioGroup, RadioGroupItem } from "../ui/radio-group";
import { Button } from "../ui/button";
import { ExportMode } from "@/types/filter";
import { FaArrowDown } from "react-icons/fa";
import axios from "axios";
import { toast } from "sonner";
import { Input } from "../ui/input";
import { TimeRange } from "@/types/timeline";
import useSWR from "swr";
import {
Select,
SelectContent,
SelectItem,
SelectSeparator,
SelectTrigger,
SelectValue,
} from "../ui/select";
import { isDesktop, isMobile } from "react-device-detect";
import { Drawer, DrawerContent, DrawerTrigger } from "../ui/drawer";
import SaveExportOverlay from "./SaveExportOverlay";
import { baseUrl } from "@/api/baseUrl";
import { cn } from "@/lib/utils";
import { GenericVideoPlayer } from "../player/GenericVideoPlayer";
import { useTranslation } from "react-i18next";
import { ExportCase } from "@/types/export";
import { CustomTimeSelector } from "./CustomTimeSelector";
const EXPORT_OPTIONS = [
"1",
"4",
"8",
"12",
"24",
"timeline",
"custom",
] as const;
type ExportOption = (typeof EXPORT_OPTIONS)[number];
type ExportDialogProps = {
camera: string;
latestTime: number;
currentTime: number;
range?: TimeRange;
mode: ExportMode;
showPreview: boolean;
setRange: (range: TimeRange | undefined) => void;
setMode: (mode: ExportMode) => void;
setShowPreview: (showPreview: boolean) => void;
};
export default function ExportDialog({
camera,
latestTime,
currentTime,
range,
mode,
showPreview,
setRange,
setMode,
setShowPreview,
}: ExportDialogProps) {
const { t } = useTranslation(["components/dialog"]);
const [name, setName] = useState("");
const [selectedCaseId, setSelectedCaseId] = useState<string | undefined>(
undefined,
);
const onStartExport = useCallback(() => {
if (!range) {
toast.error(t("export.toast.error.noVaildTimeSelected"), {
position: "top-center",
});
return;
}
if (range.before < range.after) {
toast.error(t("export.toast.error.endTimeMustAfterStartTime"), {
position: "top-center",
});
return;
}
axios
.post(
`export/${camera}/start/${Math.round(range.after)}/end/${Math.round(range.before)}`,
{
playback: "realtime",
name,
export_case_id: selectedCaseId || undefined,
},
)
.then((response) => {
if (response.status == 200) {
toast.success(t("export.toast.success"), {
position: "top-center",
action: (
<a href="/export" target="_blank" rel="noopener noreferrer">
<Button>{t("export.toast.view")}</Button>
</a>
),
});
setName("");
setSelectedCaseId(undefined);
setRange(undefined);
setMode("none");
}
})
.catch((error) => {
const errorMessage =
error.response?.data?.message ||
error.response?.data?.detail ||
"Unknown error";
toast.error(
t("export.toast.error.failed", {
error: errorMessage,
}),
{ position: "top-center" },
);
});
}, [camera, name, range, selectedCaseId, setRange, setName, setMode, t]);
const handleCancel = useCallback(() => {
setName("");
setSelectedCaseId(undefined);
setMode("none");
setRange(undefined);
}, [setMode, setRange]);
const Overlay = isDesktop ? Dialog : Drawer;
const Trigger = isDesktop ? DialogTrigger : DrawerTrigger;
const Content = isDesktop ? DialogContent : DrawerContent;
return (
<>
<ExportPreviewDialog
camera={camera}
range={range}
showPreview={showPreview}
setShowPreview={setShowPreview}
/>
<SaveExportOverlay
className="pointer-events-none absolute left-1/2 top-8 z-50 -translate-x-1/2"
show={mode == "timeline"}
onPreview={() => setShowPreview(true)}
onSave={() => onStartExport()}
onCancel={handleCancel}
/>
<Overlay
open={mode == "select"}
onOpenChange={(open) => {
if (!open) {
setMode("none");
}
}}
>
{!isDesktop && (
<Trigger asChild>
<Button
className="flex items-center gap-2"
aria-label={t("menu.export", { ns: "common" })}
size="sm"
onClick={() => {
const now = new Date(latestTime * 1000);
let start = 0;
now.setHours(now.getHours() - 1);
start = now.getTime() / 1000;
setRange({
before: latestTime,
after: start,
});
setMode("select");
}}
>
<FaArrowDown className="rounded-md bg-secondary-foreground fill-secondary p-1" />
{isDesktop && (
<div className="text-primary">
{t("menu.export", { ns: "common" })}
</div>
)}
</Button>
</Trigger>
)}
<Content
className={
isDesktop
? "sm:rounded-lg md:rounded-2xl"
: "mx-4 rounded-lg px-4 pb-4 md:rounded-2xl"
}
>
<ExportContent
latestTime={latestTime}
currentTime={currentTime}
range={range}
name={name}
selectedCaseId={selectedCaseId}
onStartExport={onStartExport}
setName={setName}
setSelectedCaseId={setSelectedCaseId}
setRange={setRange}
setMode={setMode}
onCancel={handleCancel}
/>
</Content>
</Overlay>
</>
);
}
type ExportContentProps = {
latestTime: number;
currentTime: number;
range?: TimeRange;
name: string;
selectedCaseId?: string;
onStartExport: () => void;
setName: (name: string) => void;
setSelectedCaseId: (caseId: string | undefined) => void;
setRange: (range: TimeRange | undefined) => void;
setMode: (mode: ExportMode) => void;
onCancel: () => void;
};
export function ExportContent({
latestTime,
currentTime,
range,
name,
selectedCaseId,
onStartExport,
setName,
setSelectedCaseId,
setRange,
setMode,
onCancel,
}: ExportContentProps) {
const { t } = useTranslation(["components/dialog"]);
const [selectedOption, setSelectedOption] = useState<ExportOption>("1");
const { data: cases } = useSWR<ExportCase[]>("cases");
const onSelectTime = useCallback(
(option: ExportOption) => {
setSelectedOption(option);
const now = new Date(latestTime * 1000);
let start = 0;
switch (option) {
case "1":
now.setHours(now.getHours() - 1);
start = now.getTime() / 1000;
break;
case "4":
now.setHours(now.getHours() - 4);
start = now.getTime() / 1000;
break;
case "8":
now.setHours(now.getHours() - 8);
start = now.getTime() / 1000;
break;
case "12":
now.setHours(now.getHours() - 12);
start = now.getTime() / 1000;
break;
case "24":
now.setHours(now.getHours() - 24);
start = now.getTime() / 1000;
break;
case "custom":
start = latestTime - 3600;
break;
}
setRange({
before: latestTime,
after: start,
});
},
[latestTime, setRange],
);
return (
<div className="w-full">
{isDesktop && (
<>
<DialogHeader>
<DialogTitle>{t("menu.export", { ns: "common" })}</DialogTitle>
</DialogHeader>
<SelectSeparator className="my-4 bg-secondary" />
</>
)}
<RadioGroup
className={`flex flex-col gap-4 ${isDesktop ? "" : "mt-4"}`}
onValueChange={(value) => onSelectTime(value as ExportOption)}
>
{EXPORT_OPTIONS.map((opt) => {
return (
<div key={opt} className="flex items-center gap-2">
<RadioGroupItem
className={
opt == selectedOption
? "bg-selected from-selected/50 to-selected/90 text-selected"
: "bg-secondary from-secondary/50 to-secondary/90 text-secondary"
}
id={opt}
value={opt}
/>
<Label className="cursor-pointer smart-capitalize" htmlFor={opt}>
{isNaN(parseInt(opt))
? opt == "timeline"
? t("export.time.fromTimeline")
: t("export.time." + opt)
: t("export.time.lastHour", {
count: parseInt(opt),
})}
</Label>
</div>
);
})}
</RadioGroup>
{selectedOption == "custom" && (
<CustomTimeSelector
latestTime={latestTime}
range={range}
setRange={setRange}
startLabel={t("export.time.start.title")}
endLabel={t("export.time.end.title")}
/>
)}
<Input
className="text-md my-6"
type="search"
placeholder={t("export.name.placeholder")}
value={name}
onChange={(e) => setName(e.target.value)}
/>
<div className="my-4">
<Label className="text-sm text-secondary-foreground">
{t("export.case.label", { defaultValue: "Case (optional)" })}
</Label>
<Select
value={selectedCaseId || "none"}
onValueChange={(value) =>
setSelectedCaseId(value === "none" ? undefined : value)
}
>
<SelectTrigger className="mt-2">
<SelectValue
placeholder={t("export.case.placeholder", {
defaultValue: "Select a case (optional)",
})}
/>
</SelectTrigger>
<SelectContent>
<SelectItem
value="none"
className="cursor-pointer hover:bg-accent hover:text-accent-foreground"
>
{t("label.none", { ns: "common" })}
</SelectItem>
{cases
?.sort((a, b) => a.name.localeCompare(b.name))
.map((caseItem) => (
<SelectItem
key={caseItem.id}
value={caseItem.id}
className="cursor-pointer hover:bg-accent hover:text-accent-foreground"
>
{caseItem.name}
</SelectItem>
))}
</SelectContent>
</Select>
</div>
{isDesktop && <SelectSeparator className="my-4 bg-secondary" />}
<DialogFooter
className={isDesktop ? "" : "mt-3 flex flex-col-reverse gap-4"}
>
<div
className={`cursor-pointer p-2 text-center ${isDesktop ? "" : "w-full"}`}
onClick={onCancel}
>
{t("button.cancel", { ns: "common" })}
</div>
<Button
className={isDesktop ? "" : "w-full"}
aria-label={t("export.selectOrExport")}
variant="select"
size="sm"
onClick={() => {
if (selectedOption == "timeline") {
setRange({ before: currentTime + 30, after: currentTime - 30 });
setMode("timeline");
} else {
onStartExport();
setSelectedOption("1");
setMode("none");
}
}}
>
{selectedOption == "timeline"
? t("export.select")
: t("export.export")}
</Button>
</DialogFooter>
</div>
);
}
type ExportPreviewDialogProps = {
camera: string;
range?: TimeRange;
showPreview: boolean;
setShowPreview: (showPreview: boolean) => void;
};
export function ExportPreviewDialog({
camera,
range,
showPreview,
setShowPreview,
}: ExportPreviewDialogProps) {
const { t } = useTranslation(["components/dialog"]);
if (!range) {
return null;
}
const source = `${baseUrl}vod/${camera}/start/${range.after}/end/${range.before}/index.m3u8`;
return (
<Dialog open={showPreview} onOpenChange={setShowPreview}>
<DialogContent
className={cn(
"scrollbar-container overflow-y-auto",
isDesktop &&
"max-h-[95dvh] sm:max-w-xl md:max-w-4xl lg:max-w-4xl xl:max-w-7xl",
isMobile && "px-4",
)}
>
<DialogHeader>
<DialogTitle>{t("export.fromTimeline.previewExport")}</DialogTitle>
<DialogDescription className="sr-only">
{t("export.fromTimeline.previewExport")}
</DialogDescription>
</DialogHeader>
<GenericVideoPlayer source={source} />
</DialogContent>
</Dialog>
);
}
import { useCallback, useState } from "react";
import {
Dialog,
DialogContent,
DialogDescription,
DialogFooter,
DialogHeader,
DialogTitle,
DialogTrigger,
} from "../ui/dialog";
import { Label } from "../ui/label";
import { RadioGroup, RadioGroupItem } from "../ui/radio-group";
import { Button } from "../ui/button";
import { ExportMode } from "@/types/filter";
import { FaArrowDown } from "react-icons/fa";
import axios from "axios";
import { toast } from "sonner";
import { Input } from "../ui/input";
import { TimeRange } from "@/types/timeline";
import useSWR from "swr";
import {
Select,
SelectContent,
SelectItem,
SelectSeparator,
SelectTrigger,
SelectValue,
} from "../ui/select";
import { isDesktop, isMobile } from "react-device-detect";
import { Drawer, DrawerContent, DrawerTrigger } from "../ui/drawer";
import SaveExportOverlay from "./SaveExportOverlay";
import { baseUrl } from "@/api/baseUrl";
import { cn } from "@/lib/utils";
import { GenericVideoPlayer } from "../player/GenericVideoPlayer";
import { useTranslation } from "react-i18next";
import { ExportCase } from "@/types/export";
import { CustomTimeSelector } from "./CustomTimeSelector";
import useRecordingPlaybackSource from "@/hooks/use-recording-playback-source";
// Quick-export presets: numeric strings mean "last N hours"; "timeline" uses
// the current playhead, "custom" reveals the manual time selector.
const EXPORT_OPTIONS = [
  "1",
  "4",
  "8",
  "12",
  "24",
  "timeline",
  "custom",
] as const;
// Union of the literal preset values above.
type ExportOption = (typeof EXPORT_OPTIONS)[number];
// Props for the top-level ExportDialog component.
type ExportDialogProps = {
  camera: string; // camera name used in export/vod API paths
  latestTime: number; // most recent available timestamp (unix seconds)
  currentTime: number; // current playhead timestamp (unix seconds)
  range?: TimeRange; // currently selected export window, if any
  mode: ExportMode; // "none" | "select" | "timeline" as used below
  showPreview: boolean; // whether the preview dialog is open
  setRange: (range: TimeRange | undefined) => void;
  setMode: (mode: ExportMode) => void;
  setShowPreview: (showPreview: boolean) => void;
};
// Top-level export dialog: lets the user pick a time window, name the export,
// optionally attach it to a case, then POSTs the export request. Desktop
// renders a modal dialog; mobile renders a bottom drawer.
export default function ExportDialog({
  camera,
  latestTime,
  currentTime,
  range,
  mode,
  showPreview,
  setRange,
  setMode,
  setShowPreview,
}: ExportDialogProps) {
  const { t } = useTranslation(["components/dialog"]);
  const [name, setName] = useState("");
  const [selectedCaseId, setSelectedCaseId] = useState<string | undefined>(
    undefined,
  );
  // Validates the selected range, POSTs the export request, resets local
  // state on success, and surfaces API errors via toasts.
  const onStartExport = useCallback(() => {
    if (!range) {
      toast.error(t("export.toast.error.noVaildTimeSelected"), {
        position: "top-center",
      });
      return;
    }
    if (range.before < range.after) {
      toast.error(t("export.toast.error.endTimeMustAfterStartTime"), {
        position: "top-center",
      });
      return;
    }
    axios
      .post(
        `export/${camera}/start/${Math.round(range.after)}/end/${Math.round(range.before)}`,
        {
          playback: "realtime",
          name,
          // Omit the field when no case is selected.
          export_case_id: selectedCaseId || undefined,
        },
      )
      .then((response) => {
        if (response.status == 200) {
          toast.success(t("export.toast.success"), {
            position: "top-center",
            action: (
              <a href="/export" target="_blank" rel="noopener noreferrer">
                <Button>{t("export.toast.view")}</Button>
              </a>
            ),
          });
          setName("");
          setSelectedCaseId(undefined);
          setRange(undefined);
          setMode("none");
        }
      })
      .catch((error) => {
        const errorMessage =
          error.response?.data?.message ||
          error.response?.data?.detail ||
          "Unknown error";
        toast.error(
          t("export.toast.error.failed", {
            error: errorMessage,
          }),
          { position: "top-center" },
        );
      });
  }, [camera, name, range, selectedCaseId, setRange, setName, setMode, t]);
  // Clears all selection state and closes the dialog/overlay.
  const handleCancel = useCallback(() => {
    setName("");
    setSelectedCaseId(undefined);
    setMode("none");
    setRange(undefined);
  }, [setMode, setRange]);
  // Pick the dialog vs drawer primitives once, based on form factor.
  const Overlay = isDesktop ? Dialog : Drawer;
  const Trigger = isDesktop ? DialogTrigger : DrawerTrigger;
  const Content = isDesktop ? DialogContent : DrawerContent;
  return (
    <>
      <ExportPreviewDialog
        camera={camera}
        range={range}
        showPreview={showPreview}
        setShowPreview={setShowPreview}
      />
      {/* Floating save/preview/cancel bar shown during timeline selection. */}
      <SaveExportOverlay
        className="pointer-events-none absolute left-1/2 top-8 z-50 -translate-x-1/2"
        show={mode == "timeline"}
        onPreview={() => setShowPreview(true)}
        onSave={() => onStartExport()}
        onCancel={handleCancel}
      />
      <Overlay
        open={mode == "select"}
        onOpenChange={(open) => {
          if (!open) {
            setMode("none");
          }
        }}
      >
        {!isDesktop && (
          <Trigger asChild>
            <Button
              className="flex items-center gap-2"
              aria-label={t("menu.export", { ns: "common" })}
              size="sm"
              onClick={() => {
                // Default the range to the last hour before opening.
                const now = new Date(latestTime * 1000);
                let start = 0;
                now.setHours(now.getHours() - 1);
                start = now.getTime() / 1000;
                setRange({
                  before: latestTime,
                  after: start,
                });
                setMode("select");
              }}
            >
              <FaArrowDown className="rounded-md bg-secondary-foreground fill-secondary p-1" />
              {isDesktop && (
                <div className="text-primary">
                  {t("menu.export", { ns: "common" })}
                </div>
              )}
            </Button>
          </Trigger>
        )}
        <Content
          className={
            isDesktop
              ? "sm:rounded-lg md:rounded-2xl"
              : "mx-4 rounded-lg px-4 pb-4 md:rounded-2xl"
          }
        >
          <ExportContent
            latestTime={latestTime}
            currentTime={currentTime}
            range={range}
            name={name}
            selectedCaseId={selectedCaseId}
            onStartExport={onStartExport}
            setName={setName}
            setSelectedCaseId={setSelectedCaseId}
            setRange={setRange}
            setMode={setMode}
            onCancel={handleCancel}
          />
        </Content>
      </Overlay>
    </>
  );
}
// Props for the shared export form body (used by both dialog and drawer).
type ExportContentProps = {
  latestTime: number; // most recent available timestamp (unix seconds)
  currentTime: number; // current playhead timestamp (unix seconds)
  range?: TimeRange; // currently selected export window, if any
  name: string; // user-entered export name (controlled input)
  selectedCaseId?: string; // optional case to attach the export to
  onStartExport: () => void; // submits the export request
  setName: (name: string) => void;
  setSelectedCaseId: (caseId: string | undefined) => void;
  setRange: (range: TimeRange | undefined) => void;
  setMode: (mode: ExportMode) => void;
  onCancel: () => void; // resets state and dismisses the form
};
// Form body shared by the desktop dialog and mobile drawer: preset/custom
// time selection, export name input, and optional case assignment.
export function ExportContent({
  latestTime,
  currentTime,
  range,
  name,
  selectedCaseId,
  onStartExport,
  setName,
  setSelectedCaseId,
  setRange,
  setMode,
  onCancel,
}: ExportContentProps) {
  const { t } = useTranslation(["components/dialog"]);
  const [selectedOption, setSelectedOption] = useState<ExportOption>("1");
  // Cases available for the optional case selector.
  const { data: cases } = useSWR<ExportCase[]>("cases");
  // Translates a preset option into a concrete range ending at latestTime.
  const onSelectTime = useCallback(
    (option: ExportOption) => {
      setSelectedOption(option);
      const now = new Date(latestTime * 1000);
      let start = 0;
      switch (option) {
        case "1":
          now.setHours(now.getHours() - 1);
          start = now.getTime() / 1000;
          break;
        case "4":
          now.setHours(now.getHours() - 4);
          start = now.getTime() / 1000;
          break;
        case "8":
          now.setHours(now.getHours() - 8);
          start = now.getTime() / 1000;
          break;
        case "12":
          now.setHours(now.getHours() - 12);
          start = now.getTime() / 1000;
          break;
        case "24":
          now.setHours(now.getHours() - 24);
          start = now.getTime() / 1000;
          break;
        case "custom":
          // Custom defaults to the last hour; CustomTimeSelector refines it.
          start = latestTime - 3600;
          break;
        // NOTE(review): "timeline" has no case, so start stays 0 and the
        // range becomes {after: 0}; the real range is set later when the
        // user clicks Select — confirm this interim range is intended.
      }
      setRange({
        before: latestTime,
        after: start,
      });
    },
    [latestTime, setRange],
  );
  return (
    <div className="w-full">
      {isDesktop && (
        <>
          <DialogHeader>
            <DialogTitle>{t("menu.export", { ns: "common" })}</DialogTitle>
          </DialogHeader>
          <SelectSeparator className="my-4 bg-secondary" />
        </>
      )}
      {/* Preset time-range radio list. */}
      <RadioGroup
        className={`flex flex-col gap-4 ${isDesktop ? "" : "mt-4"}`}
        onValueChange={(value) => onSelectTime(value as ExportOption)}
      >
        {EXPORT_OPTIONS.map((opt) => {
          return (
            <div key={opt} className="flex items-center gap-2">
              <RadioGroupItem
                className={
                  opt == selectedOption
                    ? "bg-selected from-selected/50 to-selected/90 text-selected"
                    : "bg-secondary from-secondary/50 to-secondary/90 text-secondary"
                }
                id={opt}
                value={opt}
              />
              <Label className="cursor-pointer smart-capitalize" htmlFor={opt}>
                {isNaN(parseInt(opt))
                  ? opt == "timeline"
                    ? t("export.time.fromTimeline")
                    : t("export.time." + opt)
                  : t("export.time.lastHour", {
                      count: parseInt(opt),
                    })}
              </Label>
            </div>
          );
        })}
      </RadioGroup>
      {selectedOption == "custom" && (
        <CustomTimeSelector
          latestTime={latestTime}
          range={range}
          setRange={setRange}
          startLabel={t("export.time.start.title")}
          endLabel={t("export.time.end.title")}
        />
      )}
      <Input
        className="text-md my-6"
        type="search"
        placeholder={t("export.name.placeholder")}
        value={name}
        onChange={(e) => setName(e.target.value)}
      />
      {/* Optional case assignment; "none" sentinel maps to undefined. */}
      <div className="my-4">
        <Label className="text-sm text-secondary-foreground">
          {t("export.case.label", { defaultValue: "Case (optional)" })}
        </Label>
        <Select
          value={selectedCaseId || "none"}
          onValueChange={(value) =>
            setSelectedCaseId(value === "none" ? undefined : value)
          }
        >
          <SelectTrigger className="mt-2">
            <SelectValue
              placeholder={t("export.case.placeholder", {
                defaultValue: "Select a case (optional)",
              })}
            />
          </SelectTrigger>
          <SelectContent>
            <SelectItem
              value="none"
              className="cursor-pointer hover:bg-accent hover:text-accent-foreground"
            >
              {t("label.none", { ns: "common" })}
            </SelectItem>
            {cases
              ?.sort((a, b) => a.name.localeCompare(b.name))
              .map((caseItem) => (
                <SelectItem
                  key={caseItem.id}
                  value={caseItem.id}
                  className="cursor-pointer hover:bg-accent hover:text-accent-foreground"
                >
                  {caseItem.name}
                </SelectItem>
              ))}
          </SelectContent>
        </Select>
      </div>
      {isDesktop && <SelectSeparator className="my-4 bg-secondary" />}
      <DialogFooter
        className={isDesktop ? "" : "mt-3 flex flex-col-reverse gap-4"}
      >
        <div
          className={`cursor-pointer p-2 text-center ${isDesktop ? "" : "w-full"}`}
          onClick={onCancel}
        >
          {t("button.cancel", { ns: "common" })}
        </div>
        <Button
          className={isDesktop ? "" : "w-full"}
          aria-label={t("export.selectOrExport")}
          variant="select"
          size="sm"
          onClick={() => {
            // "timeline" switches to on-timeline selection around the
            // playhead; any other option exports immediately.
            if (selectedOption == "timeline") {
              setRange({ before: currentTime + 30, after: currentTime - 30 });
              setMode("timeline");
            } else {
              onStartExport();
              setSelectedOption("1");
              setMode("none");
            }
          }}
        >
          {selectedOption == "timeline"
            ? t("export.select")
            : t("export.export")}
        </Button>
      </DialogFooter>
    </div>
  );
}
// Props for the export-range preview dialog.
type ExportPreviewDialogProps = {
  camera: string; // camera name used in the VOD path
  range?: TimeRange; // range to preview; dialog renders nothing without it
  showPreview: boolean; // whether the dialog is open
  setShowPreview: (showPreview: boolean) => void;
};
// Modal preview of the selected export range. Prefers the source resolved by
// useRecordingPlaybackSource and falls back to the raw VOD HLS playlist.
export function ExportPreviewDialog({
  camera,
  range,
  showPreview,
  setShowPreview,
}: ExportPreviewDialogProps) {
  const { t } = useTranslation(["components/dialog"]);
  // Hooks must run unconditionally, so build a placeholder path (disabled via
  // `enabled`) when no range is selected yet.
  const vodPath = range
    ? `/vod/${camera}/start/${range.after}/end/${range.before}/index.m3u8`
    : `/vod/${camera}/start/0/end/0/index.m3u8`;
  const playbackSource = useRecordingPlaybackSource({
    camera,
    after: range?.after ?? 0,
    before: range?.before ?? 0,
    vodPath,
    enabled: !!range,
  });
  if (!range) {
    return null;
  }
  // baseUrl already ends with "/" (other callers append "vod/..." with no
  // leading slash), so strip vodPath's leading "/" in the fallback to avoid
  // producing a "//" in the URL. vodPath itself is left untouched for the
  // playback-source hook.
  const fallbackUrl = `${baseUrl}${
    vodPath.startsWith("/") ? vodPath.slice(1) : vodPath
  }`;
  const source = playbackSource ?? fallbackUrl;
  return (
    <Dialog open={showPreview} onOpenChange={setShowPreview}>
      <DialogContent
        className={cn(
          "scrollbar-container overflow-y-auto",
          isDesktop &&
            "max-h-[95dvh] sm:max-w-xl md:max-w-4xl lg:max-w-4xl xl:max-w-7xl",
          isMobile && "px-4",
        )}
      >
        <DialogHeader>
          <DialogTitle>{t("export.fromTimeline.previewExport")}</DialogTitle>
          <DialogDescription className="sr-only">
            {t("export.fromTimeline.previewExport")}
          </DialogDescription>
        </DialogHeader>
        <GenericVideoPlayer source={source} />
      </DialogContent>
    </Dialog>
  );
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,351 +1,429 @@
import {
ReactNode,
useCallback,
useEffect,
useMemo,
useRef,
useState,
} from "react";
import { useApiHost } from "@/api";
import useSWR from "swr";
import { FrigateConfig } from "@/types/frigateConfig";
import { Recording } from "@/types/record";
import { Preview } from "@/types/preview";
import PreviewPlayer, { PreviewController } from "../PreviewPlayer";
import { DynamicVideoController } from "./DynamicVideoController";
import HlsVideoPlayer, { HlsSource } from "../HlsVideoPlayer";
import { useDetailStream } from "@/context/detail-stream-context";
import { TimeRange } from "@/types/timeline";
import ActivityIndicator from "@/components/indicators/activity-indicator";
import { VideoResolutionType } from "@/types/live";
import axios from "axios";
import { cn } from "@/lib/utils";
import { useTranslation } from "react-i18next";
import {
calculateInpointOffset,
calculateSeekPosition,
} from "@/utils/videoUtil";
import { isFirefox } from "react-device-detect";
/**
* Dynamically switches between video playback and scrubbing preview player.
*/
type DynamicVideoPlayerProps = {
className?: string;
camera: string;
timeRange: TimeRange;
cameraPreviews: Preview[];
startTimestamp?: number;
isScrubbing: boolean;
hotKeys: boolean;
supportsFullscreen: boolean;
fullscreen: boolean;
onControllerReady: (controller: DynamicVideoController) => void;
onTimestampUpdate?: (timestamp: number) => void;
onClipEnded?: () => void;
onSeekToTime?: (timestamp: number, play?: boolean) => void;
setFullResolution: React.Dispatch<React.SetStateAction<VideoResolutionType>>;
toggleFullscreen: () => void;
containerRef?: React.MutableRefObject<HTMLDivElement | null>;
transformedOverlay?: ReactNode;
};
export default function DynamicVideoPlayer({
className,
camera,
timeRange,
cameraPreviews,
startTimestamp,
isScrubbing,
hotKeys,
supportsFullscreen,
fullscreen,
onControllerReady,
onTimestampUpdate,
onClipEnded,
onSeekToTime,
setFullResolution,
toggleFullscreen,
containerRef,
transformedOverlay,
}: DynamicVideoPlayerProps) {
const { t } = useTranslation(["components/player"]);
const apiHost = useApiHost();
const { data: config } = useSWR<FrigateConfig>("config");
// for detail stream context in History
const {
isDetailMode,
camera: contextCamera,
currentTime,
} = useDetailStream();
// controlling playback
const playerRef = useRef<HTMLVideoElement | null>(null);
const [previewController, setPreviewController] =
useState<PreviewController | null>(null);
const [noRecording, setNoRecording] = useState(false);
const controller = useMemo(() => {
if (!config || !playerRef.current || !previewController) {
return undefined;
}
return new DynamicVideoController(
camera,
playerRef.current,
previewController,
(config.cameras[camera]?.detect?.annotation_offset || 0) / 1000,
isScrubbing ? "scrubbing" : "playback",
setNoRecording,
() => {},
);
// we only want to fire once when players are ready
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [camera, config, playerRef.current, previewController]);
useEffect(() => {
if (!controller) {
return;
}
if (controller) {
onControllerReady(controller);
}
// we only want to fire once when players are ready
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [controller]);
// initial state
const [isLoading, setIsLoading] = useState(false);
const [isBuffering, setIsBuffering] = useState(false);
const [loadingTimeout, setLoadingTimeout] = useState<NodeJS.Timeout>();
// Don't set source until recordings load - we need accurate startPosition
// to avoid hls.js clamping to video end when startPosition exceeds duration
const [source, setSource] = useState<HlsSource | undefined>(undefined);
// start at correct time
useEffect(() => {
if (!isScrubbing) {
setLoadingTimeout(setTimeout(() => setIsLoading(true), 1000));
}
return () => {
if (loadingTimeout) {
clearTimeout(loadingTimeout);
}
};
// we only want trigger when scrubbing state changes
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [camera, isScrubbing]);
const onPlayerLoaded = useCallback(() => {
if (!controller || !startTimestamp) {
return;
}
controller.seekToTimestamp(startTimestamp, true);
}, [startTimestamp, controller]);
const onTimeUpdate = useCallback(
(time: number) => {
if (isScrubbing || !controller || !onTimestampUpdate || time == 0) {
return;
}
if (isLoading) {
setIsLoading(false);
}
if (isBuffering) {
setIsBuffering(false);
}
onTimestampUpdate(controller.getProgress(time));
},
[controller, onTimestampUpdate, isBuffering, isLoading, isScrubbing],
);
const onUploadFrameToPlus = useCallback(
(playTime: number) => {
if (!controller) {
return;
}
const time = controller.getProgress(playTime);
return axios.post(`/${camera}/plus/${time}`);
},
[camera, controller],
);
// state of playback player
const recordingParams = useMemo(
() => ({
before: timeRange.before,
after: timeRange.after,
}),
[timeRange],
);
const { data: recordings } = useSWR<Recording[]>(
[`${camera}/recordings`, recordingParams],
{ revalidateOnFocus: false },
);
useEffect(() => {
if (!recordings?.length) {
if (recordings?.length == 0) {
setNoRecording(true);
}
return;
}
let startPosition = undefined;
if (startTimestamp) {
const inpointOffset = calculateInpointOffset(
recordingParams.after,
(recordings || [])[0],
);
startPosition = calculateSeekPosition(
startTimestamp,
recordings,
inpointOffset,
);
}
setSource({
playlist: `${apiHost}vod/${camera}/start/${recordingParams.after}/end/${recordingParams.before}/master.m3u8`,
startPosition,
});
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [recordings]);
useEffect(() => {
if (!controller || !recordings?.length) {
return;
}
if (playerRef.current) {
playerRef.current.autoplay = !isScrubbing;
}
setLoadingTimeout(setTimeout(() => setIsLoading(true), 1000));
controller.newPlayback({
recordings: recordings ?? [],
timeRange,
});
// we only want this to change when controller or recordings update
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [controller, recordings]);
const inpointOffset = useMemo(
() => calculateInpointOffset(recordingParams.after, (recordings || [])[0]),
[recordingParams, recordings],
);
const onValidateClipEnd = useCallback(
(currentTime: number) => {
if (!onClipEnded || !controller || !recordings) {
return;
}
if (!isFirefox) {
onClipEnded();
}
// Firefox has a bug where clipEnded can be called prematurely due to buffering
// we need to validate if the current play-point is truly at the end of available recordings
const lastRecordingTime = recordings.at(-1)?.start_time;
if (
!lastRecordingTime ||
controller.getProgress(currentTime) < lastRecordingTime
) {
return;
}
onClipEnded();
},
[onClipEnded, controller, recordings],
);
return (
<>
{source && (
<HlsVideoPlayer
videoRef={playerRef}
containerRef={containerRef}
visible={!(isScrubbing || isLoading)}
currentSource={source}
hotKeys={hotKeys}
supportsFullscreen={supportsFullscreen}
fullscreen={fullscreen}
inpointOffset={inpointOffset}
onTimeUpdate={onTimeUpdate}
onPlayerLoaded={onPlayerLoaded}
onClipEnded={onValidateClipEnd}
onSeekToTime={(timestamp, play) => {
if (onSeekToTime) {
onSeekToTime(timestamp, play);
}
}}
onPlaying={() => {
if (isScrubbing) {
playerRef.current?.pause();
}
if (loadingTimeout) {
clearTimeout(loadingTimeout);
}
setNoRecording(false);
}}
setFullResolution={setFullResolution}
onUploadFrame={onUploadFrameToPlus}
toggleFullscreen={toggleFullscreen}
onError={(error) => {
if (error == "stalled" && !isScrubbing) {
setIsBuffering(true);
}
}}
isDetailMode={isDetailMode}
camera={contextCamera || camera}
currentTimeOverride={currentTime}
transformedOverlay={transformedOverlay}
/>
)}
<PreviewPlayer
className={cn(
className,
isScrubbing || isLoading ? "visible" : "hidden",
)}
camera={camera}
timeRange={timeRange}
cameraPreviews={cameraPreviews}
startTime={startTimestamp}
isScrubbing={isScrubbing}
onControllerReady={(previewController) =>
setPreviewController(previewController)
}
/>
{!isScrubbing && (isLoading || isBuffering) && !noRecording && (
<ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />
)}
{!isScrubbing && !isLoading && noRecording && (
<div className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2">
{t("noRecordingsFoundForThisTime")}
</div>
)}
</>
);
}
import {
ReactNode,
useCallback,
useEffect,
useMemo,
useRef,
useState,
} from "react";
import { useApiHost } from "@/api";
import useSWR from "swr";
import { FrigateConfig } from "@/types/frigateConfig";
import {
Recording,
RecordingPlaybackPreference,
} from "@/types/record";
import { Preview } from "@/types/preview";
import PreviewPlayer, { PreviewController } from "../PreviewPlayer";
import { DynamicVideoController } from "./DynamicVideoController";
import HlsVideoPlayer, { HlsSource } from "../HlsVideoPlayer";
import { useDetailStream } from "@/context/detail-stream-context";
import { TimeRange } from "@/types/timeline";
import ActivityIndicator from "@/components/indicators/activity-indicator";
import { VideoResolutionType } from "@/types/live";
import axios from "axios";
import { cn } from "@/lib/utils";
import { useTranslation } from "react-i18next";
import { useUserPersistence } from "@/hooks/use-user-persistence";
import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";
import { chooseRecordingPlayback } from "@/utils/recordingPlayback";
import {
calculateInpointOffset,
calculateSeekPosition,
} from "@/utils/videoUtil";
import { isFirefox } from "react-device-detect";
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from "@/components/ui/select";
/**
* Dynamically switches between video playback and scrubbing preview player.
*/
// Props for DynamicVideoPlayer, which swaps between HLS playback and the
// scrubbing preview player.
type DynamicVideoPlayerProps = {
  className?: string;
  camera: string; // camera name used in recordings/vod API paths
  timeRange: TimeRange; // window of recordings to load
  cameraPreviews: Preview[]; // preview clips for scrubbing
  startTimestamp?: number; // initial seek position (unix seconds)
  isScrubbing: boolean; // true while the user is scrubbing the timeline
  hotKeys: boolean;
  supportsFullscreen: boolean;
  fullscreen: boolean;
  onControllerReady: (controller: DynamicVideoController) => void;
  onTimestampUpdate?: (timestamp: number) => void;
  onClipEnded?: () => void;
  onSeekToTime?: (timestamp: number, play?: boolean) => void;
  setFullResolution: React.Dispatch<React.SetStateAction<VideoResolutionType>>;
  toggleFullscreen: () => void;
  containerRef?: React.MutableRefObject<HTMLDivElement | null>;
  transformedOverlay?: ReactNode;
};
/**
 * Recording playback player that pairs an HLS video element with a preview
 * player: the HLS player is shown during normal playback while the preview
 * player takes over during scrubbing and initial loading.
 *
 * Fix: `onValidateClipEnd` previously fell through after the non-Firefox
 * `onClipEnded()` call, so non-Firefox browsers could invoke the callback
 * twice for a single clip end; an early return now prevents that.
 */
export default function DynamicVideoPlayer({
  className,
  camera,
  timeRange,
  cameraPreviews,
  startTimestamp,
  isScrubbing,
  hotKeys,
  supportsFullscreen,
  fullscreen,
  onControllerReady,
  onTimestampUpdate,
  onClipEnded,
  onSeekToTime,
  setFullResolution,
  toggleFullscreen,
  containerRef,
  transformedOverlay,
}: DynamicVideoPlayerProps) {
  const { t } = useTranslation(["components/player"]);
  const apiHost = useApiHost();
  const { data: config } = useSWR<FrigateConfig>("config");

  // for detail stream context in History
  const {
    isDetailMode,
    camera: contextCamera,
    currentTime,
  } = useDetailStream();

  // controlling playback
  const playerRef = useRef<HTMLVideoElement | null>(null);
  const [previewController, setPreviewController] =
    useState<PreviewController | null>(null);
  const [noRecording, setNoRecording] = useState(false);

  // Build the playback controller once the config, video element, and preview
  // controller are all available; annotation_offset is ms in config, seconds here.
  const controller = useMemo(() => {
    if (!config || !playerRef.current || !previewController) {
      return undefined;
    }

    return new DynamicVideoController(
      camera,
      playerRef.current,
      previewController,
      (config.cameras[camera]?.detect?.annotation_offset || 0) / 1000,
      isScrubbing ? "scrubbing" : "playback",
      setNoRecording,
      () => {},
    );
    // we only want to fire once when players are ready
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [camera, config, playerRef.current, previewController]);

  // Hand the controller to the parent as soon as it exists.
  useEffect(() => {
    if (!controller) {
      return;
    }

    onControllerReady(controller);
    // we only want to fire once when players are ready
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [controller]);

  // initial state
  const [isLoading, setIsLoading] = useState(false);
  const [isBuffering, setIsBuffering] = useState(false);
  const [loadingTimeout, setLoadingTimeout] = useState<NodeJS.Timeout>();
  const [playbackPreference, setPlaybackPreference] =
    useUserPersistence<RecordingPlaybackPreference>(
      `${camera}-recording-playback-v2`,
      "sub",
    );

  // Don't set source until recordings load - we need accurate startPosition
  // to avoid hls.js clamping to video end when startPosition exceeds duration
  const [source, setSource] = useState<HlsSource | undefined>(undefined);

  // Arm a 1s timer that shows the loading indicator if playback is slow to start.
  useEffect(() => {
    if (!isScrubbing) {
      setLoadingTimeout(setTimeout(() => setIsLoading(true), 1000));
    }

    return () => {
      if (loadingTimeout) {
        clearTimeout(loadingTimeout);
      }
    };
    // we only want trigger when scrubbing state changes
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [camera, isScrubbing]);

  // Seek to the requested start time once the player reports it is loaded.
  const onPlayerLoaded = useCallback(() => {
    if (!controller || !startTimestamp) {
      return;
    }

    controller.seekToTimestamp(startTimestamp, true);
  }, [startTimestamp, controller]);

  // Translate player time into an absolute timestamp for the parent and clear
  // transient loading/buffering indicators on the first real tick.
  const onTimeUpdate = useCallback(
    (time: number) => {
      if (isScrubbing || !controller || !onTimestampUpdate || time == 0) {
        return;
      }

      if (isLoading) {
        setIsLoading(false);
      }

      if (isBuffering) {
        setIsBuffering(false);
      }

      onTimestampUpdate(controller.getProgress(time));
    },
    [controller, onTimestampUpdate, isBuffering, isLoading, isScrubbing],
  );

  // Submit the frame at the given play time to the plus endpoint.
  const onUploadFrameToPlus = useCallback(
    (playTime: number) => {
      if (!controller) {
        return;
      }

      const time = controller.getProgress(playTime);
      return axios.post(`/${camera}/plus/${time}`);
    },
    [camera, controller],
  );

  // state of playback player

  const recordingParams = useMemo(
    () => ({
      before: timeRange.before,
      after: timeRange.after,
    }),
    [timeRange],
  );
  const { data: allRecordings } = useSWR<Recording[]>(
    [`${camera}/recordings`, { ...recordingParams, variant: "all" }],
    { revalidateOnFocus: false },
  );

  // Prefer main-variant segments for timeline math; fall back to whatever
  // variants exist when no main recordings are present.
  const recordings = useMemo(() => {
    if (!allRecordings?.length) {
      return allRecordings;
    }

    const mainRecordings = allRecordings.filter(
      (recording) => (recording.variant || "main") === "main",
    );

    return mainRecordings.length > 0 ? mainRecordings : allRecordings;
  }, [allRecordings]);

  // Distinct codec names in the window, used to probe browser playback support.
  const codecNames = useMemo(
    () =>
      Array.from(
        new Set((allRecordings ?? []).map((recording) => recording.codec_name)),
      ),
    [allRecordings],
  );
  const playbackCapabilities = usePlaybackCapabilities(codecNames);

  // Choose the playback source (variant / direct / transcoded) once recordings load.
  useEffect(() => {
    if (!recordings?.length) {
      // distinguish "still loading" (undefined) from "loaded but empty" (0)
      if (recordings?.length == 0) {
        setNoRecording(true);
      }

      return;
    }

    let startPosition = undefined;
    if (startTimestamp) {
      const inpointOffset = calculateInpointOffset(
        recordingParams.after,
        (recordings || [])[0],
      );
      startPosition = calculateSeekPosition(
        startTimestamp,
        recordings,
        inpointOffset,
      );
    }

    const vodPath = `/vod/${camera}/start/${recordingParams.after}/end/${recordingParams.before}/master.m3u8`;
    const decision = chooseRecordingPlayback({
      apiHost,
      config,
      recordings: allRecordings ?? recordings,
      preference: playbackPreference ?? "sub",
      vodPath,
      capabilities: playbackCapabilities,
    });

    setSource({
      playlist: decision.url,
      startPosition,
    });
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [
    apiHost,
    camera,
    recordingParams.after,
    recordingParams.before,
    allRecordings,
    recordings,
    startTimestamp,
    playbackPreference,
    playbackCapabilities,
    config?.transcode_proxy?.enabled,
    config?.transcode_proxy?.vod_proxy_url,
  ]);

  // Start a new playback session when the controller or recordings change.
  useEffect(() => {
    if (!controller || !recordings?.length) {
      return;
    }

    if (playerRef.current) {
      playerRef.current.autoplay = !isScrubbing;
    }

    setLoadingTimeout(setTimeout(() => setIsLoading(true), 1000));
    controller.newPlayback({
      recordings: recordings ?? [],
      timeRange,
    });
    // we only want this to change when controller or recordings update
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [controller, recordings]);

  const inpointOffset = useMemo(
    () => calculateInpointOffset(recordingParams.after, (recordings || [])[0]),
    [recordingParams, recordings],
  );

  // Report clip end to the parent. Firefox can fire clip-ended prematurely
  // while buffering, so it is validated against the last recording; other
  // browsers report immediately (and only once).
  const onValidateClipEnd = useCallback(
    (currentPlayTime: number) => {
      if (!onClipEnded || !controller || !recordings) {
        return;
      }

      if (!isFirefox) {
        onClipEnded();
        // return so the Firefox-only validation below cannot fire a second call
        return;
      }

      // Firefox has a bug where clipEnded can be called prematurely due to buffering
      // we need to validate if the current play-point is truly at the end of available recordings
      const lastRecordingTime = recordings.at(-1)?.start_time;
      if (
        !lastRecordingTime ||
        controller.getProgress(currentPlayTime) < lastRecordingTime
      ) {
        return;
      }

      onClipEnded();
    },
    [onClipEnded, controller, recordings],
  );

  return (
    <>
      {source && (
        <HlsVideoPlayer
          videoRef={playerRef}
          containerRef={containerRef}
          visible={!(isScrubbing || isLoading)}
          currentSource={source}
          hotKeys={hotKeys}
          supportsFullscreen={supportsFullscreen}
          fullscreen={fullscreen}
          inpointOffset={inpointOffset}
          onTimeUpdate={onTimeUpdate}
          onPlayerLoaded={onPlayerLoaded}
          onClipEnded={onValidateClipEnd}
          onSeekToTime={(timestamp, play) => {
            if (onSeekToTime) {
              onSeekToTime(timestamp, play);
            }
          }}
          onPlaying={() => {
            if (isScrubbing) {
              playerRef.current?.pause();
            }

            if (loadingTimeout) {
              clearTimeout(loadingTimeout);
            }

            setNoRecording(false);
          }}
          setFullResolution={setFullResolution}
          onUploadFrame={onUploadFrameToPlus}
          toggleFullscreen={toggleFullscreen}
          onError={(error) => {
            if (error == "stalled" && !isScrubbing) {
              setIsBuffering(true);
            }
          }}
          isDetailMode={isDetailMode}
          camera={contextCamera || camera}
          currentTimeOverride={currentTime}
          transformedOverlay={transformedOverlay}
        />
      )}
      {!isScrubbing && source && (
        <div className="absolute right-3 top-3 z-50">
          <Select
            value={playbackPreference ?? "sub"}
            onValueChange={(value) =>
              setPlaybackPreference(value as RecordingPlaybackPreference)
            }
          >
            <SelectTrigger className="h-8 w-32 bg-background/90 text-xs backdrop-blur">
              <SelectValue />
            </SelectTrigger>
            <SelectContent>
              <SelectItem value="auto">Auto</SelectItem>
              <SelectItem value="main">Main</SelectItem>
              <SelectItem value="sub">Sub</SelectItem>
              <SelectItem value="transcoded">Transcoded</SelectItem>
            </SelectContent>
          </Select>
        </div>
      )}
      <PreviewPlayer
        className={cn(
          className,
          isScrubbing || isLoading ? "visible" : "hidden",
        )}
        camera={camera}
        timeRange={timeRange}
        cameraPreviews={cameraPreviews}
        startTime={startTimestamp}
        isScrubbing={isScrubbing}
        onControllerReady={(previewController) =>
          setPreviewController(previewController)
        }
      />
      {!isScrubbing && (isLoading || isBuffering) && !noRecording && (
        <ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />
      )}
      {!isScrubbing && !isLoading && noRecording && (
        <div className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2">
          {t("noRecordingsFoundForThisTime")}
        </div>
      )}
    </>
  );
}

View File

@ -0,0 +1,77 @@
import { useMemo } from "react";
import {
getCodecMimeTypes,
normalizeCodecName,
PlaybackCapabilities,
} from "@/utils/recordingPlayback";
// Subset of the (experimental) Network Information API surface this hook reads.
// All fields optional because browser support varies.
type NavigatorConnection = {
  downlink?: number;
  effectiveType?: string;
  rtt?: number;
  saveData?: boolean;
};
declare global {
  interface Navigator {
    // standard name plus vendor-prefixed fallbacks
    connection?: NavigatorConnection;
    mozConnection?: NavigatorConnection;
    webkitConnection?: NavigatorConnection;
  }
  interface Window {
    // NOTE(review): assumed to share MediaSource's static interface
    // (isTypeSupported) — confirm against target browsers
    ManagedMediaSource?: typeof MediaSource;
  }
}
/**
 * Whether this browser can play the given mime type: checks MSE support
 * (ManagedMediaSource first, then MediaSource) and falls back to probing a
 * <video> element, where "maybe"/"probably" count as playable.
 */
function canPlayMimeType(mimeType?: string): boolean {
  // SSR or missing mime type: nothing to probe.
  if (!mimeType || typeof window === "undefined") {
    return false;
  }

  const mseSupported =
    window.ManagedMediaSource?.isTypeSupported(mimeType) ||
    window.MediaSource?.isTypeSupported(mimeType);

  if (mseSupported) {
    return true;
  }

  return document.createElement("video").canPlayType(mimeType) !== "";
}
/** True if at least one of the candidate mime types is playable here. */
function canPlayAnyMimeType(mimeTypes: string[]): boolean {
  for (const candidate of mimeTypes) {
    if (canPlayMimeType(candidate)) {
      return true;
    }
  }
  return false;
}
/**
 * Hook summarizing the browser's playback capabilities for a set of codec
 * names: per-codec direct-play support, estimated downlink bandwidth (bps,
 * from the Network Information API when available), and the save-data flag.
 */
export default function usePlaybackCapabilities(codecNames: Array<string | null | undefined>) {
  return useMemo<PlaybackCapabilities>(() => {
    // SSR: report nothing known.
    if (typeof window === "undefined") {
      return { estimatedBandwidthBps: undefined, saveData: false, supports: {} };
    }

    const netInfo =
      navigator.connection ?? navigator.mozConnection ?? navigator.webkitConnection;

    // Probe each distinct normalized codec exactly once.
    const supportMap: Record<string, boolean> = {};
    for (const rawName of codecNames) {
      const codec = normalizeCodecName(rawName);
      if (codec && !(codec in supportMap)) {
        supportMap[codec] = canPlayAnyMimeType(getCodecMimeTypes(codec));
      }
    }

    // downlink is reported in Mbps; convert to bps, ignoring non-positive values.
    const downlink = netInfo?.downlink;
    const bandwidthBps =
      typeof downlink === "number" && downlink > 0 ? downlink * 1_000_000 : undefined;

    return {
      estimatedBandwidthBps: bandwidthBps,
      saveData: netInfo?.saveData === true,
      supports: supportMap,
    };
  }, [codecNames]);
}

View File

@ -0,0 +1,72 @@
import { useApiHost } from "@/api";
import useSWR from "swr";
import { FrigateConfig } from "@/types/frigateConfig";
import {
Recording,
RecordingPlaybackPreference,
} from "@/types/record";
import { useMemo } from "react";
import { useUserPersistence } from "@/hooks/use-user-persistence";
import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";
import { chooseRecordingPlayback } from "@/utils/recordingPlayback";
// Options for useRecordingPlaybackSource.
type RecordingPlaybackSourceOptions = {
  camera: string; // camera name, used in API paths and the persistence key
  after: number; // start of the recording window
  before: number; // end of the recording window
  vodPath: string; // VOD playlist path appended to the chosen base URL
  preference?: RecordingPlaybackPreference; // explicit override of the stored preference
  enabled?: boolean; // when false, the recordings fetch is skipped
};
/**
 * Hook resolving the playback URL for a camera's recording window. Fetches
 * all recording variants, probes browser codec support, and delegates the
 * direct-vs-transcoded decision to chooseRecordingPlayback. Returns
 * undefined while recordings are loading or when none exist.
 */
export default function useRecordingPlaybackSource({
  camera,
  after,
  before,
  vodPath,
  preference,
  enabled = true,
}: RecordingPlaybackSourceOptions) {
  const apiHost = useApiHost();
  const { data: config } = useSWR<FrigateConfig>("config");

  // The user's persisted per-camera playback choice; an explicit
  // `preference` argument takes precedence over it.
  const [storedPreference] = useUserPersistence<RecordingPlaybackPreference>(
    `${camera}-recording-playback-v2`,
    "sub",
  );

  // Fetch every recording variant for the window; skipped when disabled.
  const { data: recordings } = useSWR<Recording[]>(
    enabled ? [`${camera}/recordings`, { after, before, variant: "all" }] : null,
    { revalidateOnFocus: false },
  );

  // Distinct codec names present in the window, used to probe support.
  const codecNames = useMemo(() => {
    const unique = new Set(
      (recordings ?? []).map((recording) => recording.codec_name),
    );
    return Array.from(unique);
  }, [recordings]);

  const capabilities = usePlaybackCapabilities(codecNames);

  return useMemo(() => {
    if (!recordings?.length) {
      return undefined;
    }

    const decision = chooseRecordingPlayback({
      apiHost,
      config,
      recordings,
      preference: preference ?? storedPreference ?? "sub",
      vodPath,
      capabilities,
    });

    return decision.url;
  }, [
    apiHost,
    capabilities,
    config,
    preference,
    recordings,
    storedPreference,
    vodPath,
  ]);
}

File diff suppressed because it is too large Load Diff

View File

@ -1,49 +1,60 @@
import { ReviewSeverity } from "./review";
import { TimelineType } from "./timeline";
// A single recorded segment as returned by the recordings API.
export type Recording = {
  id: string;
  camera: string;
  start_time: number;
  end_time: number;
  path: string;
  segment_size: number;
  duration: number;
  motion: number;
  objects: number;
  motion_heatmap?: Record<string, number> | null;
  dBFS: number;
};
// Per-segment activity/size summary (no path or camera).
export type RecordingSegment = {
  id: string;
  start_time: number;
  end_time: number;
  motion: number;
  objects: number;
  segment_size: number;
  duration: number;
};
// Activity entries keyed by hour of day.
export type RecordingActivity = {
  [hour: number]: RecordingSegmentActivity[];
};
type RecordingSegmentActivity = {
  date: number;
  count: number;
  hasObjects: boolean;
};
// Where recording playback should begin when opened from a review item.
export type RecordingStartingPoint = {
  camera: string;
  startTime: number;
  severity: ReviewSeverity;
  timelineType?: TimelineType;
};
export type RecordingPlayerError = "stalled" | "startup";
// Aspect-ratio thresholds used by layout code.
export const ASPECT_VERTICAL_LAYOUT = 1.5;
export const ASPECT_PORTRAIT_LAYOUT = 1.333;
export const ASPECT_WIDE_LAYOUT = 2;
import { ReviewSeverity } from "./review";
import { TimelineType } from "./timeline";
// A single recorded segment as returned by the recordings API.
export type Recording = {
  id: string;
  camera: string;
  start_time: number;
  end_time: number;
  path: string;
  // stream variant this segment belongs to; absent means "main"
  variant?: string;
  segment_size: number;
  duration: number;
  motion: number;
  objects: number;
  motion_heatmap?: Record<string, number> | null;
  dBFS: number;
  // optional media metadata used for playback decisions
  codec_name?: string | null;
  width?: number | null;
  height?: number | null;
  bitrate?: number | null;
};
// Per-segment activity/size summary (no path or camera).
export type RecordingSegment = {
  id: string;
  start_time: number;
  end_time: number;
  motion: number;
  objects: number;
  segment_size: number;
  duration: number;
};
// Activity entries keyed by hour of day.
export type RecordingActivity = {
  [hour: number]: RecordingSegmentActivity[];
};
type RecordingSegmentActivity = {
  date: number;
  count: number;
  hasObjects: boolean;
};
// Where recording playback should begin when opened from a review item.
export type RecordingStartingPoint = {
  camera: string;
  startTime: number;
  severity: ReviewSeverity;
  timelineType?: TimelineType;
};
export type RecordingPlayerError = "stalled" | "startup";
// User-selectable recording playback mode.
export type RecordingPlaybackPreference =
  | "auto"
  | "main"
  | "sub"
  | "transcoded";
// Aspect-ratio thresholds used by layout code.
export const ASPECT_VERTICAL_LAYOUT = 1.5;
export const ASPECT_PORTRAIT_LAYOUT = 1.333;
export const ASPECT_WIDE_LAYOUT = 2;

View File

@ -0,0 +1,44 @@
// Stream labels that usually indicate a low-bitrate (sub) stream.
const LOW_BANDWIDTH_PATTERN = /\b(sub|low|mobile|small|sd|lowres|low-res)\b/i;
// Stream labels that usually indicate a high-bitrate (main) stream.
const HIGH_BANDWIDTH_PATTERN = /\b(main|high|hd|full|primary)\b/i;

/**
 * Score a stream label for selection: 3 = matches the preferred tier,
 * 1 = matches the opposite tier, 2 = no recognizable keyword.
 */
function rankStreamLabel(label: string, preferLowBandwidth: boolean): number {
  const looksLow = LOW_BANDWIDTH_PATTERN.test(label);
  const looksHigh = HIGH_BANDWIDTH_PATTERN.test(label);
  const matchesPreferred = preferLowBandwidth ? looksLow : looksHigh;
  const matchesOpposite = preferLowBandwidth ? looksHigh : looksLow;

  if (matchesPreferred) {
    return 3;
  }
  if (matchesOpposite) {
    return 1;
  }
  return 2;
}

/**
 * Pick the URL of the live stream best suited to the connection: prefers
 * "sub"-style streams on constrained links (save-data on, or estimated
 * bandwidth <= 3 Mbps) and "main"-style streams otherwise.
 * Returns "" when no streams are configured.
 */
export function chooseAutoLiveStream(
  streams: Record<string, string>,
  estimatedBandwidthBps?: number,
  saveData = false,
): string {
  const entries = Object.entries(streams || {});

  if (entries.length === 0) {
    return "";
  }

  const preferLowBandwidth =
    saveData || !!(estimatedBandwidthBps && estimatedBandwidthBps <= 3_000_000);

  // Keep the first entry with the highest rank; ties go to configuration
  // order, matching a stable descending sort.
  let bestUrl = entries[0][1];
  let bestRank = rankStreamLabel(entries[0][0], preferLowBandwidth);

  for (const [label, url] of entries.slice(1)) {
    const rank = rankStreamLabel(label, preferLowBandwidth);
    if (rank > bestRank) {
      bestRank = rank;
      bestUrl = url;
    }
  }

  return bestUrl;
}

View File

@ -0,0 +1,324 @@
import { FrigateConfig } from "@/types/frigateConfig";
import {
Recording,
RecordingPlaybackPreference,
} from "@/types/record";
// Snapshot of the browser's playback-related capabilities.
export type PlaybackCapabilities = {
  // estimated downlink in bits/sec, when known
  estimatedBandwidthBps?: number;
  // whether the user requested reduced data usage
  saveData: boolean;
  // normalized codec name -> whether this browser can play it directly
  supports: Record<string, boolean>;
};
// Outcome of the playback decision: how and what to play, and why.
export type RecordingPlaybackDecision = {
  mode: "direct" | "transcoded";
  variant: string;
  url: string;
  // machine-readable tag naming the decision branch that was taken
  reason: string;
};
// Inputs to chooseRecordingPlayback.
type DecisionOptions = {
  apiHost: string;
  config?: FrigateConfig;
  recordings: Recording[];
  preference: RecordingPlaybackPreference;
  vodPath: string;
  capabilities: PlaybackCapabilities;
};
// Representative MSE mime-type strings per normalized codec name, used to
// probe browser support. Alias keys (h264/avc1, hevc/h265/hev1/hvc1,
// av1/av01, vp9/vp09) intentionally share the same candidate lists.
const CODEC_SAMPLES: Record<string, string[]> = {
  h264: ['video/mp4; codecs="avc1.42E01E"', 'video/mp4; codecs="avc1.64001F"'],
  avc1: ['video/mp4; codecs="avc1.42E01E"', 'video/mp4; codecs="avc1.64001F"'],
  hevc: [
    'video/mp4; codecs="hev1.1.6.L120.90"',
    'video/mp4; codecs="hvc1.1.6.L120.90"',
    'video/mp4; codecs="hev1.1.6.L93.B0"',
    'video/mp4; codecs="hvc1.1.6.L93.B0"',
  ],
  h265: [
    'video/mp4; codecs="hev1.1.6.L120.90"',
    'video/mp4; codecs="hvc1.1.6.L120.90"',
    'video/mp4; codecs="hev1.1.6.L93.B0"',
    'video/mp4; codecs="hvc1.1.6.L93.B0"',
  ],
  hev1: [
    'video/mp4; codecs="hev1.1.6.L120.90"',
    'video/mp4; codecs="hvc1.1.6.L120.90"',
    'video/mp4; codecs="hev1.1.6.L93.B0"',
    'video/mp4; codecs="hvc1.1.6.L93.B0"',
  ],
  hvc1: [
    'video/mp4; codecs="hev1.1.6.L120.90"',
    'video/mp4; codecs="hvc1.1.6.L120.90"',
    'video/mp4; codecs="hev1.1.6.L93.B0"',
    'video/mp4; codecs="hvc1.1.6.L93.B0"',
  ],
  av1: ['video/mp4; codecs="av01.0.05M.08"'],
  av01: ['video/mp4; codecs="av01.0.05M.08"'],
  vp9: ['video/mp4; codecs="vp09.00.10.08"'],
  vp09: ['video/mp4; codecs="vp09.00.10.08"'],
};
/** Drop a single trailing "/" from `value`, if present. */
function trimTrailingSlash(value: string): string {
  return value.endsWith("/") ? value.slice(0, -1) : value;
}
/**
 * Append the truthy entries of `params` to `url` as query parameters,
 * using "&" when the URL already carries a query string. Undefined and
 * empty-string values are skipped; the URL is returned unchanged when
 * nothing remains.
 */
function appendQuery(url: string, params: Record<string, string | undefined>): string {
  const search = new URLSearchParams();

  for (const [key, value] of Object.entries(params)) {
    if (value) {
      search.append(key, value);
    }
  }

  const query = search.toString();

  if (!query) {
    return url;
  }

  return url.includes("?") ? `${url}&${query}` : `${url}?${query}`;
}
/** Arithmetic mean of `values`, or undefined for an empty list. */
function average(values: number[]): number | undefined {
  if (values.length === 0) {
    return undefined;
  }

  let total = 0;
  for (const value of values) {
    total += value;
  }

  return total / values.length;
}
/** Lowercase and trim a codec name; undefined for null/empty/whitespace-only input. */
export function normalizeCodecName(codecName?: string | null): string | undefined {
  const normalized = codecName?.toLowerCase().trim();
  return normalized ? normalized : undefined;
}
/** MSE mime-type candidates for a codec name; [] when missing or unmapped. */
export function getCodecMimeTypes(codecName?: string | null): string[] {
  const normalized = normalizeCodecName(codecName);

  if (!normalized) {
    return [];
  }

  return CODEC_SAMPLES[normalized] ?? [];
}
/**
 * Estimate the average bitrate (bits/sec) of a set of recording segments.
 * Prefers explicit positive per-segment `bitrate` values; otherwise derives
 * a rate from segment_size (multiplied by 1024*1024*8, i.e. treated as MB)
 * and duration. Returns undefined when no usable data is present.
 */
export function estimateRecordingBitrate(recordings: Recording[]): number | undefined {
  const mean = (values: number[]) =>
    values.length
      ? values.reduce((sum, value) => sum + value, 0) / values.length
      : undefined;

  const explicitRates: number[] = [];
  for (const recording of recordings) {
    if (typeof recording.bitrate === "number" && recording.bitrate > 0) {
      explicitRates.push(recording.bitrate);
    }
  }

  if (explicitRates.length > 0) {
    return mean(explicitRates);
  }

  // Fall back to size-derived rates; non-positive results are discarded.
  const derivedRates: number[] = [];
  for (const recording of recordings) {
    if (recording.segment_size && recording.duration) {
      const rate = (recording.segment_size * 1024 * 1024 * 8) / recording.duration;
      if (rate > 0) {
        derivedRates.push(rate);
      }
    }
  }

  return mean(derivedRates);
}
/** Bucket recordings by their `variant` field, defaulting to "main". */
export function groupRecordingsByVariant(
  recordings: Recording[],
): Record<string, Recording[]> {
  const groups: Record<string, Recording[]> = {};

  for (const recording of recordings) {
    const variant = recording.variant || "main";
    if (!groups[variant]) {
      groups[variant] = [];
    }
    groups[variant].push(recording);
  }

  return groups;
}
/**
 * True when the browser reports direct-play support for the codec of the
 * variant's first recording segment; false when no codec is known.
 */
function canDirectPlayVariant(
  capabilities: PlaybackCapabilities,
  recordings: Recording[],
): boolean {
  // Inline codec normalization: lowercase + trim, empty -> undefined.
  const codec = recordings[0]?.codec_name?.toLowerCase().trim() || undefined;

  if (!codec) {
    return false;
  }

  return capabilities.supports[codec] === true;
}
/** API host with any single trailing slash removed. */
function getDirectBaseUrl(apiHost: string): string {
  return apiHost.endsWith("/") ? apiHost.slice(0, -1) : apiHost;
}
/**
 * Base URL of the transcoding proxy, or undefined when the proxy is
 * disabled. A configured non-blank vod_proxy_url wins; otherwise the API
 * host's /vod-transcoded endpoint is used. Trailing slashes are stripped.
 */
function getTranscodeBaseUrl(apiHost: string, config?: FrigateConfig): string | undefined {
  if (!config?.transcode_proxy?.enabled) {
    return undefined;
  }

  const stripSlash = (value: string) =>
    value.endsWith("/") ? value.slice(0, -1) : value;

  const proxyUrl = config.transcode_proxy.vod_proxy_url;
  if (proxyUrl?.trim()) {
    return stripSlash(proxyUrl);
  }

  return `${stripSlash(apiHost)}/vod-transcoded`;
}
/**
 * Pick transcode bitrate/resolution caps from estimated bandwidth:
 * save-data or <= 1.5 Mbps -> 360p/512k, <= 3 Mbps -> 540p/1200k,
 * otherwise (including unknown bandwidth) 720p/2500k.
 */
function getTranscodeProfile(estimatedBandwidthBps?: number, saveData = false) {
  const lowProfile = { bitrate: "512k", maxWidth: "640", maxHeight: "360" };
  const midProfile = { bitrate: "1200k", maxWidth: "960", maxHeight: "540" };
  const highProfile = { bitrate: "2500k", maxWidth: "1280", maxHeight: "720" };

  if (saveData) {
    return lowProfile;
  }
  if (!estimatedBandwidthBps) {
    return highProfile;
  }
  if (estimatedBandwidthBps <= 1_500_000) {
    return lowProfile;
  }
  if (estimatedBandwidthBps <= 3_000_000) {
    return midProfile;
  }
  return highProfile;
}
/**
 * Direct-play VOD URL for a variant. The "main" variant (and an empty
 * variant) omits the `variant` query parameter entirely.
 */
function buildDirectUrl(apiHost: string, vodPath: string, variant: string): string {
  const host = apiHost.endsWith("/") ? apiHost.slice(0, -1) : apiHost;
  const url = `${host}${vodPath}`;

  if (!variant || variant === "main") {
    return url;
  }

  const query = new URLSearchParams({ variant }).toString();
  return `${url}${url.includes("?") ? "&" : "?"}${query}`;
}
/**
 * Transcoded VOD URL for a variant, falling back to a direct URL when the
 * transcode proxy is not configured. Applies bandwidth-derived bitrate and
 * resolution caps as query parameters.
 */
function buildTranscodeUrl(
  apiHost: string,
  config: FrigateConfig | undefined,
  vodPath: string,
  variant: string,
  capabilities: PlaybackCapabilities,
): string {
  const transcodeBase = getTranscodeBaseUrl(apiHost, config);

  // No proxy available: direct playback is the only option.
  if (!transcodeBase) {
    return buildDirectUrl(apiHost, vodPath, variant);
  }

  const { bitrate, maxWidth, maxHeight } = getTranscodeProfile(
    capabilities.estimatedBandwidthBps,
    capabilities.saveData,
  );

  return appendQuery(`${transcodeBase}${vodPath}`, {
    variant,
    bitrate,
    max_width: maxWidth,
    max_height: maxHeight,
  });
}
/**
 * Decide how a recording window should be played back: which variant
 * (main/sub) and whether to play directly or through the transcode proxy.
 * Honors an explicit user preference first, then falls back to a
 * bandwidth- and codec-aware automatic choice. `reason` names the branch
 * that produced the decision.
 */
export function chooseRecordingPlayback({
  apiHost,
  config,
  recordings,
  preference,
  vodPath,
  capabilities,
}: DecisionOptions): RecordingPlaybackDecision {
  const recordingsByVariant = groupRecordingsByVariant(recordings);
  const mainRecordings = recordingsByVariant.main ?? [];
  const subRecordings = recordingsByVariant.sub ?? [];
  const transcodeAvailable = !!getTranscodeBaseUrl(apiHost, config);

  // With no bandwidth estimate, assume a slow link under save-data and a
  // comfortable 6 Mbps otherwise.
  const estimatedBandwidthBps =
    capabilities.estimatedBandwidthBps ?? (capabilities.saveData ? 1_000_000 : 6_000_000);

  // Per-variant facts: availability, browser codec support, estimated bitrate.
  const candidates: Record<
    "main" | "sub",
    { recordings: Recording[]; playable: boolean; bitrate?: number }
  > = {
    main: {
      recordings: mainRecordings,
      playable: canDirectPlayVariant(capabilities, mainRecordings),
      bitrate: estimateRecordingBitrate(mainRecordings),
    },
    sub: {
      recordings: subRecordings,
      playable: canDirectPlayVariant(capabilities, subRecordings),
      bitrate: estimateRecordingBitrate(subRecordings),
    },
  };

  // Direct play qualifies when the variant exists, its codec is supported,
  // and its bitrate (if known) fits within 85% of the estimated bandwidth.
  const preferDirect = (variant: "main" | "sub") => {
    const candidate = candidates[variant];
    return (
      candidate.recordings.length > 0 &&
      candidate.playable &&
      (!candidate.bitrate || candidate.bitrate <= estimatedBandwidthBps * 0.85)
    );
  };

  // Manual "main": always direct, even without confirmed codec support.
  if (preference === "main" && candidates.main.recordings.length > 0) {
    return {
      mode: "direct",
      variant: "main",
      url: buildDirectUrl(apiHost, vodPath, "main"),
      reason: "manual-main",
    };
  }

  // Manual "sub": direct when playable, otherwise transcode the sub stream.
  if (preference === "sub" && candidates.sub.recordings.length > 0) {
    if (candidates.sub.playable) {
      return {
        mode: "direct",
        variant: "sub",
        url: buildDirectUrl(apiHost, vodPath, "sub"),
        reason: "manual-sub",
      };
    }

    return {
      mode: "transcoded",
      variant: "sub",
      url: buildTranscodeUrl(apiHost, config, vodPath, "sub", capabilities),
      reason: "manual-sub-transcoded",
    };
  }

  // Manual "transcoded": prefer the sub variant; degrade to direct play when
  // the proxy is not configured.
  if (preference === "transcoded") {
    const targetVariant = candidates.sub.recordings.length > 0 ? "sub" : "main";
    if (!transcodeAvailable) {
      return {
        mode: "direct",
        variant: targetVariant,
        url: buildDirectUrl(apiHost, vodPath, targetVariant),
        reason: "manual-transcoded-unavailable",
      };
    }

    return {
      mode: "transcoded",
      variant: targetVariant,
      url: buildTranscodeUrl(apiHost, config, vodPath, targetVariant, capabilities),
      reason: "manual-transcoded",
    };
  }

  // Automatic path (e.g. "auto", or a manual choice whose variant is absent):
  // direct main first, then direct sub.
  if (preferDirect("main")) {
    return {
      mode: "direct",
      variant: "main",
      url: buildDirectUrl(apiHost, vodPath, "main"),
      reason: "raw-main",
    };
  }

  if (preferDirect("sub")) {
    return {
      mode: "direct",
      variant: "sub",
      url: buildDirectUrl(apiHost, vodPath, "sub"),
      reason: "raw-sub",
    };
  }

  // Neither variant is a good direct fit: transcode (sub preferred) when the
  // proxy exists, otherwise direct play as a last resort.
  const transcodeVariant = candidates.sub.recordings.length > 0 ? "sub" : "main";
  if (!transcodeAvailable) {
    return {
      mode: "direct",
      variant: transcodeVariant,
      url: buildDirectUrl(apiHost, vodPath, transcodeVariant),
      reason: "direct-fallback",
    };
  }

  return {
    mode: "transcoded",
    variant: transcodeVariant,
    url: buildTranscodeUrl(apiHost, config, vodPath, transcodeVariant, capabilities),
    reason: "transcode-fallback",
  };
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff