mirror of
https://github.com/blakeblackshear/frigate.git
synced 2026-05-01 19:17:41 +03:00
Made-with:
This commit is contained in:
parent
687fefb343
commit
5560af611a
@ -1,356 +1,377 @@
|
|||||||
# syntax=docker/dockerfile:1.6
|
# syntax=docker/dockerfile:1.6
|
||||||
|
|
||||||
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
|
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
|
||||||
ARG DEBIAN_FRONTEND=noninteractive
|
ARG DEBIAN_FRONTEND=noninteractive
|
||||||
|
|
||||||
# Globally set pip break-system-packages option to avoid having to specify it every time
|
# Globally set pip break-system-packages option to avoid having to specify it every time
|
||||||
ARG PIP_BREAK_SYSTEM_PACKAGES=1
|
ARG PIP_BREAK_SYSTEM_PACKAGES=1
|
||||||
|
|
||||||
ARG BASE_IMAGE=debian:12
|
ARG BASE_IMAGE=debian:12
|
||||||
ARG SLIM_BASE=debian:12-slim
|
ARG SLIM_BASE=debian:12-slim
|
||||||
|
|
||||||
# A hook that allows us to inject commands right after the base images
|
# A hook that allows us to inject commands right after the base images
|
||||||
ARG BASE_HOOK=
|
ARG BASE_HOOK=
|
||||||
|
|
||||||
FROM ${BASE_IMAGE} AS base
|
FROM ${BASE_IMAGE} AS base
|
||||||
ARG PIP_BREAK_SYSTEM_PACKAGES
|
ARG PIP_BREAK_SYSTEM_PACKAGES
|
||||||
ARG BASE_HOOK
|
ARG BASE_HOOK
|
||||||
|
|
||||||
RUN sh -c "$BASE_HOOK"
|
RUN if [ -n "$BASE_HOOK" ]; then \
|
||||||
|
printf '%s\n' "$BASE_HOOK" | tr -d '\r' >/tmp/base_hook.sh && sh /tmp/base_hook.sh && rm -f /tmp/base_hook.sh; \
|
||||||
FROM --platform=${BUILDPLATFORM} debian:12 AS base_host
|
fi
|
||||||
ARG PIP_BREAK_SYSTEM_PACKAGES
|
|
||||||
|
FROM --platform=${BUILDPLATFORM} debian:12 AS base_host
|
||||||
FROM ${SLIM_BASE} AS slim-base
|
ARG PIP_BREAK_SYSTEM_PACKAGES
|
||||||
ARG PIP_BREAK_SYSTEM_PACKAGES
|
|
||||||
ARG BASE_HOOK
|
FROM ${SLIM_BASE} AS slim-base
|
||||||
|
ARG PIP_BREAK_SYSTEM_PACKAGES
|
||||||
RUN sh -c "$BASE_HOOK"
|
ARG BASE_HOOK
|
||||||
|
|
||||||
FROM slim-base AS wget
|
RUN if [ -n "$BASE_HOOK" ]; then \
|
||||||
ARG DEBIAN_FRONTEND
|
printf '%s\n' "$BASE_HOOK" | tr -d '\r' >/tmp/base_hook.sh && sh /tmp/base_hook.sh && rm -f /tmp/base_hook.sh; \
|
||||||
RUN apt-get update \
|
fi
|
||||||
&& apt-get install -y wget xz-utils \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
FROM slim-base AS wget
|
||||||
WORKDIR /rootfs
|
ARG DEBIAN_FRONTEND
|
||||||
|
RUN apt-get update \
|
||||||
FROM base AS nginx
|
&& apt-get install -y wget xz-utils \
|
||||||
ARG DEBIAN_FRONTEND
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
ENV CCACHE_DIR /root/.ccache
|
WORKDIR /rootfs
|
||||||
ENV CCACHE_MAXSIZE 2G
|
|
||||||
|
FROM base AS nginx
|
||||||
RUN --mount=type=bind,source=docker/main/build_nginx.sh,target=/deps/build_nginx.sh \
|
ARG DEBIAN_FRONTEND
|
||||||
/deps/build_nginx.sh
|
ENV CCACHE_DIR /root/.ccache
|
||||||
|
ENV CCACHE_MAXSIZE 2G
|
||||||
FROM wget AS sqlite-vec
|
|
||||||
ARG DEBIAN_FRONTEND
|
RUN --mount=type=bind,source=docker/main/build_nginx.sh,target=/deps/build_nginx.sh \
|
||||||
|
tr -d '\r' </deps/build_nginx.sh >/tmp/build_nginx.sh \
|
||||||
# Build sqlite_vec from source
|
&& bash /tmp/build_nginx.sh
|
||||||
COPY docker/main/build_sqlite_vec.sh /deps/build_sqlite_vec.sh
|
|
||||||
RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
|
FROM wget AS sqlite-vec
|
||||||
--mount=type=bind,source=docker/main/build_sqlite_vec.sh,target=/deps/build_sqlite_vec.sh \
|
ARG DEBIAN_FRONTEND
|
||||||
--mount=type=cache,target=/root/.ccache \
|
|
||||||
/deps/build_sqlite_vec.sh
|
# Build sqlite_vec from source
|
||||||
|
COPY docker/main/build_sqlite_vec.sh /deps/build_sqlite_vec.sh
|
||||||
FROM scratch AS go2rtc
|
RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
|
||||||
ARG TARGETARCH
|
--mount=type=bind,source=docker/main/build_sqlite_vec.sh,target=/deps/build_sqlite_vec.sh \
|
||||||
WORKDIR /rootfs/usr/local/go2rtc/bin
|
--mount=type=cache,target=/root/.ccache \
|
||||||
ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.13/go2rtc_linux_${TARGETARCH}" go2rtc
|
tr -d '\r' </deps/build_sqlite_vec.sh >/tmp/build_sqlite_vec.sh \
|
||||||
|
&& bash /tmp/build_sqlite_vec.sh
|
||||||
FROM wget AS tempio
|
|
||||||
ARG TARGETARCH
|
FROM scratch AS go2rtc
|
||||||
RUN --mount=type=bind,source=docker/main/install_tempio.sh,target=/deps/install_tempio.sh \
|
ARG TARGETARCH
|
||||||
/deps/install_tempio.sh
|
WORKDIR /rootfs/usr/local/go2rtc/bin
|
||||||
|
ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.13/go2rtc_linux_${TARGETARCH}" go2rtc
|
||||||
####
|
|
||||||
#
|
FROM wget AS tempio
|
||||||
# OpenVino Support
|
ARG TARGETARCH
|
||||||
#
|
RUN --mount=type=bind,source=docker/main/install_tempio.sh,target=/deps/install_tempio.sh \
|
||||||
# 1. Download and convert a model from Intel's Public Open Model Zoo
|
tr -d '\r' </deps/install_tempio.sh >/tmp/install_tempio.sh \
|
||||||
#
|
&& bash /tmp/install_tempio.sh
|
||||||
####
|
|
||||||
# Download and Convert OpenVino model
|
####
|
||||||
FROM base_host AS ov-converter
|
#
|
||||||
ARG DEBIAN_FRONTEND
|
# OpenVino Support
|
||||||
|
#
|
||||||
# Install OpenVino Runtime and Dev library
|
# 1. Download and convert a model from Intel's Public Open Model Zoo
|
||||||
COPY docker/main/requirements-ov.txt /requirements-ov.txt
|
#
|
||||||
RUN apt-get -qq update \
|
####
|
||||||
&& apt-get -qq install -y wget python3 python3-dev python3-distutils gcc pkg-config libhdf5-dev \
|
# Download and Convert OpenVino model
|
||||||
&& wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
|
FROM base_host AS ov-converter
|
||||||
&& sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
|
ARG DEBIAN_FRONTEND
|
||||||
&& python3 get-pip.py "pip" \
|
|
||||||
&& pip3 install -r /requirements-ov.txt
|
# Install OpenVino Runtime and Dev library
|
||||||
|
COPY docker/main/requirements-ov.txt /requirements-ov.txt
|
||||||
# Get OpenVino Model
|
RUN apt-get -qq update \
|
||||||
RUN --mount=type=bind,source=docker/main/build_ov_model.py,target=/build_ov_model.py \
|
&& apt-get -qq install -y wget python3 python3-dev python3-distutils gcc pkg-config libhdf5-dev \
|
||||||
mkdir /models && cd /models \
|
&& wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
|
||||||
&& wget http://download.tensorflow.org/models/object_detection/ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz \
|
&& sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
|
||||||
&& tar -xvf ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz \
|
&& python3 get-pip.py "pip" \
|
||||||
&& python3 /build_ov_model.py
|
&& pip3 install -r /requirements-ov.txt
|
||||||
|
|
||||||
####
|
# Get OpenVino Model
|
||||||
#
|
RUN --mount=type=bind,source=docker/main/build_ov_model.py,target=/build_ov_model.py \
|
||||||
# Coral Compatibility
|
mkdir /models && cd /models \
|
||||||
#
|
&& wget http://download.tensorflow.org/models/object_detection/ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz \
|
||||||
# Builds libusb without udev. Needed for synology and other devices with USB coral
|
&& tar -xvf ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz \
|
||||||
####
|
&& python3 /build_ov_model.py
|
||||||
# libUSB - No Udev
|
|
||||||
FROM wget as libusb-build
|
####
|
||||||
ARG TARGETARCH
|
#
|
||||||
ARG DEBIAN_FRONTEND
|
# Coral Compatibility
|
||||||
ENV CCACHE_DIR /root/.ccache
|
#
|
||||||
ENV CCACHE_MAXSIZE 2G
|
# Builds libusb without udev. Needed for synology and other devices with USB coral
|
||||||
|
####
|
||||||
# Build libUSB without udev. Needed for Openvino NCS2 support
|
# libUSB - No Udev
|
||||||
WORKDIR /opt
|
FROM wget as libusb-build
|
||||||
RUN apt-get update && apt-get install -y unzip build-essential automake libtool ccache pkg-config
|
ARG TARGETARCH
|
||||||
RUN --mount=type=cache,target=/root/.ccache wget -q https://github.com/libusb/libusb/archive/v1.0.26.zip -O v1.0.26.zip && \
|
ARG DEBIAN_FRONTEND
|
||||||
unzip v1.0.26.zip && cd libusb-1.0.26 && \
|
ENV CCACHE_DIR /root/.ccache
|
||||||
./bootstrap.sh && \
|
ENV CCACHE_MAXSIZE 2G
|
||||||
./configure CC='ccache gcc' CCX='ccache g++' --disable-udev --enable-shared && \
|
|
||||||
make -j $(nproc --all)
|
# Build libUSB without udev. Needed for Openvino NCS2 support
|
||||||
RUN apt-get update && \
|
WORKDIR /opt
|
||||||
apt-get install -y --no-install-recommends libusb-1.0-0-dev && \
|
RUN apt-get update && apt-get install -y unzip build-essential automake libtool ccache pkg-config
|
||||||
rm -rf /var/lib/apt/lists/*
|
RUN --mount=type=cache,target=/root/.ccache wget -q https://github.com/libusb/libusb/archive/v1.0.26.zip -O v1.0.26.zip && \
|
||||||
WORKDIR /opt/libusb-1.0.26/libusb
|
unzip v1.0.26.zip && cd libusb-1.0.26 && \
|
||||||
RUN /bin/mkdir -p '/usr/local/lib' && \
|
./bootstrap.sh && \
|
||||||
/bin/bash ../libtool --mode=install /usr/bin/install -c libusb-1.0.la '/usr/local/lib' && \
|
./configure CC='ccache gcc' CCX='ccache g++' --disable-udev --enable-shared && \
|
||||||
/bin/mkdir -p '/usr/local/include/libusb-1.0' && \
|
make -j $(nproc --all)
|
||||||
/usr/bin/install -c -m 644 libusb.h '/usr/local/include/libusb-1.0' && \
|
RUN apt-get update && \
|
||||||
/bin/mkdir -p '/usr/local/lib/pkgconfig' && \
|
apt-get install -y --no-install-recommends libusb-1.0-0-dev && \
|
||||||
cd /opt/libusb-1.0.26/ && \
|
rm -rf /var/lib/apt/lists/*
|
||||||
/usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \
|
WORKDIR /opt/libusb-1.0.26/libusb
|
||||||
ldconfig
|
RUN /bin/mkdir -p '/usr/local/lib' && \
|
||||||
|
/bin/bash ../libtool --mode=install /usr/bin/install -c libusb-1.0.la '/usr/local/lib' && \
|
||||||
FROM wget AS models
|
/bin/mkdir -p '/usr/local/include/libusb-1.0' && \
|
||||||
|
/usr/bin/install -c -m 644 libusb.h '/usr/local/include/libusb-1.0' && \
|
||||||
# Get model and labels
|
/bin/mkdir -p '/usr/local/lib/pkgconfig' && \
|
||||||
RUN wget -qO edgetpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite
|
cd /opt/libusb-1.0.26/ && \
|
||||||
RUN wget -qO cpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite
|
/usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \
|
||||||
COPY labelmap.txt .
|
ldconfig
|
||||||
# Copy OpenVino model
|
|
||||||
COPY --from=ov-converter /models/ssdlite_mobilenet_v2.xml openvino-model/
|
FROM wget AS models
|
||||||
COPY --from=ov-converter /models/ssdlite_mobilenet_v2.bin openvino-model/
|
|
||||||
RUN wget -q https://github.com/openvinotoolkit/open_model_zoo/raw/master/data/dataset_classes/coco_91cl_bkgr.txt -O openvino-model/coco_91cl_bkgr.txt && \
|
# Get model and labels
|
||||||
sed -i 's/truck/car/g' openvino-model/coco_91cl_bkgr.txt
|
RUN wget -qO edgetpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite
|
||||||
# Get Audio Model and labels
|
RUN wget -qO cpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite
|
||||||
RUN wget -qO - https://www.kaggle.com/api/v1/models/google/yamnet/tfLite/classification-tflite/1/download | tar xvz && mv 1.tflite cpu_audio_model.tflite
|
COPY labelmap.txt .
|
||||||
COPY audio-labelmap.txt .
|
# Copy OpenVino model
|
||||||
|
COPY --from=ov-converter /models/ssdlite_mobilenet_v2.xml openvino-model/
|
||||||
|
COPY --from=ov-converter /models/ssdlite_mobilenet_v2.bin openvino-model/
|
||||||
FROM wget AS s6-overlay
|
RUN wget -q https://github.com/openvinotoolkit/open_model_zoo/raw/master/data/dataset_classes/coco_91cl_bkgr.txt -O openvino-model/coco_91cl_bkgr.txt && \
|
||||||
ARG TARGETARCH
|
sed -i 's/truck/car/g' openvino-model/coco_91cl_bkgr.txt
|
||||||
RUN --mount=type=bind,source=docker/main/install_s6_overlay.sh,target=/deps/install_s6_overlay.sh \
|
# Get Audio Model and labels
|
||||||
/deps/install_s6_overlay.sh
|
RUN wget -qO - https://www.kaggle.com/api/v1/models/google/yamnet/tfLite/classification-tflite/1/download | tar xvz && mv 1.tflite cpu_audio_model.tflite
|
||||||
|
COPY audio-labelmap.txt .
|
||||||
|
|
||||||
FROM base AS wheels
|
|
||||||
ARG DEBIAN_FRONTEND
|
FROM wget AS s6-overlay
|
||||||
ARG TARGETARCH
|
ARG TARGETARCH
|
||||||
ARG DEBUG=false
|
RUN --mount=type=bind,source=docker/main/install_s6_overlay.sh,target=/deps/install_s6_overlay.sh \
|
||||||
|
tr -d '\r' </deps/install_s6_overlay.sh >/tmp/install_s6_overlay.sh \
|
||||||
# Use a separate container to build wheels to prevent build dependencies in final image
|
&& bash /tmp/install_s6_overlay.sh
|
||||||
RUN apt-get -qq update \
|
|
||||||
&& apt-get -qq install -y \
|
|
||||||
apt-transport-https wget unzip \
|
FROM base AS wheels
|
||||||
&& apt-get -qq update \
|
ARG DEBIAN_FRONTEND
|
||||||
&& apt-get -qq install -y \
|
ARG TARGETARCH
|
||||||
python3.11 \
|
ARG DEBUG=false
|
||||||
python3.11-dev \
|
|
||||||
# opencv dependencies
|
# Use a separate container to build wheels to prevent build dependencies in final image
|
||||||
build-essential cmake git pkg-config libgtk-3-dev \
|
RUN apt-get -qq update \
|
||||||
libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \
|
&& apt-get -qq install -y \
|
||||||
libxvidcore-dev libx264-dev libjpeg-dev libpng-dev libtiff-dev \
|
apt-transport-https wget unzip \
|
||||||
gfortran openexr libatlas-base-dev libssl-dev\
|
&& apt-get -qq update \
|
||||||
libtbbmalloc2 libtbb-dev libdc1394-dev libopenexr-dev \
|
&& apt-get -qq install -y \
|
||||||
libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \
|
python3.11 \
|
||||||
# sqlite3 dependencies
|
python3.11-dev \
|
||||||
tclsh \
|
# opencv dependencies
|
||||||
# scipy dependencies
|
build-essential cmake git pkg-config libgtk-3-dev \
|
||||||
gcc gfortran libopenblas-dev liblapack-dev && \
|
libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \
|
||||||
rm -rf /var/lib/apt/lists/*
|
libxvidcore-dev libx264-dev libjpeg-dev libpng-dev libtiff-dev \
|
||||||
|
gfortran openexr libatlas-base-dev libssl-dev\
|
||||||
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1
|
libtbbmalloc2 libtbb-dev libdc1394-dev libopenexr-dev \
|
||||||
|
libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \
|
||||||
RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
|
# sqlite3 dependencies
|
||||||
&& sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
|
tclsh \
|
||||||
&& python3 get-pip.py "pip"
|
# scipy dependencies
|
||||||
|
gcc gfortran libopenblas-dev liblapack-dev && \
|
||||||
COPY docker/main/requirements.txt /requirements.txt
|
rm -rf /var/lib/apt/lists/*
|
||||||
COPY docker/main/requirements-dev.txt /requirements-dev.txt
|
|
||||||
|
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1
|
||||||
RUN pip3 install -r /requirements.txt
|
|
||||||
|
RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
|
||||||
# Build pysqlite3 from source
|
&& sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
|
||||||
COPY docker/main/build_pysqlite3.sh /build_pysqlite3.sh
|
&& python3 get-pip.py "pip"
|
||||||
RUN /build_pysqlite3.sh
|
|
||||||
|
COPY docker/main/requirements.txt /requirements.txt
|
||||||
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
|
COPY docker/main/requirements-dev.txt /requirements-dev.txt
|
||||||
RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt && \
|
|
||||||
if [ "$DEBUG" = "true" ]; then \
|
RUN pip3 install -r /requirements.txt
|
||||||
pip3 wheel --wheel-dir=/wheels -r /requirements-dev.txt; \
|
|
||||||
fi
|
# Build pysqlite3 from source
|
||||||
|
COPY docker/main/build_pysqlite3.sh /build_pysqlite3.sh
|
||||||
# Install HailoRT & Wheels
|
RUN tr -d '\r' </build_pysqlite3.sh >/tmp/build_pysqlite3.sh \
|
||||||
RUN --mount=type=bind,source=docker/main/install_hailort.sh,target=/deps/install_hailort.sh \
|
&& bash /tmp/build_pysqlite3.sh
|
||||||
/deps/install_hailort.sh
|
|
||||||
|
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
|
||||||
# Collect deps in a single layer
|
RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt && \
|
||||||
FROM scratch AS deps-rootfs
|
if [ "$DEBUG" = "true" ]; then \
|
||||||
COPY --from=nginx /usr/local/nginx/ /usr/local/nginx/
|
pip3 wheel --wheel-dir=/wheels -r /requirements-dev.txt; \
|
||||||
COPY --from=sqlite-vec /usr/local/lib/ /usr/local/lib/
|
fi
|
||||||
COPY --from=go2rtc /rootfs/ /
|
|
||||||
COPY --from=libusb-build /usr/local/lib /usr/local/lib
|
# Install HailoRT & Wheels
|
||||||
COPY --from=tempio /rootfs/ /
|
RUN --mount=type=bind,source=docker/main/install_hailort.sh,target=/deps/install_hailort.sh \
|
||||||
COPY --from=s6-overlay /rootfs/ /
|
tr -d '\r' </deps/install_hailort.sh >/tmp/install_hailort.sh \
|
||||||
COPY --from=models /rootfs/ /
|
&& bash /tmp/install_hailort.sh
|
||||||
COPY --from=wheels /rootfs/ /
|
|
||||||
COPY docker/main/rootfs/ /
|
# Collect deps in a single layer
|
||||||
|
FROM scratch AS deps-rootfs
|
||||||
|
COPY --from=nginx /usr/local/nginx/ /usr/local/nginx/
|
||||||
# Frigate deps (ffmpeg, python, nginx, go2rtc, s6-overlay, etc)
|
COPY --from=sqlite-vec /usr/local/lib/ /usr/local/lib/
|
||||||
FROM slim-base AS deps
|
COPY --from=go2rtc /rootfs/ /
|
||||||
ARG TARGETARCH
|
COPY --from=libusb-build /usr/local/lib /usr/local/lib
|
||||||
ARG BASE_IMAGE
|
COPY --from=tempio /rootfs/ /
|
||||||
|
COPY --from=s6-overlay /rootfs/ /
|
||||||
ARG DEBIAN_FRONTEND
|
COPY --from=models /rootfs/ /
|
||||||
# http://stackoverflow.com/questions/48162574/ddg#49462622
|
COPY --from=wheels /rootfs/ /
|
||||||
ARG APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn
|
COPY docker/main/rootfs/ /
|
||||||
|
|
||||||
# https://github.com/NVIDIA/nvidia-docker/wiki/Installation-(Native-GPU-Support)
|
|
||||||
ENV NVIDIA_VISIBLE_DEVICES=all
|
# Frigate deps (ffmpeg, python, nginx, go2rtc, s6-overlay, etc)
|
||||||
ENV NVIDIA_DRIVER_CAPABILITIES="compute,video,utility"
|
FROM slim-base AS deps
|
||||||
|
ARG TARGETARCH
|
||||||
# Disable tokenizer parallelism warning
|
ARG BASE_IMAGE
|
||||||
# https://stackoverflow.com/questions/62691279/how-to-disable-tokenizers-parallelism-true-false-warning/72926996#72926996
|
|
||||||
ENV TOKENIZERS_PARALLELISM=true
|
ARG DEBIAN_FRONTEND
|
||||||
# https://github.com/huggingface/transformers/issues/27214
|
# http://stackoverflow.com/questions/48162574/ddg#49462622
|
||||||
ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1
|
ARG APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn
|
||||||
|
|
||||||
# Set OpenCV ffmpeg loglevel to fatal: https://ffmpeg.org/doxygen/trunk/log_8h.html
|
# https://github.com/NVIDIA/nvidia-docker/wiki/Installation-(Native-GPU-Support)
|
||||||
ENV OPENCV_FFMPEG_LOGLEVEL=8
|
ENV NVIDIA_VISIBLE_DEVICES=all
|
||||||
|
ENV NVIDIA_DRIVER_CAPABILITIES="compute,video,utility"
|
||||||
# Set NumPy to ignore getlimits warning
|
|
||||||
ENV PYTHONWARNINGS="ignore:::numpy.core.getlimits"
|
# Disable tokenizer parallelism warning
|
||||||
|
# https://stackoverflow.com/questions/62691279/how-to-disable-tokenizers-parallelism-true-false-warning/72926996#72926996
|
||||||
# Set HailoRT to disable logging
|
ENV TOKENIZERS_PARALLELISM=true
|
||||||
ENV HAILORT_LOGGER_PATH=NONE
|
# https://github.com/huggingface/transformers/issues/27214
|
||||||
|
ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1
|
||||||
# TensorFlow C++ logging suppression (must be set before import)
|
|
||||||
# TF_CPP_MIN_LOG_LEVEL: 0=all, 1=INFO+, 2=WARNING+, 3=ERROR+ (we use 3 for errors only)
|
# Set OpenCV ffmpeg loglevel to fatal: https://ffmpeg.org/doxygen/trunk/log_8h.html
|
||||||
ENV TF_CPP_MIN_LOG_LEVEL=3
|
ENV OPENCV_FFMPEG_LOGLEVEL=8
|
||||||
# Suppress verbose logging from TensorFlow C++ code
|
|
||||||
ENV TF_CPP_MIN_VLOG_LEVEL=3
|
# Set NumPy to ignore getlimits warning
|
||||||
# Disable oneDNN optimization messages ("optimized with oneDNN...")
|
ENV PYTHONWARNINGS="ignore:::numpy.core.getlimits"
|
||||||
ENV TF_ENABLE_ONEDNN_OPTS=0
|
|
||||||
# Suppress AutoGraph verbosity during conversion
|
# Set HailoRT to disable logging
|
||||||
ENV AUTOGRAPH_VERBOSITY=0
|
ENV HAILORT_LOGGER_PATH=NONE
|
||||||
# Google Logging (GLOG) suppression for TensorFlow components
|
|
||||||
ENV GLOG_minloglevel=3
|
# TensorFlow C++ logging suppression (must be set before import)
|
||||||
ENV GLOG_logtostderr=0
|
# TF_CPP_MIN_LOG_LEVEL: 0=all, 1=INFO+, 2=WARNING+, 3=ERROR+ (we use 3 for errors only)
|
||||||
|
ENV TF_CPP_MIN_LOG_LEVEL=3
|
||||||
ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
|
# Suppress verbose logging from TensorFlow C++ code
|
||||||
|
ENV TF_CPP_MIN_VLOG_LEVEL=3
|
||||||
# Install dependencies
|
# Disable oneDNN optimization messages ("optimized with oneDNN...")
|
||||||
RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \
|
ENV TF_ENABLE_ONEDNN_OPTS=0
|
||||||
/deps/install_deps.sh
|
# Suppress AutoGraph verbosity during conversion
|
||||||
|
ENV AUTOGRAPH_VERBOSITY=0
|
||||||
ENV DEFAULT_FFMPEG_VERSION="7.0"
|
# Google Logging (GLOG) suppression for TensorFlow components
|
||||||
ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:5.0"
|
ENV GLOG_minloglevel=3
|
||||||
|
ENV GLOG_logtostderr=0
|
||||||
RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
|
|
||||||
&& sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
|
ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
|
||||||
&& python3 get-pip.py "pip"
|
|
||||||
|
# Install dependencies
|
||||||
RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
|
RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \
|
||||||
pip3 install -U /deps/wheels/*.whl
|
tr -d '\r' </deps/install_deps.sh >/tmp/install_deps.sh \
|
||||||
|
&& bash /tmp/install_deps.sh
|
||||||
# Install Axera Engine
|
|
||||||
RUN pip3 install https://github.com/AXERA-TECH/pyaxengine/releases/download/0.1.3-frigate/axengine-0.1.3-py3-none-any.whl
|
ENV DEFAULT_FFMPEG_VERSION="7.0"
|
||||||
|
ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:5.0"
|
||||||
ENV PATH="${PATH}:/usr/bin/axcl"
|
|
||||||
ENV LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/usr/lib/axcl"
|
RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
|
||||||
|
&& sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
|
||||||
# Install MemryX runtime (requires libgomp (OpenMP) in the final docker image)
|
&& python3 get-pip.py "pip"
|
||||||
RUN --mount=type=bind,source=docker/main/install_memryx.sh,target=/deps/install_memryx.sh \
|
|
||||||
bash -c "bash /deps/install_memryx.sh"
|
RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
|
||||||
|
pip3 install -U /deps/wheels/*.whl
|
||||||
COPY --from=deps-rootfs / /
|
|
||||||
|
# Install Axera Engine
|
||||||
RUN ldconfig
|
RUN pip3 install https://github.com/AXERA-TECH/pyaxengine/releases/download/0.1.3-frigate/axengine-0.1.3-py3-none-any.whl
|
||||||
|
|
||||||
EXPOSE 5000
|
ENV PATH="${PATH}:/usr/bin/axcl"
|
||||||
EXPOSE 8554
|
ENV LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/usr/lib/axcl"
|
||||||
EXPOSE 8555/tcp 8555/udp
|
|
||||||
|
# Install MemryX runtime (requires libgomp (OpenMP) in the final docker image)
|
||||||
# Configure logging to prepend timestamps, log to stdout, keep 0 archives and rotate on 10MB
|
RUN --mount=type=bind,source=docker/main/install_memryx.sh,target=/deps/install_memryx.sh \
|
||||||
ENV S6_LOGGING_SCRIPT="T 1 n0 s10000000 T"
|
tr -d '\r' </deps/install_memryx.sh >/tmp/install_memryx.sh \
|
||||||
# Do not fail on long-running download scripts
|
&& bash /tmp/install_memryx.sh
|
||||||
ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
|
|
||||||
|
COPY --from=deps-rootfs / /
|
||||||
ENTRYPOINT ["/init"]
|
|
||||||
CMD []
|
RUN find /etc/s6-overlay/s6-rc.d -type f -exec sed -i 's/\r$//' {} +
|
||||||
|
|
||||||
HEALTHCHECK --start-period=300s --start-interval=5s --interval=15s --timeout=5s --retries=3 \
|
RUN find /etc/s6-overlay/s6-rc.d -type f \
|
||||||
CMD test -f /dev/shm/.frigate-is-stopping && exit 0; curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1
|
\( -name run -o -name up \) \
|
||||||
|
-exec chmod +x {} +
|
||||||
# Frigate deps with Node.js and NPM for devcontainer
|
|
||||||
FROM deps AS devcontainer
|
RUN ldconfig
|
||||||
|
|
||||||
# Do not start the actual Frigate service on devcontainer as it will be started by VS Code
|
EXPOSE 5000
|
||||||
# But start a fake service for simulating the logs
|
EXPOSE 5010
|
||||||
COPY docker/main/fake_frigate_run /etc/s6-overlay/s6-rc.d/frigate/run
|
EXPOSE 8554
|
||||||
|
EXPOSE 8555/tcp 8555/udp
|
||||||
# Create symbolic link to the frigate source code, as go2rtc's create_config.sh uses it
|
|
||||||
RUN mkdir -p /opt/frigate \
|
# Configure logging to prepend timestamps, log to stdout, keep 0 archives and rotate on 10MB
|
||||||
&& ln -svf /workspace/frigate/frigate /opt/frigate/frigate
|
ENV S6_LOGGING_SCRIPT="T 1 n0 s10000000 T"
|
||||||
|
# Do not fail on long-running download scripts
|
||||||
# Install Node 20
|
ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
|
||||||
RUN curl -SLO https://deb.nodesource.com/nsolid_setup_deb.sh && \
|
|
||||||
chmod 500 nsolid_setup_deb.sh && \
|
ENTRYPOINT ["/init"]
|
||||||
./nsolid_setup_deb.sh 20 && \
|
CMD []
|
||||||
apt-get install nodejs -y \
|
|
||||||
&& rm -rf /var/lib/apt/lists/* \
|
HEALTHCHECK --start-period=300s --start-interval=5s --interval=15s --timeout=5s --retries=3 \
|
||||||
&& npm install -g npm@10
|
CMD test -f /dev/shm/.frigate-is-stopping && exit 0; curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1
|
||||||
|
|
||||||
WORKDIR /workspace/frigate
|
# Frigate deps with Node.js and NPM for devcontainer
|
||||||
|
FROM deps AS devcontainer
|
||||||
RUN apt-get update \
|
|
||||||
&& apt-get install make -y \
|
# Do not start the actual Frigate service on devcontainer as it will be started by VS Code
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
# But start a fake service for simulating the logs
|
||||||
|
COPY docker/main/fake_frigate_run /etc/s6-overlay/s6-rc.d/frigate/run
|
||||||
RUN --mount=type=bind,source=./docker/main/requirements-dev.txt,target=/workspace/frigate/requirements-dev.txt \
|
|
||||||
pip3 install -r requirements-dev.txt
|
# Create symbolic link to the frigate source code, as go2rtc's create_config.sh uses it
|
||||||
|
RUN mkdir -p /opt/frigate \
|
||||||
HEALTHCHECK NONE
|
&& ln -svf /workspace/frigate/frigate /opt/frigate/frigate
|
||||||
|
|
||||||
CMD ["sleep", "infinity"]
|
# Install Node 20
|
||||||
|
RUN curl -SLO https://deb.nodesource.com/nsolid_setup_deb.sh && \
|
||||||
|
chmod 500 nsolid_setup_deb.sh && \
|
||||||
# Frigate web build
|
./nsolid_setup_deb.sh 20 && \
|
||||||
# This should be architecture agnostic, so speed up the build on multiarch by not using QEMU.
|
apt-get install nodejs -y \
|
||||||
FROM --platform=$BUILDPLATFORM node:20 AS web-build
|
&& rm -rf /var/lib/apt/lists/* \
|
||||||
|
&& npm install -g npm@10
|
||||||
WORKDIR /work
|
|
||||||
COPY web/package.json web/package-lock.json ./
|
WORKDIR /workspace/frigate
|
||||||
RUN npm install
|
|
||||||
|
RUN apt-get update \
|
||||||
COPY web/ ./
|
&& apt-get install make -y \
|
||||||
RUN npm run build \
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
&& mv dist/BASE_PATH/monacoeditorwork/* dist/assets/ \
|
|
||||||
&& rm -rf dist/BASE_PATH
|
RUN --mount=type=bind,source=./docker/main/requirements-dev.txt,target=/workspace/frigate/requirements-dev.txt \
|
||||||
|
pip3 install -r requirements-dev.txt
|
||||||
# Collect final files in a single layer
|
|
||||||
FROM scratch AS rootfs
|
HEALTHCHECK NONE
|
||||||
|
|
||||||
WORKDIR /opt/frigate/
|
CMD ["sleep", "infinity"]
|
||||||
COPY frigate frigate/
|
|
||||||
COPY migrations migrations/
|
|
||||||
COPY --from=web-build /work/dist/ web/
|
# Frigate web build
|
||||||
|
# This should be architecture agnostic, so speed up the build on multiarch by not using QEMU.
|
||||||
# Frigate final container
|
FROM --platform=$BUILDPLATFORM node:20 AS web-build
|
||||||
FROM deps AS frigate
|
|
||||||
|
WORKDIR /work
|
||||||
WORKDIR /opt/frigate/
|
COPY web/package.json web/package-lock.json ./
|
||||||
COPY --from=rootfs / /
|
RUN npm install
|
||||||
|
|
||||||
|
COPY web/ ./
|
||||||
|
RUN npm run build \
|
||||||
|
&& mv dist/BASE_PATH/monacoeditorwork/* dist/assets/ \
|
||||||
|
&& rm -rf dist/BASE_PATH
|
||||||
|
|
||||||
|
# Collect final files in a single layer
|
||||||
|
FROM scratch AS rootfs
|
||||||
|
|
||||||
|
WORKDIR /opt/frigate/
|
||||||
|
COPY frigate frigate/
|
||||||
|
COPY migrations migrations/
|
||||||
|
COPY transcode_proxy transcode_proxy/
|
||||||
|
COPY --from=web-build /work/dist/ web/
|
||||||
|
|
||||||
|
# Frigate final container
|
||||||
|
FROM deps AS frigate
|
||||||
|
|
||||||
|
WORKDIR /opt/frigate/
|
||||||
|
COPY --from=rootfs / /
|
||||||
|
RUN pip3 install --no-cache-dir -r /opt/frigate/transcode_proxy/requirements.txt
|
||||||
|
|||||||
@ -1,33 +1,56 @@
|
|||||||
#!/command/with-contenv bash
|
#!/command/with-contenv bash
|
||||||
# shellcheck shell=bash
|
# shellcheck shell=bash
|
||||||
# Start the Frigate service
|
# Start the Frigate service
|
||||||
|
|
||||||
set -o errexit -o nounset -o pipefail
|
set -o errexit -o nounset -o pipefail
|
||||||
|
|
||||||
# opt out of openvino telemetry
|
# opt out of openvino telemetry
|
||||||
if [ -e /usr/local/bin/opt_in_out ]; then
|
if [ -e /usr/local/bin/opt_in_out ]; then
|
||||||
/usr/local/bin/opt_in_out --opt_out > /dev/null 2>&1
|
/usr/local/bin/opt_in_out --opt_out > /dev/null 2>&1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Logs should be sent to stdout so that s6 can collect them
|
# Logs should be sent to stdout so that s6 can collect them
|
||||||
|
|
||||||
# Tell S6-Overlay not to restart this service
|
# Tell S6-Overlay not to restart this service
|
||||||
s6-svc -O .
|
s6-svc -O .
|
||||||
|
|
||||||
function set_libva_version() {
|
function set_libva_version() {
|
||||||
local ffmpeg_path
|
local ffmpeg_path
|
||||||
ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
|
ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
|
||||||
LIBAVFORMAT_VERSION_MAJOR=$("$ffmpeg_path" -version | grep -Po "libavformat\W+\K\d+")
|
LIBAVFORMAT_VERSION_MAJOR=$("$ffmpeg_path" -version | grep -Po "libavformat\W+\K\d+")
|
||||||
export LIBAVFORMAT_VERSION_MAJOR
|
export LIBAVFORMAT_VERSION_MAJOR
|
||||||
}
|
}
|
||||||
|
|
||||||
echo "[INFO] Preparing Frigate..."
|
function start_transcode_proxy() {
|
||||||
set_libva_version
|
(
|
||||||
|
export TRANSCODE_PROXY_UPSTREAM="${TRANSCODE_PROXY_UPSTREAM:-http://127.0.0.1:5000}"
|
||||||
echo "[INFO] Starting Frigate..."
|
export PYTHONPATH="/opt/frigate:${PYTHONPATH:-}"
|
||||||
|
|
||||||
cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate"
|
if [[ -z "${TRANSCODE_PROXY_FFMPEG:-}" ]]; then
|
||||||
|
TRANSCODE_PROXY_FFMPEG=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
|
||||||
# Replace the bash process with the Frigate process, redirecting stderr to stdout
|
export TRANSCODE_PROXY_FFMPEG
|
||||||
exec 2>&1
|
fi
|
||||||
exec python3 -u -m frigate
|
|
||||||
|
until curl -sf -o /dev/null "${TRANSCODE_PROXY_UPSTREAM}/api/version"; do
|
||||||
|
sleep 1
|
||||||
|
done
|
||||||
|
|
||||||
|
echo "[INFO] Starting transcode proxy..."
|
||||||
|
exec python3 -m uvicorn transcode_proxy.main:app \
|
||||||
|
--host "${TRANSCODE_PROXY_HOST:-0.0.0.0}" \
|
||||||
|
--port "${TRANSCODE_PROXY_PORT:-5010}"
|
||||||
|
) &
|
||||||
|
}
|
||||||
|
|
||||||
|
echo "[INFO] Preparing Frigate..."
|
||||||
|
set_libva_version
|
||||||
|
|
||||||
|
start_transcode_proxy
|
||||||
|
|
||||||
|
echo "[INFO] Starting Frigate..."
|
||||||
|
|
||||||
|
cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate"
|
||||||
|
|
||||||
|
# Replace the bash process with the Frigate process, redirecting stderr to stdout
|
||||||
|
exec 2>&1
|
||||||
|
exec python3 -u -m frigate
|
||||||
|
|||||||
@ -1,11 +1,11 @@
|
|||||||
#!/command/with-contenv bash
|
#!/command/with-contenv bash
|
||||||
# shellcheck shell=bash
|
# shellcheck shell=bash
|
||||||
# Prepare the logs folder for s6-log
|
# Prepare the logs folder for s6-log
|
||||||
|
|
||||||
set -o errexit -o nounset -o pipefail
|
set -o errexit -o nounset -o pipefail
|
||||||
|
|
||||||
dirs=(/dev/shm/logs/frigate /dev/shm/logs/go2rtc /dev/shm/logs/nginx /dev/shm/logs/certsync)
|
dirs=(/dev/shm/logs/frigate /dev/shm/logs/go2rtc /dev/shm/logs/nginx /dev/shm/logs/certsync /dev/shm/logs/transcode-proxy)
|
||||||
|
|
||||||
mkdir -p "${dirs[@]}"
|
mkdir -p "${dirs[@]}"
|
||||||
chown nobody:nogroup "${dirs[@]}"
|
chown nobody:nogroup "${dirs[@]}"
|
||||||
chmod 02755 "${dirs[@]}"
|
chmod 02755 "${dirs[@]}"
|
||||||
|
|||||||
@ -0,0 +1 @@
|
|||||||
|
transcode-proxy
|
||||||
@ -0,0 +1 @@
|
|||||||
|
transcode-proxy-pipeline
|
||||||
@ -0,0 +1,4 @@
|
|||||||
|
#!/command/with-contenv bash
|
||||||
|
# shellcheck shell=bash
|
||||||
|
|
||||||
|
exec logutil-service /dev/shm/logs/transcode-proxy
|
||||||
@ -0,0 +1 @@
|
|||||||
|
longrun
|
||||||
@ -0,0 +1 @@
|
|||||||
|
|
||||||
@ -0,0 +1 @@
|
|||||||
|
transcode-proxy-log
|
||||||
@ -0,0 +1,32 @@
|
|||||||
|
#!/command/with-contenv bash
|
||||||
|
# shellcheck shell=bash
|
||||||
|
# Start the transcode proxy (in-process with Frigate container)
|
||||||
|
|
||||||
|
set -o errexit -o nounset -o pipefail
|
||||||
|
|
||||||
|
# Logs should be sent to stdout so that s6 can collect them
|
||||||
|
|
||||||
|
echo "[INFO] Starting transcode proxy..."
|
||||||
|
|
||||||
|
# Default upstream to nginx internal port when not set
|
||||||
|
export TRANSCODE_PROXY_UPSTREAM="${TRANSCODE_PROXY_UPSTREAM:-http://127.0.0.1:5000}"
|
||||||
|
|
||||||
|
# Use Frigate's FFmpeg when not set
|
||||||
|
if [ -z "${TRANSCODE_PROXY_FFMPEG:-}" ]; then
|
||||||
|
export TRANSCODE_PROXY_FFMPEG="$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Wait for nginx/API to be ready so proxy can reach upstream
|
||||||
|
until curl -sf -o /dev/null "${TRANSCODE_PROXY_UPSTREAM}/api/version"; do
|
||||||
|
echo "[INFO] Waiting for upstream ${TRANSCODE_PROXY_UPSTREAM}..."
|
||||||
|
sleep 1
|
||||||
|
done
|
||||||
|
|
||||||
|
echo "[INFO] Upstream ready, starting transcode proxy on port ${TRANSCODE_PROXY_PORT:-5010}"
|
||||||
|
|
||||||
|
export PYTHONPATH="/opt/frigate:${PYTHONPATH:-}"
|
||||||
|
|
||||||
|
exec 2>&1
|
||||||
|
exec python3 -m uvicorn transcode_proxy.main:app \
|
||||||
|
--host "${TRANSCODE_PROXY_HOST:-0.0.0.0}" \
|
||||||
|
--port "${TRANSCODE_PROXY_PORT:-5010}"
|
||||||
@ -0,0 +1 @@
|
|||||||
|
longrun
|
||||||
@ -1,365 +1,375 @@
|
|||||||
daemon off;
|
daemon off;
|
||||||
user root;
|
user root;
|
||||||
worker_processes auto;
|
worker_processes auto;
|
||||||
|
|
||||||
error_log /dev/stdout warn;
|
error_log /dev/stdout warn;
|
||||||
pid /var/run/nginx.pid;
|
pid /var/run/nginx.pid;
|
||||||
|
|
||||||
events {
|
events {
|
||||||
worker_connections 1024;
|
worker_connections 1024;
|
||||||
}
|
}
|
||||||
|
|
||||||
http {
|
http {
|
||||||
map_hash_bucket_size 256;
|
map_hash_bucket_size 256;
|
||||||
|
|
||||||
include mime.types;
|
include mime.types;
|
||||||
default_type application/octet-stream;
|
default_type application/octet-stream;
|
||||||
|
|
||||||
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
||||||
'$status $body_bytes_sent "$http_referer" '
|
'$status $body_bytes_sent "$http_referer" '
|
||||||
'"$http_user_agent" "$http_x_forwarded_for" '
|
'"$http_user_agent" "$http_x_forwarded_for" '
|
||||||
'request_time="$request_time" upstream_response_time="$upstream_response_time"';
|
'request_time="$request_time" upstream_response_time="$upstream_response_time"';
|
||||||
|
|
||||||
|
|
||||||
access_log /dev/stdout main;
|
access_log /dev/stdout main;
|
||||||
|
|
||||||
# send headers in one piece, it is better than sending them one by one
|
# send headers in one piece, it is better than sending them one by one
|
||||||
tcp_nopush on;
|
tcp_nopush on;
|
||||||
|
|
||||||
sendfile on;
|
sendfile on;
|
||||||
|
|
||||||
keepalive_timeout 65;
|
keepalive_timeout 65;
|
||||||
|
|
||||||
gzip on;
|
gzip on;
|
||||||
gzip_comp_level 6;
|
gzip_comp_level 6;
|
||||||
gzip_types text/plain text/css application/json application/x-javascript application/javascript text/javascript image/svg+xml image/x-icon image/bmp;
|
gzip_types text/plain text/css application/json application/x-javascript application/javascript text/javascript image/svg+xml image/x-icon image/bmp;
|
||||||
gzip_proxied no-cache no-store private expired auth;
|
gzip_proxied no-cache no-store private expired auth;
|
||||||
gzip_vary on;
|
gzip_vary on;
|
||||||
|
|
||||||
proxy_cache_path /dev/shm/nginx_cache levels=1:2 keys_zone=api_cache:10m max_size=10m inactive=1m use_temp_path=off;
|
proxy_cache_path /dev/shm/nginx_cache levels=1:2 keys_zone=api_cache:10m max_size=10m inactive=1m use_temp_path=off;
|
||||||
|
|
||||||
map $sent_http_content_type $should_not_cache {
|
map $sent_http_content_type $should_not_cache {
|
||||||
'application/json' 0;
|
'application/json' 0;
|
||||||
default 1;
|
default 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
upstream frigate_api {
|
upstream frigate_api {
|
||||||
server 127.0.0.1:5001;
|
server 127.0.0.1:5001;
|
||||||
keepalive 1024;
|
keepalive 1024;
|
||||||
}
|
}
|
||||||
|
|
||||||
upstream mqtt_ws {
|
upstream mqtt_ws {
|
||||||
server 127.0.0.1:5002;
|
server 127.0.0.1:5002;
|
||||||
keepalive 1024;
|
keepalive 1024;
|
||||||
}
|
}
|
||||||
|
|
||||||
upstream jsmpeg {
|
upstream jsmpeg {
|
||||||
server 127.0.0.1:8082;
|
server 127.0.0.1:8082;
|
||||||
keepalive 1024;
|
keepalive 1024;
|
||||||
}
|
}
|
||||||
|
|
||||||
include go2rtc_upstream.conf;
|
include go2rtc_upstream.conf;
|
||||||
|
|
||||||
server {
|
server {
|
||||||
include listen.conf;
|
include listen.conf;
|
||||||
|
|
||||||
# enable HTTP/2 for TLS connections to eliminate browser 6-connection limit
|
# enable HTTP/2 for TLS connections to eliminate browser 6-connection limit
|
||||||
http2 on;
|
http2 on;
|
||||||
|
|
||||||
# vod settings
|
# vod settings
|
||||||
vod_base_url '';
|
vod_base_url '';
|
||||||
vod_segments_base_url '';
|
vod_segments_base_url '';
|
||||||
vod_mode mapped;
|
vod_mode mapped;
|
||||||
vod_max_mapping_response_size 1m;
|
vod_max_mapping_response_size 1m;
|
||||||
vod_upstream_location /api;
|
vod_upstream_location /api;
|
||||||
vod_align_segments_to_key_frames on;
|
vod_align_segments_to_key_frames on;
|
||||||
vod_manifest_segment_durations_mode accurate;
|
vod_manifest_segment_durations_mode accurate;
|
||||||
vod_ignore_edit_list on;
|
vod_ignore_edit_list on;
|
||||||
vod_segment_duration 10000;
|
vod_segment_duration 10000;
|
||||||
|
|
||||||
# MPEG-TS settings (not used when fMP4 is enabled, kept for reference)
|
# MPEG-TS settings (not used when fMP4 is enabled, kept for reference)
|
||||||
vod_hls_mpegts_align_frames off;
|
vod_hls_mpegts_align_frames off;
|
||||||
vod_hls_mpegts_interleave_frames on;
|
vod_hls_mpegts_interleave_frames on;
|
||||||
|
|
||||||
# file handle caching / aio
|
# file handle caching / aio
|
||||||
open_file_cache max=1000 inactive=5m;
|
open_file_cache max=1000 inactive=5m;
|
||||||
open_file_cache_valid 2m;
|
open_file_cache_valid 2m;
|
||||||
open_file_cache_min_uses 1;
|
open_file_cache_min_uses 1;
|
||||||
open_file_cache_errors on;
|
open_file_cache_errors on;
|
||||||
aio on;
|
aio on;
|
||||||
|
|
||||||
# file upload size
|
# file upload size
|
||||||
client_max_body_size 20M;
|
client_max_body_size 20M;
|
||||||
|
|
||||||
# https://github.com/kaltura/nginx-vod-module#vod_open_file_thread_pool
|
# https://github.com/kaltura/nginx-vod-module#vod_open_file_thread_pool
|
||||||
vod_open_file_thread_pool default;
|
vod_open_file_thread_pool default;
|
||||||
|
|
||||||
# vod caches
|
# vod caches
|
||||||
vod_metadata_cache metadata_cache 512m;
|
vod_metadata_cache metadata_cache 512m;
|
||||||
vod_mapping_cache mapping_cache 5m 10m;
|
vod_mapping_cache mapping_cache 5m 10m;
|
||||||
|
|
||||||
# gzip manifests
|
# gzip manifests
|
||||||
gzip on;
|
gzip on;
|
||||||
gzip_types application/vnd.apple.mpegurl;
|
gzip_types application/vnd.apple.mpegurl;
|
||||||
|
|
||||||
include auth_location.conf;
|
include auth_location.conf;
|
||||||
include base_path.conf;
|
include base_path.conf;
|
||||||
|
|
||||||
location /vod/ {
|
location = /vod-transcoded {
|
||||||
include auth_request.conf;
|
return 302 /vod-transcoded/;
|
||||||
aio threads;
|
}
|
||||||
vod hls;
|
|
||||||
|
location /vod-transcoded/ {
|
||||||
# Use fMP4 (fragmented MP4) instead of MPEG-TS for better performance
|
include auth_request.conf;
|
||||||
# Smaller segments, faster generation, better browser compatibility
|
proxy_pass http://127.0.0.1:5010;
|
||||||
vod_hls_container_format fmp4;
|
include proxy.conf;
|
||||||
|
}
|
||||||
secure_token $args;
|
|
||||||
secure_token_types application/vnd.apple.mpegurl;
|
location /vod/ {
|
||||||
|
include auth_request.conf;
|
||||||
add_header Cache-Control "no-store";
|
aio threads;
|
||||||
expires off;
|
vod hls;
|
||||||
|
|
||||||
keepalive_disable safari;
|
# Use fMP4 (fragmented MP4) instead of MPEG-TS for better performance
|
||||||
|
# Smaller segments, faster generation, better browser compatibility
|
||||||
# vod module returns 502 for non-existent media
|
vod_hls_container_format fmp4;
|
||||||
# https://github.com/kaltura/nginx-vod-module/issues/468
|
|
||||||
error_page 502 =404 /vod-not-found;
|
secure_token $args;
|
||||||
}
|
secure_token_types application/vnd.apple.mpegurl;
|
||||||
|
|
||||||
location = /vod-not-found {
|
add_header Cache-Control "no-store";
|
||||||
return 404;
|
expires off;
|
||||||
}
|
|
||||||
|
keepalive_disable safari;
|
||||||
location /stream/ {
|
|
||||||
include auth_request.conf;
|
# vod module returns 502 for non-existent media
|
||||||
add_header Cache-Control "no-store";
|
# https://github.com/kaltura/nginx-vod-module/issues/468
|
||||||
expires off;
|
error_page 502 =404 /vod-not-found;
|
||||||
|
}
|
||||||
types {
|
|
||||||
application/dash+xml mpd;
|
location = /vod-not-found {
|
||||||
application/vnd.apple.mpegurl m3u8;
|
return 404;
|
||||||
video/mp2t ts;
|
}
|
||||||
image/jpeg jpg;
|
|
||||||
}
|
location /stream/ {
|
||||||
|
include auth_request.conf;
|
||||||
root /tmp;
|
add_header Cache-Control "no-store";
|
||||||
}
|
expires off;
|
||||||
|
|
||||||
location /clips/ {
|
types {
|
||||||
include auth_request.conf;
|
application/dash+xml mpd;
|
||||||
types {
|
application/vnd.apple.mpegurl m3u8;
|
||||||
video/mp4 mp4;
|
video/mp2t ts;
|
||||||
image/jpeg jpg;
|
image/jpeg jpg;
|
||||||
}
|
}
|
||||||
|
|
||||||
expires 7d;
|
root /tmp;
|
||||||
add_header Cache-Control "public";
|
}
|
||||||
autoindex on;
|
|
||||||
root /media/frigate;
|
location /clips/ {
|
||||||
}
|
include auth_request.conf;
|
||||||
|
types {
|
||||||
location /cache/ {
|
video/mp4 mp4;
|
||||||
internal; # This tells nginx it's not accessible from the outside
|
image/jpeg jpg;
|
||||||
alias /tmp/cache/;
|
}
|
||||||
}
|
|
||||||
|
expires 7d;
|
||||||
location /recordings/ {
|
add_header Cache-Control "public";
|
||||||
include auth_request.conf;
|
autoindex on;
|
||||||
types {
|
root /media/frigate;
|
||||||
video/mp4 mp4;
|
}
|
||||||
}
|
|
||||||
|
location /cache/ {
|
||||||
autoindex on;
|
internal; # This tells nginx it's not accessible from the outside
|
||||||
autoindex_format json;
|
alias /tmp/cache/;
|
||||||
root /media/frigate;
|
}
|
||||||
}
|
|
||||||
|
location /recordings/ {
|
||||||
location /exports/ {
|
include auth_request.conf;
|
||||||
include auth_request.conf;
|
types {
|
||||||
types {
|
video/mp4 mp4;
|
||||||
video/mp4 mp4;
|
}
|
||||||
}
|
|
||||||
|
autoindex on;
|
||||||
autoindex on;
|
autoindex_format json;
|
||||||
autoindex_format json;
|
root /media/frigate;
|
||||||
root /media/frigate;
|
}
|
||||||
}
|
|
||||||
|
location /exports/ {
|
||||||
location /ws {
|
include auth_request.conf;
|
||||||
include auth_request.conf;
|
types {
|
||||||
proxy_pass http://mqtt_ws/;
|
video/mp4 mp4;
|
||||||
include proxy.conf;
|
}
|
||||||
}
|
|
||||||
|
autoindex on;
|
||||||
location /live/jsmpeg/ {
|
autoindex_format json;
|
||||||
include auth_request.conf;
|
root /media/frigate;
|
||||||
proxy_pass http://jsmpeg/;
|
}
|
||||||
include proxy.conf;
|
|
||||||
}
|
location /ws {
|
||||||
|
include auth_request.conf;
|
||||||
# frigate lovelace card uses this path
|
proxy_pass http://mqtt_ws/;
|
||||||
location /live/mse/api/ws {
|
include proxy.conf;
|
||||||
include auth_request.conf;
|
}
|
||||||
limit_except GET {
|
|
||||||
deny all;
|
location /live/jsmpeg/ {
|
||||||
}
|
include auth_request.conf;
|
||||||
proxy_pass http://go2rtc/api/ws;
|
proxy_pass http://jsmpeg/;
|
||||||
include proxy.conf;
|
include proxy.conf;
|
||||||
}
|
}
|
||||||
|
|
||||||
location /live/webrtc/api/ws {
|
# frigate lovelace card uses this path
|
||||||
include auth_request.conf;
|
location /live/mse/api/ws {
|
||||||
limit_except GET {
|
include auth_request.conf;
|
||||||
deny all;
|
limit_except GET {
|
||||||
}
|
deny all;
|
||||||
proxy_pass http://go2rtc/api/ws;
|
}
|
||||||
include proxy.conf;
|
proxy_pass http://go2rtc/api/ws;
|
||||||
}
|
include proxy.conf;
|
||||||
|
}
|
||||||
# pass through go2rtc player
|
|
||||||
location /live/webrtc/webrtc.html {
|
location /live/webrtc/api/ws {
|
||||||
include auth_request.conf;
|
include auth_request.conf;
|
||||||
limit_except GET {
|
limit_except GET {
|
||||||
deny all;
|
deny all;
|
||||||
}
|
}
|
||||||
proxy_pass http://go2rtc/webrtc.html;
|
proxy_pass http://go2rtc/api/ws;
|
||||||
include proxy.conf;
|
include proxy.conf;
|
||||||
}
|
}
|
||||||
|
|
||||||
# frontend uses this to fetch the version
|
# pass through go2rtc player
|
||||||
location /api/go2rtc/api {
|
location /live/webrtc/webrtc.html {
|
||||||
include auth_request.conf;
|
include auth_request.conf;
|
||||||
limit_except GET {
|
limit_except GET {
|
||||||
deny all;
|
deny all;
|
||||||
}
|
}
|
||||||
proxy_pass http://go2rtc/api;
|
proxy_pass http://go2rtc/webrtc.html;
|
||||||
include proxy.conf;
|
include proxy.conf;
|
||||||
}
|
}
|
||||||
|
|
||||||
# integration uses this to add webrtc candidate
|
# frontend uses this to fetch the version
|
||||||
location /api/go2rtc/webrtc {
|
location /api/go2rtc/api {
|
||||||
include auth_request.conf;
|
include auth_request.conf;
|
||||||
limit_except POST {
|
limit_except GET {
|
||||||
deny all;
|
deny all;
|
||||||
}
|
}
|
||||||
proxy_pass http://go2rtc/api/webrtc;
|
proxy_pass http://go2rtc/api;
|
||||||
include proxy.conf;
|
include proxy.conf;
|
||||||
}
|
}
|
||||||
|
|
||||||
location ~* /api/.*\.(jpg|jpeg|png|webp|gif)$ {
|
# integration uses this to add webrtc candidate
|
||||||
include auth_request.conf;
|
location /api/go2rtc/webrtc {
|
||||||
rewrite ^/api/(.*)$ /$1 break;
|
include auth_request.conf;
|
||||||
proxy_pass http://frigate_api;
|
limit_except POST {
|
||||||
include proxy.conf;
|
deny all;
|
||||||
}
|
}
|
||||||
|
proxy_pass http://go2rtc/api/webrtc;
|
||||||
location /api/ {
|
include proxy.conf;
|
||||||
include auth_request.conf;
|
}
|
||||||
add_header Cache-Control "no-store";
|
|
||||||
expires off;
|
location ~* /api/.*\.(jpg|jpeg|png|webp|gif)$ {
|
||||||
proxy_pass http://frigate_api/;
|
include auth_request.conf;
|
||||||
include proxy.conf;
|
rewrite ^/api/(.*)$ /$1 break;
|
||||||
|
proxy_pass http://frigate_api;
|
||||||
proxy_cache api_cache;
|
include proxy.conf;
|
||||||
proxy_cache_lock on;
|
}
|
||||||
proxy_cache_use_stale updating;
|
|
||||||
proxy_cache_valid 200 5s;
|
location /api/ {
|
||||||
proxy_cache_bypass $http_x_cache_bypass;
|
include auth_request.conf;
|
||||||
proxy_no_cache $should_not_cache;
|
add_header Cache-Control "no-store";
|
||||||
add_header X-Cache-Status $upstream_cache_status;
|
expires off;
|
||||||
|
proxy_pass http://frigate_api/;
|
||||||
location /api/vod/ {
|
include proxy.conf;
|
||||||
include auth_request.conf;
|
|
||||||
proxy_pass http://frigate_api/vod/;
|
proxy_cache api_cache;
|
||||||
include proxy.conf;
|
proxy_cache_lock on;
|
||||||
proxy_cache off;
|
proxy_cache_use_stale updating;
|
||||||
}
|
proxy_cache_valid 200 5s;
|
||||||
|
proxy_cache_bypass $http_x_cache_bypass;
|
||||||
location /api/login {
|
proxy_no_cache $should_not_cache;
|
||||||
auth_request off;
|
add_header X-Cache-Status $upstream_cache_status;
|
||||||
rewrite ^/api(/.*)$ $1 break;
|
|
||||||
proxy_pass http://frigate_api;
|
location /api/vod/ {
|
||||||
include proxy.conf;
|
include auth_request.conf;
|
||||||
}
|
proxy_pass http://frigate_api/vod/;
|
||||||
|
include proxy.conf;
|
||||||
# Allow unauthenticated access to the first_time_login endpoint
|
proxy_cache off;
|
||||||
# so the login page can load help text before authentication.
|
}
|
||||||
location /api/auth/first_time_login {
|
|
||||||
auth_request off;
|
location /api/login {
|
||||||
limit_except GET {
|
auth_request off;
|
||||||
deny all;
|
rewrite ^/api(/.*)$ $1 break;
|
||||||
}
|
proxy_pass http://frigate_api;
|
||||||
rewrite ^/api(/.*)$ $1 break;
|
include proxy.conf;
|
||||||
proxy_pass http://frigate_api;
|
}
|
||||||
include proxy.conf;
|
|
||||||
}
|
# Allow unauthenticated access to the first_time_login endpoint
|
||||||
|
# so the login page can load help text before authentication.
|
||||||
location /api/stats {
|
location /api/auth/first_time_login {
|
||||||
include auth_request.conf;
|
auth_request off;
|
||||||
access_log off;
|
limit_except GET {
|
||||||
rewrite ^/api(/.*)$ $1 break;
|
deny all;
|
||||||
proxy_pass http://frigate_api;
|
}
|
||||||
include proxy.conf;
|
rewrite ^/api(/.*)$ $1 break;
|
||||||
}
|
proxy_pass http://frigate_api;
|
||||||
|
include proxy.conf;
|
||||||
location /api/version {
|
}
|
||||||
include auth_request.conf;
|
|
||||||
access_log off;
|
location /api/stats {
|
||||||
rewrite ^/api(/.*)$ $1 break;
|
include auth_request.conf;
|
||||||
proxy_pass http://frigate_api;
|
access_log off;
|
||||||
include proxy.conf;
|
rewrite ^/api(/.*)$ $1 break;
|
||||||
}
|
proxy_pass http://frigate_api;
|
||||||
}
|
include proxy.conf;
|
||||||
|
}
|
||||||
location / {
|
|
||||||
# do not require auth for static assets
|
location /api/version {
|
||||||
add_header Cache-Control "no-store";
|
include auth_request.conf;
|
||||||
expires off;
|
access_log off;
|
||||||
|
rewrite ^/api(/.*)$ $1 break;
|
||||||
location /assets/ {
|
proxy_pass http://frigate_api;
|
||||||
access_log off;
|
include proxy.conf;
|
||||||
expires 1y;
|
}
|
||||||
add_header Cache-Control "public";
|
}
|
||||||
}
|
|
||||||
|
location / {
|
||||||
location /fonts/ {
|
# do not require auth for static assets
|
||||||
access_log off;
|
add_header Cache-Control "no-store";
|
||||||
expires 1y;
|
expires off;
|
||||||
add_header Cache-Control "public";
|
|
||||||
}
|
location /assets/ {
|
||||||
|
access_log off;
|
||||||
location /locales/ {
|
expires 1y;
|
||||||
access_log off;
|
add_header Cache-Control "public";
|
||||||
add_header Cache-Control "public";
|
}
|
||||||
}
|
|
||||||
|
location /fonts/ {
|
||||||
location ~ ^/.*-([A-Za-z0-9]+)\.webmanifest$ {
|
access_log off;
|
||||||
access_log off;
|
expires 1y;
|
||||||
expires 1y;
|
add_header Cache-Control "public";
|
||||||
add_header Cache-Control "public";
|
}
|
||||||
default_type application/json;
|
|
||||||
proxy_set_header Accept-Encoding "";
|
location /locales/ {
|
||||||
sub_filter_once off;
|
access_log off;
|
||||||
sub_filter_types application/json;
|
add_header Cache-Control "public";
|
||||||
sub_filter '"start_url": "/BASE_PATH/"' '"start_url" : "$http_x_ingress_path/"';
|
}
|
||||||
sub_filter '"src": "/BASE_PATH/' '"src": "$http_x_ingress_path/';
|
|
||||||
}
|
location ~ ^/.*-([A-Za-z0-9]+)\.webmanifest$ {
|
||||||
|
access_log off;
|
||||||
sub_filter 'href="/BASE_PATH/' 'href="$http_x_ingress_path/';
|
expires 1y;
|
||||||
sub_filter 'url(/BASE_PATH/' 'url($http_x_ingress_path/';
|
add_header Cache-Control "public";
|
||||||
sub_filter '"/BASE_PATH/dist/' '"$http_x_ingress_path/dist/';
|
default_type application/json;
|
||||||
sub_filter '"/BASE_PATH/js/' '"$http_x_ingress_path/js/';
|
proxy_set_header Accept-Encoding "";
|
||||||
sub_filter '"/BASE_PATH/assets/' '"$http_x_ingress_path/assets/';
|
sub_filter_once off;
|
||||||
sub_filter '"/BASE_PATH/locales/' '"$http_x_ingress_path/locales/';
|
sub_filter_types application/json;
|
||||||
sub_filter '"/BASE_PATH/monacoeditorwork/' '"$http_x_ingress_path/assets/';
|
sub_filter '"start_url": "/BASE_PATH/"' '"start_url" : "$http_x_ingress_path/"';
|
||||||
sub_filter 'return"/BASE_PATH/"' 'return window.baseUrl';
|
sub_filter '"src": "/BASE_PATH/' '"src": "$http_x_ingress_path/';
|
||||||
sub_filter '<body>' '<body><script>window.baseUrl="$http_x_ingress_path/";</script>';
|
}
|
||||||
sub_filter_types text/css application/javascript;
|
|
||||||
sub_filter_once off;
|
sub_filter 'href="/BASE_PATH/' 'href="$http_x_ingress_path/';
|
||||||
|
sub_filter 'url(/BASE_PATH/' 'url($http_x_ingress_path/';
|
||||||
root /opt/frigate/web;
|
sub_filter '"/BASE_PATH/dist/' '"$http_x_ingress_path/dist/';
|
||||||
try_files $uri $uri.html $uri/ /index.html;
|
sub_filter '"/BASE_PATH/js/' '"$http_x_ingress_path/js/';
|
||||||
}
|
sub_filter '"/BASE_PATH/assets/' '"$http_x_ingress_path/assets/';
|
||||||
}
|
sub_filter '"/BASE_PATH/locales/' '"$http_x_ingress_path/locales/';
|
||||||
}
|
sub_filter '"/BASE_PATH/monacoeditorwork/' '"$http_x_ingress_path/assets/';
|
||||||
|
sub_filter 'return"/BASE_PATH/"' 'return window.baseUrl';
|
||||||
|
sub_filter '<body>' '<body><script>window.baseUrl="$http_x_ingress_path/";</script>';
|
||||||
|
sub_filter_types text/css application/javascript;
|
||||||
|
sub_filter_once off;
|
||||||
|
|
||||||
|
root /opt/frigate/web;
|
||||||
|
try_files $uri $uri.html $uri/ /index.html;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@ -1,37 +1,38 @@
|
|||||||
# syntax=docker/dockerfile:1.4
|
# syntax=docker/dockerfile:1.4
|
||||||
|
|
||||||
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
|
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
|
||||||
ARG DEBIAN_FRONTEND=noninteractive
|
ARG DEBIAN_FRONTEND=noninteractive
|
||||||
|
|
||||||
# Globally set pip break-system-packages option to avoid having to specify it every time
|
# Globally set pip break-system-packages option to avoid having to specify it every time
|
||||||
ARG PIP_BREAK_SYSTEM_PACKAGES=1
|
ARG PIP_BREAK_SYSTEM_PACKAGES=1
|
||||||
|
|
||||||
FROM wheels AS trt-wheels
|
FROM wheels AS trt-wheels
|
||||||
ARG PIP_BREAK_SYSTEM_PACKAGES
|
ARG PIP_BREAK_SYSTEM_PACKAGES
|
||||||
|
|
||||||
# Install TensorRT wheels
|
# Install TensorRT wheels
|
||||||
COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
|
COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
|
||||||
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
|
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
|
||||||
|
|
||||||
# remove dependencies from the requirements that have type constraints
|
# remove dependencies from the requirements that have type constraints
|
||||||
RUN sed -i '/\[.*\]/d' /requirements-wheels.txt \
|
RUN sed -i '/\[.*\]/d' /requirements-wheels.txt \
|
||||||
&& pip3 wheel --wheel-dir=/trt-wheels -c /requirements-wheels.txt -r /requirements-tensorrt.txt
|
&& pip3 wheel --wheel-dir=/trt-wheels -c /requirements-wheels.txt -r /requirements-tensorrt.txt
|
||||||
|
|
||||||
FROM deps AS frigate-tensorrt
|
FROM deps AS frigate-tensorrt
|
||||||
ARG PIP_BREAK_SYSTEM_PACKAGES
|
ARG PIP_BREAK_SYSTEM_PACKAGES
|
||||||
|
|
||||||
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
|
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
|
||||||
pip3 uninstall -y onnxruntime \
|
pip3 uninstall -y onnxruntime \
|
||||||
&& pip3 install -U /deps/trt-wheels/*.whl
|
&& pip3 install -U /deps/trt-wheels/*.whl
|
||||||
|
|
||||||
COPY --from=rootfs / /
|
COPY --from=rootfs / /
|
||||||
COPY docker/tensorrt/detector/rootfs/etc/ld.so.conf.d /etc/ld.so.conf.d
|
RUN pip3 install --no-cache-dir -r /opt/frigate/transcode_proxy/requirements.txt
|
||||||
RUN ldconfig
|
COPY docker/tensorrt/detector/rootfs/etc/ld.so.conf.d /etc/ld.so.conf.d
|
||||||
|
RUN ldconfig
|
||||||
WORKDIR /opt/frigate/
|
|
||||||
|
WORKDIR /opt/frigate/
|
||||||
# Dev Container w/ TRT
|
|
||||||
FROM devcontainer AS devcontainer-trt
|
# Dev Container w/ TRT
|
||||||
|
FROM devcontainer AS devcontainer-trt
|
||||||
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
|
|
||||||
pip3 install -U /deps/trt-wheels/*.whl
|
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
|
||||||
|
pip3 install -U /deps/trt-wheels/*.whl
|
||||||
|
|||||||
@ -1,105 +1,105 @@
|
|||||||
variable "ARCH" {
|
variable "ARCH" {
|
||||||
default = "amd64"
|
default = "amd64"
|
||||||
}
|
}
|
||||||
variable "BASE_IMAGE" {
|
variable "BASE_IMAGE" {
|
||||||
default = null
|
default = null
|
||||||
}
|
}
|
||||||
variable "SLIM_BASE" {
|
variable "SLIM_BASE" {
|
||||||
default = null
|
default = null
|
||||||
}
|
}
|
||||||
variable "TRT_BASE" {
|
variable "TRT_BASE" {
|
||||||
default = null
|
default = null
|
||||||
}
|
}
|
||||||
variable "COMPUTE_LEVEL" {
|
variable "COMPUTE_LEVEL" {
|
||||||
default = ""
|
default = ""
|
||||||
}
|
}
|
||||||
variable "BASE_HOOK" {
|
variable "BASE_HOOK" {
|
||||||
# Ensure an up-to-date python 3.11 is available in jetson images
|
# Ensure an up-to-date python 3.11 is available in jetson images
|
||||||
default = <<EOT
|
default = <<EOT
|
||||||
if grep -iq \"ubuntu\" /etc/os-release; then
|
if grep -iq "ubuntu" /etc/os-release; then
|
||||||
. /etc/os-release
|
. /etc/os-release
|
||||||
|
|
||||||
# Add the deadsnakes PPA repository
|
# Add the deadsnakes PPA repository
|
||||||
echo "deb https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu $VERSION_CODENAME main" >> /etc/apt/sources.list.d/deadsnakes.list
|
echo "deb https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu $VERSION_CODENAME main" >> /etc/apt/sources.list.d/deadsnakes.list
|
||||||
echo "deb-src https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu $VERSION_CODENAME main" >> /etc/apt/sources.list.d/deadsnakes.list
|
echo "deb-src https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu $VERSION_CODENAME main" >> /etc/apt/sources.list.d/deadsnakes.list
|
||||||
|
|
||||||
# Add deadsnakes signing key
|
# Add deadsnakes signing key
|
||||||
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F23C5A6CF475977595C89F51BA6932366A755776
|
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F23C5A6CF475977595C89F51BA6932366A755776
|
||||||
fi
|
fi
|
||||||
EOT
|
EOT
|
||||||
}
|
}
|
||||||
|
|
||||||
target "_build_args" {
|
target "_build_args" {
|
||||||
args = {
|
args = {
|
||||||
BASE_IMAGE = BASE_IMAGE,
|
BASE_IMAGE = BASE_IMAGE,
|
||||||
SLIM_BASE = SLIM_BASE,
|
SLIM_BASE = SLIM_BASE,
|
||||||
TRT_BASE = TRT_BASE,
|
TRT_BASE = TRT_BASE,
|
||||||
COMPUTE_LEVEL = COMPUTE_LEVEL,
|
COMPUTE_LEVEL = COMPUTE_LEVEL,
|
||||||
BASE_HOOK = BASE_HOOK
|
BASE_HOOK = BASE_HOOK
|
||||||
}
|
}
|
||||||
platforms = ["linux/${ARCH}"]
|
platforms = ["linux/${ARCH}"]
|
||||||
}
|
}
|
||||||
|
|
||||||
target wget {
|
target wget {
|
||||||
dockerfile = "docker/main/Dockerfile"
|
dockerfile = "docker/main/Dockerfile"
|
||||||
target = "wget"
|
target = "wget"
|
||||||
inherits = ["_build_args"]
|
inherits = ["_build_args"]
|
||||||
}
|
}
|
||||||
|
|
||||||
target deps {
|
target deps {
|
||||||
dockerfile = "docker/main/Dockerfile"
|
dockerfile = "docker/main/Dockerfile"
|
||||||
target = "deps"
|
target = "deps"
|
||||||
inherits = ["_build_args"]
|
inherits = ["_build_args"]
|
||||||
}
|
}
|
||||||
|
|
||||||
target rootfs {
|
target rootfs {
|
||||||
dockerfile = "docker/main/Dockerfile"
|
dockerfile = "docker/main/Dockerfile"
|
||||||
target = "rootfs"
|
target = "rootfs"
|
||||||
inherits = ["_build_args"]
|
inherits = ["_build_args"]
|
||||||
}
|
}
|
||||||
|
|
||||||
target wheels {
|
target wheels {
|
||||||
dockerfile = "docker/main/Dockerfile"
|
dockerfile = "docker/main/Dockerfile"
|
||||||
target = "wheels"
|
target = "wheels"
|
||||||
inherits = ["_build_args"]
|
inherits = ["_build_args"]
|
||||||
}
|
}
|
||||||
|
|
||||||
target devcontainer {
|
target devcontainer {
|
||||||
dockerfile = "docker/main/Dockerfile"
|
dockerfile = "docker/main/Dockerfile"
|
||||||
platforms = ["linux/amd64"]
|
platforms = ["linux/amd64"]
|
||||||
target = "devcontainer"
|
target = "devcontainer"
|
||||||
}
|
}
|
||||||
|
|
||||||
target "trt-deps" {
|
target "trt-deps" {
|
||||||
dockerfile = "docker/tensorrt/Dockerfile.base"
|
dockerfile = "docker/tensorrt/Dockerfile.base"
|
||||||
context = "."
|
context = "."
|
||||||
contexts = {
|
contexts = {
|
||||||
deps = "target:deps",
|
deps = "target:deps",
|
||||||
}
|
}
|
||||||
inherits = ["_build_args"]
|
inherits = ["_build_args"]
|
||||||
}
|
}
|
||||||
|
|
||||||
target "tensorrt" {
|
target "tensorrt" {
|
||||||
dockerfile = "docker/tensorrt/Dockerfile.${ARCH}"
|
dockerfile = "docker/tensorrt/Dockerfile.${ARCH}"
|
||||||
context = "."
|
context = "."
|
||||||
contexts = {
|
contexts = {
|
||||||
wget = "target:wget",
|
wget = "target:wget",
|
||||||
wheels = "target:wheels",
|
wheels = "target:wheels",
|
||||||
deps = "target:deps",
|
deps = "target:deps",
|
||||||
rootfs = "target:rootfs"
|
rootfs = "target:rootfs"
|
||||||
}
|
}
|
||||||
target = "frigate-tensorrt"
|
target = "frigate-tensorrt"
|
||||||
inherits = ["_build_args"]
|
inherits = ["_build_args"]
|
||||||
}
|
}
|
||||||
|
|
||||||
target "devcontainer-trt" {
|
target "devcontainer-trt" {
|
||||||
dockerfile = "docker/tensorrt/Dockerfile.amd64"
|
dockerfile = "docker/tensorrt/Dockerfile.amd64"
|
||||||
context = "."
|
context = "."
|
||||||
contexts = {
|
contexts = {
|
||||||
wheels = "target:wheels",
|
wheels = "target:wheels",
|
||||||
trt-deps = "target:trt-deps",
|
trt-deps = "target:trt-deps",
|
||||||
devcontainer = "target:devcontainer"
|
devcontainer = "target:devcontainer"
|
||||||
}
|
}
|
||||||
platforms = ["linux/amd64"]
|
platforms = ["linux/amd64"]
|
||||||
target = "devcontainer-trt"
|
target = "devcontainer-trt"
|
||||||
}
|
}
|
||||||
|
|||||||
3427
frigate/api/media.py
3427
frigate/api/media.py
File diff suppressed because it is too large
Load Diff
@ -1,458 +1,468 @@
|
|||||||
"""Recording APIs."""
|
"""Recording APIs."""
|
||||||
|
|
||||||
import datetime as dt
|
import datetime as dt
|
||||||
import logging
|
import logging
|
||||||
from datetime import datetime, timedelta
|
from datetime import datetime, timedelta
|
||||||
from functools import reduce
|
from functools import reduce
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import List
|
from typing import List
|
||||||
from urllib.parse import unquote
|
from urllib.parse import unquote
|
||||||
|
|
||||||
from fastapi import APIRouter, Depends, Request
|
from fastapi import APIRouter, Depends, Request
|
||||||
from fastapi import Path as PathParam
|
from fastapi import Path as PathParam
|
||||||
from fastapi.responses import JSONResponse
|
from fastapi.responses import JSONResponse
|
||||||
from peewee import fn, operator
|
from peewee import fn, operator
|
||||||
|
|
||||||
from frigate.api.auth import (
|
from frigate.api.auth import (
|
||||||
allow_any_authenticated,
|
allow_any_authenticated,
|
||||||
get_allowed_cameras_for_filter,
|
get_allowed_cameras_for_filter,
|
||||||
require_camera_access,
|
require_camera_access,
|
||||||
require_role,
|
require_role,
|
||||||
)
|
)
|
||||||
from frigate.api.defs.query.recordings_query_parameters import (
|
from frigate.api.defs.query.recordings_query_parameters import (
|
||||||
MediaRecordingsAvailabilityQueryParams,
|
MediaRecordingsAvailabilityQueryParams,
|
||||||
MediaRecordingsSummaryQueryParams,
|
MediaRecordingsSummaryQueryParams,
|
||||||
RecordingsDeleteQueryParams,
|
RecordingsDeleteQueryParams,
|
||||||
)
|
)
|
||||||
from frigate.api.defs.response.generic_response import GenericResponse
|
from frigate.api.defs.response.generic_response import GenericResponse
|
||||||
from frigate.api.defs.tags import Tags
|
from frigate.api.defs.tags import Tags
|
||||||
from frigate.const import RECORD_DIR
|
from frigate.const import RECORD_DIR
|
||||||
from frigate.models import Event, Recordings
|
from frigate.models import Event, Recordings
|
||||||
from frigate.util.time import get_dst_transitions
|
from frigate.util.time import get_dst_transitions
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
router = APIRouter(tags=[Tags.recordings])
|
router = APIRouter(tags=[Tags.recordings])
|
||||||
|
|
||||||
|
|
||||||
@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
|
@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
|
||||||
def get_recordings_storage_usage(request: Request):
|
def get_recordings_storage_usage(request: Request):
|
||||||
recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
|
recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
|
||||||
"storage"
|
"storage"
|
||||||
][RECORD_DIR]
|
][RECORD_DIR]
|
||||||
|
|
||||||
if not recording_stats:
|
if not recording_stats:
|
||||||
return JSONResponse({})
|
return JSONResponse({})
|
||||||
|
|
||||||
total_mb = recording_stats["total"]
|
total_mb = recording_stats["total"]
|
||||||
|
|
||||||
camera_usages: dict[str, dict] = (
|
camera_usages: dict[str, dict] = (
|
||||||
request.app.storage_maintainer.calculate_camera_usages()
|
request.app.storage_maintainer.calculate_camera_usages()
|
||||||
)
|
)
|
||||||
|
|
||||||
for camera_name in camera_usages.keys():
|
for camera_name in camera_usages.keys():
|
||||||
if camera_usages.get(camera_name, {}).get("usage"):
|
if camera_usages.get(camera_name, {}).get("usage"):
|
||||||
camera_usages[camera_name]["usage_percent"] = (
|
camera_usages[camera_name]["usage_percent"] = (
|
||||||
camera_usages.get(camera_name, {}).get("usage", 0) / total_mb
|
camera_usages.get(camera_name, {}).get("usage", 0) / total_mb
|
||||||
) * 100
|
) * 100
|
||||||
|
|
||||||
return JSONResponse(content=camera_usages)
|
return JSONResponse(content=camera_usages)
|
||||||
|
|
||||||
|
|
||||||
@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())])
|
@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())])
|
||||||
def all_recordings_summary(
|
def all_recordings_summary(
|
||||||
request: Request,
|
request: Request,
|
||||||
params: MediaRecordingsSummaryQueryParams = Depends(),
|
params: MediaRecordingsSummaryQueryParams = Depends(),
|
||||||
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
||||||
):
|
):
|
||||||
"""Returns true/false by day indicating if recordings exist"""
|
"""Returns true/false by day indicating if recordings exist"""
|
||||||
|
|
||||||
cameras = params.cameras
|
cameras = params.cameras
|
||||||
if cameras != "all":
|
if cameras != "all":
|
||||||
requested = set(unquote(cameras).split(","))
|
requested = set(unquote(cameras).split(","))
|
||||||
filtered = requested.intersection(allowed_cameras)
|
filtered = requested.intersection(allowed_cameras)
|
||||||
if not filtered:
|
if not filtered:
|
||||||
return JSONResponse(content={})
|
return JSONResponse(content={})
|
||||||
camera_list = list(filtered)
|
camera_list = list(filtered)
|
||||||
else:
|
else:
|
||||||
camera_list = allowed_cameras
|
camera_list = allowed_cameras
|
||||||
|
|
||||||
time_range_query = (
|
time_range_query = (
|
||||||
Recordings.select(
|
Recordings.select(
|
||||||
fn.MIN(Recordings.start_time).alias("min_time"),
|
fn.MIN(Recordings.start_time).alias("min_time"),
|
||||||
fn.MAX(Recordings.start_time).alias("max_time"),
|
fn.MAX(Recordings.start_time).alias("max_time"),
|
||||||
)
|
)
|
||||||
.where(Recordings.camera << camera_list)
|
.where(Recordings.camera << camera_list)
|
||||||
.dicts()
|
.dicts()
|
||||||
.get()
|
.get()
|
||||||
)
|
)
|
||||||
|
|
||||||
min_time = time_range_query.get("min_time")
|
min_time = time_range_query.get("min_time")
|
||||||
max_time = time_range_query.get("max_time")
|
max_time = time_range_query.get("max_time")
|
||||||
|
|
||||||
if min_time is None or max_time is None:
|
if min_time is None or max_time is None:
|
||||||
return JSONResponse(content={})
|
return JSONResponse(content={})
|
||||||
|
|
||||||
dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
|
dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
|
||||||
|
|
||||||
days: dict[str, bool] = {}
|
days: dict[str, bool] = {}
|
||||||
|
|
||||||
for period_start, period_end, period_offset in dst_periods:
|
for period_start, period_end, period_offset in dst_periods:
|
||||||
day_expr = ((Recordings.start_time + period_offset) / 86400).cast("int")
|
day_expr = ((Recordings.start_time + period_offset) / 86400).cast("int")
|
||||||
|
|
||||||
period_query = (
|
period_query = (
|
||||||
Recordings.select(day_expr.alias("day_idx"))
|
Recordings.select(day_expr.alias("day_idx"))
|
||||||
.where(
|
.where(
|
||||||
(Recordings.camera << camera_list)
|
(Recordings.camera << camera_list)
|
||||||
& (Recordings.end_time >= period_start)
|
& (Recordings.end_time >= period_start)
|
||||||
& (Recordings.start_time <= period_end)
|
& (Recordings.start_time <= period_end)
|
||||||
)
|
)
|
||||||
.distinct()
|
.distinct()
|
||||||
.namedtuples()
|
.namedtuples()
|
||||||
)
|
)
|
||||||
|
|
||||||
for g in period_query:
|
for g in period_query:
|
||||||
day_str = (dt.date(1970, 1, 1) + dt.timedelta(days=g.day_idx)).isoformat()
|
day_str = (dt.date(1970, 1, 1) + dt.timedelta(days=g.day_idx)).isoformat()
|
||||||
days[day_str] = True
|
days[day_str] = True
|
||||||
|
|
||||||
return JSONResponse(content=dict(sorted(days.items())))
|
return JSONResponse(content=dict(sorted(days.items())))
|
||||||
|
|
||||||
|
|
||||||
@router.get(
|
@router.get(
|
||||||
"/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)]
|
"/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)]
|
||||||
)
|
)
|
||||||
async def recordings_summary(camera_name: str, timezone: str = "utc"):
|
async def recordings_summary(camera_name: str, timezone: str = "utc"):
|
||||||
"""Returns hourly summary for recordings of given camera"""
|
"""Returns hourly summary for recordings of given camera"""
|
||||||
|
|
||||||
time_range_query = (
|
time_range_query = (
|
||||||
Recordings.select(
|
Recordings.select(
|
||||||
fn.MIN(Recordings.start_time).alias("min_time"),
|
fn.MIN(Recordings.start_time).alias("min_time"),
|
||||||
fn.MAX(Recordings.start_time).alias("max_time"),
|
fn.MAX(Recordings.start_time).alias("max_time"),
|
||||||
)
|
)
|
||||||
.where(Recordings.camera == camera_name)
|
.where(Recordings.camera == camera_name)
|
||||||
.dicts()
|
.dicts()
|
||||||
.get()
|
.get()
|
||||||
)
|
)
|
||||||
|
|
||||||
min_time = time_range_query.get("min_time")
|
min_time = time_range_query.get("min_time")
|
||||||
max_time = time_range_query.get("max_time")
|
max_time = time_range_query.get("max_time")
|
||||||
|
|
||||||
days: dict[str, dict] = {}
|
days: dict[str, dict] = {}
|
||||||
|
|
||||||
if min_time is None or max_time is None:
|
if min_time is None or max_time is None:
|
||||||
return JSONResponse(content=list(days.values()))
|
return JSONResponse(content=list(days.values()))
|
||||||
|
|
||||||
dst_periods = get_dst_transitions(timezone, min_time, max_time)
|
dst_periods = get_dst_transitions(timezone, min_time, max_time)
|
||||||
|
|
||||||
for period_start, period_end, period_offset in dst_periods:
|
for period_start, period_end, period_offset in dst_periods:
|
||||||
hours_offset = int(period_offset / 60 / 60)
|
hours_offset = int(period_offset / 60 / 60)
|
||||||
minutes_offset = int(period_offset / 60 - hours_offset * 60)
|
minutes_offset = int(period_offset / 60 - hours_offset * 60)
|
||||||
period_hour_modifier = f"{hours_offset} hour"
|
period_hour_modifier = f"{hours_offset} hour"
|
||||||
period_minute_modifier = f"{minutes_offset} minute"
|
period_minute_modifier = f"{minutes_offset} minute"
|
||||||
|
|
||||||
recording_groups = (
|
recording_groups = (
|
||||||
Recordings.select(
|
Recordings.select(
|
||||||
fn.strftime(
|
fn.strftime(
|
||||||
"%Y-%m-%d %H",
|
"%Y-%m-%d %H",
|
||||||
fn.datetime(
|
fn.datetime(
|
||||||
Recordings.start_time,
|
Recordings.start_time,
|
||||||
"unixepoch",
|
"unixepoch",
|
||||||
period_hour_modifier,
|
period_hour_modifier,
|
||||||
period_minute_modifier,
|
period_minute_modifier,
|
||||||
),
|
),
|
||||||
).alias("hour"),
|
).alias("hour"),
|
||||||
fn.SUM(Recordings.duration).alias("duration"),
|
fn.SUM(Recordings.duration).alias("duration"),
|
||||||
fn.SUM(Recordings.motion).alias("motion"),
|
fn.SUM(Recordings.motion).alias("motion"),
|
||||||
fn.SUM(Recordings.objects).alias("objects"),
|
fn.SUM(Recordings.objects).alias("objects"),
|
||||||
)
|
)
|
||||||
.where(
|
.where(
|
||||||
(Recordings.camera == camera_name)
|
(Recordings.camera == camera_name)
|
||||||
& (Recordings.end_time >= period_start)
|
& (Recordings.end_time >= period_start)
|
||||||
& (Recordings.start_time <= period_end)
|
& (Recordings.start_time <= period_end)
|
||||||
)
|
)
|
||||||
.group_by((Recordings.start_time + period_offset).cast("int") / 3600)
|
.group_by((Recordings.start_time + period_offset).cast("int") / 3600)
|
||||||
.order_by(Recordings.start_time.desc())
|
.order_by(Recordings.start_time.desc())
|
||||||
.namedtuples()
|
.namedtuples()
|
||||||
)
|
)
|
||||||
|
|
||||||
event_groups = (
|
event_groups = (
|
||||||
Event.select(
|
Event.select(
|
||||||
fn.strftime(
|
fn.strftime(
|
||||||
"%Y-%m-%d %H",
|
"%Y-%m-%d %H",
|
||||||
fn.datetime(
|
fn.datetime(
|
||||||
Event.start_time,
|
Event.start_time,
|
||||||
"unixepoch",
|
"unixepoch",
|
||||||
period_hour_modifier,
|
period_hour_modifier,
|
||||||
period_minute_modifier,
|
period_minute_modifier,
|
||||||
),
|
),
|
||||||
).alias("hour"),
|
).alias("hour"),
|
||||||
fn.COUNT(Event.id).alias("count"),
|
fn.COUNT(Event.id).alias("count"),
|
||||||
)
|
)
|
||||||
.where(Event.camera == camera_name, Event.has_clip)
|
.where(Event.camera == camera_name, Event.has_clip)
|
||||||
.where(
|
.where(
|
||||||
(Event.start_time >= period_start) & (Event.start_time <= period_end)
|
(Event.start_time >= period_start) & (Event.start_time <= period_end)
|
||||||
)
|
)
|
||||||
.group_by((Event.start_time + period_offset).cast("int") / 3600)
|
.group_by((Event.start_time + period_offset).cast("int") / 3600)
|
||||||
.namedtuples()
|
.namedtuples()
|
||||||
)
|
)
|
||||||
|
|
||||||
event_map = {g.hour: g.count for g in event_groups}
|
event_map = {g.hour: g.count for g in event_groups}
|
||||||
|
|
||||||
for recording_group in recording_groups:
|
for recording_group in recording_groups:
|
||||||
parts = recording_group.hour.split()
|
parts = recording_group.hour.split()
|
||||||
hour = parts[1]
|
hour = parts[1]
|
||||||
day = parts[0]
|
day = parts[0]
|
||||||
events_count = event_map.get(recording_group.hour, 0)
|
events_count = event_map.get(recording_group.hour, 0)
|
||||||
hour_data = {
|
hour_data = {
|
||||||
"hour": hour,
|
"hour": hour,
|
||||||
"events": events_count,
|
"events": events_count,
|
||||||
"motion": recording_group.motion,
|
"motion": recording_group.motion,
|
||||||
"objects": recording_group.objects,
|
"objects": recording_group.objects,
|
||||||
"duration": round(recording_group.duration),
|
"duration": round(recording_group.duration),
|
||||||
}
|
}
|
||||||
if day in days:
|
if day in days:
|
||||||
# merge counts if already present (edge-case at DST boundary)
|
# merge counts if already present (edge-case at DST boundary)
|
||||||
days[day]["events"] += events_count or 0
|
days[day]["events"] += events_count or 0
|
||||||
days[day]["hours"].append(hour_data)
|
days[day]["hours"].append(hour_data)
|
||||||
else:
|
else:
|
||||||
days[day] = {
|
days[day] = {
|
||||||
"events": events_count or 0,
|
"events": events_count or 0,
|
||||||
"hours": [hour_data],
|
"hours": [hour_data],
|
||||||
"day": day,
|
"day": day,
|
||||||
}
|
}
|
||||||
|
|
||||||
return JSONResponse(content=list(days.values()))
|
return JSONResponse(content=list(days.values()))
|
||||||
|
|
||||||
|
|
||||||
@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)])
|
@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)])
|
||||||
async def recordings(
|
async def recordings(
|
||||||
camera_name: str,
|
camera_name: str,
|
||||||
after: float = (datetime.now() - timedelta(hours=1)).timestamp(),
|
after: float = (datetime.now() - timedelta(hours=1)).timestamp(),
|
||||||
before: float = datetime.now().timestamp(),
|
before: float = datetime.now().timestamp(),
|
||||||
):
|
variant: str = "main",
|
||||||
"""Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used"""
|
):
|
||||||
recordings = (
|
"""Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used"""
|
||||||
Recordings.select(
|
query = (
|
||||||
Recordings.id,
|
Recordings.select(
|
||||||
Recordings.start_time,
|
Recordings.id,
|
||||||
Recordings.end_time,
|
Recordings.camera,
|
||||||
Recordings.segment_size,
|
Recordings.start_time,
|
||||||
Recordings.motion,
|
Recordings.end_time,
|
||||||
Recordings.objects,
|
Recordings.path,
|
||||||
Recordings.motion_heatmap,
|
Recordings.variant,
|
||||||
Recordings.duration,
|
Recordings.segment_size,
|
||||||
)
|
Recordings.motion,
|
||||||
.where(
|
Recordings.objects,
|
||||||
Recordings.camera == camera_name,
|
Recordings.motion_heatmap,
|
||||||
Recordings.end_time >= after,
|
Recordings.duration,
|
||||||
Recordings.start_time <= before,
|
Recordings.codec_name,
|
||||||
)
|
Recordings.width,
|
||||||
.order_by(Recordings.start_time)
|
Recordings.height,
|
||||||
.dicts()
|
Recordings.bitrate,
|
||||||
.iterator()
|
)
|
||||||
)
|
.where(
|
||||||
|
Recordings.camera == camera_name,
|
||||||
return JSONResponse(content=list(recordings))
|
Recordings.end_time >= after,
|
||||||
|
Recordings.start_time <= before,
|
||||||
|
)
|
||||||
@router.get(
|
)
|
||||||
"/recordings/unavailable",
|
|
||||||
response_model=list[dict],
|
if variant != "all":
|
||||||
dependencies=[Depends(allow_any_authenticated())],
|
query = query.where(Recordings.variant == variant)
|
||||||
)
|
|
||||||
async def no_recordings(
|
recordings = query.order_by(Recordings.start_time).dicts().iterator()
|
||||||
request: Request,
|
|
||||||
params: MediaRecordingsAvailabilityQueryParams = Depends(),
|
return JSONResponse(content=list(recordings))
|
||||||
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
|
||||||
):
|
|
||||||
"""Get time ranges with no recordings."""
|
@router.get(
|
||||||
cameras = params.cameras
|
"/recordings/unavailable",
|
||||||
if cameras != "all":
|
response_model=list[dict],
|
||||||
requested = set(unquote(cameras).split(","))
|
dependencies=[Depends(allow_any_authenticated())],
|
||||||
filtered = requested.intersection(allowed_cameras)
|
)
|
||||||
if not filtered:
|
async def no_recordings(
|
||||||
return JSONResponse(content=[])
|
request: Request,
|
||||||
cameras = ",".join(filtered)
|
params: MediaRecordingsAvailabilityQueryParams = Depends(),
|
||||||
else:
|
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
||||||
cameras = allowed_cameras
|
):
|
||||||
|
"""Get time ranges with no recordings."""
|
||||||
before = params.before or datetime.datetime.now().timestamp()
|
cameras = params.cameras
|
||||||
after = (
|
if cameras != "all":
|
||||||
params.after
|
requested = set(unquote(cameras).split(","))
|
||||||
or (datetime.datetime.now() - datetime.timedelta(hours=1)).timestamp()
|
filtered = requested.intersection(allowed_cameras)
|
||||||
)
|
if not filtered:
|
||||||
scale = params.scale
|
return JSONResponse(content=[])
|
||||||
|
cameras = ",".join(filtered)
|
||||||
clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)]
|
else:
|
||||||
if cameras != "all":
|
cameras = allowed_cameras
|
||||||
camera_list = cameras.split(",")
|
|
||||||
clauses.append((Recordings.camera << camera_list))
|
before = params.before or datetime.datetime.now().timestamp()
|
||||||
else:
|
after = (
|
||||||
camera_list = allowed_cameras
|
params.after
|
||||||
|
or (datetime.datetime.now() - datetime.timedelta(hours=1)).timestamp()
|
||||||
# Get recording start times
|
)
|
||||||
data: list[Recordings] = (
|
scale = params.scale
|
||||||
Recordings.select(Recordings.start_time, Recordings.end_time)
|
|
||||||
.where(reduce(operator.and_, clauses))
|
clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)]
|
||||||
.order_by(Recordings.start_time.asc())
|
if cameras != "all":
|
||||||
.dicts()
|
camera_list = cameras.split(",")
|
||||||
.iterator()
|
clauses.append((Recordings.camera << camera_list))
|
||||||
)
|
else:
|
||||||
|
camera_list = allowed_cameras
|
||||||
# Convert recordings to list of (start, end) tuples
|
|
||||||
recordings = [(r["start_time"], r["end_time"]) for r in data]
|
# Get recording start times
|
||||||
|
data: list[Recordings] = (
|
||||||
# Iterate through time segments and check if each has any recording
|
Recordings.select(Recordings.start_time, Recordings.end_time)
|
||||||
no_recording_segments = []
|
.where(reduce(operator.and_, clauses))
|
||||||
current = after
|
.order_by(Recordings.start_time.asc())
|
||||||
current_gap_start = None
|
.dicts()
|
||||||
|
.iterator()
|
||||||
while current < before:
|
)
|
||||||
segment_end = min(current + scale, before)
|
|
||||||
|
# Convert recordings to list of (start, end) tuples
|
||||||
# Check if this segment overlaps with any recording
|
recordings = [(r["start_time"], r["end_time"]) for r in data]
|
||||||
has_recording = any(
|
|
||||||
rec_start < segment_end and rec_end > current
|
# Iterate through time segments and check if each has any recording
|
||||||
for rec_start, rec_end in recordings
|
no_recording_segments = []
|
||||||
)
|
current = after
|
||||||
|
current_gap_start = None
|
||||||
if not has_recording:
|
|
||||||
# This segment has no recordings
|
while current < before:
|
||||||
if current_gap_start is None:
|
segment_end = min(current + scale, before)
|
||||||
current_gap_start = current # Start a new gap
|
|
||||||
else:
|
# Check if this segment overlaps with any recording
|
||||||
# This segment has recordings
|
has_recording = any(
|
||||||
if current_gap_start is not None:
|
rec_start < segment_end and rec_end > current
|
||||||
# End the current gap and append it
|
for rec_start, rec_end in recordings
|
||||||
no_recording_segments.append(
|
)
|
||||||
{"start_time": int(current_gap_start), "end_time": int(current)}
|
|
||||||
)
|
if not has_recording:
|
||||||
current_gap_start = None
|
# This segment has no recordings
|
||||||
|
if current_gap_start is None:
|
||||||
current = segment_end
|
current_gap_start = current # Start a new gap
|
||||||
|
else:
|
||||||
# Append the last gap if it exists
|
# This segment has recordings
|
||||||
if current_gap_start is not None:
|
if current_gap_start is not None:
|
||||||
no_recording_segments.append(
|
# End the current gap and append it
|
||||||
{"start_time": int(current_gap_start), "end_time": int(before)}
|
no_recording_segments.append(
|
||||||
)
|
{"start_time": int(current_gap_start), "end_time": int(current)}
|
||||||
|
)
|
||||||
return JSONResponse(content=no_recording_segments)
|
current_gap_start = None
|
||||||
|
|
||||||
|
current = segment_end
|
||||||
@router.delete(
|
|
||||||
"/recordings/start/{start}/end/{end}",
|
# Append the last gap if it exists
|
||||||
response_model=GenericResponse,
|
if current_gap_start is not None:
|
||||||
dependencies=[Depends(require_role(["admin"]))],
|
no_recording_segments.append(
|
||||||
summary="Delete recordings",
|
{"start_time": int(current_gap_start), "end_time": int(before)}
|
||||||
description="""Deletes recordings within the specified time range.
|
)
|
||||||
Recordings can be filtered by cameras and kept based on motion, objects, or audio attributes.
|
|
||||||
""",
|
return JSONResponse(content=no_recording_segments)
|
||||||
)
|
|
||||||
async def delete_recordings(
|
|
||||||
start: float = PathParam(..., description="Start timestamp (unix)"),
|
@router.delete(
|
||||||
end: float = PathParam(..., description="End timestamp (unix)"),
|
"/recordings/start/{start}/end/{end}",
|
||||||
params: RecordingsDeleteQueryParams = Depends(),
|
response_model=GenericResponse,
|
||||||
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
dependencies=[Depends(require_role(["admin"]))],
|
||||||
):
|
summary="Delete recordings",
|
||||||
"""Delete recordings in the specified time range."""
|
description="""Deletes recordings within the specified time range.
|
||||||
if start >= end:
|
Recordings can be filtered by cameras and kept based on motion, objects, or audio attributes.
|
||||||
return JSONResponse(
|
""",
|
||||||
content={
|
)
|
||||||
"success": False,
|
async def delete_recordings(
|
||||||
"message": "Start time must be less than end time.",
|
start: float = PathParam(..., description="Start timestamp (unix)"),
|
||||||
},
|
end: float = PathParam(..., description="End timestamp (unix)"),
|
||||||
status_code=400,
|
params: RecordingsDeleteQueryParams = Depends(),
|
||||||
)
|
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
||||||
|
):
|
||||||
cameras = params.cameras
|
"""Delete recordings in the specified time range."""
|
||||||
|
if start >= end:
|
||||||
if cameras != "all":
|
return JSONResponse(
|
||||||
requested = set(cameras.split(","))
|
content={
|
||||||
filtered = requested.intersection(allowed_cameras)
|
"success": False,
|
||||||
|
"message": "Start time must be less than end time.",
|
||||||
if not filtered:
|
},
|
||||||
return JSONResponse(
|
status_code=400,
|
||||||
content={
|
)
|
||||||
"success": False,
|
|
||||||
"message": "No valid cameras found in the request.",
|
cameras = params.cameras
|
||||||
},
|
|
||||||
status_code=400,
|
if cameras != "all":
|
||||||
)
|
requested = set(cameras.split(","))
|
||||||
|
filtered = requested.intersection(allowed_cameras)
|
||||||
camera_list = list(filtered)
|
|
||||||
else:
|
if not filtered:
|
||||||
camera_list = allowed_cameras
|
return JSONResponse(
|
||||||
|
content={
|
||||||
# Parse keep parameter
|
"success": False,
|
||||||
keep_set = set()
|
"message": "No valid cameras found in the request.",
|
||||||
|
},
|
||||||
if params.keep:
|
status_code=400,
|
||||||
keep_set = set(params.keep.split(","))
|
)
|
||||||
|
|
||||||
# Build query to find overlapping recordings
|
camera_list = list(filtered)
|
||||||
clauses = [
|
else:
|
||||||
(
|
camera_list = allowed_cameras
|
||||||
Recordings.start_time.between(start, end)
|
|
||||||
| Recordings.end_time.between(start, end)
|
# Parse keep parameter
|
||||||
| ((start > Recordings.start_time) & (end < Recordings.end_time))
|
keep_set = set()
|
||||||
),
|
|
||||||
(Recordings.camera << camera_list),
|
if params.keep:
|
||||||
]
|
keep_set = set(params.keep.split(","))
|
||||||
|
|
||||||
keep_clauses = []
|
# Build query to find overlapping recordings
|
||||||
|
clauses = [
|
||||||
if "motion" in keep_set:
|
(
|
||||||
keep_clauses.append(Recordings.motion.is_null(False) & (Recordings.motion > 0))
|
Recordings.start_time.between(start, end)
|
||||||
|
| Recordings.end_time.between(start, end)
|
||||||
if "object" in keep_set:
|
| ((start > Recordings.start_time) & (end < Recordings.end_time))
|
||||||
keep_clauses.append(
|
),
|
||||||
Recordings.objects.is_null(False) & (Recordings.objects > 0)
|
(Recordings.camera << camera_list),
|
||||||
)
|
]
|
||||||
|
|
||||||
if "audio" in keep_set:
|
keep_clauses = []
|
||||||
keep_clauses.append(Recordings.dBFS.is_null(False))
|
|
||||||
|
if "motion" in keep_set:
|
||||||
if keep_clauses:
|
keep_clauses.append(Recordings.motion.is_null(False) & (Recordings.motion > 0))
|
||||||
keep_condition = reduce(operator.or_, keep_clauses)
|
|
||||||
clauses.append(~keep_condition)
|
if "object" in keep_set:
|
||||||
|
keep_clauses.append(
|
||||||
recordings_to_delete = (
|
Recordings.objects.is_null(False) & (Recordings.objects > 0)
|
||||||
Recordings.select(Recordings.id, Recordings.path)
|
)
|
||||||
.where(reduce(operator.and_, clauses))
|
|
||||||
.dicts()
|
if "audio" in keep_set:
|
||||||
.iterator()
|
keep_clauses.append(Recordings.dBFS.is_null(False))
|
||||||
)
|
|
||||||
|
if keep_clauses:
|
||||||
recording_ids = []
|
keep_condition = reduce(operator.or_, keep_clauses)
|
||||||
deleted_count = 0
|
clauses.append(~keep_condition)
|
||||||
error_count = 0
|
|
||||||
|
recordings_to_delete = (
|
||||||
for recording in recordings_to_delete:
|
Recordings.select(Recordings.id, Recordings.path)
|
||||||
recording_ids.append(recording["id"])
|
.where(reduce(operator.and_, clauses))
|
||||||
|
.dicts()
|
||||||
try:
|
.iterator()
|
||||||
Path(recording["path"]).unlink(missing_ok=True)
|
)
|
||||||
deleted_count += 1
|
|
||||||
except Exception as e:
|
recording_ids = []
|
||||||
logger.error(f"Failed to delete recording file {recording['path']}: {e}")
|
deleted_count = 0
|
||||||
error_count += 1
|
error_count = 0
|
||||||
|
|
||||||
if recording_ids:
|
for recording in recordings_to_delete:
|
||||||
max_deletes = 100000
|
recording_ids.append(recording["id"])
|
||||||
recording_ids_list = list(recording_ids)
|
|
||||||
|
try:
|
||||||
for i in range(0, len(recording_ids_list), max_deletes):
|
Path(recording["path"]).unlink(missing_ok=True)
|
||||||
Recordings.delete().where(
|
deleted_count += 1
|
||||||
Recordings.id << recording_ids_list[i : i + max_deletes]
|
except Exception as e:
|
||||||
).execute()
|
logger.error(f"Failed to delete recording file {recording['path']}: {e}")
|
||||||
|
error_count += 1
|
||||||
message = f"Successfully deleted {deleted_count} recording(s)."
|
|
||||||
|
if recording_ids:
|
||||||
if error_count > 0:
|
max_deletes = 100000
|
||||||
message += f" {error_count} file deletion error(s) occurred."
|
recording_ids_list = list(recording_ids)
|
||||||
|
|
||||||
return JSONResponse(
|
for i in range(0, len(recording_ids_list), max_deletes):
|
||||||
content={"success": True, "message": message},
|
Recordings.delete().where(
|
||||||
status_code=200,
|
Recordings.id << recording_ids_list[i : i + max_deletes]
|
||||||
)
|
).execute()
|
||||||
|
|
||||||
|
message = f"Successfully deleted {deleted_count} recording(s)."
|
||||||
|
|
||||||
|
if error_count > 0:
|
||||||
|
message += f" {error_count} file deletion error(s) occurred."
|
||||||
|
|
||||||
|
return JSONResponse(
|
||||||
|
content={"success": True, "message": message},
|
||||||
|
status_code=200,
|
||||||
|
)
|
||||||
|
|||||||
@ -1,337 +1,346 @@
|
|||||||
import os
|
import os
|
||||||
from enum import Enum
|
from enum import Enum
|
||||||
from typing import Optional
|
from typing import Optional
|
||||||
|
|
||||||
from pydantic import Field, PrivateAttr, model_validator
|
from pydantic import Field, PrivateAttr, model_validator
|
||||||
|
|
||||||
from frigate.const import CACHE_DIR, CACHE_SEGMENT_FORMAT, REGEX_CAMERA_NAME
|
from frigate.const import CACHE_DIR, CACHE_SEGMENT_FORMAT, REGEX_CAMERA_NAME
|
||||||
from frigate.ffmpeg_presets import (
|
from frigate.ffmpeg_presets import (
|
||||||
parse_preset_hardware_acceleration_decode,
|
parse_preset_hardware_acceleration_decode,
|
||||||
parse_preset_hardware_acceleration_scale,
|
parse_preset_hardware_acceleration_scale,
|
||||||
parse_preset_input,
|
parse_preset_input,
|
||||||
parse_preset_output_record,
|
parse_preset_output_record,
|
||||||
)
|
)
|
||||||
from frigate.util.builtin import (
|
from frigate.util.builtin import (
|
||||||
escape_special_characters,
|
escape_special_characters,
|
||||||
generate_color_palette,
|
generate_color_palette,
|
||||||
get_ffmpeg_arg_list,
|
get_ffmpeg_arg_list,
|
||||||
)
|
)
|
||||||
|
|
||||||
from ..base import FrigateBaseModel
|
from ..base import FrigateBaseModel
|
||||||
from ..classification import (
|
from ..classification import (
|
||||||
CameraAudioTranscriptionConfig,
|
CameraAudioTranscriptionConfig,
|
||||||
CameraFaceRecognitionConfig,
|
CameraFaceRecognitionConfig,
|
||||||
CameraLicensePlateRecognitionConfig,
|
CameraLicensePlateRecognitionConfig,
|
||||||
CameraSemanticSearchConfig,
|
CameraSemanticSearchConfig,
|
||||||
)
|
)
|
||||||
from .audio import AudioConfig
|
from .audio import AudioConfig
|
||||||
from .birdseye import BirdseyeCameraConfig
|
from .birdseye import BirdseyeCameraConfig
|
||||||
from .detect import DetectConfig
|
from .detect import DetectConfig
|
||||||
from .ffmpeg import CameraFfmpegConfig, CameraInput
|
from .ffmpeg import CameraFfmpegConfig, CameraInput
|
||||||
from .live import CameraLiveConfig
|
from .live import CameraLiveConfig
|
||||||
from .motion import MotionConfig
|
from .motion import MotionConfig
|
||||||
from .mqtt import CameraMqttConfig
|
from .mqtt import CameraMqttConfig
|
||||||
from .notification import NotificationConfig
|
from .notification import NotificationConfig
|
||||||
from .objects import ObjectConfig
|
from .objects import ObjectConfig
|
||||||
from .onvif import OnvifConfig
|
from .onvif import OnvifConfig
|
||||||
from .record import RecordConfig
|
from .record import RecordConfig
|
||||||
from .review import ReviewConfig
|
from .review import ReviewConfig
|
||||||
from .snapshots import SnapshotsConfig
|
from .snapshots import SnapshotsConfig
|
||||||
from .timestamp import TimestampStyleConfig
|
from .timestamp import TimestampStyleConfig
|
||||||
from .ui import CameraUiConfig
|
from .ui import CameraUiConfig
|
||||||
from .zone import ZoneConfig
|
from .zone import ZoneConfig
|
||||||
|
|
||||||
__all__ = ["CameraConfig"]
|
__all__ = ["CameraConfig"]
|
||||||
|
|
||||||
|
|
||||||
class CameraTypeEnum(str, Enum):
|
class CameraTypeEnum(str, Enum):
|
||||||
generic = "generic"
|
generic = "generic"
|
||||||
lpr = "lpr"
|
lpr = "lpr"
|
||||||
|
|
||||||
|
|
||||||
class CameraConfig(FrigateBaseModel):
|
class CameraConfig(FrigateBaseModel):
|
||||||
name: Optional[str] = Field(
|
name: Optional[str] = Field(
|
||||||
None,
|
None,
|
||||||
title="Camera name",
|
title="Camera name",
|
||||||
description="Camera name is required",
|
description="Camera name is required",
|
||||||
pattern=REGEX_CAMERA_NAME,
|
pattern=REGEX_CAMERA_NAME,
|
||||||
)
|
)
|
||||||
|
|
||||||
friendly_name: Optional[str] = Field(
|
friendly_name: Optional[str] = Field(
|
||||||
None,
|
None,
|
||||||
title="Friendly name",
|
title="Friendly name",
|
||||||
description="Camera friendly name used in the Frigate UI",
|
description="Camera friendly name used in the Frigate UI",
|
||||||
)
|
)
|
||||||
|
|
||||||
@model_validator(mode="before")
|
@model_validator(mode="before")
|
||||||
@classmethod
|
@classmethod
|
||||||
def handle_friendly_name(cls, values):
|
def handle_friendly_name(cls, values):
|
||||||
if isinstance(values, dict) and "friendly_name" in values:
|
if isinstance(values, dict) and "friendly_name" in values:
|
||||||
pass
|
pass
|
||||||
return values
|
return values
|
||||||
|
|
||||||
enabled: bool = Field(default=True, title="Enabled", description="Enabled")
|
enabled: bool = Field(default=True, title="Enabled", description="Enabled")
|
||||||
|
|
||||||
# Options with global fallback
|
# Options with global fallback
|
||||||
audio: AudioConfig = Field(
|
audio: AudioConfig = Field(
|
||||||
default_factory=AudioConfig,
|
default_factory=AudioConfig,
|
||||||
title="Audio events",
|
title="Audio events",
|
||||||
description="Settings for audio-based event detection for this camera.",
|
description="Settings for audio-based event detection for this camera.",
|
||||||
)
|
)
|
||||||
audio_transcription: CameraAudioTranscriptionConfig = Field(
|
audio_transcription: CameraAudioTranscriptionConfig = Field(
|
||||||
default_factory=CameraAudioTranscriptionConfig,
|
default_factory=CameraAudioTranscriptionConfig,
|
||||||
title="Audio transcription",
|
title="Audio transcription",
|
||||||
description="Settings for live and speech audio transcription used for events and live captions.",
|
description="Settings for live and speech audio transcription used for events and live captions.",
|
||||||
)
|
)
|
||||||
birdseye: BirdseyeCameraConfig = Field(
|
birdseye: BirdseyeCameraConfig = Field(
|
||||||
default_factory=BirdseyeCameraConfig,
|
default_factory=BirdseyeCameraConfig,
|
||||||
title="Birdseye",
|
title="Birdseye",
|
||||||
description="Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.",
|
description="Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.",
|
||||||
)
|
)
|
||||||
detect: DetectConfig = Field(
|
detect: DetectConfig = Field(
|
||||||
default_factory=DetectConfig,
|
default_factory=DetectConfig,
|
||||||
title="Object Detection",
|
title="Object Detection",
|
||||||
description="Settings for the detection/detect role used to run object detection and initialize trackers.",
|
description="Settings for the detection/detect role used to run object detection and initialize trackers.",
|
||||||
)
|
)
|
||||||
face_recognition: CameraFaceRecognitionConfig = Field(
|
face_recognition: CameraFaceRecognitionConfig = Field(
|
||||||
default_factory=CameraFaceRecognitionConfig,
|
default_factory=CameraFaceRecognitionConfig,
|
||||||
title="Face recognition",
|
title="Face recognition",
|
||||||
description="Settings for face detection and recognition for this camera.",
|
description="Settings for face detection and recognition for this camera.",
|
||||||
)
|
)
|
||||||
ffmpeg: CameraFfmpegConfig = Field(
|
ffmpeg: CameraFfmpegConfig = Field(
|
||||||
title="FFmpeg",
|
title="FFmpeg",
|
||||||
description="FFmpeg settings including binary path, args, hwaccel options, and per-role output args.",
|
description="FFmpeg settings including binary path, args, hwaccel options, and per-role output args.",
|
||||||
)
|
)
|
||||||
live: CameraLiveConfig = Field(
|
live: CameraLiveConfig = Field(
|
||||||
default_factory=CameraLiveConfig,
|
default_factory=CameraLiveConfig,
|
||||||
title="Live playback",
|
title="Live playback",
|
||||||
description="Settings used by the Web UI to control live stream selection, resolution and quality.",
|
description="Settings used by the Web UI to control live stream selection, resolution and quality.",
|
||||||
)
|
)
|
||||||
lpr: CameraLicensePlateRecognitionConfig = Field(
|
lpr: CameraLicensePlateRecognitionConfig = Field(
|
||||||
default_factory=CameraLicensePlateRecognitionConfig,
|
default_factory=CameraLicensePlateRecognitionConfig,
|
||||||
title="License Plate Recognition",
|
title="License Plate Recognition",
|
||||||
description="License plate recognition settings including detection thresholds, formatting, and known plates.",
|
description="License plate recognition settings including detection thresholds, formatting, and known plates.",
|
||||||
)
|
)
|
||||||
motion: MotionConfig = Field(
|
motion: MotionConfig = Field(
|
||||||
None,
|
None,
|
||||||
title="Motion detection",
|
title="Motion detection",
|
||||||
description="Default motion detection settings for this camera.",
|
description="Default motion detection settings for this camera.",
|
||||||
)
|
)
|
||||||
objects: ObjectConfig = Field(
|
objects: ObjectConfig = Field(
|
||||||
default_factory=ObjectConfig,
|
default_factory=ObjectConfig,
|
||||||
title="Objects",
|
title="Objects",
|
||||||
description="Object tracking defaults including which labels to track and per-object filters.",
|
description="Object tracking defaults including which labels to track and per-object filters.",
|
||||||
)
|
)
|
||||||
record: RecordConfig = Field(
|
record: RecordConfig = Field(
|
||||||
default_factory=RecordConfig,
|
default_factory=RecordConfig,
|
||||||
title="Recording",
|
title="Recording",
|
||||||
description="Recording and retention settings for this camera.",
|
description="Recording and retention settings for this camera.",
|
||||||
)
|
)
|
||||||
review: ReviewConfig = Field(
|
review: ReviewConfig = Field(
|
||||||
default_factory=ReviewConfig,
|
default_factory=ReviewConfig,
|
||||||
title="Review",
|
title="Review",
|
||||||
description="Settings that control alerts, detections, and GenAI review summaries used by the UI and storage for this camera.",
|
description="Settings that control alerts, detections, and GenAI review summaries used by the UI and storage for this camera.",
|
||||||
)
|
)
|
||||||
semantic_search: CameraSemanticSearchConfig = Field(
|
semantic_search: CameraSemanticSearchConfig = Field(
|
||||||
default_factory=CameraSemanticSearchConfig,
|
default_factory=CameraSemanticSearchConfig,
|
||||||
title="Semantic Search",
|
title="Semantic Search",
|
||||||
description="Settings for semantic search which builds and queries object embeddings to find similar items.",
|
description="Settings for semantic search which builds and queries object embeddings to find similar items.",
|
||||||
)
|
)
|
||||||
snapshots: SnapshotsConfig = Field(
|
snapshots: SnapshotsConfig = Field(
|
||||||
default_factory=SnapshotsConfig,
|
default_factory=SnapshotsConfig,
|
||||||
title="Snapshots",
|
title="Snapshots",
|
||||||
description="Settings for saved JPEG snapshots of tracked objects for this camera.",
|
description="Settings for saved JPEG snapshots of tracked objects for this camera.",
|
||||||
)
|
)
|
||||||
timestamp_style: TimestampStyleConfig = Field(
|
timestamp_style: TimestampStyleConfig = Field(
|
||||||
default_factory=TimestampStyleConfig,
|
default_factory=TimestampStyleConfig,
|
||||||
title="Timestamp style",
|
title="Timestamp style",
|
||||||
description="Styling options for in-feed timestamps applied to recordings and snapshots.",
|
description="Styling options for in-feed timestamps applied to recordings and snapshots.",
|
||||||
)
|
)
|
||||||
|
|
||||||
# Options without global fallback
|
# Options without global fallback
|
||||||
best_image_timeout: int = Field(
|
best_image_timeout: int = Field(
|
||||||
default=60,
|
default=60,
|
||||||
title="Best image timeout",
|
title="Best image timeout",
|
||||||
description="How long to wait for the image with the highest confidence score.",
|
description="How long to wait for the image with the highest confidence score.",
|
||||||
)
|
)
|
||||||
mqtt: CameraMqttConfig = Field(
|
mqtt: CameraMqttConfig = Field(
|
||||||
default_factory=CameraMqttConfig,
|
default_factory=CameraMqttConfig,
|
||||||
title="MQTT",
|
title="MQTT",
|
||||||
description="MQTT image publishing settings.",
|
description="MQTT image publishing settings.",
|
||||||
)
|
)
|
||||||
notifications: NotificationConfig = Field(
|
notifications: NotificationConfig = Field(
|
||||||
default_factory=NotificationConfig,
|
default_factory=NotificationConfig,
|
||||||
title="Notifications",
|
title="Notifications",
|
||||||
description="Settings to enable and control notifications for this camera.",
|
description="Settings to enable and control notifications for this camera.",
|
||||||
)
|
)
|
||||||
onvif: OnvifConfig = Field(
|
onvif: OnvifConfig = Field(
|
||||||
default_factory=OnvifConfig,
|
default_factory=OnvifConfig,
|
||||||
title="ONVIF",
|
title="ONVIF",
|
||||||
description="ONVIF connection and PTZ autotracking settings for this camera.",
|
description="ONVIF connection and PTZ autotracking settings for this camera.",
|
||||||
)
|
)
|
||||||
type: CameraTypeEnum = Field(
|
type: CameraTypeEnum = Field(
|
||||||
default=CameraTypeEnum.generic,
|
default=CameraTypeEnum.generic,
|
||||||
title="Camera type",
|
title="Camera type",
|
||||||
description="Camera Type",
|
description="Camera Type",
|
||||||
)
|
)
|
||||||
ui: CameraUiConfig = Field(
|
ui: CameraUiConfig = Field(
|
||||||
default_factory=CameraUiConfig,
|
default_factory=CameraUiConfig,
|
||||||
title="Camera UI",
|
title="Camera UI",
|
||||||
description="Display ordering and visibility for this camera in the UI. Ordering affects the default dashboard. For more granular control, use camera groups.",
|
description="Display ordering and visibility for this camera in the UI. Ordering affects the default dashboard. For more granular control, use camera groups.",
|
||||||
)
|
)
|
||||||
webui_url: Optional[str] = Field(
|
webui_url: Optional[str] = Field(
|
||||||
None,
|
None,
|
||||||
title="Camera URL",
|
title="Camera URL",
|
||||||
description="URL to visit the camera directly from system page",
|
description="URL to visit the camera directly from system page",
|
||||||
)
|
)
|
||||||
zones: dict[str, ZoneConfig] = Field(
|
zones: dict[str, ZoneConfig] = Field(
|
||||||
default_factory=dict,
|
default_factory=dict,
|
||||||
title="Zones",
|
title="Zones",
|
||||||
description="Zones allow you to define a specific area of the frame so you can determine whether or not an object is within a particular area.",
|
description="Zones allow you to define a specific area of the frame so you can determine whether or not an object is within a particular area.",
|
||||||
)
|
)
|
||||||
enabled_in_config: Optional[bool] = Field(
|
enabled_in_config: Optional[bool] = Field(
|
||||||
default=None,
|
default=None,
|
||||||
title="Original camera state",
|
title="Original camera state",
|
||||||
description="Keep track of original state of camera.",
|
description="Keep track of original state of camera.",
|
||||||
)
|
)
|
||||||
|
|
||||||
_ffmpeg_cmds: list[dict[str, list[str]]] = PrivateAttr()
|
_ffmpeg_cmds: list[dict[str, list[str]]] = PrivateAttr()
|
||||||
|
|
||||||
def __init__(self, **config):
|
def __init__(self, **config):
|
||||||
# Set zone colors
|
# Set zone colors
|
||||||
if "zones" in config:
|
if "zones" in config:
|
||||||
colors = generate_color_palette(len(config["zones"]))
|
colors = generate_color_palette(len(config["zones"]))
|
||||||
|
|
||||||
config["zones"] = {
|
config["zones"] = {
|
||||||
name: {**z, "color": color}
|
name: {**z, "color": color}
|
||||||
for (name, z), color in zip(config["zones"].items(), colors)
|
for (name, z), color in zip(config["zones"].items(), colors)
|
||||||
}
|
}
|
||||||
|
|
||||||
# add roles to the input if there is only one
|
# add roles to the input if there is only one
|
||||||
if len(config["ffmpeg"]["inputs"]) == 1:
|
if len(config["ffmpeg"]["inputs"]) == 1:
|
||||||
has_audio = "audio" in config["ffmpeg"]["inputs"][0].get("roles", [])
|
has_audio = "audio" in config["ffmpeg"]["inputs"][0].get("roles", [])
|
||||||
|
|
||||||
config["ffmpeg"]["inputs"][0]["roles"] = [
|
config["ffmpeg"]["inputs"][0]["roles"] = [
|
||||||
"record",
|
"record",
|
||||||
"detect",
|
"detect",
|
||||||
]
|
]
|
||||||
|
|
||||||
if has_audio:
|
if has_audio:
|
||||||
config["ffmpeg"]["inputs"][0]["roles"].append("audio")
|
config["ffmpeg"]["inputs"][0]["roles"].append("audio")
|
||||||
|
|
||||||
super().__init__(**config)
|
super().__init__(**config)
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def frame_shape(self) -> tuple[int, int]:
|
def frame_shape(self) -> tuple[int, int]:
|
||||||
return self.detect.height, self.detect.width
|
return self.detect.height, self.detect.width
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def frame_shape_yuv(self) -> tuple[int, int]:
|
def frame_shape_yuv(self) -> tuple[int, int]:
|
||||||
return self.detect.height * 3 // 2, self.detect.width
|
return self.detect.height * 3 // 2, self.detect.width
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def ffmpeg_cmds(self) -> list[dict[str, list[str]]]:
|
def ffmpeg_cmds(self) -> list[dict[str, list[str]]]:
|
||||||
return self._ffmpeg_cmds
|
return self._ffmpeg_cmds
|
||||||
|
|
||||||
def get_formatted_name(self) -> str:
|
def get_formatted_name(self) -> str:
|
||||||
"""Return the friendly name if set, otherwise return a formatted version of the camera name."""
|
"""Return the friendly name if set, otherwise return a formatted version of the camera name."""
|
||||||
if self.friendly_name:
|
if self.friendly_name:
|
||||||
return self.friendly_name
|
return self.friendly_name
|
||||||
return self.name.replace("_", " ").title() if self.name else ""
|
return self.name.replace("_", " ").title() if self.name else ""
|
||||||
|
|
||||||
def create_ffmpeg_cmds(self):
|
def create_ffmpeg_cmds(self):
|
||||||
if "_ffmpeg_cmds" in self:
|
if "_ffmpeg_cmds" in self:
|
||||||
return
|
return
|
||||||
self._build_ffmpeg_cmds()
|
self._build_ffmpeg_cmds()
|
||||||
|
|
||||||
def recreate_ffmpeg_cmds(self):
|
def recreate_ffmpeg_cmds(self):
|
||||||
"""Force regeneration of ffmpeg commands from current config."""
|
"""Force regeneration of ffmpeg commands from current config."""
|
||||||
self._build_ffmpeg_cmds()
|
self._build_ffmpeg_cmds()
|
||||||
|
|
||||||
def _build_ffmpeg_cmds(self):
|
def _build_ffmpeg_cmds(self):
|
||||||
"""Build ffmpeg commands from the current ffmpeg config."""
|
"""Build ffmpeg commands from the current ffmpeg config."""
|
||||||
ffmpeg_cmds = []
|
ffmpeg_cmds = []
|
||||||
for ffmpeg_input in self.ffmpeg.inputs:
|
for ffmpeg_input in self.ffmpeg.inputs:
|
||||||
ffmpeg_cmd = self._get_ffmpeg_cmd(ffmpeg_input)
|
ffmpeg_cmd = self._get_ffmpeg_cmd(ffmpeg_input)
|
||||||
if ffmpeg_cmd is None:
|
if ffmpeg_cmd is None:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
ffmpeg_cmds.append({"roles": ffmpeg_input.roles, "cmd": ffmpeg_cmd})
|
ffmpeg_cmds.append(
|
||||||
self._ffmpeg_cmds = ffmpeg_cmds
|
{
|
||||||
|
"roles": ffmpeg_input.roles,
|
||||||
def _get_ffmpeg_cmd(self, ffmpeg_input: CameraInput):
|
"cmd": ffmpeg_cmd,
|
||||||
ffmpeg_output_args = []
|
"record_variant": ffmpeg_input.record_variant,
|
||||||
if "detect" in ffmpeg_input.roles:
|
}
|
||||||
detect_args = get_ffmpeg_arg_list(self.ffmpeg.output_args.detect)
|
)
|
||||||
scale_detect_args = parse_preset_hardware_acceleration_scale(
|
self._ffmpeg_cmds = ffmpeg_cmds
|
||||||
ffmpeg_input.hwaccel_args or self.ffmpeg.hwaccel_args,
|
|
||||||
detect_args,
|
def _get_ffmpeg_cmd(self, ffmpeg_input: CameraInput):
|
||||||
self.detect.fps,
|
ffmpeg_output_args = []
|
||||||
self.detect.width,
|
if "detect" in ffmpeg_input.roles:
|
||||||
self.detect.height,
|
detect_args = get_ffmpeg_arg_list(self.ffmpeg.output_args.detect)
|
||||||
)
|
scale_detect_args = parse_preset_hardware_acceleration_scale(
|
||||||
|
ffmpeg_input.hwaccel_args or self.ffmpeg.hwaccel_args,
|
||||||
ffmpeg_output_args = scale_detect_args + ffmpeg_output_args + ["pipe:"]
|
detect_args,
|
||||||
|
self.detect.fps,
|
||||||
if "record" in ffmpeg_input.roles and self.record.enabled:
|
self.detect.width,
|
||||||
record_args = get_ffmpeg_arg_list(
|
self.detect.height,
|
||||||
parse_preset_output_record(
|
)
|
||||||
self.ffmpeg.output_args.record,
|
|
||||||
self.ffmpeg.apple_compatibility,
|
ffmpeg_output_args = scale_detect_args + ffmpeg_output_args + ["pipe:"]
|
||||||
)
|
|
||||||
or self.ffmpeg.output_args.record
|
if "record" in ffmpeg_input.roles and self.record.enabled:
|
||||||
)
|
record_args = get_ffmpeg_arg_list(
|
||||||
|
parse_preset_output_record(
|
||||||
ffmpeg_output_args = (
|
self.ffmpeg.output_args.record,
|
||||||
record_args
|
self.ffmpeg.apple_compatibility,
|
||||||
+ [f"{os.path.join(CACHE_DIR, self.name)}@{CACHE_SEGMENT_FORMAT}.mp4"]
|
)
|
||||||
+ ffmpeg_output_args
|
or self.ffmpeg.output_args.record
|
||||||
)
|
)
|
||||||
|
record_variant = ffmpeg_input.record_variant or "main"
|
||||||
# if there aren't any outputs enabled for this input
|
cache_prefix = os.path.join(CACHE_DIR, self.name)
|
||||||
if len(ffmpeg_output_args) == 0:
|
cache_path = f"{cache_prefix}@{record_variant}@{CACHE_SEGMENT_FORMAT}.mp4"
|
||||||
return None
|
|
||||||
|
ffmpeg_output_args = (
|
||||||
global_args = get_ffmpeg_arg_list(
|
record_args
|
||||||
ffmpeg_input.global_args or self.ffmpeg.global_args
|
+ [cache_path]
|
||||||
)
|
+ ffmpeg_output_args
|
||||||
|
)
|
||||||
camera_arg = (
|
|
||||||
self.ffmpeg.hwaccel_args if self.ffmpeg.hwaccel_args != "auto" else None
|
# if there aren't any outputs enabled for this input
|
||||||
)
|
if len(ffmpeg_output_args) == 0:
|
||||||
hwaccel_args = get_ffmpeg_arg_list(
|
return None
|
||||||
parse_preset_hardware_acceleration_decode(
|
|
||||||
ffmpeg_input.hwaccel_args,
|
global_args = get_ffmpeg_arg_list(
|
||||||
self.detect.fps,
|
ffmpeg_input.global_args or self.ffmpeg.global_args
|
||||||
self.detect.width,
|
)
|
||||||
self.detect.height,
|
|
||||||
self.ffmpeg.gpu,
|
camera_arg = (
|
||||||
)
|
self.ffmpeg.hwaccel_args if self.ffmpeg.hwaccel_args != "auto" else None
|
||||||
or ffmpeg_input.hwaccel_args
|
)
|
||||||
or parse_preset_hardware_acceleration_decode(
|
hwaccel_args = get_ffmpeg_arg_list(
|
||||||
camera_arg,
|
parse_preset_hardware_acceleration_decode(
|
||||||
self.detect.fps,
|
ffmpeg_input.hwaccel_args,
|
||||||
self.detect.width,
|
self.detect.fps,
|
||||||
self.detect.height,
|
self.detect.width,
|
||||||
self.ffmpeg.gpu,
|
self.detect.height,
|
||||||
)
|
self.ffmpeg.gpu,
|
||||||
or camera_arg
|
)
|
||||||
or []
|
or ffmpeg_input.hwaccel_args
|
||||||
)
|
or parse_preset_hardware_acceleration_decode(
|
||||||
input_args = get_ffmpeg_arg_list(
|
camera_arg,
|
||||||
parse_preset_input(ffmpeg_input.input_args, self.detect.fps)
|
self.detect.fps,
|
||||||
or ffmpeg_input.input_args
|
self.detect.width,
|
||||||
or parse_preset_input(self.ffmpeg.input_args, self.detect.fps)
|
self.detect.height,
|
||||||
or self.ffmpeg.input_args
|
self.ffmpeg.gpu,
|
||||||
)
|
)
|
||||||
|
or camera_arg
|
||||||
cmd = (
|
or []
|
||||||
[self.ffmpeg.ffmpeg_path]
|
)
|
||||||
+ global_args
|
input_args = get_ffmpeg_arg_list(
|
||||||
+ (hwaccel_args if "detect" in ffmpeg_input.roles else [])
|
parse_preset_input(ffmpeg_input.input_args, self.detect.fps)
|
||||||
+ input_args
|
or ffmpeg_input.input_args
|
||||||
+ ["-i", escape_special_characters(ffmpeg_input.path)]
|
or parse_preset_input(self.ffmpeg.input_args, self.detect.fps)
|
||||||
+ ffmpeg_output_args
|
or self.ffmpeg.input_args
|
||||||
)
|
)
|
||||||
|
|
||||||
return [part for part in cmd if part != ""]
|
cmd = (
|
||||||
|
[self.ffmpeg.ffmpeg_path]
|
||||||
|
+ global_args
|
||||||
|
+ (hwaccel_args if "detect" in ffmpeg_input.roles else [])
|
||||||
|
+ input_args
|
||||||
|
+ ["-i", escape_special_characters(ffmpeg_input.path)]
|
||||||
|
+ ffmpeg_output_args
|
||||||
|
)
|
||||||
|
|
||||||
|
return [part for part in cmd if part != ""]
|
||||||
|
|||||||
@ -1,159 +1,192 @@
|
|||||||
from enum import Enum
|
from enum import Enum
|
||||||
from typing import Union
|
from typing import Union
|
||||||
|
|
||||||
from pydantic import Field, field_validator
|
from pydantic import Field, field_validator, model_validator
|
||||||
|
|
||||||
from frigate.const import DEFAULT_FFMPEG_VERSION, INCLUDED_FFMPEG_VERSIONS
|
from frigate.const import DEFAULT_FFMPEG_VERSION, INCLUDED_FFMPEG_VERSIONS, REGEX_CAMERA_NAME
|
||||||
|
|
||||||
from ..base import FrigateBaseModel
|
from ..base import FrigateBaseModel
|
||||||
from ..env import EnvString
|
from ..env import EnvString
|
||||||
|
|
||||||
__all__ = [
|
__all__ = [
|
||||||
"CameraFfmpegConfig",
|
"CameraFfmpegConfig",
|
||||||
"CameraInput",
|
"CameraInput",
|
||||||
"CameraRoleEnum",
|
"CameraRoleEnum",
|
||||||
"FfmpegConfig",
|
"FfmpegConfig",
|
||||||
"FfmpegOutputArgsConfig",
|
"FfmpegOutputArgsConfig",
|
||||||
]
|
]
|
||||||
|
|
||||||
# Note: Setting threads to less than 2 caused several issues with recording segments
|
# Note: Setting threads to less than 2 caused several issues with recording segments
|
||||||
# https://github.com/blakeblackshear/frigate/issues/5659
|
# https://github.com/blakeblackshear/frigate/issues/5659
|
||||||
FFMPEG_GLOBAL_ARGS_DEFAULT = ["-hide_banner", "-loglevel", "warning", "-threads", "2"]
|
FFMPEG_GLOBAL_ARGS_DEFAULT = ["-hide_banner", "-loglevel", "warning", "-threads", "2"]
|
||||||
FFMPEG_INPUT_ARGS_DEFAULT = "preset-rtsp-generic"
|
FFMPEG_INPUT_ARGS_DEFAULT = "preset-rtsp-generic"
|
||||||
|
|
||||||
RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = "preset-record-generic-audio-aac"
|
RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = "preset-record-generic-audio-aac"
|
||||||
DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = [
|
DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = [
|
||||||
"-threads",
|
"-threads",
|
||||||
"2",
|
"2",
|
||||||
"-f",
|
"-f",
|
||||||
"rawvideo",
|
"rawvideo",
|
||||||
"-pix_fmt",
|
"-pix_fmt",
|
||||||
"yuv420p",
|
"yuv420p",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
class FfmpegOutputArgsConfig(FrigateBaseModel):
|
class FfmpegOutputArgsConfig(FrigateBaseModel):
|
||||||
detect: Union[str, list[str]] = Field(
|
detect: Union[str, list[str]] = Field(
|
||||||
default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT,
|
default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT,
|
||||||
title="Detect output arguments",
|
title="Detect output arguments",
|
||||||
description="Default output arguments for detect role streams.",
|
description="Default output arguments for detect role streams.",
|
||||||
)
|
)
|
||||||
record: Union[str, list[str]] = Field(
|
record: Union[str, list[str]] = Field(
|
||||||
default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT,
|
default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT,
|
||||||
title="Record output arguments",
|
title="Record output arguments",
|
||||||
description="Default output arguments for record role streams.",
|
description="Default output arguments for record role streams.",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class FfmpegConfig(FrigateBaseModel):
|
class FfmpegConfig(FrigateBaseModel):
|
||||||
path: str = Field(
|
path: str = Field(
|
||||||
default="default",
|
default="default",
|
||||||
title="FFmpeg path",
|
title="FFmpeg path",
|
||||||
description='Path to the FFmpeg binary to use or a version alias ("5.0" or "7.0").',
|
description='Path to the FFmpeg binary to use or a version alias ("5.0" or "7.0").',
|
||||||
)
|
)
|
||||||
global_args: Union[str, list[str]] = Field(
|
global_args: Union[str, list[str]] = Field(
|
||||||
default=FFMPEG_GLOBAL_ARGS_DEFAULT,
|
default=FFMPEG_GLOBAL_ARGS_DEFAULT,
|
||||||
title="FFmpeg global arguments",
|
title="FFmpeg global arguments",
|
||||||
description="Global arguments passed to FFmpeg processes.",
|
description="Global arguments passed to FFmpeg processes.",
|
||||||
)
|
)
|
||||||
hwaccel_args: Union[str, list[str]] = Field(
|
hwaccel_args: Union[str, list[str]] = Field(
|
||||||
default="auto",
|
default="auto",
|
||||||
title="Hardware acceleration arguments",
|
title="Hardware acceleration arguments",
|
||||||
description="Hardware acceleration arguments for FFmpeg. Provider-specific presets are recommended.",
|
description="Hardware acceleration arguments for FFmpeg. Provider-specific presets are recommended.",
|
||||||
)
|
)
|
||||||
input_args: Union[str, list[str]] = Field(
|
input_args: Union[str, list[str]] = Field(
|
||||||
default=FFMPEG_INPUT_ARGS_DEFAULT,
|
default=FFMPEG_INPUT_ARGS_DEFAULT,
|
||||||
title="Input arguments",
|
title="Input arguments",
|
||||||
description="Input arguments applied to FFmpeg input streams.",
|
description="Input arguments applied to FFmpeg input streams.",
|
||||||
)
|
)
|
||||||
output_args: FfmpegOutputArgsConfig = Field(
|
output_args: FfmpegOutputArgsConfig = Field(
|
||||||
default_factory=FfmpegOutputArgsConfig,
|
default_factory=FfmpegOutputArgsConfig,
|
||||||
title="Output arguments",
|
title="Output arguments",
|
||||||
description="Default output arguments used for different FFmpeg roles such as detect and record.",
|
description="Default output arguments used for different FFmpeg roles such as detect and record.",
|
||||||
)
|
)
|
||||||
retry_interval: float = Field(
|
retry_interval: float = Field(
|
||||||
default=10.0,
|
default=10.0,
|
||||||
title="FFmpeg retry time",
|
title="FFmpeg retry time",
|
||||||
description="Seconds to wait before attempting to reconnect a camera stream after failure. Default is 10.",
|
description="Seconds to wait before attempting to reconnect a camera stream after failure. Default is 10.",
|
||||||
gt=0.0,
|
gt=0.0,
|
||||||
)
|
)
|
||||||
apple_compatibility: bool = Field(
|
apple_compatibility: bool = Field(
|
||||||
default=False,
|
default=False,
|
||||||
title="Apple compatibility",
|
title="Apple compatibility",
|
||||||
description="Enable HEVC tagging for better Apple player compatibility when recording H.265.",
|
description="Enable HEVC tagging for better Apple player compatibility when recording H.265.",
|
||||||
)
|
)
|
||||||
gpu: int = Field(
|
gpu: int = Field(
|
||||||
default=0,
|
default=0,
|
||||||
title="GPU index",
|
title="GPU index",
|
||||||
description="Default GPU index used for hardware acceleration if available.",
|
description="Default GPU index used for hardware acceleration if available.",
|
||||||
)
|
)
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def ffmpeg_path(self) -> str:
|
def ffmpeg_path(self) -> str:
|
||||||
if self.path == "default":
|
if self.path == "default":
|
||||||
return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg"
|
return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg"
|
||||||
elif self.path in INCLUDED_FFMPEG_VERSIONS:
|
elif self.path in INCLUDED_FFMPEG_VERSIONS:
|
||||||
return f"/usr/lib/ffmpeg/{self.path}/bin/ffmpeg"
|
return f"/usr/lib/ffmpeg/{self.path}/bin/ffmpeg"
|
||||||
else:
|
else:
|
||||||
return f"{self.path}/bin/ffmpeg"
|
return f"{self.path}/bin/ffmpeg"
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def ffprobe_path(self) -> str:
|
def ffprobe_path(self) -> str:
|
||||||
if self.path == "default":
|
if self.path == "default":
|
||||||
return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffprobe"
|
return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffprobe"
|
||||||
elif self.path in INCLUDED_FFMPEG_VERSIONS:
|
elif self.path in INCLUDED_FFMPEG_VERSIONS:
|
||||||
return f"/usr/lib/ffmpeg/{self.path}/bin/ffprobe"
|
return f"/usr/lib/ffmpeg/{self.path}/bin/ffprobe"
|
||||||
else:
|
else:
|
||||||
return f"{self.path}/bin/ffprobe"
|
return f"{self.path}/bin/ffprobe"
|
||||||
|
|
||||||
|
|
||||||
class CameraRoleEnum(str, Enum):
|
class CameraRoleEnum(str, Enum):
|
||||||
audio = "audio"
|
audio = "audio"
|
||||||
record = "record"
|
record = "record"
|
||||||
detect = "detect"
|
detect = "detect"
|
||||||
|
|
||||||
|
|
||||||
class CameraInput(FrigateBaseModel):
|
class CameraInput(FrigateBaseModel):
|
||||||
path: EnvString = Field(
|
path: EnvString = Field(
|
||||||
title="Input path",
|
title="Input path",
|
||||||
description="Camera input stream URL or path.",
|
description="Camera input stream URL or path.",
|
||||||
)
|
)
|
||||||
roles: list[CameraRoleEnum] = Field(
|
roles: list[CameraRoleEnum] = Field(
|
||||||
title="Input roles",
|
title="Input roles",
|
||||||
description="Roles for this input stream.",
|
description="Roles for this input stream.",
|
||||||
)
|
)
|
||||||
global_args: Union[str, list[str]] = Field(
|
global_args: Union[str, list[str]] = Field(
|
||||||
default_factory=list,
|
default_factory=list,
|
||||||
title="FFmpeg global arguments",
|
title="FFmpeg global arguments",
|
||||||
description="FFmpeg global arguments for this input stream.",
|
description="FFmpeg global arguments for this input stream.",
|
||||||
)
|
)
|
||||||
hwaccel_args: Union[str, list[str]] = Field(
|
hwaccel_args: Union[str, list[str]] = Field(
|
||||||
default_factory=list,
|
default_factory=list,
|
||||||
title="Hardware acceleration arguments",
|
title="Hardware acceleration arguments",
|
||||||
description="Hardware acceleration arguments for this input stream.",
|
description="Hardware acceleration arguments for this input stream.",
|
||||||
)
|
)
|
||||||
input_args: Union[str, list[str]] = Field(
|
input_args: Union[str, list[str]] = Field(
|
||||||
default_factory=list,
|
default_factory=list,
|
||||||
title="Input arguments",
|
title="Input arguments",
|
||||||
description="Input arguments specific to this stream.",
|
description="Input arguments specific to this stream.",
|
||||||
)
|
)
|
||||||
|
record_variant: str | None = Field(
|
||||||
|
default=None,
|
||||||
class CameraFfmpegConfig(FfmpegConfig):
|
title="Recording variant",
|
||||||
inputs: list[CameraInput] = Field(
|
description="Optional recording variant label for record role inputs such as main or sub.",
|
||||||
title="Camera inputs",
|
pattern=REGEX_CAMERA_NAME,
|
||||||
description="List of input stream definitions (paths and roles) for this camera.",
|
)
|
||||||
)
|
|
||||||
|
@model_validator(mode="after")
|
||||||
@field_validator("inputs")
|
def validate_record_variant(self):
|
||||||
@classmethod
|
if CameraRoleEnum.record in self.roles:
|
||||||
def validate_roles(cls, v):
|
if not self.record_variant:
|
||||||
roles = [role for input in v for role in input.roles]
|
self.record_variant = "main"
|
||||||
|
else:
|
||||||
if len(roles) != len(set(roles)):
|
self.record_variant = None
|
||||||
raise ValueError("Each input role may only be used once.")
|
|
||||||
|
return self
|
||||||
if "detect" not in roles:
|
|
||||||
raise ValueError("The detect role is required.")
|
|
||||||
|
class CameraFfmpegConfig(FfmpegConfig):
|
||||||
return v
|
inputs: list[CameraInput] = Field(
|
||||||
|
title="Camera inputs",
|
||||||
|
description="List of input stream definitions (paths and roles) for this camera.",
|
||||||
|
)
|
||||||
|
|
||||||
|
@field_validator("inputs")
|
||||||
|
@classmethod
|
||||||
|
def validate_roles(cls, v):
|
||||||
|
detect_inputs = 0
|
||||||
|
audio_inputs = 0
|
||||||
|
record_variants: set[str] = set()
|
||||||
|
|
||||||
|
for camera_input in v:
|
||||||
|
if CameraRoleEnum.detect in camera_input.roles:
|
||||||
|
detect_inputs += 1
|
||||||
|
|
||||||
|
if CameraRoleEnum.audio in camera_input.roles:
|
||||||
|
audio_inputs += 1
|
||||||
|
|
||||||
|
if CameraRoleEnum.record in camera_input.roles:
|
||||||
|
record_variant = camera_input.record_variant or "main"
|
||||||
|
if record_variant in record_variants:
|
||||||
|
raise ValueError(
|
||||||
|
f"Record variant '{record_variant}' may only be used once."
|
||||||
|
)
|
||||||
|
record_variants.add(record_variant)
|
||||||
|
|
||||||
|
if detect_inputs != 1:
|
||||||
|
raise ValueError("The detect role is required.")
|
||||||
|
|
||||||
|
if audio_inputs > 1:
|
||||||
|
raise ValueError("Each input role may only be used once.")
|
||||||
|
|
||||||
|
return v
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
21
frigate/config/transcode_proxy.py
Normal file
21
frigate/config/transcode_proxy.py
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
"""Configuration for the VOD transcode proxy (optional playback transcoding)."""
|
||||||
|
from pydantic import Field
|
||||||
|
|
||||||
|
from .base import FrigateBaseModel
|
||||||
|
|
||||||
|
__all__ = ["TranscodeProxyConfig"]
|
||||||
|
|
||||||
|
|
||||||
|
class TranscodeProxyConfig(FrigateBaseModel):
|
||||||
|
"""Settings for the optional transcode proxy used for recording playback."""
|
||||||
|
|
||||||
|
enabled: bool = Field(
|
||||||
|
default=False,
|
||||||
|
title="Transcode proxy enabled",
|
||||||
|
description="When enabled, the UI uses the transcode proxy URL for VOD playback so recordings are transcoded to H.264 on the fly (e.g. for HEVC compatibility or lower bitrate).",
|
||||||
|
)
|
||||||
|
vod_proxy_url: str = Field(
|
||||||
|
default="",
|
||||||
|
title="VOD proxy base URL",
|
||||||
|
description="Base URL for the transcode proxy (e.g. http://host:5010). When enabled, recording playback requests go to this URL + /vod/... Leave empty if the proxy is mounted at the same host (e.g. /vod-transcoded/ under the same origin).",
|
||||||
|
)
|
||||||
@ -1,179 +1,184 @@
|
|||||||
from peewee import (
|
from peewee import (
|
||||||
BlobField,
|
BlobField,
|
||||||
BooleanField,
|
BooleanField,
|
||||||
CharField,
|
CharField,
|
||||||
CompositeKey,
|
CompositeKey,
|
||||||
DateTimeField,
|
DateTimeField,
|
||||||
FloatField,
|
FloatField,
|
||||||
ForeignKeyField,
|
ForeignKeyField,
|
||||||
IntegerField,
|
IntegerField,
|
||||||
Model,
|
Model,
|
||||||
TextField,
|
TextField,
|
||||||
)
|
)
|
||||||
from playhouse.sqlite_ext import JSONField
|
from playhouse.sqlite_ext import JSONField
|
||||||
|
|
||||||
|
|
||||||
class Event(Model):
|
class Event(Model):
|
||||||
id = CharField(null=False, primary_key=True, max_length=30)
|
id = CharField(null=False, primary_key=True, max_length=30)
|
||||||
label = CharField(index=True, max_length=20)
|
label = CharField(index=True, max_length=20)
|
||||||
sub_label = CharField(max_length=100, null=True)
|
sub_label = CharField(max_length=100, null=True)
|
||||||
camera = CharField(index=True, max_length=20)
|
camera = CharField(index=True, max_length=20)
|
||||||
start_time = DateTimeField()
|
start_time = DateTimeField()
|
||||||
end_time = DateTimeField()
|
end_time = DateTimeField()
|
||||||
top_score = (
|
top_score = (
|
||||||
FloatField()
|
FloatField()
|
||||||
) # TODO remove when columns can be dropped without rebuilding table
|
) # TODO remove when columns can be dropped without rebuilding table
|
||||||
score = (
|
score = (
|
||||||
FloatField()
|
FloatField()
|
||||||
) # TODO remove when columns can be dropped without rebuilding table
|
) # TODO remove when columns can be dropped without rebuilding table
|
||||||
false_positive = BooleanField()
|
false_positive = BooleanField()
|
||||||
zones = JSONField()
|
zones = JSONField()
|
||||||
thumbnail = TextField()
|
thumbnail = TextField()
|
||||||
has_clip = BooleanField(default=True)
|
has_clip = BooleanField(default=True)
|
||||||
has_snapshot = BooleanField(default=True)
|
has_snapshot = BooleanField(default=True)
|
||||||
region = (
|
region = (
|
||||||
JSONField()
|
JSONField()
|
||||||
) # TODO remove when columns can be dropped without rebuilding table
|
) # TODO remove when columns can be dropped without rebuilding table
|
||||||
box = (
|
box = (
|
||||||
JSONField()
|
JSONField()
|
||||||
) # TODO remove when columns can be dropped without rebuilding table
|
) # TODO remove when columns can be dropped without rebuilding table
|
||||||
area = (
|
area = (
|
||||||
IntegerField()
|
IntegerField()
|
||||||
) # TODO remove when columns can be dropped without rebuilding table
|
) # TODO remove when columns can be dropped without rebuilding table
|
||||||
retain_indefinitely = BooleanField(default=False)
|
retain_indefinitely = BooleanField(default=False)
|
||||||
ratio = FloatField(
|
ratio = FloatField(
|
||||||
default=1.0
|
default=1.0
|
||||||
) # TODO remove when columns can be dropped without rebuilding table
|
) # TODO remove when columns can be dropped without rebuilding table
|
||||||
plus_id = CharField(max_length=30)
|
plus_id = CharField(max_length=30)
|
||||||
model_hash = CharField(max_length=32)
|
model_hash = CharField(max_length=32)
|
||||||
detector_type = CharField(max_length=32)
|
detector_type = CharField(max_length=32)
|
||||||
model_type = CharField(max_length=32)
|
model_type = CharField(max_length=32)
|
||||||
data = JSONField() # ex: tracked object box, region, etc.
|
data = JSONField() # ex: tracked object box, region, etc.
|
||||||
|
|
||||||
|
|
||||||
class Timeline(Model):
|
class Timeline(Model):
|
||||||
timestamp = DateTimeField()
|
timestamp = DateTimeField()
|
||||||
camera = CharField(index=True, max_length=20)
|
camera = CharField(index=True, max_length=20)
|
||||||
source = CharField(index=True, max_length=20) # ex: tracked object, audio, external
|
source = CharField(index=True, max_length=20) # ex: tracked object, audio, external
|
||||||
source_id = CharField(index=True, max_length=30)
|
source_id = CharField(index=True, max_length=30)
|
||||||
class_type = CharField(max_length=50) # ex: entered_zone, audio_heard
|
class_type = CharField(max_length=50) # ex: entered_zone, audio_heard
|
||||||
data = JSONField() # ex: tracked object id, region, box, etc.
|
data = JSONField() # ex: tracked object id, region, box, etc.
|
||||||
|
|
||||||
|
|
||||||
class Regions(Model):
|
class Regions(Model):
|
||||||
camera = CharField(null=False, primary_key=True, max_length=20)
|
camera = CharField(null=False, primary_key=True, max_length=20)
|
||||||
grid = JSONField() # json blob of grid
|
grid = JSONField() # json blob of grid
|
||||||
last_update = DateTimeField()
|
last_update = DateTimeField()
|
||||||
|
|
||||||
|
|
||||||
class Recordings(Model):
|
class Recordings(Model):
|
||||||
id = CharField(null=False, primary_key=True, max_length=30)
|
id = CharField(null=False, primary_key=True, max_length=30)
|
||||||
camera = CharField(index=True, max_length=20)
|
camera = CharField(index=True, max_length=20)
|
||||||
path = CharField(unique=True)
|
path = CharField(unique=True)
|
||||||
start_time = DateTimeField()
|
variant = CharField(default="main", index=True, max_length=20)
|
||||||
end_time = DateTimeField()
|
start_time = DateTimeField()
|
||||||
duration = FloatField()
|
end_time = DateTimeField()
|
||||||
motion = IntegerField(null=True)
|
duration = FloatField()
|
||||||
objects = IntegerField(null=True)
|
motion = IntegerField(null=True)
|
||||||
dBFS = IntegerField(null=True)
|
objects = IntegerField(null=True)
|
||||||
segment_size = FloatField(default=0) # this should be stored as MB
|
dBFS = IntegerField(null=True)
|
||||||
regions = IntegerField(null=True)
|
segment_size = FloatField(default=0) # this should be stored as MB
|
||||||
motion_heatmap = JSONField(null=True) # 16x16 grid, 256 values (0-255)
|
codec_name = CharField(null=True, max_length=32)
|
||||||
|
width = IntegerField(null=True)
|
||||||
|
height = IntegerField(null=True)
|
||||||
class ExportCase(Model):
|
bitrate = IntegerField(null=True)
|
||||||
id = CharField(null=False, primary_key=True, max_length=30)
|
regions = IntegerField(null=True)
|
||||||
name = CharField(index=True, max_length=100)
|
motion_heatmap = JSONField(null=True) # 16x16 grid, 256 values (0-255)
|
||||||
description = TextField(null=True)
|
|
||||||
created_at = DateTimeField()
|
|
||||||
updated_at = DateTimeField()
|
class ExportCase(Model):
|
||||||
|
id = CharField(null=False, primary_key=True, max_length=30)
|
||||||
|
name = CharField(index=True, max_length=100)
|
||||||
class Export(Model):
|
description = TextField(null=True)
|
||||||
id = CharField(null=False, primary_key=True, max_length=30)
|
created_at = DateTimeField()
|
||||||
camera = CharField(index=True, max_length=20)
|
updated_at = DateTimeField()
|
||||||
name = CharField(index=True, max_length=100)
|
|
||||||
date = DateTimeField()
|
|
||||||
video_path = CharField(unique=True)
|
class Export(Model):
|
||||||
thumb_path = CharField(unique=True)
|
id = CharField(null=False, primary_key=True, max_length=30)
|
||||||
in_progress = BooleanField()
|
camera = CharField(index=True, max_length=20)
|
||||||
export_case = ForeignKeyField(
|
name = CharField(index=True, max_length=100)
|
||||||
ExportCase,
|
date = DateTimeField()
|
||||||
null=True,
|
video_path = CharField(unique=True)
|
||||||
backref="exports",
|
thumb_path = CharField(unique=True)
|
||||||
column_name="export_case_id",
|
in_progress = BooleanField()
|
||||||
)
|
export_case = ForeignKeyField(
|
||||||
|
ExportCase,
|
||||||
|
null=True,
|
||||||
class ReviewSegment(Model):
|
backref="exports",
|
||||||
id = CharField(null=False, primary_key=True, max_length=30)
|
column_name="export_case_id",
|
||||||
camera = CharField(index=True, max_length=20)
|
)
|
||||||
start_time = DateTimeField()
|
|
||||||
end_time = DateTimeField()
|
|
||||||
severity = CharField(max_length=30) # alert, detection
|
class ReviewSegment(Model):
|
||||||
thumb_path = CharField(unique=True)
|
id = CharField(null=False, primary_key=True, max_length=30)
|
||||||
data = JSONField() # additional data about detection like list of labels, zone, areas of significant motion
|
camera = CharField(index=True, max_length=20)
|
||||||
|
start_time = DateTimeField()
|
||||||
|
end_time = DateTimeField()
|
||||||
class UserReviewStatus(Model):
|
severity = CharField(max_length=30) # alert, detection
|
||||||
user_id = CharField(max_length=30)
|
thumb_path = CharField(unique=True)
|
||||||
review_segment = ForeignKeyField(ReviewSegment, backref="user_reviews")
|
data = JSONField() # additional data about detection like list of labels, zone, areas of significant motion
|
||||||
has_been_reviewed = BooleanField(default=False)
|
|
||||||
|
|
||||||
class Meta:
|
class UserReviewStatus(Model):
|
||||||
indexes = ((("user_id", "review_segment"), True),)
|
user_id = CharField(max_length=30)
|
||||||
|
review_segment = ForeignKeyField(ReviewSegment, backref="user_reviews")
|
||||||
|
has_been_reviewed = BooleanField(default=False)
|
||||||
class Previews(Model):
|
|
||||||
id = CharField(null=False, primary_key=True, max_length=30)
|
class Meta:
|
||||||
camera = CharField(index=True, max_length=20)
|
indexes = ((("user_id", "review_segment"), True),)
|
||||||
path = CharField(unique=True)
|
|
||||||
start_time = DateTimeField()
|
|
||||||
end_time = DateTimeField()
|
class Previews(Model):
|
||||||
duration = FloatField()
|
id = CharField(null=False, primary_key=True, max_length=30)
|
||||||
|
camera = CharField(index=True, max_length=20)
|
||||||
|
path = CharField(unique=True)
|
||||||
# Used for temporary table in record/cleanup.py
|
start_time = DateTimeField()
|
||||||
class RecordingsToDelete(Model):
|
end_time = DateTimeField()
|
||||||
id = CharField(null=False, primary_key=False, max_length=30)
|
duration = FloatField()
|
||||||
|
|
||||||
class Meta:
|
|
||||||
temporary = True
|
# Used for temporary table in record/cleanup.py
|
||||||
|
class RecordingsToDelete(Model):
|
||||||
|
id = CharField(null=False, primary_key=False, max_length=30)
|
||||||
class User(Model):
|
|
||||||
username = CharField(null=False, primary_key=True, max_length=30)
|
class Meta:
|
||||||
role = CharField(
|
temporary = True
|
||||||
max_length=20,
|
|
||||||
default="admin",
|
|
||||||
)
|
class User(Model):
|
||||||
password_hash = CharField(null=False, max_length=120)
|
username = CharField(null=False, primary_key=True, max_length=30)
|
||||||
password_changed_at = DateTimeField(null=True)
|
role = CharField(
|
||||||
notification_tokens = JSONField()
|
max_length=20,
|
||||||
|
default="admin",
|
||||||
@classmethod
|
)
|
||||||
def get_allowed_cameras(
|
password_hash = CharField(null=False, max_length=120)
|
||||||
cls, role: str, roles_dict: dict[str, list[str]], all_camera_names: set[str]
|
password_changed_at = DateTimeField(null=True)
|
||||||
) -> list[str]:
|
notification_tokens = JSONField()
|
||||||
if role not in roles_dict:
|
|
||||||
return [] # Invalid role grants no access
|
@classmethod
|
||||||
allowed = roles_dict[role]
|
def get_allowed_cameras(
|
||||||
if not allowed: # Empty list means all cameras
|
cls, role: str, roles_dict: dict[str, list[str]], all_camera_names: set[str]
|
||||||
return list(all_camera_names)
|
) -> list[str]:
|
||||||
|
if role not in roles_dict:
|
||||||
return [cam for cam in allowed if cam in all_camera_names]
|
return [] # Invalid role grants no access
|
||||||
|
allowed = roles_dict[role]
|
||||||
|
if not allowed: # Empty list means all cameras
|
||||||
class Trigger(Model):
|
return list(all_camera_names)
|
||||||
camera = CharField(max_length=20)
|
|
||||||
name = CharField()
|
return [cam for cam in allowed if cam in all_camera_names]
|
||||||
type = CharField(max_length=10)
|
|
||||||
data = TextField()
|
|
||||||
threshold = FloatField()
|
class Trigger(Model):
|
||||||
model = CharField(max_length=30)
|
camera = CharField(max_length=20)
|
||||||
embedding = BlobField()
|
name = CharField()
|
||||||
triggering_event_id = CharField(max_length=30)
|
type = CharField(max_length=10)
|
||||||
last_triggered = DateTimeField()
|
data = TextField()
|
||||||
|
threshold = FloatField()
|
||||||
class Meta:
|
model = CharField(max_length=30)
|
||||||
primary_key = CompositeKey("camera", "name")
|
embedding = BlobField()
|
||||||
|
triggering_event_id = CharField(max_length=30)
|
||||||
|
last_triggered = DateTimeField()
|
||||||
|
|
||||||
|
class Meta:
|
||||||
|
primary_key = CompositeKey("camera", "name")
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@ -1,405 +1,458 @@
|
|||||||
"""Unit tests for recordings/media API endpoints."""
|
"""Unit tests for recordings/media API endpoints."""
|
||||||
|
|
||||||
from datetime import datetime, timezone
|
from datetime import datetime, timezone
|
||||||
|
|
||||||
import pytz
|
import pytz
|
||||||
from fastapi import Request
|
from fastapi import Request
|
||||||
|
|
||||||
from frigate.api.auth import get_allowed_cameras_for_filter, get_current_user
|
from frigate.api.auth import get_allowed_cameras_for_filter, get_current_user
|
||||||
from frigate.models import Recordings
|
from frigate.models import Recordings
|
||||||
from frigate.test.http_api.base_http_test import AuthTestClient, BaseTestHttp
|
from frigate.test.http_api.base_http_test import AuthTestClient, BaseTestHttp
|
||||||
|
|
||||||
|
|
||||||
class TestHttpMedia(BaseTestHttp):
|
class TestHttpMedia(BaseTestHttp):
|
||||||
"""Test media API endpoints, particularly recordings with DST handling."""
|
"""Test media API endpoints, particularly recordings with DST handling."""
|
||||||
|
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
"""Set up test fixtures."""
|
"""Set up test fixtures."""
|
||||||
super().setUp([Recordings])
|
super().setUp([Recordings])
|
||||||
self.app = super().create_app()
|
self.app = super().create_app()
|
||||||
|
|
||||||
# Mock get_current_user for all tests
|
# Mock get_current_user for all tests
|
||||||
async def mock_get_current_user(request: Request):
|
async def mock_get_current_user(request: Request):
|
||||||
username = request.headers.get("remote-user")
|
username = request.headers.get("remote-user")
|
||||||
role = request.headers.get("remote-role")
|
role = request.headers.get("remote-role")
|
||||||
if not username or not role:
|
if not username or not role:
|
||||||
from fastapi.responses import JSONResponse
|
from fastapi.responses import JSONResponse
|
||||||
|
|
||||||
return JSONResponse(
|
return JSONResponse(
|
||||||
content={"message": "No authorization headers."}, status_code=401
|
content={"message": "No authorization headers."}, status_code=401
|
||||||
)
|
)
|
||||||
return {"username": username, "role": role}
|
return {"username": username, "role": role}
|
||||||
|
|
||||||
self.app.dependency_overrides[get_current_user] = mock_get_current_user
|
self.app.dependency_overrides[get_current_user] = mock_get_current_user
|
||||||
|
|
||||||
async def mock_get_allowed_cameras_for_filter(request: Request):
|
async def mock_get_allowed_cameras_for_filter(request: Request):
|
||||||
return ["front_door"]
|
return ["front_door"]
|
||||||
|
|
||||||
self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
|
self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
|
||||||
mock_get_allowed_cameras_for_filter
|
mock_get_allowed_cameras_for_filter
|
||||||
)
|
)
|
||||||
|
|
||||||
def tearDown(self):
|
def tearDown(self):
|
||||||
"""Clean up after tests."""
|
"""Clean up after tests."""
|
||||||
self.app.dependency_overrides.clear()
|
self.app.dependency_overrides.clear()
|
||||||
super().tearDown()
|
super().tearDown()
|
||||||
|
|
||||||
def test_recordings_summary_across_dst_spring_forward(self):
|
def test_camera_recordings_variant_filter(self):
|
||||||
"""
|
start_ts = datetime(2024, 3, 9, 12, 0, 0, tzinfo=timezone.utc).timestamp()
|
||||||
Test recordings summary across spring DST transition (spring forward).
|
end_ts = start_ts + 3600
|
||||||
|
|
||||||
In 2024, DST in America/New_York transitions on March 10, 2024 at 2:00 AM
|
with AuthTestClient(self.app) as client:
|
||||||
Clocks spring forward from 2:00 AM to 3:00 AM (EST to EDT)
|
Recordings.insert(
|
||||||
"""
|
id="recording_main",
|
||||||
tz = pytz.timezone("America/New_York")
|
path="/media/recordings/front/main.mp4",
|
||||||
|
camera="front_door",
|
||||||
# March 9, 2024 at 12:00 PM EST (before DST)
|
variant="main",
|
||||||
march_9_noon = tz.localize(datetime(2024, 3, 9, 12, 0, 0)).timestamp()
|
start_time=start_ts,
|
||||||
|
end_time=end_ts,
|
||||||
# March 10, 2024 at 12:00 PM EDT (after DST transition)
|
duration=3600,
|
||||||
march_10_noon = tz.localize(datetime(2024, 3, 10, 12, 0, 0)).timestamp()
|
motion=100,
|
||||||
|
objects=5,
|
||||||
# March 11, 2024 at 12:00 PM EDT (after DST)
|
codec_name="h264",
|
||||||
march_11_noon = tz.localize(datetime(2024, 3, 11, 12, 0, 0)).timestamp()
|
width=1920,
|
||||||
|
height=1080,
|
||||||
with AuthTestClient(self.app) as client:
|
bitrate=4_000_000,
|
||||||
# Insert recordings for each day
|
).execute()
|
||||||
Recordings.insert(
|
Recordings.insert(
|
||||||
id="recording_march_9",
|
id="recording_sub",
|
||||||
path="/media/recordings/march_9.mp4",
|
path="/media/recordings/front/sub.mp4",
|
||||||
camera="front_door",
|
camera="front_door",
|
||||||
start_time=march_9_noon,
|
variant="sub",
|
||||||
end_time=march_9_noon + 3600, # 1 hour recording
|
start_time=start_ts,
|
||||||
duration=3600,
|
end_time=end_ts,
|
||||||
motion=100,
|
duration=3600,
|
||||||
objects=5,
|
motion=100,
|
||||||
).execute()
|
objects=5,
|
||||||
|
codec_name="h264",
|
||||||
Recordings.insert(
|
width=640,
|
||||||
id="recording_march_10",
|
height=360,
|
||||||
path="/media/recordings/march_10.mp4",
|
bitrate=512_000,
|
||||||
camera="front_door",
|
).execute()
|
||||||
start_time=march_10_noon,
|
|
||||||
end_time=march_10_noon + 3600,
|
default_response = client.get(
|
||||||
duration=3600,
|
"/front_door/recordings",
|
||||||
motion=150,
|
params={"after": start_ts, "before": end_ts},
|
||||||
objects=8,
|
)
|
||||||
).execute()
|
assert default_response.status_code == 200
|
||||||
|
default_recordings = default_response.json()
|
||||||
Recordings.insert(
|
assert len(default_recordings) == 1
|
||||||
id="recording_march_11",
|
assert default_recordings[0]["variant"] == "main"
|
||||||
path="/media/recordings/march_11.mp4",
|
|
||||||
camera="front_door",
|
all_response = client.get(
|
||||||
start_time=march_11_noon,
|
"/front_door/recordings",
|
||||||
end_time=march_11_noon + 3600,
|
params={"after": start_ts, "before": end_ts, "variant": "all"},
|
||||||
duration=3600,
|
)
|
||||||
motion=200,
|
assert all_response.status_code == 200
|
||||||
objects=10,
|
variants = {recording["variant"] for recording in all_response.json()}
|
||||||
).execute()
|
assert variants == {"main", "sub"}
|
||||||
|
|
||||||
# Test recordings summary with America/New_York timezone
|
def test_recordings_summary_across_dst_spring_forward(self):
|
||||||
response = client.get(
|
"""
|
||||||
"/recordings/summary",
|
Test recordings summary across spring DST transition (spring forward).
|
||||||
params={"timezone": "America/New_York", "cameras": "all"},
|
|
||||||
)
|
In 2024, DST in America/New_York transitions on March 10, 2024 at 2:00 AM
|
||||||
|
Clocks spring forward from 2:00 AM to 3:00 AM (EST to EDT)
|
||||||
assert response.status_code == 200
|
"""
|
||||||
summary = response.json()
|
tz = pytz.timezone("America/New_York")
|
||||||
|
|
||||||
# Verify we get exactly 3 days
|
# March 9, 2024 at 12:00 PM EST (before DST)
|
||||||
assert len(summary) == 3, f"Expected 3 days, got {len(summary)}"
|
march_9_noon = tz.localize(datetime(2024, 3, 9, 12, 0, 0)).timestamp()
|
||||||
|
|
||||||
# Verify the correct dates are returned (API returns dict with True values)
|
# March 10, 2024 at 12:00 PM EDT (after DST transition)
|
||||||
assert "2024-03-09" in summary, f"Expected 2024-03-09 in {summary}"
|
march_10_noon = tz.localize(datetime(2024, 3, 10, 12, 0, 0)).timestamp()
|
||||||
assert "2024-03-10" in summary, f"Expected 2024-03-10 in {summary}"
|
|
||||||
assert "2024-03-11" in summary, f"Expected 2024-03-11 in {summary}"
|
# March 11, 2024 at 12:00 PM EDT (after DST)
|
||||||
assert summary["2024-03-09"] is True
|
march_11_noon = tz.localize(datetime(2024, 3, 11, 12, 0, 0)).timestamp()
|
||||||
assert summary["2024-03-10"] is True
|
|
||||||
assert summary["2024-03-11"] is True
|
with AuthTestClient(self.app) as client:
|
||||||
|
# Insert recordings for each day
|
||||||
def test_recordings_summary_across_dst_fall_back(self):
|
Recordings.insert(
|
||||||
"""
|
id="recording_march_9",
|
||||||
Test recordings summary across fall DST transition (fall back).
|
path="/media/recordings/march_9.mp4",
|
||||||
|
camera="front_door",
|
||||||
In 2024, DST in America/New_York transitions on November 3, 2024 at 2:00 AM
|
start_time=march_9_noon,
|
||||||
Clocks fall back from 2:00 AM to 1:00 AM (EDT to EST)
|
end_time=march_9_noon + 3600, # 1 hour recording
|
||||||
"""
|
duration=3600,
|
||||||
tz = pytz.timezone("America/New_York")
|
motion=100,
|
||||||
|
objects=5,
|
||||||
# November 2, 2024 at 12:00 PM EDT (before DST transition)
|
).execute()
|
||||||
nov_2_noon = tz.localize(datetime(2024, 11, 2, 12, 0, 0)).timestamp()
|
|
||||||
|
Recordings.insert(
|
||||||
# November 3, 2024 at 12:00 PM EST (after DST transition)
|
id="recording_march_10",
|
||||||
# Need to specify is_dst=False to get the time after fall back
|
path="/media/recordings/march_10.mp4",
|
||||||
nov_3_noon = tz.localize(
|
camera="front_door",
|
||||||
datetime(2024, 11, 3, 12, 0, 0), is_dst=False
|
start_time=march_10_noon,
|
||||||
).timestamp()
|
end_time=march_10_noon + 3600,
|
||||||
|
duration=3600,
|
||||||
# November 4, 2024 at 12:00 PM EST (after DST)
|
motion=150,
|
||||||
nov_4_noon = tz.localize(datetime(2024, 11, 4, 12, 0, 0)).timestamp()
|
objects=8,
|
||||||
|
).execute()
|
||||||
with AuthTestClient(self.app) as client:
|
|
||||||
# Insert recordings for each day
|
Recordings.insert(
|
||||||
Recordings.insert(
|
id="recording_march_11",
|
||||||
id="recording_nov_2",
|
path="/media/recordings/march_11.mp4",
|
||||||
path="/media/recordings/nov_2.mp4",
|
camera="front_door",
|
||||||
camera="front_door",
|
start_time=march_11_noon,
|
||||||
start_time=nov_2_noon,
|
end_time=march_11_noon + 3600,
|
||||||
end_time=nov_2_noon + 3600,
|
duration=3600,
|
||||||
duration=3600,
|
motion=200,
|
||||||
motion=100,
|
objects=10,
|
||||||
objects=5,
|
).execute()
|
||||||
).execute()
|
|
||||||
|
# Test recordings summary with America/New_York timezone
|
||||||
Recordings.insert(
|
response = client.get(
|
||||||
id="recording_nov_3",
|
"/recordings/summary",
|
||||||
path="/media/recordings/nov_3.mp4",
|
params={"timezone": "America/New_York", "cameras": "all"},
|
||||||
camera="front_door",
|
)
|
||||||
start_time=nov_3_noon,
|
|
||||||
end_time=nov_3_noon + 3600,
|
assert response.status_code == 200
|
||||||
duration=3600,
|
summary = response.json()
|
||||||
motion=150,
|
|
||||||
objects=8,
|
# Verify we get exactly 3 days
|
||||||
).execute()
|
assert len(summary) == 3, f"Expected 3 days, got {len(summary)}"
|
||||||
|
|
||||||
Recordings.insert(
|
# Verify the correct dates are returned (API returns dict with True values)
|
||||||
id="recording_nov_4",
|
assert "2024-03-09" in summary, f"Expected 2024-03-09 in {summary}"
|
||||||
path="/media/recordings/nov_4.mp4",
|
assert "2024-03-10" in summary, f"Expected 2024-03-10 in {summary}"
|
||||||
camera="front_door",
|
assert "2024-03-11" in summary, f"Expected 2024-03-11 in {summary}"
|
||||||
start_time=nov_4_noon,
|
assert summary["2024-03-09"] is True
|
||||||
end_time=nov_4_noon + 3600,
|
assert summary["2024-03-10"] is True
|
||||||
duration=3600,
|
assert summary["2024-03-11"] is True
|
||||||
motion=200,
|
|
||||||
objects=10,
|
def test_recordings_summary_across_dst_fall_back(self):
|
||||||
).execute()
|
"""
|
||||||
|
Test recordings summary across fall DST transition (fall back).
|
||||||
# Test recordings summary with America/New_York timezone
|
|
||||||
response = client.get(
|
In 2024, DST in America/New_York transitions on November 3, 2024 at 2:00 AM
|
||||||
"/recordings/summary",
|
Clocks fall back from 2:00 AM to 1:00 AM (EDT to EST)
|
||||||
params={"timezone": "America/New_York", "cameras": "all"},
|
"""
|
||||||
)
|
tz = pytz.timezone("America/New_York")
|
||||||
|
|
||||||
assert response.status_code == 200
|
# November 2, 2024 at 12:00 PM EDT (before DST transition)
|
||||||
summary = response.json()
|
nov_2_noon = tz.localize(datetime(2024, 11, 2, 12, 0, 0)).timestamp()
|
||||||
|
|
||||||
# Verify we get exactly 3 days
|
# November 3, 2024 at 12:00 PM EST (after DST transition)
|
||||||
assert len(summary) == 3, f"Expected 3 days, got {len(summary)}"
|
# Need to specify is_dst=False to get the time after fall back
|
||||||
|
nov_3_noon = tz.localize(
|
||||||
# Verify the correct dates are returned (API returns dict with True values)
|
datetime(2024, 11, 3, 12, 0, 0), is_dst=False
|
||||||
assert "2024-11-02" in summary, f"Expected 2024-11-02 in {summary}"
|
).timestamp()
|
||||||
assert "2024-11-03" in summary, f"Expected 2024-11-03 in {summary}"
|
|
||||||
assert "2024-11-04" in summary, f"Expected 2024-11-04 in {summary}"
|
# November 4, 2024 at 12:00 PM EST (after DST)
|
||||||
assert summary["2024-11-02"] is True
|
nov_4_noon = tz.localize(datetime(2024, 11, 4, 12, 0, 0)).timestamp()
|
||||||
assert summary["2024-11-03"] is True
|
|
||||||
assert summary["2024-11-04"] is True
|
with AuthTestClient(self.app) as client:
|
||||||
|
# Insert recordings for each day
|
||||||
def test_recordings_summary_multiple_cameras_across_dst(self):
|
Recordings.insert(
|
||||||
"""
|
id="recording_nov_2",
|
||||||
Test recordings summary with multiple cameras across DST boundary.
|
path="/media/recordings/nov_2.mp4",
|
||||||
"""
|
camera="front_door",
|
||||||
tz = pytz.timezone("America/New_York")
|
start_time=nov_2_noon,
|
||||||
|
end_time=nov_2_noon + 3600,
|
||||||
# March 9, 2024 at 10:00 AM EST (before DST)
|
duration=3600,
|
||||||
march_9_morning = tz.localize(datetime(2024, 3, 9, 10, 0, 0)).timestamp()
|
motion=100,
|
||||||
|
objects=5,
|
||||||
# March 10, 2024 at 3:00 PM EDT (after DST transition)
|
).execute()
|
||||||
march_10_afternoon = tz.localize(datetime(2024, 3, 10, 15, 0, 0)).timestamp()
|
|
||||||
|
Recordings.insert(
|
||||||
with AuthTestClient(self.app) as client:
|
id="recording_nov_3",
|
||||||
# Override allowed cameras for this test to include both
|
path="/media/recordings/nov_3.mp4",
|
||||||
async def mock_get_allowed_cameras_for_filter(_request: Request):
|
camera="front_door",
|
||||||
return ["front_door", "back_door"]
|
start_time=nov_3_noon,
|
||||||
|
end_time=nov_3_noon + 3600,
|
||||||
self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
|
duration=3600,
|
||||||
mock_get_allowed_cameras_for_filter
|
motion=150,
|
||||||
)
|
objects=8,
|
||||||
|
).execute()
|
||||||
# Insert recordings for front_door on March 9
|
|
||||||
Recordings.insert(
|
Recordings.insert(
|
||||||
id="front_march_9",
|
id="recording_nov_4",
|
||||||
path="/media/recordings/front_march_9.mp4",
|
path="/media/recordings/nov_4.mp4",
|
||||||
camera="front_door",
|
camera="front_door",
|
||||||
start_time=march_9_morning,
|
start_time=nov_4_noon,
|
||||||
end_time=march_9_morning + 3600,
|
end_time=nov_4_noon + 3600,
|
||||||
duration=3600,
|
duration=3600,
|
||||||
motion=100,
|
motion=200,
|
||||||
objects=5,
|
objects=10,
|
||||||
).execute()
|
).execute()
|
||||||
|
|
||||||
# Insert recordings for back_door on March 10
|
# Test recordings summary with America/New_York timezone
|
||||||
Recordings.insert(
|
response = client.get(
|
||||||
id="back_march_10",
|
"/recordings/summary",
|
||||||
path="/media/recordings/back_march_10.mp4",
|
params={"timezone": "America/New_York", "cameras": "all"},
|
||||||
camera="back_door",
|
)
|
||||||
start_time=march_10_afternoon,
|
|
||||||
end_time=march_10_afternoon + 3600,
|
assert response.status_code == 200
|
||||||
duration=3600,
|
summary = response.json()
|
||||||
motion=150,
|
|
||||||
objects=8,
|
# Verify we get exactly 3 days
|
||||||
).execute()
|
assert len(summary) == 3, f"Expected 3 days, got {len(summary)}"
|
||||||
|
|
||||||
# Test with all cameras
|
# Verify the correct dates are returned (API returns dict with True values)
|
||||||
response = client.get(
|
assert "2024-11-02" in summary, f"Expected 2024-11-02 in {summary}"
|
||||||
"/recordings/summary",
|
assert "2024-11-03" in summary, f"Expected 2024-11-03 in {summary}"
|
||||||
params={"timezone": "America/New_York", "cameras": "all"},
|
assert "2024-11-04" in summary, f"Expected 2024-11-04 in {summary}"
|
||||||
)
|
assert summary["2024-11-02"] is True
|
||||||
|
assert summary["2024-11-03"] is True
|
||||||
assert response.status_code == 200
|
assert summary["2024-11-04"] is True
|
||||||
summary = response.json()
|
|
||||||
|
def test_recordings_summary_multiple_cameras_across_dst(self):
|
||||||
# Verify we get both days
|
"""
|
||||||
assert len(summary) == 2, f"Expected 2 days, got {len(summary)}"
|
Test recordings summary with multiple cameras across DST boundary.
|
||||||
assert "2024-03-09" in summary
|
"""
|
||||||
assert "2024-03-10" in summary
|
tz = pytz.timezone("America/New_York")
|
||||||
assert summary["2024-03-09"] is True
|
|
||||||
assert summary["2024-03-10"] is True
|
# March 9, 2024 at 10:00 AM EST (before DST)
|
||||||
|
march_9_morning = tz.localize(datetime(2024, 3, 9, 10, 0, 0)).timestamp()
|
||||||
# Reset dependency override back to default single camera for other tests
|
|
||||||
async def reset_allowed_cameras(_request: Request):
|
# March 10, 2024 at 3:00 PM EDT (after DST transition)
|
||||||
return ["front_door"]
|
march_10_afternoon = tz.localize(datetime(2024, 3, 10, 15, 0, 0)).timestamp()
|
||||||
|
|
||||||
self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
|
with AuthTestClient(self.app) as client:
|
||||||
reset_allowed_cameras
|
# Override allowed cameras for this test to include both
|
||||||
)
|
async def mock_get_allowed_cameras_for_filter(_request: Request):
|
||||||
|
return ["front_door", "back_door"]
|
||||||
def test_recordings_summary_at_dst_transition_time(self):
|
|
||||||
"""
|
self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
|
||||||
Test recordings that span the exact DST transition time.
|
mock_get_allowed_cameras_for_filter
|
||||||
"""
|
)
|
||||||
tz = pytz.timezone("America/New_York")
|
|
||||||
|
# Insert recordings for front_door on March 9
|
||||||
# March 10, 2024 at 1:00 AM EST (1 hour before DST transition)
|
Recordings.insert(
|
||||||
# At 2:00 AM, clocks jump to 3:00 AM
|
id="front_march_9",
|
||||||
before_transition = tz.localize(datetime(2024, 3, 10, 1, 0, 0)).timestamp()
|
path="/media/recordings/front_march_9.mp4",
|
||||||
|
camera="front_door",
|
||||||
# Recording that spans the transition (1:00 AM to 3:30 AM EDT)
|
start_time=march_9_morning,
|
||||||
# This is 1.5 hours of actual time but spans the "missing" hour
|
end_time=march_9_morning + 3600,
|
||||||
after_transition = tz.localize(datetime(2024, 3, 10, 3, 30, 0)).timestamp()
|
duration=3600,
|
||||||
|
motion=100,
|
||||||
with AuthTestClient(self.app) as client:
|
objects=5,
|
||||||
Recordings.insert(
|
).execute()
|
||||||
id="recording_during_transition",
|
|
||||||
path="/media/recordings/transition.mp4",
|
# Insert recordings for back_door on March 10
|
||||||
camera="front_door",
|
Recordings.insert(
|
||||||
start_time=before_transition,
|
id="back_march_10",
|
||||||
end_time=after_transition,
|
path="/media/recordings/back_march_10.mp4",
|
||||||
duration=after_transition - before_transition,
|
camera="back_door",
|
||||||
motion=100,
|
start_time=march_10_afternoon,
|
||||||
objects=5,
|
end_time=march_10_afternoon + 3600,
|
||||||
).execute()
|
duration=3600,
|
||||||
|
motion=150,
|
||||||
response = client.get(
|
objects=8,
|
||||||
"/recordings/summary",
|
).execute()
|
||||||
params={"timezone": "America/New_York", "cameras": "all"},
|
|
||||||
)
|
# Test with all cameras
|
||||||
|
response = client.get(
|
||||||
assert response.status_code == 200
|
"/recordings/summary",
|
||||||
summary = response.json()
|
params={"timezone": "America/New_York", "cameras": "all"},
|
||||||
|
)
|
||||||
# The recording should appear on March 10
|
|
||||||
assert len(summary) == 1
|
assert response.status_code == 200
|
||||||
assert "2024-03-10" in summary
|
summary = response.json()
|
||||||
assert summary["2024-03-10"] is True
|
|
||||||
|
# Verify we get both days
|
||||||
def test_recordings_summary_utc_timezone(self):
|
assert len(summary) == 2, f"Expected 2 days, got {len(summary)}"
|
||||||
"""
|
assert "2024-03-09" in summary
|
||||||
Test recordings summary with UTC timezone (no DST).
|
assert "2024-03-10" in summary
|
||||||
"""
|
assert summary["2024-03-09"] is True
|
||||||
# Use UTC timestamps directly
|
assert summary["2024-03-10"] is True
|
||||||
march_9_utc = datetime(2024, 3, 9, 17, 0, 0, tzinfo=timezone.utc).timestamp()
|
|
||||||
march_10_utc = datetime(2024, 3, 10, 17, 0, 0, tzinfo=timezone.utc).timestamp()
|
# Reset dependency override back to default single camera for other tests
|
||||||
|
async def reset_allowed_cameras(_request: Request):
|
||||||
with AuthTestClient(self.app) as client:
|
return ["front_door"]
|
||||||
Recordings.insert(
|
|
||||||
id="recording_march_9_utc",
|
self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
|
||||||
path="/media/recordings/march_9_utc.mp4",
|
reset_allowed_cameras
|
||||||
camera="front_door",
|
)
|
||||||
start_time=march_9_utc,
|
|
||||||
end_time=march_9_utc + 3600,
|
def test_recordings_summary_at_dst_transition_time(self):
|
||||||
duration=3600,
|
"""
|
||||||
motion=100,
|
Test recordings that span the exact DST transition time.
|
||||||
objects=5,
|
"""
|
||||||
).execute()
|
tz = pytz.timezone("America/New_York")
|
||||||
|
|
||||||
Recordings.insert(
|
# March 10, 2024 at 1:00 AM EST (1 hour before DST transition)
|
||||||
id="recording_march_10_utc",
|
# At 2:00 AM, clocks jump to 3:00 AM
|
||||||
path="/media/recordings/march_10_utc.mp4",
|
before_transition = tz.localize(datetime(2024, 3, 10, 1, 0, 0)).timestamp()
|
||||||
camera="front_door",
|
|
||||||
start_time=march_10_utc,
|
# Recording that spans the transition (1:00 AM to 3:30 AM EDT)
|
||||||
end_time=march_10_utc + 3600,
|
# This is 1.5 hours of actual time but spans the "missing" hour
|
||||||
duration=3600,
|
after_transition = tz.localize(datetime(2024, 3, 10, 3, 30, 0)).timestamp()
|
||||||
motion=150,
|
|
||||||
objects=8,
|
with AuthTestClient(self.app) as client:
|
||||||
).execute()
|
Recordings.insert(
|
||||||
|
id="recording_during_transition",
|
||||||
# Test with UTC timezone
|
path="/media/recordings/transition.mp4",
|
||||||
response = client.get(
|
camera="front_door",
|
||||||
"/recordings/summary", params={"timezone": "utc", "cameras": "all"}
|
start_time=before_transition,
|
||||||
)
|
end_time=after_transition,
|
||||||
|
duration=after_transition - before_transition,
|
||||||
assert response.status_code == 200
|
motion=100,
|
||||||
summary = response.json()
|
objects=5,
|
||||||
|
).execute()
|
||||||
# Verify we get both days
|
|
||||||
assert len(summary) == 2
|
response = client.get(
|
||||||
assert "2024-03-09" in summary
|
"/recordings/summary",
|
||||||
assert "2024-03-10" in summary
|
params={"timezone": "America/New_York", "cameras": "all"},
|
||||||
assert summary["2024-03-09"] is True
|
)
|
||||||
assert summary["2024-03-10"] is True
|
|
||||||
|
assert response.status_code == 200
|
||||||
def test_recordings_summary_no_recordings(self):
|
summary = response.json()
|
||||||
"""
|
|
||||||
Test recordings summary when no recordings exist.
|
# The recording should appear on March 10
|
||||||
"""
|
assert len(summary) == 1
|
||||||
with AuthTestClient(self.app) as client:
|
assert "2024-03-10" in summary
|
||||||
response = client.get(
|
assert summary["2024-03-10"] is True
|
||||||
"/recordings/summary",
|
|
||||||
params={"timezone": "America/New_York", "cameras": "all"},
|
def test_recordings_summary_utc_timezone(self):
|
||||||
)
|
"""
|
||||||
|
Test recordings summary with UTC timezone (no DST).
|
||||||
assert response.status_code == 200
|
"""
|
||||||
summary = response.json()
|
# Use UTC timestamps directly
|
||||||
assert len(summary) == 0
|
march_9_utc = datetime(2024, 3, 9, 17, 0, 0, tzinfo=timezone.utc).timestamp()
|
||||||
|
march_10_utc = datetime(2024, 3, 10, 17, 0, 0, tzinfo=timezone.utc).timestamp()
|
||||||
def test_recordings_summary_single_camera_filter(self):
|
|
||||||
"""
|
with AuthTestClient(self.app) as client:
|
||||||
Test recordings summary filtered to a single camera.
|
Recordings.insert(
|
||||||
"""
|
id="recording_march_9_utc",
|
||||||
tz = pytz.timezone("America/New_York")
|
path="/media/recordings/march_9_utc.mp4",
|
||||||
march_10_noon = tz.localize(datetime(2024, 3, 10, 12, 0, 0)).timestamp()
|
camera="front_door",
|
||||||
|
start_time=march_9_utc,
|
||||||
with AuthTestClient(self.app) as client:
|
end_time=march_9_utc + 3600,
|
||||||
# Insert recordings for both cameras
|
duration=3600,
|
||||||
Recordings.insert(
|
motion=100,
|
||||||
id="front_recording",
|
objects=5,
|
||||||
path="/media/recordings/front.mp4",
|
).execute()
|
||||||
camera="front_door",
|
|
||||||
start_time=march_10_noon,
|
Recordings.insert(
|
||||||
end_time=march_10_noon + 3600,
|
id="recording_march_10_utc",
|
||||||
duration=3600,
|
path="/media/recordings/march_10_utc.mp4",
|
||||||
motion=100,
|
camera="front_door",
|
||||||
objects=5,
|
start_time=march_10_utc,
|
||||||
).execute()
|
end_time=march_10_utc + 3600,
|
||||||
|
duration=3600,
|
||||||
Recordings.insert(
|
motion=150,
|
||||||
id="back_recording",
|
objects=8,
|
||||||
path="/media/recordings/back.mp4",
|
).execute()
|
||||||
camera="back_door",
|
|
||||||
start_time=march_10_noon,
|
# Test with UTC timezone
|
||||||
end_time=march_10_noon + 3600,
|
response = client.get(
|
||||||
duration=3600,
|
"/recordings/summary", params={"timezone": "utc", "cameras": "all"}
|
||||||
motion=150,
|
)
|
||||||
objects=8,
|
|
||||||
).execute()
|
assert response.status_code == 200
|
||||||
|
summary = response.json()
|
||||||
# Test with only front_door camera
|
|
||||||
response = client.get(
|
# Verify we get both days
|
||||||
"/recordings/summary",
|
assert len(summary) == 2
|
||||||
params={"timezone": "America/New_York", "cameras": "front_door"},
|
assert "2024-03-09" in summary
|
||||||
)
|
assert "2024-03-10" in summary
|
||||||
|
assert summary["2024-03-09"] is True
|
||||||
assert response.status_code == 200
|
assert summary["2024-03-10"] is True
|
||||||
summary = response.json()
|
|
||||||
assert len(summary) == 1
|
def test_recordings_summary_no_recordings(self):
|
||||||
assert "2024-03-10" in summary
|
"""
|
||||||
assert summary["2024-03-10"] is True
|
Test recordings summary when no recordings exist.
|
||||||
|
"""
|
||||||
|
with AuthTestClient(self.app) as client:
|
||||||
|
response = client.get(
|
||||||
|
"/recordings/summary",
|
||||||
|
params={"timezone": "America/New_York", "cameras": "all"},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert response.status_code == 200
|
||||||
|
summary = response.json()
|
||||||
|
assert len(summary) == 0
|
||||||
|
|
||||||
|
def test_recordings_summary_single_camera_filter(self):
|
||||||
|
"""
|
||||||
|
Test recordings summary filtered to a single camera.
|
||||||
|
"""
|
||||||
|
tz = pytz.timezone("America/New_York")
|
||||||
|
march_10_noon = tz.localize(datetime(2024, 3, 10, 12, 0, 0)).timestamp()
|
||||||
|
|
||||||
|
with AuthTestClient(self.app) as client:
|
||||||
|
# Insert recordings for both cameras
|
||||||
|
Recordings.insert(
|
||||||
|
id="front_recording",
|
||||||
|
path="/media/recordings/front.mp4",
|
||||||
|
camera="front_door",
|
||||||
|
start_time=march_10_noon,
|
||||||
|
end_time=march_10_noon + 3600,
|
||||||
|
duration=3600,
|
||||||
|
motion=100,
|
||||||
|
objects=5,
|
||||||
|
).execute()
|
||||||
|
|
||||||
|
Recordings.insert(
|
||||||
|
id="back_recording",
|
||||||
|
path="/media/recordings/back.mp4",
|
||||||
|
camera="back_door",
|
||||||
|
start_time=march_10_noon,
|
||||||
|
end_time=march_10_noon + 3600,
|
||||||
|
duration=3600,
|
||||||
|
motion=150,
|
||||||
|
objects=8,
|
||||||
|
).execute()
|
||||||
|
|
||||||
|
# Test with only front_door camera
|
||||||
|
response = client.get(
|
||||||
|
"/recordings/summary",
|
||||||
|
params={"timezone": "America/New_York", "cameras": "front_door"},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert response.status_code == 200
|
||||||
|
summary = response.json()
|
||||||
|
assert len(summary) == 1
|
||||||
|
assert "2024-03-10" in summary
|
||||||
|
assert summary["2024-03-10"] is True
|
||||||
|
|||||||
@ -1,66 +1,78 @@
|
|||||||
import sys
|
import sys
|
||||||
import unittest
|
import unittest
|
||||||
from unittest.mock import MagicMock, patch
|
from unittest.mock import MagicMock, patch
|
||||||
|
|
||||||
# Mock complex imports before importing maintainer
|
# Mock complex imports before importing maintainer
|
||||||
sys.modules["frigate.comms.inter_process"] = MagicMock()
|
sys.modules["frigate.comms.inter_process"] = MagicMock()
|
||||||
sys.modules["frigate.comms.detections_updater"] = MagicMock()
|
sys.modules["frigate.comms.detections_updater"] = MagicMock()
|
||||||
sys.modules["frigate.comms.recordings_updater"] = MagicMock()
|
sys.modules["frigate.comms.recordings_updater"] = MagicMock()
|
||||||
sys.modules["frigate.config.camera.updater"] = MagicMock()
|
sys.modules["frigate.config.camera.updater"] = MagicMock()
|
||||||
|
|
||||||
# Now import the class under test
|
# Now import the class under test
|
||||||
from frigate.config import FrigateConfig # noqa: E402
|
from frigate.config import FrigateConfig # noqa: E402
|
||||||
from frigate.record.maintainer import RecordingMaintainer # noqa: E402
|
from frigate.record.maintainer import RecordingMaintainer # noqa: E402
|
||||||
|
|
||||||
|
|
||||||
class TestMaintainer(unittest.IsolatedAsyncioTestCase):
|
class TestMaintainer(unittest.IsolatedAsyncioTestCase):
|
||||||
async def test_move_files_survives_bad_filename(self):
|
async def test_parse_cache_segment_supports_variant(self):
|
||||||
config = MagicMock(spec=FrigateConfig)
|
config = MagicMock(spec=FrigateConfig)
|
||||||
config.cameras = {}
|
config.cameras = {}
|
||||||
stop_event = MagicMock()
|
stop_event = MagicMock()
|
||||||
|
|
||||||
maintainer = RecordingMaintainer(config, stop_event)
|
maintainer = RecordingMaintainer(config, stop_event)
|
||||||
|
parsed = maintainer._parse_cache_segment("front@sub@20210101000000+0000.mp4")
|
||||||
# We need to mock end_time_cache to avoid key errors if logic proceeds
|
|
||||||
maintainer.end_time_cache = {}
|
self.assertIsNotNone(parsed)
|
||||||
|
self.assertEqual("front", parsed["camera"])
|
||||||
# Mock filesystem
|
self.assertEqual("sub", parsed["variant"])
|
||||||
# One bad file, one good file
|
|
||||||
files = ["bad_filename.mp4", "camera@20210101000000+0000.mp4"]
|
async def test_move_files_survives_bad_filename(self):
|
||||||
|
config = MagicMock(spec=FrigateConfig)
|
||||||
with patch("os.listdir", return_value=files):
|
config.cameras = {}
|
||||||
with patch("os.path.isfile", return_value=True):
|
stop_event = MagicMock()
|
||||||
with patch(
|
|
||||||
"frigate.record.maintainer.psutil.process_iter", return_value=[]
|
maintainer = RecordingMaintainer(config, stop_event)
|
||||||
):
|
|
||||||
with patch("frigate.record.maintainer.logger.warning") as warn:
|
# We need to mock end_time_cache to avoid key errors if logic proceeds
|
||||||
# Mock validate_and_move_segment to avoid further logic
|
maintainer.end_time_cache = {}
|
||||||
maintainer.validate_and_move_segment = MagicMock()
|
|
||||||
|
# Mock filesystem
|
||||||
try:
|
# One bad file, one good file
|
||||||
await maintainer.move_files()
|
files = ["bad_filename.mp4", "camera@20210101000000+0000.mp4"]
|
||||||
except ValueError as e:
|
|
||||||
if "not enough values to unpack" in str(e):
|
with patch("os.listdir", return_value=files):
|
||||||
self.fail("move_files() crashed on bad filename!")
|
with patch("os.path.isfile", return_value=True):
|
||||||
raise e
|
with patch(
|
||||||
except Exception:
|
"frigate.record.maintainer.psutil.process_iter", return_value=[]
|
||||||
# Ignore other errors (like DB connection) as we only care about the unpack crash
|
):
|
||||||
pass
|
with patch("frigate.record.maintainer.logger.warning") as warn:
|
||||||
|
# Mock validate_and_move_segment to avoid further logic
|
||||||
# The bad filename is encountered in multiple loops, but should only warn once.
|
maintainer.validate_and_move_segment = MagicMock()
|
||||||
matching = [
|
|
||||||
c
|
try:
|
||||||
for c in warn.call_args_list
|
await maintainer.move_files()
|
||||||
if c.args
|
except ValueError as e:
|
||||||
and isinstance(c.args[0], str)
|
if "not enough values to unpack" in str(e):
|
||||||
and "Skipping unexpected files in cache" in c.args[0]
|
self.fail("move_files() crashed on bad filename!")
|
||||||
]
|
raise e
|
||||||
self.assertEqual(
|
except Exception:
|
||||||
1,
|
# Ignore other errors (like DB connection) as we only care about the unpack crash
|
||||||
len(matching),
|
pass
|
||||||
f"Expected a single warning for unexpected files, got {len(matching)}",
|
|
||||||
)
|
# The bad filename is encountered in multiple loops, but should only warn once.
|
||||||
|
matching = [
|
||||||
|
c
|
||||||
if __name__ == "__main__":
|
for c in warn.call_args_list
|
||||||
unittest.main()
|
if c.args
|
||||||
|
and isinstance(c.args[0], str)
|
||||||
|
and "Skipping unexpected files in cache" in c.args[0]
|
||||||
|
]
|
||||||
|
self.assertEqual(
|
||||||
|
1,
|
||||||
|
len(matching),
|
||||||
|
f"Expected a single warning for unexpected files, got {len(matching)}",
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
unittest.main()
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
38
migrations/036_add_recording_variants.py
Normal file
38
migrations/036_add_recording_variants.py
Normal file
@ -0,0 +1,38 @@
|
|||||||
|
"""Peewee migrations -- 036_add_recording_variants.py."""
|
||||||
|
|
||||||
|
import peewee as pw
|
||||||
|
|
||||||
|
from frigate.models import Recordings
|
||||||
|
|
||||||
|
SQL = pw.SQL
|
||||||
|
|
||||||
|
|
||||||
|
def migrate(migrator, database, fake=False, **kwargs):
|
||||||
|
existing_columns = {
|
||||||
|
row[1] for row in database.execute_sql('PRAGMA table_info("recordings")').fetchall()
|
||||||
|
}
|
||||||
|
|
||||||
|
fields_to_add = {}
|
||||||
|
if "variant" not in existing_columns:
|
||||||
|
fields_to_add["variant"] = pw.CharField(default="main", max_length=20)
|
||||||
|
if "codec_name" not in existing_columns:
|
||||||
|
fields_to_add["codec_name"] = pw.CharField(null=True, max_length=32)
|
||||||
|
if "width" not in existing_columns:
|
||||||
|
fields_to_add["width"] = pw.IntegerField(null=True)
|
||||||
|
if "height" not in existing_columns:
|
||||||
|
fields_to_add["height"] = pw.IntegerField(null=True)
|
||||||
|
if "bitrate" not in existing_columns:
|
||||||
|
fields_to_add["bitrate"] = pw.IntegerField(null=True)
|
||||||
|
|
||||||
|
if fields_to_add:
|
||||||
|
migrator.add_fields(Recordings, **fields_to_add)
|
||||||
|
|
||||||
|
migrator.sql(
|
||||||
|
'CREATE INDEX IF NOT EXISTS "recordings_camera_variant_start_time_end_time" ON "recordings" ("camera", "variant", "start_time" DESC, "end_time" DESC)'
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def rollback(migrator, database, fake=False, **kwargs):
|
||||||
|
migrator.remove_fields(
|
||||||
|
Recordings, ["variant", "codec_name", "width", "height", "bitrate"]
|
||||||
|
)
|
||||||
83
scripts/README.md
Normal file
83
scripts/README.md
Normal file
@ -0,0 +1,83 @@
|
|||||||
|
# Scripts
|
||||||
|
|
||||||
|
## Transcode benchmarks
|
||||||
|
|
||||||
|
Proof-of-concept benchmarks for **real-time VOD transcoding**: transcode a video file with FFmpeg (optionally with hardware acceleration) and measure time and throughput. Used to de-risk the real-time VOD transcoding feature (segment-level transcode + cache): we need ~10s segments to transcode in well under 10s (ideally <2s) so timeline scrubbing stays responsive.
|
||||||
|
|
||||||
|
### Python (recommended)
|
||||||
|
|
||||||
|
From the repo root:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Full file, CPU
|
||||||
|
python scripts/transcode_benchmark.py path/to/recording.mp4
|
||||||
|
|
||||||
|
# First 10 seconds only (simulates one HLS segment)
|
||||||
|
python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10
|
||||||
|
|
||||||
|
# 10s segment with NVIDIA HW accel
|
||||||
|
python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10 --hwaccel nvidia
|
||||||
|
|
||||||
|
# Simulate scrubbing: start 60s in, transcode 10s (VAAPI)
|
||||||
|
python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10 --seek 60 --hwaccel vaapi
|
||||||
|
|
||||||
|
# Intel QSV H.265 (preset-intel-qsv-h265)
|
||||||
|
python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10 --hwaccel qsv-h265
|
||||||
|
|
||||||
|
# Custom FFmpeg binary (e.g. Frigate container)
|
||||||
|
python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10 --ffmpeg /usr/lib/ffmpeg/7/bin/ffmpeg
|
||||||
|
```
|
||||||
|
|
||||||
|
Options:
|
||||||
|
|
||||||
|
- `--duration SEC` – Transcode only this many seconds (default: full file). Use 10 to simulate one HLS segment.
|
||||||
|
- `--seek SEC` – Start at this position (fast seek before `-i`). Simulates scrubbing into the file.
|
||||||
|
- `--hwaccel cpu|nvidia|vaapi|qsv-h265` – Matches Frigate presets: libx264, h264_nvenc, h264_vaapi, preset-intel-qsv-h265 (hevc_qsv).
|
||||||
|
- `--vaapi-device` – VAAPI device (default: `/dev/dri/renderD128`).
|
||||||
|
- `--qsv-device` – Intel QSV device: on Linux defaults to `/dev/dri/renderD129` if present (else `renderD128`, else `0`). With two GPUs, the second node is often the Intel iGPU. Override if you get “No VA display found” (e.g. try the other node).
|
||||||
|
- `--output PATH` – Write output here (default: temp file, deleted).
|
||||||
|
- `--keep-output` – Keep the temp output file.
|
||||||
|
|
||||||
|
Output: real time, speed (× realtime), output size. The script suggests whether the speed is good for ~10s segment transcode.
|
||||||
|
|
||||||
|
### Shell
|
||||||
|
|
||||||
|
Quick one-liners without Python:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
chmod +x scripts/transcode_benchmark.sh
|
||||||
|
|
||||||
|
./scripts/transcode_benchmark.sh path/to/recording.mp4
|
||||||
|
./scripts/transcode_benchmark.sh path/to/recording.mp4 10
|
||||||
|
./scripts/transcode_benchmark.sh path/to/recording.mp4 10 nvidia
|
||||||
|
```
|
||||||
|
|
||||||
|
Arguments: `INPUT [DURATION_SEC] [cpu|nvidia|vaapi|qsv-h265]`. Optional env: `FFMPEG`, `FFPROBE`, `VAAPI_DEVICE`, `QSV_DEVICE`.
|
||||||
|
|
||||||
|
### Interpreting results
|
||||||
|
|
||||||
|
- **Speed ≥ 5× realtime** – A 10s segment transcodes in ~2s or less; good for on-demand segment transcode with cache.
|
||||||
|
- **Speed 1–5×** – Marginal; segment may take several seconds; transcode-ahead or caching helps.
|
||||||
|
- **Speed < 1×** – Too slow for real-time; consider stronger HW or lower resolution/bitrate.
|
||||||
|
|
||||||
|
Run with a real Frigate recording (or any H.264/HEVC MP4) and try both `--duration 10` and full file to see segment vs full transcode cost.
|
||||||
|
|
||||||
|
### Troubleshooting `qsv-h265` (“No VA display found”)
|
||||||
|
|
||||||
|
Intel QSV (`qsv-h265`) only works on **Intel GPUs** with a working **Intel VA-API** stack. If both `/dev/dri/renderD128` and `renderD129` fail with “No VA display found” or “Device creation failed: -22”, then:
|
||||||
|
|
||||||
|
1. **Check which GPUs you have** – With two cards, both may be non-Intel (e.g. NVIDIA + AMD). QSV is Intel-only. Use `lspci -k | grep -A3 VGA` to see adapters and drivers.
|
||||||
|
2. **Check VA-API** – Run `vainfo` or `vainfo --display drm --device /dev/dri/renderD128` (then `renderD129`). If it errors or shows no Intel driver, QSV won’t work. On Intel you typically need `intel-media-driver` (newer) or `intel-vaapi-driver` (i965, older).
|
||||||
|
3. **Permissions** – Ensure your user is in the `render` (and often `video`) group: `groups`; add with `sudo usermod -aG render $USER` and log in again.
|
||||||
|
4. **Use another HW accel** – If you have an **AMD** GPU, use `vaapi` (H.264). If you have **NVIDIA**, use `nvidia`. Otherwise use `cpu`.
|
||||||
|
|
||||||
|
5. **Frigate Docker uses QSV but host benchmark fails** – The container has the Intel VA/QSV stack and device access; the host may not. Run the benchmark **inside the same environment** (e.g. inside the Frigate container):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Copy script and a sample recording into the container (adjust container name)
|
||||||
|
docker cp scripts/transcode_benchmark.sh frigate:/tmp/
|
||||||
|
docker cp /path/to/59.24.mp4 frigate:/tmp/
|
||||||
|
docker exec -it frigate bash -c 'chmod +x /tmp/transcode_benchmark.sh && /tmp/transcode_benchmark.sh /tmp/59.24.mp4 10 qsv-h265'
|
||||||
|
```
|
||||||
|
|
||||||
|
The script auto-detects FFmpeg under `/usr/lib/ffmpeg/*/bin` when `ffmpeg` isn’t on PATH (Frigate container). If it doesn’t, set `FFMPEG` and `FFPROBE` explicitly, e.g. `docker exec ... env FFMPEG=/usr/lib/ffmpeg/7.0/bin/ffmpeg FFPROBE=/usr/lib/ffmpeg/7.0/bin/ffprobe /tmp/transcode_benchmark.sh ...`.
|
||||||
289
scripts/transcode_benchmark.py
Normal file
289
scripts/transcode_benchmark.py
Normal file
@ -0,0 +1,289 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Proof-of-concept benchmark: transcode a video file with FFmpeg (optionally with
|
||||||
|
hardware acceleration) and report timing and throughput.
|
||||||
|
|
||||||
|
Used to de-risk real-time VOD transcoding: we need ~10s segments to transcode
|
||||||
|
in well under 10s (ideally <2s) so scrubbing stays responsive.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
python scripts/transcode_benchmark.py path/to/video.mp4
|
||||||
|
python scripts/transcode_benchmark.py path/to/video.mp4 --duration 10 --hwaccel nvidia
|
||||||
|
python scripts/transcode_benchmark.py path/to/video.mp4 --duration 10 --seek 60 --hwaccel vaapi
|
||||||
|
|
||||||
|
Output: real time, speed (x realtime), output size. Aligns with Frigate export/timelapse
|
||||||
|
HW presets (preset-nvidia, preset-vaapi, libx264 default).
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
import tempfile
|
||||||
|
import time
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
|
||||||
|
def get_ffmpeg_command(
|
||||||
|
ffmpeg_path: str,
|
||||||
|
input_path: str,
|
||||||
|
output_path: str,
|
||||||
|
*,
|
||||||
|
duration_sec: Optional[float] = None,
|
||||||
|
seek_sec: float = 0,
|
||||||
|
hwaccel: str = "cpu",
|
||||||
|
gpu_device: str = "/dev/dri/renderD128",
|
||||||
|
qsv_device: str = "0",
|
||||||
|
) -> list[str]:
|
||||||
|
"""Build argv for FFmpeg transcode (H.264 or HEVC, no audio). Matches Frigate timelapse-style encode."""
|
||||||
|
cmd = [ffmpeg_path, "-hide_banner", "-y", "-loglevel", "warning", "-stats"]
|
||||||
|
|
||||||
|
# Optional seek: -ss before -i for fast seek (keyframe then decode)
|
||||||
|
if seek_sec > 0:
|
||||||
|
cmd.extend(["-ss", str(seek_sec)])
|
||||||
|
|
||||||
|
if hwaccel == "nvidia":
|
||||||
|
cmd.extend(
|
||||||
|
[
|
||||||
|
"-hwaccel",
|
||||||
|
"cuda",
|
||||||
|
"-hwaccel_output_format",
|
||||||
|
"cuda",
|
||||||
|
"-extra_hw_frames",
|
||||||
|
"8",
|
||||||
|
]
|
||||||
|
)
|
||||||
|
elif hwaccel == "vaapi":
|
||||||
|
cmd.extend(
|
||||||
|
[
|
||||||
|
"-hwaccel",
|
||||||
|
"vaapi",
|
||||||
|
"-hwaccel_device",
|
||||||
|
gpu_device,
|
||||||
|
"-hwaccel_output_format",
|
||||||
|
"vaapi",
|
||||||
|
]
|
||||||
|
)
|
||||||
|
elif hwaccel == "qsv-h265":
|
||||||
|
# preset-intel-qsv-h265: load_plugin for HEVC decode, QSV device for decode+encode
|
||||||
|
cmd.extend(
|
||||||
|
[
|
||||||
|
"-load_plugin",
|
||||||
|
"hevc_hw",
|
||||||
|
"-hwaccel",
|
||||||
|
"qsv",
|
||||||
|
"-qsv_device",
|
||||||
|
qsv_device,
|
||||||
|
"-hwaccel_output_format",
|
||||||
|
"qsv",
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
|
cmd.extend(["-i", input_path])
|
||||||
|
|
||||||
|
if duration_sec is not None and duration_sec > 0:
|
||||||
|
cmd.extend(["-t", str(duration_sec)])
|
||||||
|
|
||||||
|
cmd.extend(["-an"])
|
||||||
|
|
||||||
|
if hwaccel == "nvidia":
|
||||||
|
cmd.extend(["-c:v", "h264_nvenc"])
|
||||||
|
elif hwaccel == "vaapi":
|
||||||
|
# VAAPI encode needs frames in vaapi format; decoder outputs vaapi when hwaccel_output_format vaapi
|
||||||
|
cmd.extend(["-c:v", "h264_vaapi"])
|
||||||
|
elif hwaccel == "qsv-h265":
|
||||||
|
# Use CQP explicitly; profile/level can be unsupported on some QSV runtimes
|
||||||
|
cmd.extend(["-c:v", "hevc_qsv", "-global_quality", "23"])
|
||||||
|
else:
|
||||||
|
cmd.extend(
|
||||||
|
["-c:v", "libx264", "-preset:v", "ultrafast", "-tune:v", "zerolatency"]
|
||||||
|
)
|
||||||
|
|
||||||
|
cmd.extend(["-f", "mp4", "-movflags", "+faststart", output_path])
|
||||||
|
return cmd
|
||||||
|
|
||||||
|
|
||||||
|
def get_video_duration_sec(ffprobe_path: str, input_path: str) -> Optional[float]:
|
||||||
|
"""Return duration in seconds or None on failure."""
|
||||||
|
try:
|
||||||
|
out = subprocess.run(
|
||||||
|
[
|
||||||
|
ffprobe_path,
|
||||||
|
"-v",
|
||||||
|
"error",
|
||||||
|
"-show_entries",
|
||||||
|
"format=duration",
|
||||||
|
"-of",
|
||||||
|
"default=noprint_wrappers=1:nokey=1",
|
||||||
|
input_path,
|
||||||
|
],
|
||||||
|
capture_output=True,
|
||||||
|
text=True,
|
||||||
|
timeout=10,
|
||||||
|
)
|
||||||
|
if out.returncode == 0 and out.stdout.strip():
|
||||||
|
return float(out.stdout.strip())
|
||||||
|
except (subprocess.TimeoutExpired, ValueError, FileNotFoundError):
|
||||||
|
pass
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> int:
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description="Benchmark FFmpeg transcode (H.264) with optional HW accel."
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"input",
|
||||||
|
type=Path,
|
||||||
|
help="Input video file (e.g. recording segment)",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--duration",
|
||||||
|
type=float,
|
||||||
|
default=None,
|
||||||
|
metavar="SEC",
|
||||||
|
help="Transcode only this many seconds (default: full file). Simulates segment length.",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--seek",
|
||||||
|
type=float,
|
||||||
|
default=0,
|
||||||
|
metavar="SEC",
|
||||||
|
help="Start at this position (before -i for fast seek). Simulates scrubbing into file.",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--hwaccel",
|
||||||
|
choices=("cpu", "nvidia", "vaapi", "qsv-h265"),
|
||||||
|
default="cpu",
|
||||||
|
help="HW accel: cpu (libx264), nvidia (h264_nvenc), vaapi (h264_vaapi), qsv-h265 (preset-intel-qsv-h265, hevc_qsv).",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--vaapi-device",
|
||||||
|
default="/dev/dri/renderD128",
|
||||||
|
help="VAAPI device (default: /dev/dri/renderD128).",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--qsv-device",
|
||||||
|
default=(
|
||||||
|
"/dev/dri/renderD129"
|
||||||
|
if os.path.exists("/dev/dri/renderD129")
|
||||||
|
else "/dev/dri/renderD128"
|
||||||
|
if os.path.exists("/dev/dri/renderD128")
|
||||||
|
else "0"
|
||||||
|
),
|
||||||
|
help="Intel QSV device: path (e.g. /dev/dri/renderD129 or renderD128 on Linux) or 0 (Windows). With two GPUs, try renderD129 if renderD128 fails. Used for --hwaccel qsv-h265.",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--ffmpeg",
|
||||||
|
default="ffmpeg",
|
||||||
|
metavar="PATH",
|
||||||
|
help="FFmpeg binary (default: ffmpeg in PATH).",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--ffprobe",
|
||||||
|
default="ffprobe",
|
||||||
|
metavar="PATH",
|
||||||
|
help="FFprobe binary (default: ffprobe in PATH).",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--output",
|
||||||
|
type=Path,
|
||||||
|
default=None,
|
||||||
|
help="Output file (default: temp file, deleted after).",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--keep-output",
|
||||||
|
action="store_true",
|
||||||
|
help="Keep output file when using default temp path.",
|
||||||
|
)
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
input_path = args.input.resolve()
|
||||||
|
if not input_path.is_file():
|
||||||
|
print(f"Error: input file not found: {input_path}", file=sys.stderr)
|
||||||
|
return 1
|
||||||
|
|
||||||
|
effective_duration = args.duration
|
||||||
|
if effective_duration is None:
|
||||||
|
duration_from_probe = get_video_duration_sec(str(args.ffprobe), str(input_path))
|
||||||
|
if duration_from_probe is not None:
|
||||||
|
effective_duration = duration_from_probe - args.seek
|
||||||
|
if effective_duration <= 0:
|
||||||
|
print("Error: seek >= file duration", file=sys.stderr)
|
||||||
|
return 1
|
||||||
|
else:
|
||||||
|
print("Warning: could not probe duration; reporting real time only.", file=sys.stderr)
|
||||||
|
|
||||||
|
use_temp = args.output is None
|
||||||
|
if use_temp:
|
||||||
|
fd, out_path = tempfile.mkstemp(suffix=".mp4")
|
||||||
|
os.close(fd)
|
||||||
|
output_path = Path(out_path)
|
||||||
|
else:
|
||||||
|
output_path = args.output.resolve()
|
||||||
|
|
||||||
|
cmd = get_ffmpeg_command(
|
||||||
|
args.ffmpeg,
|
||||||
|
str(input_path),
|
||||||
|
str(output_path),
|
||||||
|
duration_sec=args.duration,
|
||||||
|
seek_sec=args.seek,
|
||||||
|
hwaccel=args.hwaccel,
|
||||||
|
gpu_device=args.vaapi_device,
|
||||||
|
qsv_device=args.qsv_device,
|
||||||
|
)
|
||||||
|
|
||||||
|
print(f"Input: {input_path}")
|
||||||
|
print(f"Output: {output_path}")
|
||||||
|
print(f"HW: {args.hwaccel}")
|
||||||
|
if args.duration is not None:
|
||||||
|
print(f"Limit: {args.duration}s")
|
||||||
|
if args.seek > 0:
|
||||||
|
print(f"Seek: {args.seek}s")
|
||||||
|
print(f"Run: {' '.join(cmd)}")
|
||||||
|
print()
|
||||||
|
|
||||||
|
start = time.perf_counter()
|
||||||
|
try:
|
||||||
|
subprocess.run(cmd, check=True, timeout=3600)
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
print(f"FFmpeg failed: {e}", file=sys.stderr)
|
||||||
|
if use_temp and output_path.exists():
|
||||||
|
output_path.unlink()
|
||||||
|
return 1
|
||||||
|
except subprocess.TimeoutExpired:
|
||||||
|
print("FFmpeg timed out.", file=sys.stderr)
|
||||||
|
if use_temp and output_path.exists():
|
||||||
|
output_path.unlink()
|
||||||
|
return 1
|
||||||
|
elapsed = time.perf_counter() - start
|
||||||
|
|
||||||
|
size_bytes = output_path.stat().st_size if output_path.exists() else 0
|
||||||
|
|
||||||
|
print("--- Results ---")
|
||||||
|
print(f"Real time: {elapsed:.2f}s")
|
||||||
|
if effective_duration is not None and effective_duration > 0:
|
||||||
|
speed = effective_duration / elapsed
|
||||||
|
print(f"Video duration: {effective_duration:.2f}s")
|
||||||
|
print(f"Speed: {speed:.2f}x realtime")
|
||||||
|
if args.duration and args.duration <= 15:
|
||||||
|
if speed >= 5:
|
||||||
|
print("(Good for ~10s segment transcode: well under 2s.)")
|
||||||
|
elif speed >= 1:
|
||||||
|
print("(Marginal: segment may take several seconds.)")
|
||||||
|
else:
|
||||||
|
print("(Slow: segment transcode would exceed segment length.)")
|
||||||
|
print(f"Output size: {size_bytes / (1024*1024):.2f} MiB")
|
||||||
|
|
||||||
|
if use_temp:
|
||||||
|
if args.keep_output:
|
||||||
|
print(f"(Output kept: {output_path})")
|
||||||
|
else:
|
||||||
|
output_path.unlink(missing_ok=True)
|
||||||
|
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
sys.exit(main())
|
||||||
101
scripts/transcode_benchmark.sh
Normal file
101
scripts/transcode_benchmark.sh
Normal file
@ -0,0 +1,101 @@
|
|||||||
|
#!/usr/bin/env bash
# Proof-of-concept: run FFmpeg transcode and report real time.
# Usage:
#   ./scripts/transcode_benchmark.sh path/to/video.mp4
#   ./scripts/transcode_benchmark.sh path/to/video.mp4 10        # first 10 seconds only
#   ./scripts/transcode_benchmark.sh path/to/video.mp4 10 nvidia
#
# Optional: DURATION (seconds), HWACCEL (cpu|nvidia|vaapi|qsv-h265). Default: full file, cpu.
# Requires: ffmpeg, ffprobe. Output: temp file, then deleted. Reports real time and speed.
#
# Fix: use `set -euo pipefail` (was `set -e`) so unset variables and pipeline
# failures abort instead of silently producing wrong results. All optional
# variables are read with ${VAR:-} so -u stays safe.
# Note: empty-array expansion ("${PRE[@]}") requires bash >= 4.4.

set -euo pipefail

INPUT="${1:?Usage: $0 <input.mp4> [duration_sec] [cpu|nvidia|vaapi|qsv-h265]}"
DURATION="${2:-}"
HWACCEL="${3:-cpu}"

# On Linux, QSV needs a DRM render node. With two GPUs, renderD128 is often
# non-Intel and renderD129 the Intel iGPU; prefer 129 when both exist so QSV finds VA.
if [[ -z "${QSV_DEVICE:-}" ]]; then
  if [[ -e /dev/dri/renderD129 ]]; then
    QSV_DEVICE="/dev/dri/renderD129"
  elif [[ -e /dev/dri/renderD128 ]]; then
    QSV_DEVICE="/dev/dri/renderD128"
  else
    QSV_DEVICE="0"
  fi
fi

# Frigate container has ffmpeg under /usr/lib/ffmpeg/<ver>/bin, not on PATH.
# Fix: use GNU find's `-print -quit` instead of `find | head -1` — the pipe
# could fail spuriously under pipefail when head exits early (SIGPIPE).
if [[ -z "${FFMPEG:-}" ]]; then
  if command -v ffmpeg &>/dev/null; then
    FFMPEG="ffmpeg"
  elif [[ -d /usr/lib/ffmpeg ]] && FFMPEG_CANDIDATE=$(find /usr/lib/ffmpeg -path '*/bin/ffmpeg' -type f -print -quit 2>/dev/null); [[ -n "${FFMPEG_CANDIDATE:-}" ]]; then
    FFMPEG="$FFMPEG_CANDIDATE"
  else
    FFMPEG="ffmpeg"
  fi
fi

# ffprobe usually sits next to ffmpeg; fall back to PATH lookup.
FFPROBE="${FFPROBE:-$(dirname "$FFMPEG")/ffprobe}"
if [[ ! -x "$FFPROBE" ]]; then
  FFPROBE="ffprobe"
fi

OUTPUT=$(mktemp -u).mp4

cleanup() { rm -f "$OUTPUT"; }
trap cleanup EXIT

# Build base decode/input args.
INPUT_ARGS=(-hide_banner -y -loglevel warning -stats -i "$INPUT")
if [[ -n "$DURATION" && "$DURATION" =~ ^[0-9]+\.?[0-9]*$ ]]; then
  INPUT_ARGS+=(-t "$DURATION")
fi

case "$HWACCEL" in
  nvidia)
    PRE=( -hwaccel cuda -hwaccel_output_format cuda -extra_hw_frames 8 )
    ENC=(-c:v h264_nvenc)
    ;;
  vaapi)
    PRE=( -hwaccel vaapi -hwaccel_device "${VAAPI_DEVICE:-/dev/dri/renderD128}" -hwaccel_output_format vaapi )
    ENC=(-c:v h264_vaapi)
    ;;
  qsv-h265)
    PRE=( -load_plugin hevc_hw -hwaccel qsv -qsv_device "$QSV_DEVICE" -hwaccel_output_format qsv )
    # Use CQP explicitly; -profile:v/-level can be unsupported on some QSV runtimes.
    ENC=(-c:v hevc_qsv -global_quality 23)
    ;;
  *)
    PRE=()
    ENC=(-c:v libx264 -preset:v ultrafast -tune:v zerolatency)
    ;;
esac

echo "Input: $INPUT"
echo "Output: $OUTPUT (temp)"
echo "HW: $HWACCEL"
[[ -n "$DURATION" ]] && echo "Limit: ${DURATION}s"
# QSV is Intel-only and needs a working Intel VA-API stack; if you see
# 'No VA display found', see scripts/README.md troubleshooting.
[[ "$HWACCEL" = "qsv-h265" ]] && echo "QSV device: $QSV_DEVICE"
echo ""

# Get duration for speed calculation (if not limiting, use full file length).
if [[ -n "$DURATION" ]]; then
  DUR_SEC="$DURATION"
else
  DUR_SEC=$("${FFPROBE:-ffprobe}" -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 "$INPUT" 2>/dev/null || true)
fi

# Use $SECONDS (bash) so we don't rely on date %N or bc in minimal containers.
START=$SECONDS
"$FFMPEG" "${PRE[@]}" "${INPUT_ARGS[@]}" -an "${ENC[@]}" -f mp4 -movflags +faststart "$OUTPUT"
ELAPSED=$((SECONDS - START))
# Avoid division by zero for sub-second transcodes.
[[ "$ELAPSED" -eq 0 ]] && ELAPSED=1

# stat -c is GNU coreutils; stat -f is the BSD/macOS spelling.
SIZE=$(stat -c%s "$OUTPUT" 2>/dev/null || stat -f%z "$OUTPUT" 2>/dev/null || echo 0)
SIZE_MB=$(awk "BEGIN {printf \"%.2f\", $SIZE/1048576}" 2>/dev/null || echo "$((SIZE / 1048576))")

echo "--- Results ---"
echo "Real time: ${ELAPSED}s"
if [[ -n "$DUR_SEC" && "$DUR_SEC" =~ ^[0-9]+\.?[0-9]*$ ]]; then
  SPEED=$(awk "BEGIN {printf \"%.2f\", $DUR_SEC/$ELAPSED}" 2>/dev/null || echo "?")
  echo "Duration: ${DUR_SEC}s"
  echo "Speed: ${SPEED}x realtime"
fi
echo "Output size: ${SIZE_MB} MiB"
69
transcode_proxy/DEV_WORKFLOW.md
Normal file
69
transcode_proxy/DEV_WORKFLOW.md
Normal file
@ -0,0 +1,69 @@
|
|||||||
|
# Dev workflow: frigate-dev (single image with transcode proxy)
|
||||||
|
|
||||||
|
Use **frigate-dev** so your working Docker setup keeps using the stable image. You switch between stable and dev by changing the image in compose and restarting. The transcode proxy runs **inside** the Frigate container; there is no separate proxy image.
|
||||||
|
|
||||||
|
## Image names
|
||||||
|
|
||||||
|
- **frigate-dev** – Frigate image built from this repo (includes transcode proxy, config + UI for transcode_proxy).
|
||||||
|
- Your normal setup keeps using **ghcr.io/blakeblackshear/frigate:stable-tensorrt** (or whatever you use today).
|
||||||
|
|
||||||
|
## Start / stop (switch between stable and dev)
|
||||||
|
|
||||||
|
You can’t run both stacks at once (same ports). Use one compose file and swap the image.
|
||||||
|
|
||||||
|
**Stop everything:**
|
||||||
|
```bash
|
||||||
|
cd ~/docker-compose # or wherever your compose file is
|
||||||
|
docker compose down
|
||||||
|
```
|
||||||
|
|
||||||
|
**Run dev stack (Frigate with in-container transcode proxy):**
|
||||||
|
- In `docker-compose.yml`, set the frigate service to `image: frigate-dev` and publish port 5010 if you use transcode_proxy.
|
||||||
|
```bash
|
||||||
|
docker compose up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
**Switch back to stable:**
|
||||||
|
- Stop: `docker compose down`
|
||||||
|
- In `docker-compose.yml`, set frigate back to `image: ghcr.io/blakeblackshear/frigate:stable-tensorrt`.
|
||||||
|
```bash
|
||||||
|
docker compose up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
**Useful commands:**
|
||||||
|
- `docker compose down` – stop and remove containers.
|
||||||
|
- `docker compose up -d` – start in the background.
|
||||||
|
- `docker compose ps` – see what’s running.
|
||||||
|
- `docker compose logs -f frigate` – follow Frigate logs.
|
||||||
|
|
||||||
|
## Building (Ubuntu server recommended)
|
||||||
|
|
||||||
|
Frigate’s image **is not** “just Python” – it has a **compile phase** (nginx, sqlite-vec, etc.). Building is done with Docker and can take a while.
|
||||||
|
|
||||||
|
**Where to build:** On the **Ubuntu server** where you run Frigate. That way you get the right architecture and avoid Windows/Linux cross-build issues. Sync the repo from your Windows machine via git (clone or push from Windows to a repo and pull on the server, or copy the repo onto the server).
|
||||||
|
|
||||||
|
**On the Ubuntu server:**
|
||||||
|
|
||||||
|
1. Clone (or pull) the Frigate repo with this code.
|
||||||
|
2. **Build Frigate (TensorRT variant, same as stable-tensorrt):**
|
||||||
|
```bash
|
||||||
|
cd /path/to/frigate
|
||||||
|
make version
|
||||||
|
make local-trt
|
||||||
|
docker tag frigate:latest-tensorrt frigate-dev
|
||||||
|
```
|
||||||
|
(`make local-trt` uses buildx; first time may be slow.) The resulting image includes the transcode proxy; no separate proxy image is built.
|
||||||
|
|
||||||
|
**If you prefer to build on Windows:** You can use Docker buildx to build for `linux/amd64` and push to a registry, then pull `frigate-dev` on the Ubuntu server. The Frigate build is heavy and may be slower or more fragile on Windows; building on the server is simpler.
|
||||||
|
|
||||||
|
## One-time setup on the server
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Clone or copy the repo, then:
|
||||||
|
cd /path/to/frigate
|
||||||
|
make version
|
||||||
|
make local-trt
|
||||||
|
docker tag frigate:latest-tensorrt frigate-dev
|
||||||
|
```
|
||||||
|
|
||||||
|
Then in your compose use `image: frigate-dev`, publish port 5010 if you use the transcode proxy, and set `transcode_proxy` in Frigate config as in the main README.
|
||||||
55
transcode_proxy/README.md
Normal file
55
transcode_proxy/README.md
Normal file
@ -0,0 +1,55 @@
|
|||||||
|
# Frigate VOD Transcode Proxy
|
||||||
|
|
||||||
|
Optional proxy that runs **inside the Frigate container** and rewrites VOD HLS playback to an H.264 transport-stream rendition on the fly. Use it when recordings are HEVC (or high bitrate) and you want compatible or lower-bitrate playback.
|
||||||
|
|
||||||
|
## How it works
|
||||||
|
|
||||||
|
- **Manifest requests** (e.g. `.../master.m3u8` and `.../index-v1.m3u8`): Fetched from upstream and rewritten so the browser sees a proxy-owned H.264 HLS rendition.
|
||||||
|
- **Segment requests**: The rewritten media playlist points to proxy-owned `.transcoded.ts` segment URLs. Those requests fetch the upstream source segment, transcode it to H.264 MPEG-TS with FFmpeg, cache it in memory (LRU, configurable size), then serve it.
|
||||||
|
- **Init fragments**: The rewritten media playlist removes upstream `#EXT-X-MAP` usage, so the browser no longer depends on upstream fragmented MP4 init files for transcoded playback.
|
||||||
|
|
||||||
|
The proxy is an s6-managed service in the same Docker image as Frigate. It binds to port **5010** inside the container and starts after nginx is ready.
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
Environment variables (optional; defaults work when running in the same container):
|
||||||
|
|
||||||
|
| Variable | Default | Description |
|
||||||
|
|----------|---------|-------------|
|
||||||
|
| `TRANSCODE_PROXY_UPSTREAM` | `http://127.0.0.1:80` | Upstream Frigate VOD base URL (nginx internal port when in-container). |
|
||||||
|
| `TRANSCODE_PROXY_PATH_PREFIX` | (empty) | If the proxy is mounted at a path (e.g. `/vod-transcoded`), set this so the proxy strips it when forwarding. |
|
||||||
|
| `TRANSCODE_PROXY_HOST` | `0.0.0.0` | Bind host. |
|
||||||
|
| `TRANSCODE_PROXY_PORT` | `5010` | Bind port. |
|
||||||
|
| `TRANSCODE_PROXY_CACHE_MB` | `500` | Max in-memory cache size (MB). |
|
||||||
|
| `TRANSCODE_PROXY_FFMPEG` | (system) | FFmpeg binary path; uses Frigate’s FFmpeg when not set. |
|
||||||
|
| `TRANSCODE_PROXY_H264_BITRATE` | `128k` | H.264 bitrate for transcoded segments. |
|
||||||
|
| `TRANSCODE_PROXY_MAX_WIDTH` | `640` | Max output width for transcoded playback; aspect ratio is preserved and smaller sources are not upscaled. |
|
||||||
|
| `TRANSCODE_PROXY_MAX_HEIGHT` | `480` | Max output height for transcoded playback; aspect ratio is preserved and smaller sources are not upscaled. |
|
||||||
|
|
||||||
|
## Enabling in Frigate
|
||||||
|
|
||||||
|
1. Build Frigate from this repo (e.g. `frigate-dev`) so the image includes the proxy and config/UI support.
|
||||||
|
2. Expose the proxy either internally through Frigate nginx (recommended, e.g. `/vod-transcoded`) or by publishing port **5010** for direct access.
|
||||||
|
3. In Frigate config (YAML), add:
|
||||||
|
```yaml
|
||||||
|
transcode_proxy:
|
||||||
|
enabled: true
|
||||||
|
vod_proxy_url: "http://YOUR_FRIGATE_HOST:5010" # same host as Frigate, port 5010
|
||||||
|
```
|
||||||
|
4. Restart Frigate. The UI will use the proxy for recording playback when enabled.
|
||||||
|
|
||||||
|
If Frigate is behind a reverse proxy and you expose the transcode service at a path (e.g. `https://frigate.example.com/vod-transcoded`), set `TRANSCODE_PROXY_PATH_PREFIX=/vod-transcoded` in the container environment and use that full URL as `vod_proxy_url`.
|
||||||
|
|
||||||
|
## Running (single container)
|
||||||
|
|
||||||
|
The proxy runs automatically inside the Frigate container. No separate container or image is needed. For same-origin playback, keep the service internal and route it through Frigate nginx on the normal UI origin.
|
||||||
|
|
||||||
|
See **transcode_proxy/DEV_WORKFLOW.md** for building the dev image (e.g. `frigate-dev`) and switching between stable and dev.
|
||||||
|
|
||||||
|
## Endpoints
|
||||||
|
|
||||||
|
- `GET /vod/.../master.m3u8` – Rewritten HLS master playlist for the transcoded rendition.
|
||||||
|
- `GET /vod/.../index*.m3u8` – Rewritten HLS media playlist that points at proxy-owned transcoded transport-stream segments.
|
||||||
|
- `GET /vod/.../*.transcoded.ts` – Transcoded H.264 MPEG-TS segments.
|
||||||
|
- `GET /cache` – Cache stats (size, entry count).
|
||||||
|
- `GET /health` – Health check.
|
||||||
1
transcode_proxy/__init__.py
Normal file
1
transcode_proxy/__init__.py
Normal file
@ -0,0 +1 @@
|
|||||||
|
"""Transcode proxy: sits in front of Frigate VOD and transcodes segments on the fly to H.264."""
|
||||||
5
transcode_proxy/__main__.py
Normal file
5
transcode_proxy/__main__.py
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
"""Run the transcode proxy: python -m transcode_proxy."""
from transcode_proxy.main import run

if __name__ == "__main__":
    # Module entry point for `python -m transcode_proxy`.
    run()
47
transcode_proxy/cache.py
Normal file
47
transcode_proxy/cache.py
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
"""In-memory LRU cache for transcoded segments (byte-size limited)."""
|
||||||
|
import logging
|
||||||
|
import threading
|
||||||
|
from collections import OrderedDict
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class ByteLRUCache:
    """LRU cache that evicts by total byte size.

    Thread-safe: every operation takes the internal lock. A value larger than
    the configured maximum is refused outright (with a warning) instead of
    flushing the whole cache trying to make room.
    """

    def __init__(self, max_bytes: int):
        # Budget in bytes; _current_bytes tracks the sum of stored payloads.
        self._max_bytes = max_bytes
        self._current_bytes = 0
        # Insertion order == recency order; front is least-recently-used.
        self._order: OrderedDict[str, bytes] = OrderedDict()
        self._lock = threading.Lock()

    def get(self, key: str) -> Optional[bytes]:
        """Return cached bytes for key (marking it most-recently-used), or None."""
        with self._lock:
            data = self._order.pop(key, None)
            if data is not None:
                self._order[key] = data  # re-insert at the MRU end
                return data
            return None

    def set(self, key: str, value: bytes) -> None:
        """Store value under key, evicting LRU entries to stay within budget.

        Fix: overwriting an existing key first removes the old entry and
        subtracts its size, so _current_bytes no longer double-counts
        re-set keys.
        """
        size = len(value)
        if size > self._max_bytes:
            logging.getLogger(__name__).warning(
                "Segment larger than cache max (%s bytes), not caching", size
            )
            return
        with self._lock:
            stale = self._order.pop(key, None)
            if stale is not None:
                self._current_bytes -= len(stale)
            while self._order and self._current_bytes + size > self._max_bytes:
                evicted_key, evicted = self._order.popitem(last=False)
                self._current_bytes -= len(evicted)
                logging.getLogger(__name__).debug(
                    "Evicted %s from transcode cache", evicted_key
                )
            self._order[key] = value
            self._current_bytes += size

    def size_bytes(self) -> int:
        """Total payload bytes currently cached."""
        with self._lock:
            return self._current_bytes

    def count(self) -> int:
        """Number of cached entries."""
        with self._lock:
            return len(self._order)
||||||
44
transcode_proxy/config.py
Normal file
44
transcode_proxy/config.py
Normal file
@ -0,0 +1,44 @@
|
|||||||
|
"""Configuration from environment."""
|
||||||
|
import os
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
|
||||||
|
|
||||||
|
def _env(name: str, default: str) -> str:
    """Read an environment variable with a fallback (evaluated lazily at init)."""
    return os.environ.get(name, default)


@dataclass
class Config:
    """Proxy configuration, resolved from TRANSCODE_PROXY_* environment variables."""

    # Upstream Frigate VOD base URL (e.g. http://nginx:80 or http://127.0.0.1:5001)
    upstream_base: str = field(
        default_factory=lambda: _env("TRANSCODE_PROXY_UPSTREAM", "http://127.0.0.1:80")
    )
    # Optional path prefix the proxy is mounted at (e.g. /vod-transcoded); strip when forwarding
    path_prefix: str = field(
        default_factory=lambda: _env("TRANSCODE_PROXY_PATH_PREFIX", "").rstrip("/")
    )
    # Host/port to bind
    host: str = field(default_factory=lambda: _env("TRANSCODE_PROXY_HOST", "0.0.0.0"))
    port: int = field(default_factory=lambda: int(_env("TRANSCODE_PROXY_PORT", "5010")))
    # In-memory cache max size in bytes (configured in MB)
    cache_max_bytes: int = field(
        default_factory=lambda: int(_env("TRANSCODE_PROXY_CACHE_MB", "500")) * 1024 * 1024
    )
    # FFmpeg binary used for segment transcodes
    ffmpeg_path: str = field(default_factory=lambda: _env("TRANSCODE_PROXY_FFMPEG", "ffmpeg"))
    # H.264 bitrate for transcoded segments
    h264_bitrate: str = field(
        default_factory=lambda: _env("TRANSCODE_PROXY_H264_BITRATE", "128k")
    )
    # Max output size for transcoded playback; preserves aspect ratio, never upscales
    max_width: int = field(
        default_factory=lambda: int(_env("TRANSCODE_PROXY_MAX_WIDTH", "640"))
    )
    max_height: int = field(
        default_factory=lambda: int(_env("TRANSCODE_PROXY_MAX_HEIGHT", "480"))
    )


# Module-level singleton read by the rest of the proxy.
config = Config()
||||||
24
transcode_proxy/docker-compose.example.yml
Normal file
24
transcode_proxy/docker-compose.example.yml
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
# Example: Frigate with in-container transcode proxy (single image).
|
||||||
|
#
|
||||||
|
# 1. Build Frigate from this repo (on Ubuntu recommended):
|
||||||
|
# make version && make local-trt && docker tag frigate:latest-tensorrt frigate-dev
|
||||||
|
#
|
||||||
|
# 2. Use image: frigate-dev and publish port 5010 for the transcode proxy.
|
||||||
|
# 3. In Frigate config (config.yml), set:
|
||||||
|
# transcode_proxy:
|
||||||
|
# enabled: true
|
||||||
|
# vod_proxy_url: "http://YOUR_HOST:5010"
|
||||||
|
|
||||||
|
services:
|
||||||
|
frigate:
|
||||||
|
container_name: frigate
|
||||||
|
restart: unless-stopped
|
||||||
|
image: frigate-dev
|
||||||
|
# ... your existing frigate config (gpus, shm_size, devices, volumes) ...
|
||||||
|
ports:
|
||||||
|
- "5000:5000" # or 8971:8971 depending on your setup
|
||||||
|
- "5010:5010" # transcode proxy (only needed if transcode_proxy.enabled is true)
|
||||||
|
# Optional: override proxy defaults
|
||||||
|
# environment:
|
||||||
|
# TRANSCODE_PROXY_PORT: "5010"
|
||||||
|
# TRANSCODE_PROXY_CACHE_MB: "500"
|
||||||
419
transcode_proxy/main.py
Normal file
419
transcode_proxy/main.py
Normal file
@ -0,0 +1,419 @@
|
|||||||
|
"""FastAPI app: proxy VOD requests, transcode segments on the fly."""
|
||||||
|
import logging
|
||||||
|
import re
|
||||||
|
from collections.abc import AsyncIterator
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
import httpx
|
||||||
|
from fastapi import FastAPI, Request, Response
|
||||||
|
from fastapi.responses import StreamingResponse
|
||||||
|
from transcode_proxy.cache import ByteLRUCache
|
||||||
|
from transcode_proxy.config import config
|
||||||
|
from transcode_proxy.transcode import (
|
||||||
|
TranscodeError,
|
||||||
|
stream_transcode_segment_to_h264_ts,
|
||||||
|
)
|
||||||
|
|
||||||
|
logging.basicConfig(level=logging.INFO)
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
app = FastAPI(title="Frigate VOD Transcode Proxy", version="0.1.0")
|
||||||
|
cache = ByteLRUCache(config.cache_max_bytes)
|
||||||
|
|
||||||
|
# Segment extensions that the upstream VOD may expose.
|
||||||
|
SEGMENT_EXTENSIONS = (".m4s", ".mp4", ".ts")
|
||||||
|
FORWARD_HEADERS = ("cookie", "authorization", "referer")
|
||||||
|
TRANSCODED_SEGMENT_SUFFIX = ".transcoded.ts"
|
||||||
|
H264_CODEC = "avc1.64001f"
|
||||||
|
LOCAL_QUERY_KEYS = {"bitrate", "max_width", "max_height"}
|
||||||
|
|
||||||
|
|
||||||
|
def _upstream_path(path: str) -> Optional[str]:
    """Strip the configured mount prefix and allow only VOD paths upstream."""
    normalized = path.lstrip("/")
    if config.path_prefix:
        mount = config.path_prefix.strip("/") + "/"
        if normalized.startswith(mount):
            normalized = normalized[len(mount):]
    if normalized == "vod" or normalized.startswith("vod/"):
        return "/" + normalized
    # A bare /vod-transcoded maps to the upstream /vod root; a nested path
    # maps to the same path with the alias removed.
    if normalized == "vod-transcoded":
        return "/vod"
    if normalized.startswith("vod-transcoded/"):
        return "/" + normalized[len("vod-transcoded/"):]
    return None
|
||||||
|
def _is_segment(path: str) -> bool:
    """True when the path (ignoring trailing slashes) names a media segment."""
    trimmed = path.rstrip("/")
    # str.endswith accepts a tuple of suffixes — equivalent to the any() form.
    return trimmed.endswith((TRANSCODED_SEGMENT_SUFFIX,) + SEGMENT_EXTENSIONS)
|
||||||
|
def _is_init_path(path: str) -> bool:
|
||||||
|
return bool(re.search(r"/init.*\.mp4$", path))
|
||||||
|
|
||||||
|
|
||||||
|
def _is_master_playlist(path: str) -> bool:
|
||||||
|
return path.endswith("/master.m3u8") or path.endswith("master.m3u8")
|
||||||
|
|
||||||
|
|
||||||
|
def _init_upstream_path(segment_path: str) -> Optional[str]:
|
||||||
|
"""Infer the matching init fragment for an fMP4 media fragment path."""
|
||||||
|
match = re.search(r"/seg-\d+(?P<suffix>.*)\.m4s$", segment_path)
|
||||||
|
if not match:
|
||||||
|
return None
|
||||||
|
suffix = match.group("suffix")
|
||||||
|
return re.sub(r"/seg-\d+.*\.m4s$", f"/init{suffix}.mp4", segment_path)
|
||||||
|
|
||||||
|
|
||||||
|
async def _fetch_upstream_bytes(
    client: httpx.AsyncClient, url: str, headers: dict[str, str]
) -> Optional[bytes]:
    """GET url and return the body bytes, or None on any failure (logged)."""
    try:
        resp = await client.get(url, headers=headers)
        resp.raise_for_status()
    except Exception as exc:  # best-effort: callers treat None as "unavailable"
        logger.warning("Upstream fetch failed %s: %s", url, exc)
        return None
    return resp.content
|
||||||
|
async def _fetch_source_init_bytes(
    client: httpx.AsyncClient,
    init_path: str,
    query: str,
    headers: dict[str, str],
) -> Optional[bytes]:
    """Fetch (and memoize in the LRU cache) an upstream fMP4 init fragment."""
    base = config.upstream_base.rstrip("/")
    init_url = f"{base}{init_path}" + (f"?{query}" if query else "")

    cache_key = f"source-init:{init_url}"
    hit = cache.get(cache_key)
    if hit is not None:
        return hit

    payload = await _fetch_upstream_bytes(client, init_url, headers)
    if payload is not None:
        cache.set(cache_key, payload)
    return payload
|
||||||
|
async def _stream_source_segment_bytes(
    source_url: str,
    headers: dict[str, str],
    init_bytes: Optional[bytes] = None,
) -> AsyncIterator[bytes]:
    """Yield the optional init fragment, then the upstream segment body chunks."""
    if init_bytes is not None:
        yield init_bytes

    # Open the client and the streaming request in one with-statement; the
    # second manager may reference the first.
    async with httpx.AsyncClient(timeout=60.0) as client, client.stream(
        "GET", source_url, headers=headers
    ) as resp:
        resp.raise_for_status()
        async for piece in resp.aiter_bytes():
            if piece:  # skip empty keep-alive chunks
                yield piece
|
||||||
|
def _proxy_segment_uri(entry: str) -> str:
    """Map an upstream segment URI to its proxy-owned transcoded name."""
    return "".join((entry, TRANSCODED_SEGMENT_SUFFIX))
|
||||||
|
def _source_segment_path(path: str) -> str:
    """Strip the proxy's .transcoded.ts suffix to recover the upstream path."""
    suffix = TRANSCODED_SEGMENT_SUFFIX
    return path[: -len(suffix)] if path.endswith(suffix) else path
|
||||||
|
def _resolution_for_transcode(
|
||||||
|
width: int, height: int, max_width: int, max_height: int
|
||||||
|
) -> tuple[int, int]:
|
||||||
|
if width <= 0 or height <= 0:
|
||||||
|
return (max_width, max_height)
|
||||||
|
|
||||||
|
max_width = max(max_width, 2)
|
||||||
|
max_height = max(max_height, 2)
|
||||||
|
scale = min(max_width / width, max_height / height, 1.0)
|
||||||
|
out_width = max(2, int(width * scale))
|
||||||
|
out_height = max(2, int(height * scale))
|
||||||
|
|
||||||
|
if out_width % 2:
|
||||||
|
out_width -= 1
|
||||||
|
if out_height % 2:
|
||||||
|
out_height -= 1
|
||||||
|
|
||||||
|
return (max(out_width, 2), max(out_height, 2))
|
||||||
|
|
||||||
|
|
||||||
|
def _bandwidth_bits(bitrate: str) -> int:
|
||||||
|
match = re.fullmatch(r"(?P<value>\d+(?:\.\d+)?)(?P<suffix>[kKmMgG]?)", bitrate.strip())
|
||||||
|
if not match:
|
||||||
|
return 2_000_000
|
||||||
|
|
||||||
|
value = float(match.group("value"))
|
||||||
|
suffix = match.group("suffix").upper()
|
||||||
|
multiplier = {
|
||||||
|
"": 1,
|
||||||
|
"K": 1_000,
|
||||||
|
"M": 1_000_000,
|
||||||
|
"G": 1_000_000_000,
|
||||||
|
}[suffix]
|
||||||
|
return int(value * multiplier)
|
||||||
|
|
||||||
|
|
||||||
|
def _transcode_request_profile(request: Request) -> tuple[str, int, int, str]:
    """Extract transcode knobs from the query string.

    Returns (bitrate, max_width, max_height, upstream_query) where
    upstream_query is the original query string minus the proxy-local keys.
    """
    params = request.query_params
    bitrate = params.get("bitrate", config.h264_bitrate)
    max_width = int(params.get("max_width", config.max_width))
    max_height = int(params.get("max_height", config.max_height))
    # NOTE(review): values are re-joined without URL re-encoding here; assumes
    # the incoming query values are already URL-safe — confirm upstream accepts
    # them verbatim.
    passthrough = [
        f"{key}={value}"
        for key, value in params.multi_items()
        if key not in LOCAL_QUERY_KEYS
    ]
    return bitrate, max_width, max_height, "&".join(passthrough)
|
||||||
|
def _rewrite_master_playlist(
    upstream_bytes: bytes, bitrate: str, max_width: int, max_height: int
) -> bytes:
    """Replace the upstream variant list with a single H.264 rendition.

    Keeps the first #EXT-X-STREAM-INF entry's child URI but rewrites its
    attributes (bandwidth, codec, scaled resolution). Falls back to the
    upstream manifest unchanged when no variant can be parsed.
    """
    text = upstream_bytes.decode("utf-8", errors="replace")
    entries = [line.strip() for line in text.splitlines() if line.strip()]

    variant_line: Optional[str] = None
    variant_uri: Optional[str] = None
    for position, entry in enumerate(entries):
        if not entry.startswith("#EXT-X-STREAM-INF:"):
            continue
        variant_line = entry
        # The variant's URI is the first non-tag line after the STREAM-INF tag.
        variant_uri = next(
            (cand for cand in entries[position + 1:] if not cand.startswith("#")),
            None,
        )
        break

    if variant_line is None or variant_uri is None:
        logger.warning("Unable to parse master playlist, returning upstream manifest")
        return upstream_bytes

    attrs = [
        f"BANDWIDTH={max(_bandwidth_bits(bitrate), 1)}",
        f'CODECS="{H264_CODEC}"',
    ]
    size = re.search(r"RESOLUTION=(\d+)x(\d+)", variant_line)
    if size:
        out_w, out_h = _resolution_for_transcode(
            int(size.group(1)), int(size.group(2)), max_width, max_height
        )
        attrs.insert(1, f"RESOLUTION={out_w}x{out_h}")

    body = ["#EXTM3U", "#EXT-X-STREAM-INF:" + ",".join(attrs), variant_uri, ""]
    return "\n".join(body).encode()
|
||||||
|
def _rewrite_media_playlist(upstream_bytes: bytes) -> bytes:
    """Rewrite a media playlist to point at proxy-owned transcoded segments.

    Drops #EXT-X-MAP (no upstream init fragment needed after transcode) and
    inserts a discontinuity tag before every segment after the first, since
    each transcoded TS segment starts a fresh stream.
    """
    result: list[str] = []
    seen_segments = 0

    for raw in upstream_bytes.decode("utf-8", errors="replace").splitlines():
        entry = raw.strip()
        if entry.startswith("#EXT-X-MAP:"):
            continue

        if entry.startswith("#EXTINF:") and seen_segments > 0:
            result.append("#EXT-X-DISCONTINUITY")

        if entry and not entry.startswith("#"):
            result.append(_proxy_segment_uri(entry))
            seen_segments += 1
        else:
            result.append(raw)

    # Terminate with a trailing newline when there is content.
    if result and result[-1] != "":
        result.append("")

    return "\n".join(result).encode()
|
||||||
|
async def _proxy_upstream_response(
    client: httpx.AsyncClient, url: str, headers: dict[str, str]
) -> Optional[httpx.Response]:
    """GET url and return the full response, or None on any failure (logged)."""
    try:
        resp = await client.get(url, headers=headers)
        resp.raise_for_status()
    except Exception as exc:  # callers map None to a 502 for the client
        logger.warning("Upstream fetch failed %s: %s", url, exc)
        return None
    return resp
|
||||||
|
async def _transcoded_segment_response(
    source_url: str,
    cache_key: str,
    headers: dict[str, str],
    init_bytes: Optional[bytes] = None,
    bitrate: Optional[str] = None,
    max_width: Optional[int] = None,
    max_height: Optional[int] = None,
) -> Response:
    # Transcode the upstream segment to H.264 MPEG-TS and stream it back,
    # caching the complete output only when the stream finishes cleanly.
    # Per-request overrides (bitrate/max_width/max_height) fall back to the
    # configured defaults.
    stream = await stream_transcode_segment_to_h264_ts(
        _stream_source_segment_bytes(source_url, headers, init_bytes),
        config.ffmpeg_path,
        bitrate or config.h264_bitrate,
        max_width or config.max_width,
        max_height or config.max_height,
    )

    try:
        # Pull the first chunk eagerly so FFmpeg startup failures surface as a
        # clean 502 instead of a broken/truncated streaming response.
        first_chunk = await stream.first_chunk()
    except TranscodeError as e:
        await stream.aclose()
        logger.warning("Transcode stream failed %s: %s", source_url, e)
        return Response(status_code=502, content=b"Transcode failed")

    async def body() -> AsyncIterator[bytes]:
        # Stream the transcoded bytes to the client. On clean completion the
        # accumulated output (stream.output_bytes — presumably the full TS
        # payload; confirm in the transcode module) is stored in the LRU cache.
        try:
            async for chunk in stream.iter_chunks(first_chunk):
                yield chunk
        except TranscodeError as e:
            # Re-raise so the server aborts the response mid-stream; do not
            # cache a partial segment.
            logger.warning("Transcode stream failed %s: %s", source_url, e)
            raise
        else:
            cache.set(cache_key, stream.output_bytes)

    return StreamingResponse(
        body(),
        media_type="video/mp2t",
        headers={"Cache-Control": "private, max-age=300"},
    )
|
||||||
|
@app.get("/cache")
async def cache_info() -> dict:
    """Return cache size and entry count (for debugging).

    Fix: read the byte size once so size_bytes and size_mb always agree even
    if the cache mutates between the two reads.
    """
    used_bytes = cache.size_bytes()
    return {
        "size_bytes": used_bytes,
        "size_mb": round(used_bytes / (1024 * 1024), 2),
        "entries": cache.count(),
    }
|
||||||
|
@app.get("/health")
async def health() -> dict:
    """Liveness probe: reports ok whenever the process is serving requests."""
    return dict(status="ok")
|
||||||
|
@app.get("/{full_path:path}")
|
||||||
|
async def vod_proxy(request: Request, full_path: str) -> Response:
|
||||||
|
"""Handle /vod/... or /vod-transcoded/... (when path_prefix is set)."""
|
||||||
|
path = "/" + full_path.lstrip("/")
|
||||||
|
upstream_path = _upstream_path(path)
|
||||||
|
if upstream_path is None or not (
|
||||||
|
upstream_path == "/vod" or upstream_path.startswith("/vod/")
|
||||||
|
):
|
||||||
|
return Response(status_code=404, content=b"Not found")
|
||||||
|
bitrate, max_width, max_height, upstream_query = _transcode_request_profile(request)
|
||||||
|
upstream_url = f"{config.upstream_base.rstrip('/')}{upstream_path}"
|
||||||
|
if upstream_query:
|
||||||
|
upstream_url += f"?{upstream_query}"
|
||||||
|
|
||||||
|
headers = {
|
||||||
|
k: v for k, v in request.headers.items() if k.lower() in FORWARD_HEADERS
|
||||||
|
}
|
||||||
|
|
||||||
|
if upstream_path.endswith(TRANSCODED_SEGMENT_SUFFIX):
|
||||||
|
cache_key = f"{upstream_url}|{bitrate}|{max_width}x{max_height}"
|
||||||
|
cached = cache.get(cache_key)
|
||||||
|
if cached is not None:
|
||||||
|
return Response(
|
||||||
|
content=cached,
|
||||||
|
media_type="video/mp2t",
|
||||||
|
headers={"Cache-Control": "private, max-age=300"},
|
||||||
|
)
|
||||||
|
|
||||||
|
source_path = _source_segment_path(upstream_path)
|
||||||
|
source_url = f"{config.upstream_base.rstrip('/')}{source_path}"
|
||||||
|
if upstream_query:
|
||||||
|
source_url += f"?{upstream_query}"
|
||||||
|
|
||||||
|
init_bytes: Optional[bytes] = None
|
||||||
|
if source_path.endswith(".m4s"):
|
||||||
|
init_path = _init_upstream_path(source_path)
|
||||||
|
if init_path is None:
|
||||||
|
return Response(status_code=502, content=b"Init segment inference failed")
|
||||||
|
|
||||||
|
async with httpx.AsyncClient(timeout=30.0) as client:
|
||||||
|
init_bytes = await _fetch_source_init_bytes(
|
||||||
|
client, init_path, upstream_query, headers
|
||||||
|
)
|
||||||
|
|
||||||
|
if init_bytes is None:
|
||||||
|
return Response(status_code=502, content=b"Init segment fetch failed")
|
||||||
|
|
||||||
|
return await _transcoded_segment_response(
|
||||||
|
source_url=source_url,
|
||||||
|
cache_key=cache_key,
|
||||||
|
headers=headers,
|
||||||
|
init_bytes=init_bytes,
|
||||||
|
bitrate=bitrate,
|
||||||
|
max_width=max_width,
|
||||||
|
max_height=max_height,
|
||||||
|
)
|
||||||
|
|
||||||
|
async with httpx.AsyncClient(timeout=30.0) as client:
|
||||||
|
if _is_master_playlist(upstream_path):
|
||||||
|
upstream_resp = await _proxy_upstream_response(client, upstream_url, headers)
|
||||||
|
if upstream_resp is None:
|
||||||
|
return Response(status_code=502, content=b"Upstream fetch failed")
|
||||||
|
|
||||||
|
return Response(
|
||||||
|
content=_rewrite_master_playlist(
|
||||||
|
upstream_resp.content, bitrate, max_width, max_height
|
||||||
|
),
|
||||||
|
media_type="application/vnd.apple.mpegurl",
|
||||||
|
headers={"Cache-Control": "no-store"},
|
||||||
|
)
|
||||||
|
|
||||||
|
if upstream_path.endswith(".m3u8"):
|
||||||
|
upstream_resp = await _proxy_upstream_response(client, upstream_url, headers)
|
||||||
|
if upstream_resp is None:
|
||||||
|
return Response(status_code=502, content=b"Upstream fetch failed")
|
||||||
|
|
||||||
|
return Response(
|
||||||
|
content=_rewrite_media_playlist(upstream_resp.content),
|
||||||
|
media_type="application/vnd.apple.mpegurl",
|
||||||
|
headers={"Cache-Control": "no-store"},
|
||||||
|
)
|
||||||
|
|
||||||
|
upstream_resp = await _proxy_upstream_response(client, upstream_url, headers)
|
||||||
|
if upstream_resp is None:
|
||||||
|
return Response(status_code=502, content=b"Upstream fetch failed")
|
||||||
|
|
||||||
|
return Response(
|
||||||
|
content=upstream_resp.content,
|
||||||
|
media_type=upstream_resp.headers.get("content-type", "application/octet-stream"),
|
||||||
|
headers={"Cache-Control": "no-store"},
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def run() -> None:
|
||||||
|
import uvicorn
|
||||||
|
uvicorn.run(
|
||||||
|
"transcode_proxy.main:app",
|
||||||
|
host=config.host,
|
||||||
|
port=config.port,
|
||||||
|
log_level="info",
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
run()
|
||||||
5
transcode_proxy/requirements.txt
Normal file
5
transcode_proxy/requirements.txt
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
# Dependencies for running the transcode proxy standalone (e.g. in a separate container).
|
||||||
|
# Frigate's main container may already have these; the proxy can share the same env.
|
||||||
|
fastapi>=0.100.0
|
||||||
|
uvicorn>=0.22.0
|
||||||
|
httpx>=0.24.0
|
||||||
256
transcode_proxy/transcode.py
Normal file
256
transcode_proxy/transcode.py
Normal file
@ -0,0 +1,256 @@
|
|||||||
|
"""Transcode media segments to H.264 transport stream bytes using FFmpeg."""
|
||||||
|
import asyncio
|
||||||
|
import logging
|
||||||
|
import subprocess
|
||||||
|
from collections.abc import AsyncIterable, AsyncIterator
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class TranscodeError(RuntimeError):
|
||||||
|
"""Raised when FFmpeg cannot produce a valid transcoded segment."""
|
||||||
|
|
||||||
|
|
||||||
|
def _build_scale_filter(max_width: int, max_height: int) -> Optional[str]:
|
||||||
|
if max_width <= 0 or max_height <= 0:
|
||||||
|
return None
|
||||||
|
|
||||||
|
return (
|
||||||
|
f"scale=w={max_width}:h={max_height}:"
|
||||||
|
"force_original_aspect_ratio=decrease:"
|
||||||
|
"force_divisible_by=2"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _build_ffmpeg_cmd(
|
||||||
|
ffmpeg_path: str,
|
||||||
|
bitrate: str,
|
||||||
|
max_width: int,
|
||||||
|
max_height: int,
|
||||||
|
) -> list[str]:
|
||||||
|
cmd = [
|
||||||
|
ffmpeg_path,
|
||||||
|
"-hide_banner",
|
||||||
|
"-loglevel",
|
||||||
|
"error",
|
||||||
|
"-i",
|
||||||
|
"pipe:0",
|
||||||
|
"-an",
|
||||||
|
"-pix_fmt",
|
||||||
|
"yuv420p",
|
||||||
|
"-c:v",
|
||||||
|
"libx264",
|
||||||
|
"-preset",
|
||||||
|
"fast",
|
||||||
|
"-profile:v",
|
||||||
|
"high",
|
||||||
|
"-level:v",
|
||||||
|
"3.1",
|
||||||
|
"-b:v",
|
||||||
|
bitrate,
|
||||||
|
"-maxrate",
|
||||||
|
bitrate,
|
||||||
|
"-bufsize",
|
||||||
|
bitrate,
|
||||||
|
"-muxdelay",
|
||||||
|
"0",
|
||||||
|
"-muxpreload",
|
||||||
|
"0",
|
||||||
|
"-f",
|
||||||
|
"mpegts",
|
||||||
|
"-mpegts_flags",
|
||||||
|
"+initial_discontinuity",
|
||||||
|
"pipe:1",
|
||||||
|
]
|
||||||
|
|
||||||
|
scale_filter = _build_scale_filter(max_width, max_height)
|
||||||
|
if scale_filter:
|
||||||
|
cmd[7:7] = ["-vf", scale_filter]
|
||||||
|
|
||||||
|
return cmd
|
||||||
|
|
||||||
|
|
||||||
|
class H264TSStream:
|
||||||
|
"""Manage a streaming FFmpeg transcode process."""
|
||||||
|
|
||||||
|
def __init__(self, process: asyncio.subprocess.Process):
|
||||||
|
self._process = process
|
||||||
|
self._stderr = bytearray()
|
||||||
|
self._output = bytearray()
|
||||||
|
self._input_error: Exception | None = None
|
||||||
|
self._closed = False
|
||||||
|
self._stdin_task: asyncio.Task[None] | None = None
|
||||||
|
self._stderr_task: asyncio.Task[None] | None = None
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
async def start(
|
||||||
|
cls,
|
||||||
|
source_chunks: AsyncIterable[bytes],
|
||||||
|
ffmpeg_path: str,
|
||||||
|
bitrate: str = "2M",
|
||||||
|
max_width: int = 640,
|
||||||
|
max_height: int = 480,
|
||||||
|
) -> "H264TSStream":
|
||||||
|
process = await asyncio.create_subprocess_exec(
|
||||||
|
*_build_ffmpeg_cmd(ffmpeg_path, bitrate, max_width, max_height),
|
||||||
|
stdin=asyncio.subprocess.PIPE,
|
||||||
|
stdout=asyncio.subprocess.PIPE,
|
||||||
|
stderr=asyncio.subprocess.PIPE,
|
||||||
|
)
|
||||||
|
stream = cls(process)
|
||||||
|
stream._stdin_task = asyncio.create_task(stream._feed_stdin(source_chunks))
|
||||||
|
stream._stderr_task = asyncio.create_task(stream._drain_stderr())
|
||||||
|
return stream
|
||||||
|
|
||||||
|
async def _feed_stdin(self, source_chunks: AsyncIterable[bytes]) -> None:
|
||||||
|
assert self._process.stdin is not None
|
||||||
|
|
||||||
|
try:
|
||||||
|
async for chunk in source_chunks:
|
||||||
|
if not chunk:
|
||||||
|
continue
|
||||||
|
self._process.stdin.write(chunk)
|
||||||
|
await self._process.stdin.drain()
|
||||||
|
except (BrokenPipeError, ConnectionResetError) as exc:
|
||||||
|
self._input_error = exc
|
||||||
|
except Exception as exc: # pragma: no cover - depends on upstream/network failures
|
||||||
|
self._input_error = exc
|
||||||
|
finally:
|
||||||
|
stdin = self._process.stdin
|
||||||
|
if stdin is not None and not stdin.is_closing():
|
||||||
|
stdin.close()
|
||||||
|
try:
|
||||||
|
await stdin.wait_closed()
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
async def _drain_stderr(self) -> None:
|
||||||
|
assert self._process.stderr is not None
|
||||||
|
|
||||||
|
while True:
|
||||||
|
chunk = await self._process.stderr.read(8192)
|
||||||
|
if not chunk:
|
||||||
|
break
|
||||||
|
self._stderr.extend(chunk)
|
||||||
|
|
||||||
|
async def _read_stdout_chunk(self) -> bytes:
|
||||||
|
assert self._process.stdout is not None
|
||||||
|
chunk = await self._process.stdout.read(65536)
|
||||||
|
if chunk:
|
||||||
|
self._output.extend(chunk)
|
||||||
|
return chunk
|
||||||
|
|
||||||
|
def _error_message(self) -> str:
|
||||||
|
if self._input_error is not None:
|
||||||
|
return f"Source stream failed: {self._input_error}"
|
||||||
|
if self._stderr:
|
||||||
|
return self._stderr.decode(errors="replace")
|
||||||
|
return "unknown FFmpeg error"
|
||||||
|
|
||||||
|
async def _ensure_success(self) -> bytes:
|
||||||
|
if self._stdin_task is not None:
|
||||||
|
await self._stdin_task
|
||||||
|
if self._stderr_task is not None:
|
||||||
|
await self._stderr_task
|
||||||
|
|
||||||
|
returncode = await self._process.wait()
|
||||||
|
if returncode != 0:
|
||||||
|
raise TranscodeError(self._error_message())
|
||||||
|
|
||||||
|
return bytes(self._output)
|
||||||
|
|
||||||
|
async def first_chunk(self) -> bytes:
|
||||||
|
chunk = await self._read_stdout_chunk()
|
||||||
|
if chunk:
|
||||||
|
return chunk
|
||||||
|
|
||||||
|
try:
|
||||||
|
await self._ensure_success()
|
||||||
|
finally:
|
||||||
|
self._closed = True
|
||||||
|
|
||||||
|
raise TranscodeError("FFmpeg produced no output")
|
||||||
|
|
||||||
|
async def iter_chunks(self, first_chunk: bytes) -> AsyncIterator[bytes]:
|
||||||
|
try:
|
||||||
|
yield first_chunk
|
||||||
|
while True:
|
||||||
|
chunk = await self._read_stdout_chunk()
|
||||||
|
if not chunk:
|
||||||
|
break
|
||||||
|
yield chunk
|
||||||
|
|
||||||
|
await self._ensure_success()
|
||||||
|
finally:
|
||||||
|
await self.aclose()
|
||||||
|
|
||||||
|
async def aclose(self) -> None:
|
||||||
|
if self._closed:
|
||||||
|
return
|
||||||
|
|
||||||
|
self._closed = True
|
||||||
|
|
||||||
|
if self._process.returncode is None:
|
||||||
|
self._process.kill()
|
||||||
|
await self._process.wait()
|
||||||
|
|
||||||
|
for task in (self._stdin_task, self._stderr_task):
|
||||||
|
if task is None or task.done():
|
||||||
|
continue
|
||||||
|
task.cancel()
|
||||||
|
try:
|
||||||
|
await task
|
||||||
|
except asyncio.CancelledError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
@property
|
||||||
|
def output_bytes(self) -> bytes:
|
||||||
|
return bytes(self._output)
|
||||||
|
|
||||||
|
|
||||||
|
async def stream_transcode_segment_to_h264_ts(
|
||||||
|
source_chunks: AsyncIterable[bytes],
|
||||||
|
ffmpeg_path: str,
|
||||||
|
bitrate: str = "2M",
|
||||||
|
max_width: int = 640,
|
||||||
|
max_height: int = 480,
|
||||||
|
) -> H264TSStream:
|
||||||
|
"""Start an FFmpeg process that streams H.264 MPEG-TS output."""
|
||||||
|
return await H264TSStream.start(
|
||||||
|
source_chunks,
|
||||||
|
ffmpeg_path,
|
||||||
|
bitrate,
|
||||||
|
max_width,
|
||||||
|
max_height,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def transcode_segment_to_h264_ts(
|
||||||
|
segment_bytes: bytes,
|
||||||
|
ffmpeg_path: str,
|
||||||
|
bitrate: str = "2M",
|
||||||
|
max_width: int = 640,
|
||||||
|
max_height: int = 480,
|
||||||
|
) -> Optional[bytes]:
|
||||||
|
"""Decode a segment and re-encode it as H.264 MPEG-TS bytes."""
|
||||||
|
try:
|
||||||
|
result = subprocess.run(
|
||||||
|
_build_ffmpeg_cmd(ffmpeg_path, bitrate, max_width, max_height),
|
||||||
|
input=segment_bytes,
|
||||||
|
capture_output=True,
|
||||||
|
timeout=60,
|
||||||
|
)
|
||||||
|
if result.returncode != 0:
|
||||||
|
logger.warning(
|
||||||
|
"FFmpeg transcode failed: %s",
|
||||||
|
result.stderr.decode(errors="replace") if result.stderr else "unknown",
|
||||||
|
)
|
||||||
|
return None
|
||||||
|
return result.stdout
|
||||||
|
except subprocess.TimeoutExpired:
|
||||||
|
logger.warning("FFmpeg transcode timed out")
|
||||||
|
return None
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning("FFmpeg transcode error: %s", e)
|
||||||
|
return None
|
||||||
@ -1,457 +1,469 @@
|
|||||||
import { useCallback, useState } from "react";
|
import { useCallback, useState } from "react";
|
||||||
import {
|
import {
|
||||||
Dialog,
|
Dialog,
|
||||||
DialogContent,
|
DialogContent,
|
||||||
DialogDescription,
|
DialogDescription,
|
||||||
DialogFooter,
|
DialogFooter,
|
||||||
DialogHeader,
|
DialogHeader,
|
||||||
DialogTitle,
|
DialogTitle,
|
||||||
DialogTrigger,
|
DialogTrigger,
|
||||||
} from "../ui/dialog";
|
} from "../ui/dialog";
|
||||||
import { Label } from "../ui/label";
|
import { Label } from "../ui/label";
|
||||||
import { RadioGroup, RadioGroupItem } from "../ui/radio-group";
|
import { RadioGroup, RadioGroupItem } from "../ui/radio-group";
|
||||||
import { Button } from "../ui/button";
|
import { Button } from "../ui/button";
|
||||||
import { ExportMode } from "@/types/filter";
|
import { ExportMode } from "@/types/filter";
|
||||||
import { FaArrowDown } from "react-icons/fa";
|
import { FaArrowDown } from "react-icons/fa";
|
||||||
import axios from "axios";
|
import axios from "axios";
|
||||||
import { toast } from "sonner";
|
import { toast } from "sonner";
|
||||||
import { Input } from "../ui/input";
|
import { Input } from "../ui/input";
|
||||||
import { TimeRange } from "@/types/timeline";
|
import { TimeRange } from "@/types/timeline";
|
||||||
import useSWR from "swr";
|
import useSWR from "swr";
|
||||||
import {
|
import {
|
||||||
Select,
|
Select,
|
||||||
SelectContent,
|
SelectContent,
|
||||||
SelectItem,
|
SelectItem,
|
||||||
SelectSeparator,
|
SelectSeparator,
|
||||||
SelectTrigger,
|
SelectTrigger,
|
||||||
SelectValue,
|
SelectValue,
|
||||||
} from "../ui/select";
|
} from "../ui/select";
|
||||||
import { isDesktop, isMobile } from "react-device-detect";
|
import { isDesktop, isMobile } from "react-device-detect";
|
||||||
import { Drawer, DrawerContent, DrawerTrigger } from "../ui/drawer";
|
import { Drawer, DrawerContent, DrawerTrigger } from "../ui/drawer";
|
||||||
import SaveExportOverlay from "./SaveExportOverlay";
|
import SaveExportOverlay from "./SaveExportOverlay";
|
||||||
import { baseUrl } from "@/api/baseUrl";
|
import { baseUrl } from "@/api/baseUrl";
|
||||||
import { cn } from "@/lib/utils";
|
import { cn } from "@/lib/utils";
|
||||||
import { GenericVideoPlayer } from "../player/GenericVideoPlayer";
|
import { GenericVideoPlayer } from "../player/GenericVideoPlayer";
|
||||||
import { useTranslation } from "react-i18next";
|
import { useTranslation } from "react-i18next";
|
||||||
import { ExportCase } from "@/types/export";
|
import { ExportCase } from "@/types/export";
|
||||||
import { CustomTimeSelector } from "./CustomTimeSelector";
|
import { CustomTimeSelector } from "./CustomTimeSelector";
|
||||||
|
import useRecordingPlaybackSource from "@/hooks/use-recording-playback-source";
|
||||||
const EXPORT_OPTIONS = [
|
|
||||||
"1",
|
const EXPORT_OPTIONS = [
|
||||||
"4",
|
"1",
|
||||||
"8",
|
"4",
|
||||||
"12",
|
"8",
|
||||||
"24",
|
"12",
|
||||||
"timeline",
|
"24",
|
||||||
"custom",
|
"timeline",
|
||||||
] as const;
|
"custom",
|
||||||
type ExportOption = (typeof EXPORT_OPTIONS)[number];
|
] as const;
|
||||||
|
type ExportOption = (typeof EXPORT_OPTIONS)[number];
|
||||||
type ExportDialogProps = {
|
|
||||||
camera: string;
|
type ExportDialogProps = {
|
||||||
latestTime: number;
|
camera: string;
|
||||||
currentTime: number;
|
latestTime: number;
|
||||||
range?: TimeRange;
|
currentTime: number;
|
||||||
mode: ExportMode;
|
range?: TimeRange;
|
||||||
showPreview: boolean;
|
mode: ExportMode;
|
||||||
setRange: (range: TimeRange | undefined) => void;
|
showPreview: boolean;
|
||||||
setMode: (mode: ExportMode) => void;
|
setRange: (range: TimeRange | undefined) => void;
|
||||||
setShowPreview: (showPreview: boolean) => void;
|
setMode: (mode: ExportMode) => void;
|
||||||
};
|
setShowPreview: (showPreview: boolean) => void;
|
||||||
export default function ExportDialog({
|
};
|
||||||
camera,
|
export default function ExportDialog({
|
||||||
latestTime,
|
camera,
|
||||||
currentTime,
|
latestTime,
|
||||||
range,
|
currentTime,
|
||||||
mode,
|
range,
|
||||||
showPreview,
|
mode,
|
||||||
setRange,
|
showPreview,
|
||||||
setMode,
|
setRange,
|
||||||
setShowPreview,
|
setMode,
|
||||||
}: ExportDialogProps) {
|
setShowPreview,
|
||||||
const { t } = useTranslation(["components/dialog"]);
|
}: ExportDialogProps) {
|
||||||
const [name, setName] = useState("");
|
const { t } = useTranslation(["components/dialog"]);
|
||||||
const [selectedCaseId, setSelectedCaseId] = useState<string | undefined>(
|
const [name, setName] = useState("");
|
||||||
undefined,
|
const [selectedCaseId, setSelectedCaseId] = useState<string | undefined>(
|
||||||
);
|
undefined,
|
||||||
|
);
|
||||||
const onStartExport = useCallback(() => {
|
|
||||||
if (!range) {
|
const onStartExport = useCallback(() => {
|
||||||
toast.error(t("export.toast.error.noVaildTimeSelected"), {
|
if (!range) {
|
||||||
position: "top-center",
|
toast.error(t("export.toast.error.noVaildTimeSelected"), {
|
||||||
});
|
position: "top-center",
|
||||||
return;
|
});
|
||||||
}
|
return;
|
||||||
|
}
|
||||||
if (range.before < range.after) {
|
|
||||||
toast.error(t("export.toast.error.endTimeMustAfterStartTime"), {
|
if (range.before < range.after) {
|
||||||
position: "top-center",
|
toast.error(t("export.toast.error.endTimeMustAfterStartTime"), {
|
||||||
});
|
position: "top-center",
|
||||||
return;
|
});
|
||||||
}
|
return;
|
||||||
|
}
|
||||||
axios
|
|
||||||
.post(
|
axios
|
||||||
`export/${camera}/start/${Math.round(range.after)}/end/${Math.round(range.before)}`,
|
.post(
|
||||||
{
|
`export/${camera}/start/${Math.round(range.after)}/end/${Math.round(range.before)}`,
|
||||||
playback: "realtime",
|
{
|
||||||
name,
|
playback: "realtime",
|
||||||
export_case_id: selectedCaseId || undefined,
|
name,
|
||||||
},
|
export_case_id: selectedCaseId || undefined,
|
||||||
)
|
},
|
||||||
.then((response) => {
|
)
|
||||||
if (response.status == 200) {
|
.then((response) => {
|
||||||
toast.success(t("export.toast.success"), {
|
if (response.status == 200) {
|
||||||
position: "top-center",
|
toast.success(t("export.toast.success"), {
|
||||||
action: (
|
position: "top-center",
|
||||||
<a href="/export" target="_blank" rel="noopener noreferrer">
|
action: (
|
||||||
<Button>{t("export.toast.view")}</Button>
|
<a href="/export" target="_blank" rel="noopener noreferrer">
|
||||||
</a>
|
<Button>{t("export.toast.view")}</Button>
|
||||||
),
|
</a>
|
||||||
});
|
),
|
||||||
setName("");
|
});
|
||||||
setSelectedCaseId(undefined);
|
setName("");
|
||||||
setRange(undefined);
|
setSelectedCaseId(undefined);
|
||||||
setMode("none");
|
setRange(undefined);
|
||||||
}
|
setMode("none");
|
||||||
})
|
}
|
||||||
.catch((error) => {
|
})
|
||||||
const errorMessage =
|
.catch((error) => {
|
||||||
error.response?.data?.message ||
|
const errorMessage =
|
||||||
error.response?.data?.detail ||
|
error.response?.data?.message ||
|
||||||
"Unknown error";
|
error.response?.data?.detail ||
|
||||||
toast.error(
|
"Unknown error";
|
||||||
t("export.toast.error.failed", {
|
toast.error(
|
||||||
error: errorMessage,
|
t("export.toast.error.failed", {
|
||||||
}),
|
error: errorMessage,
|
||||||
{ position: "top-center" },
|
}),
|
||||||
);
|
{ position: "top-center" },
|
||||||
});
|
);
|
||||||
}, [camera, name, range, selectedCaseId, setRange, setName, setMode, t]);
|
});
|
||||||
|
}, [camera, name, range, selectedCaseId, setRange, setName, setMode, t]);
|
||||||
const handleCancel = useCallback(() => {
|
|
||||||
setName("");
|
const handleCancel = useCallback(() => {
|
||||||
setSelectedCaseId(undefined);
|
setName("");
|
||||||
setMode("none");
|
setSelectedCaseId(undefined);
|
||||||
setRange(undefined);
|
setMode("none");
|
||||||
}, [setMode, setRange]);
|
setRange(undefined);
|
||||||
|
}, [setMode, setRange]);
|
||||||
const Overlay = isDesktop ? Dialog : Drawer;
|
|
||||||
const Trigger = isDesktop ? DialogTrigger : DrawerTrigger;
|
const Overlay = isDesktop ? Dialog : Drawer;
|
||||||
const Content = isDesktop ? DialogContent : DrawerContent;
|
const Trigger = isDesktop ? DialogTrigger : DrawerTrigger;
|
||||||
|
const Content = isDesktop ? DialogContent : DrawerContent;
|
||||||
return (
|
|
||||||
<>
|
return (
|
||||||
<ExportPreviewDialog
|
<>
|
||||||
camera={camera}
|
<ExportPreviewDialog
|
||||||
range={range}
|
camera={camera}
|
||||||
showPreview={showPreview}
|
range={range}
|
||||||
setShowPreview={setShowPreview}
|
showPreview={showPreview}
|
||||||
/>
|
setShowPreview={setShowPreview}
|
||||||
<SaveExportOverlay
|
/>
|
||||||
className="pointer-events-none absolute left-1/2 top-8 z-50 -translate-x-1/2"
|
<SaveExportOverlay
|
||||||
show={mode == "timeline"}
|
className="pointer-events-none absolute left-1/2 top-8 z-50 -translate-x-1/2"
|
||||||
onPreview={() => setShowPreview(true)}
|
show={mode == "timeline"}
|
||||||
onSave={() => onStartExport()}
|
onPreview={() => setShowPreview(true)}
|
||||||
onCancel={handleCancel}
|
onSave={() => onStartExport()}
|
||||||
/>
|
onCancel={handleCancel}
|
||||||
<Overlay
|
/>
|
||||||
open={mode == "select"}
|
<Overlay
|
||||||
onOpenChange={(open) => {
|
open={mode == "select"}
|
||||||
if (!open) {
|
onOpenChange={(open) => {
|
||||||
setMode("none");
|
if (!open) {
|
||||||
}
|
setMode("none");
|
||||||
}}
|
}
|
||||||
>
|
}}
|
||||||
{!isDesktop && (
|
>
|
||||||
<Trigger asChild>
|
{!isDesktop && (
|
||||||
<Button
|
<Trigger asChild>
|
||||||
className="flex items-center gap-2"
|
<Button
|
||||||
aria-label={t("menu.export", { ns: "common" })}
|
className="flex items-center gap-2"
|
||||||
size="sm"
|
aria-label={t("menu.export", { ns: "common" })}
|
||||||
onClick={() => {
|
size="sm"
|
||||||
const now = new Date(latestTime * 1000);
|
onClick={() => {
|
||||||
let start = 0;
|
const now = new Date(latestTime * 1000);
|
||||||
now.setHours(now.getHours() - 1);
|
let start = 0;
|
||||||
start = now.getTime() / 1000;
|
now.setHours(now.getHours() - 1);
|
||||||
setRange({
|
start = now.getTime() / 1000;
|
||||||
before: latestTime,
|
setRange({
|
||||||
after: start,
|
before: latestTime,
|
||||||
});
|
after: start,
|
||||||
setMode("select");
|
});
|
||||||
}}
|
setMode("select");
|
||||||
>
|
}}
|
||||||
<FaArrowDown className="rounded-md bg-secondary-foreground fill-secondary p-1" />
|
>
|
||||||
{isDesktop && (
|
<FaArrowDown className="rounded-md bg-secondary-foreground fill-secondary p-1" />
|
||||||
<div className="text-primary">
|
{isDesktop && (
|
||||||
{t("menu.export", { ns: "common" })}
|
<div className="text-primary">
|
||||||
</div>
|
{t("menu.export", { ns: "common" })}
|
||||||
)}
|
</div>
|
||||||
</Button>
|
)}
|
||||||
</Trigger>
|
</Button>
|
||||||
)}
|
</Trigger>
|
||||||
<Content
|
)}
|
||||||
className={
|
<Content
|
||||||
isDesktop
|
className={
|
||||||
? "sm:rounded-lg md:rounded-2xl"
|
isDesktop
|
||||||
: "mx-4 rounded-lg px-4 pb-4 md:rounded-2xl"
|
? "sm:rounded-lg md:rounded-2xl"
|
||||||
}
|
: "mx-4 rounded-lg px-4 pb-4 md:rounded-2xl"
|
||||||
>
|
}
|
||||||
<ExportContent
|
>
|
||||||
latestTime={latestTime}
|
<ExportContent
|
||||||
currentTime={currentTime}
|
latestTime={latestTime}
|
||||||
range={range}
|
currentTime={currentTime}
|
||||||
name={name}
|
range={range}
|
||||||
selectedCaseId={selectedCaseId}
|
name={name}
|
||||||
onStartExport={onStartExport}
|
selectedCaseId={selectedCaseId}
|
||||||
setName={setName}
|
onStartExport={onStartExport}
|
||||||
setSelectedCaseId={setSelectedCaseId}
|
setName={setName}
|
||||||
setRange={setRange}
|
setSelectedCaseId={setSelectedCaseId}
|
||||||
setMode={setMode}
|
setRange={setRange}
|
||||||
onCancel={handleCancel}
|
setMode={setMode}
|
||||||
/>
|
onCancel={handleCancel}
|
||||||
</Content>
|
/>
|
||||||
</Overlay>
|
</Content>
|
||||||
</>
|
</Overlay>
|
||||||
);
|
</>
|
||||||
}
|
);
|
||||||
|
}
|
||||||
type ExportContentProps = {
|
|
||||||
latestTime: number;
|
type ExportContentProps = {
|
||||||
currentTime: number;
|
latestTime: number;
|
||||||
range?: TimeRange;
|
currentTime: number;
|
||||||
name: string;
|
range?: TimeRange;
|
||||||
selectedCaseId?: string;
|
name: string;
|
||||||
onStartExport: () => void;
|
selectedCaseId?: string;
|
||||||
setName: (name: string) => void;
|
onStartExport: () => void;
|
||||||
setSelectedCaseId: (caseId: string | undefined) => void;
|
setName: (name: string) => void;
|
||||||
setRange: (range: TimeRange | undefined) => void;
|
setSelectedCaseId: (caseId: string | undefined) => void;
|
||||||
setMode: (mode: ExportMode) => void;
|
setRange: (range: TimeRange | undefined) => void;
|
||||||
onCancel: () => void;
|
setMode: (mode: ExportMode) => void;
|
||||||
};
|
onCancel: () => void;
|
||||||
export function ExportContent({
|
};
|
||||||
latestTime,
|
export function ExportContent({
|
||||||
currentTime,
|
latestTime,
|
||||||
range,
|
currentTime,
|
||||||
name,
|
range,
|
||||||
selectedCaseId,
|
name,
|
||||||
onStartExport,
|
selectedCaseId,
|
||||||
setName,
|
onStartExport,
|
||||||
setSelectedCaseId,
|
setName,
|
||||||
setRange,
|
setSelectedCaseId,
|
||||||
setMode,
|
setRange,
|
||||||
onCancel,
|
setMode,
|
||||||
}: ExportContentProps) {
|
onCancel,
|
||||||
const { t } = useTranslation(["components/dialog"]);
|
}: ExportContentProps) {
|
||||||
const [selectedOption, setSelectedOption] = useState<ExportOption>("1");
|
const { t } = useTranslation(["components/dialog"]);
|
||||||
const { data: cases } = useSWR<ExportCase[]>("cases");
|
const [selectedOption, setSelectedOption] = useState<ExportOption>("1");
|
||||||
|
const { data: cases } = useSWR<ExportCase[]>("cases");
|
||||||
const onSelectTime = useCallback(
|
|
||||||
(option: ExportOption) => {
|
const onSelectTime = useCallback(
|
||||||
setSelectedOption(option);
|
(option: ExportOption) => {
|
||||||
|
setSelectedOption(option);
|
||||||
const now = new Date(latestTime * 1000);
|
|
||||||
let start = 0;
|
const now = new Date(latestTime * 1000);
|
||||||
switch (option) {
|
let start = 0;
|
||||||
case "1":
|
switch (option) {
|
||||||
now.setHours(now.getHours() - 1);
|
case "1":
|
||||||
start = now.getTime() / 1000;
|
now.setHours(now.getHours() - 1);
|
||||||
break;
|
start = now.getTime() / 1000;
|
||||||
case "4":
|
break;
|
||||||
now.setHours(now.getHours() - 4);
|
case "4":
|
||||||
start = now.getTime() / 1000;
|
now.setHours(now.getHours() - 4);
|
||||||
break;
|
start = now.getTime() / 1000;
|
||||||
case "8":
|
break;
|
||||||
now.setHours(now.getHours() - 8);
|
case "8":
|
||||||
start = now.getTime() / 1000;
|
now.setHours(now.getHours() - 8);
|
||||||
break;
|
start = now.getTime() / 1000;
|
||||||
case "12":
|
break;
|
||||||
now.setHours(now.getHours() - 12);
|
case "12":
|
||||||
start = now.getTime() / 1000;
|
now.setHours(now.getHours() - 12);
|
||||||
break;
|
start = now.getTime() / 1000;
|
||||||
case "24":
|
break;
|
||||||
now.setHours(now.getHours() - 24);
|
case "24":
|
||||||
start = now.getTime() / 1000;
|
now.setHours(now.getHours() - 24);
|
||||||
break;
|
start = now.getTime() / 1000;
|
||||||
case "custom":
|
break;
|
||||||
start = latestTime - 3600;
|
case "custom":
|
||||||
break;
|
start = latestTime - 3600;
|
||||||
}
|
break;
|
||||||
|
}
|
||||||
setRange({
|
|
||||||
before: latestTime,
|
setRange({
|
||||||
after: start,
|
before: latestTime,
|
||||||
});
|
after: start,
|
||||||
},
|
});
|
||||||
[latestTime, setRange],
|
},
|
||||||
);
|
[latestTime, setRange],
|
||||||
|
);
|
||||||
return (
|
|
||||||
<div className="w-full">
|
return (
|
||||||
{isDesktop && (
|
<div className="w-full">
|
||||||
<>
|
{isDesktop && (
|
||||||
<DialogHeader>
|
<>
|
||||||
<DialogTitle>{t("menu.export", { ns: "common" })}</DialogTitle>
|
<DialogHeader>
|
||||||
</DialogHeader>
|
<DialogTitle>{t("menu.export", { ns: "common" })}</DialogTitle>
|
||||||
<SelectSeparator className="my-4 bg-secondary" />
|
</DialogHeader>
|
||||||
</>
|
<SelectSeparator className="my-4 bg-secondary" />
|
||||||
)}
|
</>
|
||||||
<RadioGroup
|
)}
|
||||||
className={`flex flex-col gap-4 ${isDesktop ? "" : "mt-4"}`}
|
<RadioGroup
|
||||||
onValueChange={(value) => onSelectTime(value as ExportOption)}
|
className={`flex flex-col gap-4 ${isDesktop ? "" : "mt-4"}`}
|
||||||
>
|
onValueChange={(value) => onSelectTime(value as ExportOption)}
|
||||||
{EXPORT_OPTIONS.map((opt) => {
|
>
|
||||||
return (
|
{EXPORT_OPTIONS.map((opt) => {
|
||||||
<div key={opt} className="flex items-center gap-2">
|
return (
|
||||||
<RadioGroupItem
|
<div key={opt} className="flex items-center gap-2">
|
||||||
className={
|
<RadioGroupItem
|
||||||
opt == selectedOption
|
className={
|
||||||
? "bg-selected from-selected/50 to-selected/90 text-selected"
|
opt == selectedOption
|
||||||
: "bg-secondary from-secondary/50 to-secondary/90 text-secondary"
|
? "bg-selected from-selected/50 to-selected/90 text-selected"
|
||||||
}
|
: "bg-secondary from-secondary/50 to-secondary/90 text-secondary"
|
||||||
id={opt}
|
}
|
||||||
value={opt}
|
id={opt}
|
||||||
/>
|
value={opt}
|
||||||
<Label className="cursor-pointer smart-capitalize" htmlFor={opt}>
|
/>
|
||||||
{isNaN(parseInt(opt))
|
<Label className="cursor-pointer smart-capitalize" htmlFor={opt}>
|
||||||
? opt == "timeline"
|
{isNaN(parseInt(opt))
|
||||||
? t("export.time.fromTimeline")
|
? opt == "timeline"
|
||||||
: t("export.time." + opt)
|
? t("export.time.fromTimeline")
|
||||||
: t("export.time.lastHour", {
|
: t("export.time." + opt)
|
||||||
count: parseInt(opt),
|
: t("export.time.lastHour", {
|
||||||
})}
|
count: parseInt(opt),
|
||||||
</Label>
|
})}
|
||||||
</div>
|
</Label>
|
||||||
);
|
</div>
|
||||||
})}
|
);
|
||||||
</RadioGroup>
|
})}
|
||||||
{selectedOption == "custom" && (
|
</RadioGroup>
|
||||||
<CustomTimeSelector
|
{selectedOption == "custom" && (
|
||||||
latestTime={latestTime}
|
<CustomTimeSelector
|
||||||
range={range}
|
latestTime={latestTime}
|
||||||
setRange={setRange}
|
range={range}
|
||||||
startLabel={t("export.time.start.title")}
|
setRange={setRange}
|
||||||
endLabel={t("export.time.end.title")}
|
startLabel={t("export.time.start.title")}
|
||||||
/>
|
endLabel={t("export.time.end.title")}
|
||||||
)}
|
/>
|
||||||
<Input
|
)}
|
||||||
className="text-md my-6"
|
<Input
|
||||||
type="search"
|
className="text-md my-6"
|
||||||
placeholder={t("export.name.placeholder")}
|
type="search"
|
||||||
value={name}
|
placeholder={t("export.name.placeholder")}
|
||||||
onChange={(e) => setName(e.target.value)}
|
value={name}
|
||||||
/>
|
onChange={(e) => setName(e.target.value)}
|
||||||
<div className="my-4">
|
/>
|
||||||
<Label className="text-sm text-secondary-foreground">
|
<div className="my-4">
|
||||||
{t("export.case.label", { defaultValue: "Case (optional)" })}
|
<Label className="text-sm text-secondary-foreground">
|
||||||
</Label>
|
{t("export.case.label", { defaultValue: "Case (optional)" })}
|
||||||
<Select
|
</Label>
|
||||||
value={selectedCaseId || "none"}
|
<Select
|
||||||
onValueChange={(value) =>
|
value={selectedCaseId || "none"}
|
||||||
setSelectedCaseId(value === "none" ? undefined : value)
|
onValueChange={(value) =>
|
||||||
}
|
setSelectedCaseId(value === "none" ? undefined : value)
|
||||||
>
|
}
|
||||||
<SelectTrigger className="mt-2">
|
>
|
||||||
<SelectValue
|
<SelectTrigger className="mt-2">
|
||||||
placeholder={t("export.case.placeholder", {
|
<SelectValue
|
||||||
defaultValue: "Select a case (optional)",
|
placeholder={t("export.case.placeholder", {
|
||||||
})}
|
defaultValue: "Select a case (optional)",
|
||||||
/>
|
})}
|
||||||
</SelectTrigger>
|
/>
|
||||||
<SelectContent>
|
</SelectTrigger>
|
||||||
<SelectItem
|
<SelectContent>
|
||||||
value="none"
|
<SelectItem
|
||||||
className="cursor-pointer hover:bg-accent hover:text-accent-foreground"
|
value="none"
|
||||||
>
|
className="cursor-pointer hover:bg-accent hover:text-accent-foreground"
|
||||||
{t("label.none", { ns: "common" })}
|
>
|
||||||
</SelectItem>
|
{t("label.none", { ns: "common" })}
|
||||||
{cases
|
</SelectItem>
|
||||||
?.sort((a, b) => a.name.localeCompare(b.name))
|
{cases
|
||||||
.map((caseItem) => (
|
?.sort((a, b) => a.name.localeCompare(b.name))
|
||||||
<SelectItem
|
.map((caseItem) => (
|
||||||
key={caseItem.id}
|
<SelectItem
|
||||||
value={caseItem.id}
|
key={caseItem.id}
|
||||||
className="cursor-pointer hover:bg-accent hover:text-accent-foreground"
|
value={caseItem.id}
|
||||||
>
|
className="cursor-pointer hover:bg-accent hover:text-accent-foreground"
|
||||||
{caseItem.name}
|
>
|
||||||
</SelectItem>
|
{caseItem.name}
|
||||||
))}
|
</SelectItem>
|
||||||
</SelectContent>
|
))}
|
||||||
</Select>
|
</SelectContent>
|
||||||
</div>
|
</Select>
|
||||||
{isDesktop && <SelectSeparator className="my-4 bg-secondary" />}
|
</div>
|
||||||
<DialogFooter
|
{isDesktop && <SelectSeparator className="my-4 bg-secondary" />}
|
||||||
className={isDesktop ? "" : "mt-3 flex flex-col-reverse gap-4"}
|
<DialogFooter
|
||||||
>
|
className={isDesktop ? "" : "mt-3 flex flex-col-reverse gap-4"}
|
||||||
<div
|
>
|
||||||
className={`cursor-pointer p-2 text-center ${isDesktop ? "" : "w-full"}`}
|
<div
|
||||||
onClick={onCancel}
|
className={`cursor-pointer p-2 text-center ${isDesktop ? "" : "w-full"}`}
|
||||||
>
|
onClick={onCancel}
|
||||||
{t("button.cancel", { ns: "common" })}
|
>
|
||||||
</div>
|
{t("button.cancel", { ns: "common" })}
|
||||||
<Button
|
</div>
|
||||||
className={isDesktop ? "" : "w-full"}
|
<Button
|
||||||
aria-label={t("export.selectOrExport")}
|
className={isDesktop ? "" : "w-full"}
|
||||||
variant="select"
|
aria-label={t("export.selectOrExport")}
|
||||||
size="sm"
|
variant="select"
|
||||||
onClick={() => {
|
size="sm"
|
||||||
if (selectedOption == "timeline") {
|
onClick={() => {
|
||||||
setRange({ before: currentTime + 30, after: currentTime - 30 });
|
if (selectedOption == "timeline") {
|
||||||
setMode("timeline");
|
setRange({ before: currentTime + 30, after: currentTime - 30 });
|
||||||
} else {
|
setMode("timeline");
|
||||||
onStartExport();
|
} else {
|
||||||
setSelectedOption("1");
|
onStartExport();
|
||||||
setMode("none");
|
setSelectedOption("1");
|
||||||
}
|
setMode("none");
|
||||||
}}
|
}
|
||||||
>
|
}}
|
||||||
{selectedOption == "timeline"
|
>
|
||||||
? t("export.select")
|
{selectedOption == "timeline"
|
||||||
: t("export.export")}
|
? t("export.select")
|
||||||
</Button>
|
: t("export.export")}
|
||||||
</DialogFooter>
|
</Button>
|
||||||
</div>
|
</DialogFooter>
|
||||||
);
|
</div>
|
||||||
}
|
);
|
||||||
|
}
|
||||||
type ExportPreviewDialogProps = {
|
|
||||||
camera: string;
|
type ExportPreviewDialogProps = {
|
||||||
range?: TimeRange;
|
camera: string;
|
||||||
showPreview: boolean;
|
range?: TimeRange;
|
||||||
setShowPreview: (showPreview: boolean) => void;
|
showPreview: boolean;
|
||||||
};
|
setShowPreview: (showPreview: boolean) => void;
|
||||||
|
};
|
||||||
export function ExportPreviewDialog({
|
|
||||||
camera,
|
export function ExportPreviewDialog({
|
||||||
range,
|
camera,
|
||||||
showPreview,
|
range,
|
||||||
setShowPreview,
|
showPreview,
|
||||||
}: ExportPreviewDialogProps) {
|
setShowPreview,
|
||||||
const { t } = useTranslation(["components/dialog"]);
|
}: ExportPreviewDialogProps) {
|
||||||
if (!range) {
|
const { t } = useTranslation(["components/dialog"]);
|
||||||
return null;
|
const vodPath = range
|
||||||
}
|
? `/vod/${camera}/start/${range.after}/end/${range.before}/index.m3u8`
|
||||||
|
: `/vod/${camera}/start/0/end/0/index.m3u8`;
|
||||||
const source = `${baseUrl}vod/${camera}/start/${range.after}/end/${range.before}/index.m3u8`;
|
const playbackSource = useRecordingPlaybackSource({
|
||||||
|
camera,
|
||||||
return (
|
after: range?.after ?? 0,
|
||||||
<Dialog open={showPreview} onOpenChange={setShowPreview}>
|
before: range?.before ?? 0,
|
||||||
<DialogContent
|
vodPath,
|
||||||
className={cn(
|
enabled: !!range,
|
||||||
"scrollbar-container overflow-y-auto",
|
});
|
||||||
isDesktop &&
|
|
||||||
"max-h-[95dvh] sm:max-w-xl md:max-w-4xl lg:max-w-4xl xl:max-w-7xl",
|
if (!range) {
|
||||||
isMobile && "px-4",
|
return null;
|
||||||
)}
|
}
|
||||||
>
|
|
||||||
<DialogHeader>
|
const source = playbackSource ?? `${baseUrl}${vodPath}`;
|
||||||
<DialogTitle>{t("export.fromTimeline.previewExport")}</DialogTitle>
|
|
||||||
<DialogDescription className="sr-only">
|
return (
|
||||||
{t("export.fromTimeline.previewExport")}
|
<Dialog open={showPreview} onOpenChange={setShowPreview}>
|
||||||
</DialogDescription>
|
<DialogContent
|
||||||
</DialogHeader>
|
className={cn(
|
||||||
<GenericVideoPlayer source={source} />
|
"scrollbar-container overflow-y-auto",
|
||||||
</DialogContent>
|
isDesktop &&
|
||||||
</Dialog>
|
"max-h-[95dvh] sm:max-w-xl md:max-w-4xl lg:max-w-4xl xl:max-w-7xl",
|
||||||
);
|
isMobile && "px-4",
|
||||||
}
|
)}
|
||||||
|
>
|
||||||
|
<DialogHeader>
|
||||||
|
<DialogTitle>{t("export.fromTimeline.previewExport")}</DialogTitle>
|
||||||
|
<DialogDescription className="sr-only">
|
||||||
|
{t("export.fromTimeline.previewExport")}
|
||||||
|
</DialogDescription>
|
||||||
|
</DialogHeader>
|
||||||
|
<GenericVideoPlayer source={source} />
|
||||||
|
</DialogContent>
|
||||||
|
</Dialog>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -1,351 +1,429 @@
|
|||||||
import {
|
import {
|
||||||
ReactNode,
|
ReactNode,
|
||||||
useCallback,
|
useCallback,
|
||||||
useEffect,
|
useEffect,
|
||||||
useMemo,
|
useMemo,
|
||||||
useRef,
|
useRef,
|
||||||
useState,
|
useState,
|
||||||
} from "react";
|
} from "react";
|
||||||
import { useApiHost } from "@/api";
|
import { useApiHost } from "@/api";
|
||||||
import useSWR from "swr";
|
import useSWR from "swr";
|
||||||
import { FrigateConfig } from "@/types/frigateConfig";
|
import { FrigateConfig } from "@/types/frigateConfig";
|
||||||
import { Recording } from "@/types/record";
|
import {
|
||||||
import { Preview } from "@/types/preview";
|
Recording,
|
||||||
import PreviewPlayer, { PreviewController } from "../PreviewPlayer";
|
RecordingPlaybackPreference,
|
||||||
import { DynamicVideoController } from "./DynamicVideoController";
|
} from "@/types/record";
|
||||||
import HlsVideoPlayer, { HlsSource } from "../HlsVideoPlayer";
|
import { Preview } from "@/types/preview";
|
||||||
import { useDetailStream } from "@/context/detail-stream-context";
|
import PreviewPlayer, { PreviewController } from "../PreviewPlayer";
|
||||||
import { TimeRange } from "@/types/timeline";
|
import { DynamicVideoController } from "./DynamicVideoController";
|
||||||
import ActivityIndicator from "@/components/indicators/activity-indicator";
|
import HlsVideoPlayer, { HlsSource } from "../HlsVideoPlayer";
|
||||||
import { VideoResolutionType } from "@/types/live";
|
import { useDetailStream } from "@/context/detail-stream-context";
|
||||||
import axios from "axios";
|
import { TimeRange } from "@/types/timeline";
|
||||||
import { cn } from "@/lib/utils";
|
import ActivityIndicator from "@/components/indicators/activity-indicator";
|
||||||
import { useTranslation } from "react-i18next";
|
import { VideoResolutionType } from "@/types/live";
|
||||||
import {
|
import axios from "axios";
|
||||||
calculateInpointOffset,
|
import { cn } from "@/lib/utils";
|
||||||
calculateSeekPosition,
|
import { useTranslation } from "react-i18next";
|
||||||
} from "@/utils/videoUtil";
|
import { useUserPersistence } from "@/hooks/use-user-persistence";
|
||||||
import { isFirefox } from "react-device-detect";
|
import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";
|
||||||
|
import { chooseRecordingPlayback } from "@/utils/recordingPlayback";
|
||||||
/**
|
import {
|
||||||
* Dynamically switches between video playback and scrubbing preview player.
|
calculateInpointOffset,
|
||||||
*/
|
calculateSeekPosition,
|
||||||
type DynamicVideoPlayerProps = {
|
} from "@/utils/videoUtil";
|
||||||
className?: string;
|
import { isFirefox } from "react-device-detect";
|
||||||
camera: string;
|
import {
|
||||||
timeRange: TimeRange;
|
Select,
|
||||||
cameraPreviews: Preview[];
|
SelectContent,
|
||||||
startTimestamp?: number;
|
SelectItem,
|
||||||
isScrubbing: boolean;
|
SelectTrigger,
|
||||||
hotKeys: boolean;
|
SelectValue,
|
||||||
supportsFullscreen: boolean;
|
} from "@/components/ui/select";
|
||||||
fullscreen: boolean;
|
|
||||||
onControllerReady: (controller: DynamicVideoController) => void;
|
/**
|
||||||
onTimestampUpdate?: (timestamp: number) => void;
|
* Dynamically switches between video playback and scrubbing preview player.
|
||||||
onClipEnded?: () => void;
|
*/
|
||||||
onSeekToTime?: (timestamp: number, play?: boolean) => void;
|
type DynamicVideoPlayerProps = {
|
||||||
setFullResolution: React.Dispatch<React.SetStateAction<VideoResolutionType>>;
|
className?: string;
|
||||||
toggleFullscreen: () => void;
|
camera: string;
|
||||||
containerRef?: React.MutableRefObject<HTMLDivElement | null>;
|
timeRange: TimeRange;
|
||||||
transformedOverlay?: ReactNode;
|
cameraPreviews: Preview[];
|
||||||
};
|
startTimestamp?: number;
|
||||||
export default function DynamicVideoPlayer({
|
isScrubbing: boolean;
|
||||||
className,
|
hotKeys: boolean;
|
||||||
camera,
|
supportsFullscreen: boolean;
|
||||||
timeRange,
|
fullscreen: boolean;
|
||||||
cameraPreviews,
|
onControllerReady: (controller: DynamicVideoController) => void;
|
||||||
startTimestamp,
|
onTimestampUpdate?: (timestamp: number) => void;
|
||||||
isScrubbing,
|
onClipEnded?: () => void;
|
||||||
hotKeys,
|
onSeekToTime?: (timestamp: number, play?: boolean) => void;
|
||||||
supportsFullscreen,
|
setFullResolution: React.Dispatch<React.SetStateAction<VideoResolutionType>>;
|
||||||
fullscreen,
|
toggleFullscreen: () => void;
|
||||||
onControllerReady,
|
containerRef?: React.MutableRefObject<HTMLDivElement | null>;
|
||||||
onTimestampUpdate,
|
transformedOverlay?: ReactNode;
|
||||||
onClipEnded,
|
};
|
||||||
onSeekToTime,
|
export default function DynamicVideoPlayer({
|
||||||
setFullResolution,
|
className,
|
||||||
toggleFullscreen,
|
camera,
|
||||||
containerRef,
|
timeRange,
|
||||||
transformedOverlay,
|
cameraPreviews,
|
||||||
}: DynamicVideoPlayerProps) {
|
startTimestamp,
|
||||||
const { t } = useTranslation(["components/player"]);
|
isScrubbing,
|
||||||
const apiHost = useApiHost();
|
hotKeys,
|
||||||
const { data: config } = useSWR<FrigateConfig>("config");
|
supportsFullscreen,
|
||||||
|
fullscreen,
|
||||||
// for detail stream context in History
|
onControllerReady,
|
||||||
const {
|
onTimestampUpdate,
|
||||||
isDetailMode,
|
onClipEnded,
|
||||||
camera: contextCamera,
|
onSeekToTime,
|
||||||
currentTime,
|
setFullResolution,
|
||||||
} = useDetailStream();
|
toggleFullscreen,
|
||||||
|
containerRef,
|
||||||
// controlling playback
|
transformedOverlay,
|
||||||
|
}: DynamicVideoPlayerProps) {
|
||||||
const playerRef = useRef<HTMLVideoElement | null>(null);
|
const { t } = useTranslation(["components/player"]);
|
||||||
const [previewController, setPreviewController] =
|
const apiHost = useApiHost();
|
||||||
useState<PreviewController | null>(null);
|
const { data: config } = useSWR<FrigateConfig>("config");
|
||||||
const [noRecording, setNoRecording] = useState(false);
|
|
||||||
const controller = useMemo(() => {
|
// for detail stream context in History
|
||||||
if (!config || !playerRef.current || !previewController) {
|
const {
|
||||||
return undefined;
|
isDetailMode,
|
||||||
}
|
camera: contextCamera,
|
||||||
|
currentTime,
|
||||||
return new DynamicVideoController(
|
} = useDetailStream();
|
||||||
camera,
|
|
||||||
playerRef.current,
|
// controlling playback
|
||||||
previewController,
|
|
||||||
(config.cameras[camera]?.detect?.annotation_offset || 0) / 1000,
|
const playerRef = useRef<HTMLVideoElement | null>(null);
|
||||||
isScrubbing ? "scrubbing" : "playback",
|
const [previewController, setPreviewController] =
|
||||||
setNoRecording,
|
useState<PreviewController | null>(null);
|
||||||
() => {},
|
const [noRecording, setNoRecording] = useState(false);
|
||||||
);
|
const controller = useMemo(() => {
|
||||||
// we only want to fire once when players are ready
|
if (!config || !playerRef.current || !previewController) {
|
||||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
return undefined;
|
||||||
}, [camera, config, playerRef.current, previewController]);
|
}
|
||||||
|
|
||||||
useEffect(() => {
|
return new DynamicVideoController(
|
||||||
if (!controller) {
|
camera,
|
||||||
return;
|
playerRef.current,
|
||||||
}
|
previewController,
|
||||||
|
(config.cameras[camera]?.detect?.annotation_offset || 0) / 1000,
|
||||||
if (controller) {
|
isScrubbing ? "scrubbing" : "playback",
|
||||||
onControllerReady(controller);
|
setNoRecording,
|
||||||
}
|
() => {},
|
||||||
|
);
|
||||||
// we only want to fire once when players are ready
|
// we only want to fire once when players are ready
|
||||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||||
}, [controller]);
|
}, [camera, config, playerRef.current, previewController]);
|
||||||
|
|
||||||
// initial state
|
useEffect(() => {
|
||||||
|
if (!controller) {
|
||||||
const [isLoading, setIsLoading] = useState(false);
|
return;
|
||||||
const [isBuffering, setIsBuffering] = useState(false);
|
}
|
||||||
const [loadingTimeout, setLoadingTimeout] = useState<NodeJS.Timeout>();
|
|
||||||
|
if (controller) {
|
||||||
// Don't set source until recordings load - we need accurate startPosition
|
onControllerReady(controller);
|
||||||
// to avoid hls.js clamping to video end when startPosition exceeds duration
|
}
|
||||||
const [source, setSource] = useState<HlsSource | undefined>(undefined);
|
|
||||||
|
// we only want to fire once when players are ready
|
||||||
// start at correct time
|
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||||
|
}, [controller]);
|
||||||
useEffect(() => {
|
|
||||||
if (!isScrubbing) {
|
// initial state
|
||||||
setLoadingTimeout(setTimeout(() => setIsLoading(true), 1000));
|
|
||||||
}
|
const [isLoading, setIsLoading] = useState(false);
|
||||||
|
const [isBuffering, setIsBuffering] = useState(false);
|
||||||
return () => {
|
const [loadingTimeout, setLoadingTimeout] = useState<NodeJS.Timeout>();
|
||||||
if (loadingTimeout) {
|
const [playbackPreference, setPlaybackPreference] =
|
||||||
clearTimeout(loadingTimeout);
|
useUserPersistence<RecordingPlaybackPreference>(
|
||||||
}
|
`${camera}-recording-playback-v2`,
|
||||||
};
|
"sub",
|
||||||
// we only want trigger when scrubbing state changes
|
);
|
||||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
|
||||||
}, [camera, isScrubbing]);
|
// Don't set source until recordings load - we need accurate startPosition
|
||||||
|
// to avoid hls.js clamping to video end when startPosition exceeds duration
|
||||||
const onPlayerLoaded = useCallback(() => {
|
const [source, setSource] = useState<HlsSource | undefined>(undefined);
|
||||||
if (!controller || !startTimestamp) {
|
|
||||||
return;
|
// start at correct time
|
||||||
}
|
|
||||||
|
useEffect(() => {
|
||||||
controller.seekToTimestamp(startTimestamp, true);
|
if (!isScrubbing) {
|
||||||
}, [startTimestamp, controller]);
|
setLoadingTimeout(setTimeout(() => setIsLoading(true), 1000));
|
||||||
|
}
|
||||||
const onTimeUpdate = useCallback(
|
|
||||||
(time: number) => {
|
return () => {
|
||||||
if (isScrubbing || !controller || !onTimestampUpdate || time == 0) {
|
if (loadingTimeout) {
|
||||||
return;
|
clearTimeout(loadingTimeout);
|
||||||
}
|
}
|
||||||
|
};
|
||||||
if (isLoading) {
|
// we only want trigger when scrubbing state changes
|
||||||
setIsLoading(false);
|
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||||
}
|
}, [camera, isScrubbing]);
|
||||||
|
|
||||||
if (isBuffering) {
|
const onPlayerLoaded = useCallback(() => {
|
||||||
setIsBuffering(false);
|
if (!controller || !startTimestamp) {
|
||||||
}
|
return;
|
||||||
|
}
|
||||||
onTimestampUpdate(controller.getProgress(time));
|
|
||||||
},
|
controller.seekToTimestamp(startTimestamp, true);
|
||||||
[controller, onTimestampUpdate, isBuffering, isLoading, isScrubbing],
|
}, [startTimestamp, controller]);
|
||||||
);
|
|
||||||
|
const onTimeUpdate = useCallback(
|
||||||
const onUploadFrameToPlus = useCallback(
|
(time: number) => {
|
||||||
(playTime: number) => {
|
if (isScrubbing || !controller || !onTimestampUpdate || time == 0) {
|
||||||
if (!controller) {
|
return;
|
||||||
return;
|
}
|
||||||
}
|
|
||||||
|
if (isLoading) {
|
||||||
const time = controller.getProgress(playTime);
|
setIsLoading(false);
|
||||||
return axios.post(`/${camera}/plus/${time}`);
|
}
|
||||||
},
|
|
||||||
[camera, controller],
|
if (isBuffering) {
|
||||||
);
|
setIsBuffering(false);
|
||||||
|
}
|
||||||
// state of playback player
|
|
||||||
|
onTimestampUpdate(controller.getProgress(time));
|
||||||
const recordingParams = useMemo(
|
},
|
||||||
() => ({
|
[controller, onTimestampUpdate, isBuffering, isLoading, isScrubbing],
|
||||||
before: timeRange.before,
|
);
|
||||||
after: timeRange.after,
|
|
||||||
}),
|
const onUploadFrameToPlus = useCallback(
|
||||||
[timeRange],
|
(playTime: number) => {
|
||||||
);
|
if (!controller) {
|
||||||
const { data: recordings } = useSWR<Recording[]>(
|
return;
|
||||||
[`${camera}/recordings`, recordingParams],
|
}
|
||||||
{ revalidateOnFocus: false },
|
|
||||||
);
|
const time = controller.getProgress(playTime);
|
||||||
|
return axios.post(`/${camera}/plus/${time}`);
|
||||||
useEffect(() => {
|
},
|
||||||
if (!recordings?.length) {
|
[camera, controller],
|
||||||
if (recordings?.length == 0) {
|
);
|
||||||
setNoRecording(true);
|
|
||||||
}
|
// state of playback player
|
||||||
|
|
||||||
return;
|
const recordingParams = useMemo(
|
||||||
}
|
() => ({
|
||||||
|
before: timeRange.before,
|
||||||
let startPosition = undefined;
|
after: timeRange.after,
|
||||||
|
}),
|
||||||
if (startTimestamp) {
|
[timeRange],
|
||||||
const inpointOffset = calculateInpointOffset(
|
);
|
||||||
recordingParams.after,
|
const { data: allRecordings } = useSWR<Recording[]>(
|
||||||
(recordings || [])[0],
|
[`${camera}/recordings`, { ...recordingParams, variant: "all" }],
|
||||||
);
|
{ revalidateOnFocus: false },
|
||||||
|
);
|
||||||
startPosition = calculateSeekPosition(
|
const recordings = useMemo(() => {
|
||||||
startTimestamp,
|
if (!allRecordings?.length) {
|
||||||
recordings,
|
return allRecordings;
|
||||||
inpointOffset,
|
}
|
||||||
);
|
|
||||||
}
|
const mainRecordings = allRecordings.filter(
|
||||||
|
(recording) => (recording.variant || "main") === "main",
|
||||||
setSource({
|
);
|
||||||
playlist: `${apiHost}vod/${camera}/start/${recordingParams.after}/end/${recordingParams.before}/master.m3u8`,
|
|
||||||
startPosition,
|
return mainRecordings.length > 0 ? mainRecordings : allRecordings;
|
||||||
});
|
}, [allRecordings]);
|
||||||
|
const codecNames = useMemo(
|
||||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
() =>
|
||||||
}, [recordings]);
|
Array.from(
|
||||||
|
new Set((allRecordings ?? []).map((recording) => recording.codec_name)),
|
||||||
useEffect(() => {
|
),
|
||||||
if (!controller || !recordings?.length) {
|
[allRecordings],
|
||||||
return;
|
);
|
||||||
}
|
const playbackCapabilities = usePlaybackCapabilities(codecNames);
|
||||||
|
|
||||||
if (playerRef.current) {
|
useEffect(() => {
|
||||||
playerRef.current.autoplay = !isScrubbing;
|
if (!recordings?.length) {
|
||||||
}
|
if (recordings?.length == 0) {
|
||||||
|
setNoRecording(true);
|
||||||
setLoadingTimeout(setTimeout(() => setIsLoading(true), 1000));
|
}
|
||||||
|
|
||||||
controller.newPlayback({
|
return;
|
||||||
recordings: recordings ?? [],
|
}
|
||||||
timeRange,
|
|
||||||
});
|
let startPosition = undefined;
|
||||||
|
|
||||||
// we only want this to change when controller or recordings update
|
if (startTimestamp) {
|
||||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
const inpointOffset = calculateInpointOffset(
|
||||||
}, [controller, recordings]);
|
recordingParams.after,
|
||||||
|
(recordings || [])[0],
|
||||||
const inpointOffset = useMemo(
|
);
|
||||||
() => calculateInpointOffset(recordingParams.after, (recordings || [])[0]),
|
|
||||||
[recordingParams, recordings],
|
startPosition = calculateSeekPosition(
|
||||||
);
|
startTimestamp,
|
||||||
|
recordings,
|
||||||
const onValidateClipEnd = useCallback(
|
inpointOffset,
|
||||||
(currentTime: number) => {
|
);
|
||||||
if (!onClipEnded || !controller || !recordings) {
|
}
|
||||||
return;
|
|
||||||
}
|
const vodPath = `/vod/${camera}/start/${recordingParams.after}/end/${recordingParams.before}/master.m3u8`;
|
||||||
|
const decision = chooseRecordingPlayback({
|
||||||
if (!isFirefox) {
|
apiHost,
|
||||||
onClipEnded();
|
config,
|
||||||
}
|
recordings: allRecordings ?? recordings,
|
||||||
|
preference: playbackPreference ?? "sub",
|
||||||
// Firefox has a bug where clipEnded can be called prematurely due to buffering
|
vodPath,
|
||||||
// we need to validate if the current play-point is truly at the end of available recordings
|
capabilities: playbackCapabilities,
|
||||||
|
});
|
||||||
const lastRecordingTime = recordings.at(-1)?.start_time;
|
setSource({
|
||||||
|
playlist: decision.url,
|
||||||
if (
|
startPosition,
|
||||||
!lastRecordingTime ||
|
});
|
||||||
controller.getProgress(currentTime) < lastRecordingTime
|
|
||||||
) {
|
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||||
return;
|
}, [
|
||||||
}
|
apiHost,
|
||||||
|
camera,
|
||||||
onClipEnded();
|
recordingParams.after,
|
||||||
},
|
recordingParams.before,
|
||||||
[onClipEnded, controller, recordings],
|
allRecordings,
|
||||||
);
|
recordings,
|
||||||
|
startTimestamp,
|
||||||
return (
|
playbackPreference,
|
||||||
<>
|
playbackCapabilities,
|
||||||
{source && (
|
config?.transcode_proxy?.enabled,
|
||||||
<HlsVideoPlayer
|
config?.transcode_proxy?.vod_proxy_url,
|
||||||
videoRef={playerRef}
|
]);
|
||||||
containerRef={containerRef}
|
|
||||||
visible={!(isScrubbing || isLoading)}
|
useEffect(() => {
|
||||||
currentSource={source}
|
if (!controller || !recordings?.length) {
|
||||||
hotKeys={hotKeys}
|
return;
|
||||||
supportsFullscreen={supportsFullscreen}
|
}
|
||||||
fullscreen={fullscreen}
|
|
||||||
inpointOffset={inpointOffset}
|
if (playerRef.current) {
|
||||||
onTimeUpdate={onTimeUpdate}
|
playerRef.current.autoplay = !isScrubbing;
|
||||||
onPlayerLoaded={onPlayerLoaded}
|
}
|
||||||
onClipEnded={onValidateClipEnd}
|
|
||||||
onSeekToTime={(timestamp, play) => {
|
setLoadingTimeout(setTimeout(() => setIsLoading(true), 1000));
|
||||||
if (onSeekToTime) {
|
|
||||||
onSeekToTime(timestamp, play);
|
controller.newPlayback({
|
||||||
}
|
recordings: recordings ?? [],
|
||||||
}}
|
timeRange,
|
||||||
onPlaying={() => {
|
});
|
||||||
if (isScrubbing) {
|
|
||||||
playerRef.current?.pause();
|
// we only want this to change when controller or recordings update
|
||||||
}
|
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||||
|
}, [controller, recordings]);
|
||||||
if (loadingTimeout) {
|
|
||||||
clearTimeout(loadingTimeout);
|
const inpointOffset = useMemo(
|
||||||
}
|
() => calculateInpointOffset(recordingParams.after, (recordings || [])[0]),
|
||||||
|
[recordingParams, recordings],
|
||||||
setNoRecording(false);
|
);
|
||||||
}}
|
|
||||||
setFullResolution={setFullResolution}
|
const onValidateClipEnd = useCallback(
|
||||||
onUploadFrame={onUploadFrameToPlus}
|
(currentTime: number) => {
|
||||||
toggleFullscreen={toggleFullscreen}
|
if (!onClipEnded || !controller || !recordings) {
|
||||||
onError={(error) => {
|
return;
|
||||||
if (error == "stalled" && !isScrubbing) {
|
}
|
||||||
setIsBuffering(true);
|
|
||||||
}
|
if (!isFirefox) {
|
||||||
}}
|
onClipEnded();
|
||||||
isDetailMode={isDetailMode}
|
}
|
||||||
camera={contextCamera || camera}
|
|
||||||
currentTimeOverride={currentTime}
|
// Firefox has a bug where clipEnded can be called prematurely due to buffering
|
||||||
transformedOverlay={transformedOverlay}
|
// we need to validate if the current play-point is truly at the end of available recordings
|
||||||
/>
|
|
||||||
)}
|
const lastRecordingTime = recordings.at(-1)?.start_time;
|
||||||
<PreviewPlayer
|
|
||||||
className={cn(
|
if (
|
||||||
className,
|
!lastRecordingTime ||
|
||||||
isScrubbing || isLoading ? "visible" : "hidden",
|
controller.getProgress(currentTime) < lastRecordingTime
|
||||||
)}
|
) {
|
||||||
camera={camera}
|
return;
|
||||||
timeRange={timeRange}
|
}
|
||||||
cameraPreviews={cameraPreviews}
|
|
||||||
startTime={startTimestamp}
|
onClipEnded();
|
||||||
isScrubbing={isScrubbing}
|
},
|
||||||
onControllerReady={(previewController) =>
|
[onClipEnded, controller, recordings],
|
||||||
setPreviewController(previewController)
|
);
|
||||||
}
|
|
||||||
/>
|
return (
|
||||||
{!isScrubbing && (isLoading || isBuffering) && !noRecording && (
|
<>
|
||||||
<ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />
|
{source && (
|
||||||
)}
|
<HlsVideoPlayer
|
||||||
{!isScrubbing && !isLoading && noRecording && (
|
videoRef={playerRef}
|
||||||
<div className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2">
|
containerRef={containerRef}
|
||||||
{t("noRecordingsFoundForThisTime")}
|
visible={!(isScrubbing || isLoading)}
|
||||||
</div>
|
currentSource={source}
|
||||||
)}
|
hotKeys={hotKeys}
|
||||||
</>
|
supportsFullscreen={supportsFullscreen}
|
||||||
);
|
fullscreen={fullscreen}
|
||||||
}
|
inpointOffset={inpointOffset}
|
||||||
|
onTimeUpdate={onTimeUpdate}
|
||||||
|
onPlayerLoaded={onPlayerLoaded}
|
||||||
|
onClipEnded={onValidateClipEnd}
|
||||||
|
onSeekToTime={(timestamp, play) => {
|
||||||
|
if (onSeekToTime) {
|
||||||
|
onSeekToTime(timestamp, play);
|
||||||
|
}
|
||||||
|
}}
|
||||||
|
onPlaying={() => {
|
||||||
|
if (isScrubbing) {
|
||||||
|
playerRef.current?.pause();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (loadingTimeout) {
|
||||||
|
clearTimeout(loadingTimeout);
|
||||||
|
}
|
||||||
|
|
||||||
|
setNoRecording(false);
|
||||||
|
}}
|
||||||
|
setFullResolution={setFullResolution}
|
||||||
|
onUploadFrame={onUploadFrameToPlus}
|
||||||
|
toggleFullscreen={toggleFullscreen}
|
||||||
|
onError={(error) => {
|
||||||
|
if (error == "stalled" && !isScrubbing) {
|
||||||
|
setIsBuffering(true);
|
||||||
|
}
|
||||||
|
}}
|
||||||
|
isDetailMode={isDetailMode}
|
||||||
|
camera={contextCamera || camera}
|
||||||
|
currentTimeOverride={currentTime}
|
||||||
|
transformedOverlay={transformedOverlay}
|
||||||
|
/>
|
||||||
|
)}
|
||||||
|
{!isScrubbing && source && (
|
||||||
|
<div className="absolute right-3 top-3 z-50">
|
||||||
|
<Select
|
||||||
|
value={playbackPreference ?? "sub"}
|
||||||
|
onValueChange={(value) =>
|
||||||
|
setPlaybackPreference(value as RecordingPlaybackPreference)
|
||||||
|
}
|
||||||
|
>
|
||||||
|
<SelectTrigger className="h-8 w-32 bg-background/90 text-xs backdrop-blur">
|
||||||
|
<SelectValue />
|
||||||
|
</SelectTrigger>
|
||||||
|
<SelectContent>
|
||||||
|
<SelectItem value="auto">Auto</SelectItem>
|
||||||
|
<SelectItem value="main">Main</SelectItem>
|
||||||
|
<SelectItem value="sub">Sub</SelectItem>
|
||||||
|
<SelectItem value="transcoded">Transcoded</SelectItem>
|
||||||
|
</SelectContent>
|
||||||
|
</Select>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
<PreviewPlayer
|
||||||
|
className={cn(
|
||||||
|
className,
|
||||||
|
isScrubbing || isLoading ? "visible" : "hidden",
|
||||||
|
)}
|
||||||
|
camera={camera}
|
||||||
|
timeRange={timeRange}
|
||||||
|
cameraPreviews={cameraPreviews}
|
||||||
|
startTime={startTimestamp}
|
||||||
|
isScrubbing={isScrubbing}
|
||||||
|
onControllerReady={(previewController) =>
|
||||||
|
setPreviewController(previewController)
|
||||||
|
}
|
||||||
|
/>
|
||||||
|
{!isScrubbing && (isLoading || isBuffering) && !noRecording && (
|
||||||
|
<ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />
|
||||||
|
)}
|
||||||
|
{!isScrubbing && !isLoading && noRecording && (
|
||||||
|
<div className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2">
|
||||||
|
{t("noRecordingsFoundForThisTime")}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|||||||
77
web/src/hooks/use-playback-capabilities.ts
Normal file
77
web/src/hooks/use-playback-capabilities.ts
Normal file
@ -0,0 +1,77 @@
|
|||||||
|
import { useMemo } from "react";
|
||||||
|
import {
|
||||||
|
getCodecMimeTypes,
|
||||||
|
normalizeCodecName,
|
||||||
|
PlaybackCapabilities,
|
||||||
|
} from "@/utils/recordingPlayback";
|
||||||
|
|
||||||
|
type NavigatorConnection = {
|
||||||
|
downlink?: number;
|
||||||
|
effectiveType?: string;
|
||||||
|
rtt?: number;
|
||||||
|
saveData?: boolean;
|
||||||
|
};
|
||||||
|
|
||||||
|
declare global {
|
||||||
|
interface Navigator {
|
||||||
|
connection?: NavigatorConnection;
|
||||||
|
mozConnection?: NavigatorConnection;
|
||||||
|
webkitConnection?: NavigatorConnection;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface Window {
|
||||||
|
ManagedMediaSource?: typeof MediaSource;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function canPlayMimeType(mimeType?: string): boolean {
|
||||||
|
if (!mimeType || typeof window === "undefined") {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (window.ManagedMediaSource?.isTypeSupported(mimeType)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (window.MediaSource?.isTypeSupported(mimeType)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
const video = document.createElement("video");
|
||||||
|
return video.canPlayType(mimeType) !== "";
|
||||||
|
}
|
||||||
|
|
||||||
|
function canPlayAnyMimeType(mimeTypes: string[]): boolean {
|
||||||
|
return mimeTypes.some((mimeType) => canPlayMimeType(mimeType));
|
||||||
|
}
|
||||||
|
|
||||||
|
export default function usePlaybackCapabilities(codecNames: Array<string | null | undefined>) {
|
||||||
|
return useMemo<PlaybackCapabilities>(() => {
|
||||||
|
if (typeof window === "undefined") {
|
||||||
|
return { estimatedBandwidthBps: undefined, saveData: false, supports: {} };
|
||||||
|
}
|
||||||
|
|
||||||
|
const connection =
|
||||||
|
navigator.connection ?? navigator.mozConnection ?? navigator.webkitConnection;
|
||||||
|
const supports: Record<string, boolean> = {};
|
||||||
|
|
||||||
|
codecNames.forEach((codecName) => {
|
||||||
|
const normalized = normalizeCodecName(codecName);
|
||||||
|
if (!normalized || normalized in supports) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
supports[normalized] = canPlayAnyMimeType(getCodecMimeTypes(normalized));
|
||||||
|
});
|
||||||
|
|
||||||
|
const downlinkMbps = connection?.downlink;
|
||||||
|
return {
|
||||||
|
estimatedBandwidthBps:
|
||||||
|
typeof downlinkMbps === "number" && downlinkMbps > 0
|
||||||
|
? downlinkMbps * 1_000_000
|
||||||
|
: undefined,
|
||||||
|
saveData: connection?.saveData === true,
|
||||||
|
supports,
|
||||||
|
};
|
||||||
|
}, [codecNames]);
|
||||||
|
}
|
||||||
72
web/src/hooks/use-recording-playback-source.ts
Normal file
72
web/src/hooks/use-recording-playback-source.ts
Normal file
@ -0,0 +1,72 @@
|
|||||||
|
import { useApiHost } from "@/api";
|
||||||
|
import useSWR from "swr";
|
||||||
|
import { FrigateConfig } from "@/types/frigateConfig";
|
||||||
|
import {
|
||||||
|
Recording,
|
||||||
|
RecordingPlaybackPreference,
|
||||||
|
} from "@/types/record";
|
||||||
|
import { useMemo } from "react";
|
||||||
|
import { useUserPersistence } from "@/hooks/use-user-persistence";
|
||||||
|
import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";
|
||||||
|
import { chooseRecordingPlayback } from "@/utils/recordingPlayback";
|
||||||
|
|
||||||
|
type RecordingPlaybackSourceOptions = {
|
||||||
|
camera: string;
|
||||||
|
after: number;
|
||||||
|
before: number;
|
||||||
|
vodPath: string;
|
||||||
|
preference?: RecordingPlaybackPreference;
|
||||||
|
enabled?: boolean;
|
||||||
|
};
|
||||||
|
|
||||||
|
export default function useRecordingPlaybackSource({
|
||||||
|
camera,
|
||||||
|
after,
|
||||||
|
before,
|
||||||
|
vodPath,
|
||||||
|
preference,
|
||||||
|
enabled = true,
|
||||||
|
}: RecordingPlaybackSourceOptions) {
|
||||||
|
const apiHost = useApiHost();
|
||||||
|
const { data: config } = useSWR<FrigateConfig>("config");
|
||||||
|
const [storedPreference] = useUserPersistence<RecordingPlaybackPreference>(
|
||||||
|
`${camera}-recording-playback-v2`,
|
||||||
|
"sub",
|
||||||
|
);
|
||||||
|
const { data: recordings } = useSWR<Recording[]>(
|
||||||
|
enabled ? [`${camera}/recordings`, { after, before, variant: "all" }] : null,
|
||||||
|
{ revalidateOnFocus: false },
|
||||||
|
);
|
||||||
|
|
||||||
|
const codecNames = useMemo(
|
||||||
|
() =>
|
||||||
|
Array.from(
|
||||||
|
new Set((recordings ?? []).map((recording) => recording.codec_name)),
|
||||||
|
),
|
||||||
|
[recordings],
|
||||||
|
);
|
||||||
|
const capabilities = usePlaybackCapabilities(codecNames);
|
||||||
|
|
||||||
|
return useMemo(() => {
|
||||||
|
if (!recordings?.length) {
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
return chooseRecordingPlayback({
|
||||||
|
apiHost,
|
||||||
|
config,
|
||||||
|
recordings,
|
||||||
|
preference: preference ?? storedPreference ?? "sub",
|
||||||
|
vodPath,
|
||||||
|
capabilities,
|
||||||
|
}).url;
|
||||||
|
}, [
|
||||||
|
apiHost,
|
||||||
|
capabilities,
|
||||||
|
config,
|
||||||
|
preference,
|
||||||
|
recordings,
|
||||||
|
storedPreference,
|
||||||
|
vodPath,
|
||||||
|
]);
|
||||||
|
}
|
||||||
File diff suppressed because it is too large
Load Diff
@ -1,49 +1,60 @@
|
|||||||
import { ReviewSeverity } from "./review";
|
import { ReviewSeverity } from "./review";
|
||||||
import { TimelineType } from "./timeline";
|
import { TimelineType } from "./timeline";
|
||||||
|
|
||||||
export type Recording = {
|
export type Recording = {
|
||||||
id: string;
|
id: string;
|
||||||
camera: string;
|
camera: string;
|
||||||
start_time: number;
|
start_time: number;
|
||||||
end_time: number;
|
end_time: number;
|
||||||
path: string;
|
path: string;
|
||||||
segment_size: number;
|
variant?: string;
|
||||||
duration: number;
|
segment_size: number;
|
||||||
motion: number;
|
duration: number;
|
||||||
objects: number;
|
motion: number;
|
||||||
motion_heatmap?: Record<string, number> | null;
|
objects: number;
|
||||||
dBFS: number;
|
motion_heatmap?: Record<string, number> | null;
|
||||||
};
|
dBFS: number;
|
||||||
|
codec_name?: string | null;
|
||||||
export type RecordingSegment = {
|
width?: number | null;
|
||||||
id: string;
|
height?: number | null;
|
||||||
start_time: number;
|
bitrate?: number | null;
|
||||||
end_time: number;
|
};
|
||||||
motion: number;
|
|
||||||
objects: number;
|
export type RecordingSegment = {
|
||||||
segment_size: number;
|
id: string;
|
||||||
duration: number;
|
start_time: number;
|
||||||
};
|
end_time: number;
|
||||||
|
motion: number;
|
||||||
export type RecordingActivity = {
|
objects: number;
|
||||||
[hour: number]: RecordingSegmentActivity[];
|
segment_size: number;
|
||||||
};
|
duration: number;
|
||||||
|
};
|
||||||
type RecordingSegmentActivity = {
|
|
||||||
date: number;
|
export type RecordingActivity = {
|
||||||
count: number;
|
[hour: number]: RecordingSegmentActivity[];
|
||||||
hasObjects: boolean;
|
};
|
||||||
};
|
|
||||||
|
type RecordingSegmentActivity = {
|
||||||
export type RecordingStartingPoint = {
|
date: number;
|
||||||
camera: string;
|
count: number;
|
||||||
startTime: number;
|
hasObjects: boolean;
|
||||||
severity: ReviewSeverity;
|
};
|
||||||
timelineType?: TimelineType;
|
|
||||||
};
|
export type RecordingStartingPoint = {
|
||||||
|
camera: string;
|
||||||
export type RecordingPlayerError = "stalled" | "startup";
|
startTime: number;
|
||||||
|
severity: ReviewSeverity;
|
||||||
export const ASPECT_VERTICAL_LAYOUT = 1.5;
|
timelineType?: TimelineType;
|
||||||
export const ASPECT_PORTRAIT_LAYOUT = 1.333;
|
};
|
||||||
export const ASPECT_WIDE_LAYOUT = 2;
|
|
||||||
|
export type RecordingPlayerError = "stalled" | "startup";
|
||||||
|
|
||||||
|
export type RecordingPlaybackPreference =
|
||||||
|
| "auto"
|
||||||
|
| "main"
|
||||||
|
| "sub"
|
||||||
|
| "transcoded";
|
||||||
|
|
||||||
|
export const ASPECT_VERTICAL_LAYOUT = 1.5;
|
||||||
|
export const ASPECT_PORTRAIT_LAYOUT = 1.333;
|
||||||
|
export const ASPECT_WIDE_LAYOUT = 2;
|
||||||
|
|||||||
44
web/src/utils/liveStreamSelection.ts
Normal file
44
web/src/utils/liveStreamSelection.ts
Normal file
@ -0,0 +1,44 @@
|
|||||||
|
const LOW_BANDWIDTH_PATTERN = /\b(sub|low|mobile|small|sd|lowres|low-res)\b/i;
|
||||||
|
const HIGH_BANDWIDTH_PATTERN = /\b(main|high|hd|full|primary)\b/i;
|
||||||
|
|
||||||
|
function rankStreamLabel(label: string, preferLowBandwidth: boolean): number {
|
||||||
|
if (preferLowBandwidth && LOW_BANDWIDTH_PATTERN.test(label)) {
|
||||||
|
return 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!preferLowBandwidth && HIGH_BANDWIDTH_PATTERN.test(label)) {
|
||||||
|
return 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (preferLowBandwidth && HIGH_BANDWIDTH_PATTERN.test(label)) {
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!preferLowBandwidth && LOW_BANDWIDTH_PATTERN.test(label)) {
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function chooseAutoLiveStream(
|
||||||
|
streams: Record<string, string>,
|
||||||
|
estimatedBandwidthBps?: number,
|
||||||
|
saveData = false,
|
||||||
|
): string {
|
||||||
|
const entries = Object.entries(streams || {});
|
||||||
|
if (entries.length === 0) {
|
||||||
|
return "";
|
||||||
|
}
|
||||||
|
|
||||||
|
const preferLowBandwidth =
|
||||||
|
saveData || !!(estimatedBandwidthBps && estimatedBandwidthBps <= 3_000_000);
|
||||||
|
|
||||||
|
return [...entries]
|
||||||
|
.sort(([leftLabel], [rightLabel]) => {
|
||||||
|
return (
|
||||||
|
rankStreamLabel(rightLabel, preferLowBandwidth) -
|
||||||
|
rankStreamLabel(leftLabel, preferLowBandwidth)
|
||||||
|
);
|
||||||
|
})[0][1];
|
||||||
|
}
|
||||||
324
web/src/utils/recordingPlayback.ts
Normal file
324
web/src/utils/recordingPlayback.ts
Normal file
@ -0,0 +1,324 @@
|
|||||||
|
import { FrigateConfig } from "@/types/frigateConfig";
|
||||||
|
import {
|
||||||
|
Recording,
|
||||||
|
RecordingPlaybackPreference,
|
||||||
|
} from "@/types/record";
|
||||||
|
|
||||||
|
export type PlaybackCapabilities = {
|
||||||
|
estimatedBandwidthBps?: number;
|
||||||
|
saveData: boolean;
|
||||||
|
supports: Record<string, boolean>;
|
||||||
|
};
|
||||||
|
|
||||||
|
export type RecordingPlaybackDecision = {
|
||||||
|
mode: "direct" | "transcoded";
|
||||||
|
variant: string;
|
||||||
|
url: string;
|
||||||
|
reason: string;
|
||||||
|
};
|
||||||
|
|
||||||
|
type DecisionOptions = {
|
||||||
|
apiHost: string;
|
||||||
|
config?: FrigateConfig;
|
||||||
|
recordings: Recording[];
|
||||||
|
preference: RecordingPlaybackPreference;
|
||||||
|
vodPath: string;
|
||||||
|
capabilities: PlaybackCapabilities;
|
||||||
|
};
|
||||||
|
|
||||||
|
const CODEC_SAMPLES: Record<string, string[]> = {
|
||||||
|
h264: ['video/mp4; codecs="avc1.42E01E"', 'video/mp4; codecs="avc1.64001F"'],
|
||||||
|
avc1: ['video/mp4; codecs="avc1.42E01E"', 'video/mp4; codecs="avc1.64001F"'],
|
||||||
|
hevc: [
|
||||||
|
'video/mp4; codecs="hev1.1.6.L120.90"',
|
||||||
|
'video/mp4; codecs="hvc1.1.6.L120.90"',
|
||||||
|
'video/mp4; codecs="hev1.1.6.L93.B0"',
|
||||||
|
'video/mp4; codecs="hvc1.1.6.L93.B0"',
|
||||||
|
],
|
||||||
|
h265: [
|
||||||
|
'video/mp4; codecs="hev1.1.6.L120.90"',
|
||||||
|
'video/mp4; codecs="hvc1.1.6.L120.90"',
|
||||||
|
'video/mp4; codecs="hev1.1.6.L93.B0"',
|
||||||
|
'video/mp4; codecs="hvc1.1.6.L93.B0"',
|
||||||
|
],
|
||||||
|
hev1: [
|
||||||
|
'video/mp4; codecs="hev1.1.6.L120.90"',
|
||||||
|
'video/mp4; codecs="hvc1.1.6.L120.90"',
|
||||||
|
'video/mp4; codecs="hev1.1.6.L93.B0"',
|
||||||
|
'video/mp4; codecs="hvc1.1.6.L93.B0"',
|
||||||
|
],
|
||||||
|
hvc1: [
|
||||||
|
'video/mp4; codecs="hev1.1.6.L120.90"',
|
||||||
|
'video/mp4; codecs="hvc1.1.6.L120.90"',
|
||||||
|
'video/mp4; codecs="hev1.1.6.L93.B0"',
|
||||||
|
'video/mp4; codecs="hvc1.1.6.L93.B0"',
|
||||||
|
],
|
||||||
|
av1: ['video/mp4; codecs="av01.0.05M.08"'],
|
||||||
|
av01: ['video/mp4; codecs="av01.0.05M.08"'],
|
||||||
|
vp9: ['video/mp4; codecs="vp09.00.10.08"'],
|
||||||
|
vp09: ['video/mp4; codecs="vp09.00.10.08"'],
|
||||||
|
};
|
||||||
|
|
||||||
|
function trimTrailingSlash(value: string): string {
|
||||||
|
return value.replace(/\/$/, "");
|
||||||
|
}
|
||||||
|
|
||||||
|
function appendQuery(url: string, params: Record<string, string | undefined>): string {
|
||||||
|
const entries = Object.entries(params).filter(([, value]) => value);
|
||||||
|
if (entries.length === 0) {
|
||||||
|
return url;
|
||||||
|
}
|
||||||
|
|
||||||
|
const search = new URLSearchParams(entries as [string, string][]);
|
||||||
|
return `${url}${url.includes("?") ? "&" : "?"}${search.toString()}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
function average(values: number[]): number | undefined {
|
||||||
|
if (!values.length) {
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
return values.reduce((sum, value) => sum + value, 0) / values.length;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function normalizeCodecName(codecName?: string | null): string | undefined {
|
||||||
|
return codecName?.toLowerCase().trim() || undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function getCodecMimeTypes(codecName?: string | null): string[] {
|
||||||
|
const normalized = normalizeCodecName(codecName);
|
||||||
|
if (!normalized) {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
|
||||||
|
return CODEC_SAMPLES[normalized] ?? [];
|
||||||
|
}
|
||||||
|
|
||||||
|
export function estimateRecordingBitrate(recordings: Recording[]): number | undefined {
|
||||||
|
const explicit = recordings
|
||||||
|
.map((recording) => recording.bitrate)
|
||||||
|
.filter((value): value is number => typeof value === "number" && value > 0);
|
||||||
|
|
||||||
|
if (explicit.length > 0) {
|
||||||
|
return average(explicit);
|
||||||
|
}
|
||||||
|
|
||||||
|
const derived = recordings
|
||||||
|
.map((recording) => {
|
||||||
|
if (!recording.segment_size || !recording.duration) {
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
return (recording.segment_size * 1024 * 1024 * 8) / recording.duration;
|
||||||
|
})
|
||||||
|
.filter((value): value is number => typeof value === "number" && value > 0);
|
||||||
|
|
||||||
|
return average(derived);
|
||||||
|
}
|
||||||
|
|
||||||
|
export function groupRecordingsByVariant(
|
||||||
|
recordings: Recording[],
|
||||||
|
): Record<string, Recording[]> {
|
||||||
|
return recordings.reduce<Record<string, Recording[]>>((acc, recording) => {
|
||||||
|
const variant = recording.variant || "main";
|
||||||
|
if (!acc[variant]) {
|
||||||
|
acc[variant] = [];
|
||||||
|
}
|
||||||
|
acc[variant].push(recording);
|
||||||
|
return acc;
|
||||||
|
}, {});
|
||||||
|
}
|
||||||
|
|
||||||
|
function canDirectPlayVariant(
|
||||||
|
capabilities: PlaybackCapabilities,
|
||||||
|
recordings: Recording[],
|
||||||
|
): boolean {
|
||||||
|
const codecName = normalizeCodecName(recordings[0]?.codec_name);
|
||||||
|
if (!codecName) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
return capabilities.supports[codecName] === true;
|
||||||
|
}
|
||||||
|
|
||||||
|
function getDirectBaseUrl(apiHost: string): string {
|
||||||
|
return trimTrailingSlash(apiHost);
|
||||||
|
}
|
||||||
|
|
||||||
|
function getTranscodeBaseUrl(apiHost: string, config?: FrigateConfig): string | undefined {
|
||||||
|
if (!config?.transcode_proxy?.enabled) {
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (config.transcode_proxy.vod_proxy_url?.trim()) {
|
||||||
|
return trimTrailingSlash(config.transcode_proxy.vod_proxy_url);
|
||||||
|
}
|
||||||
|
|
||||||
|
return `${trimTrailingSlash(apiHost)}/vod-transcoded`;
|
||||||
|
}
|
||||||
|
|
||||||
|
function getTranscodeProfile(estimatedBandwidthBps?: number, saveData = false) {
|
||||||
|
if (saveData || (estimatedBandwidthBps && estimatedBandwidthBps <= 1_500_000)) {
|
||||||
|
return { bitrate: "512k", maxWidth: "640", maxHeight: "360" };
|
||||||
|
}
|
||||||
|
|
||||||
|
if (estimatedBandwidthBps && estimatedBandwidthBps <= 3_000_000) {
|
||||||
|
return { bitrate: "1200k", maxWidth: "960", maxHeight: "540" };
|
||||||
|
}
|
||||||
|
|
||||||
|
return { bitrate: "2500k", maxWidth: "1280", maxHeight: "720" };
|
||||||
|
}
|
||||||
|
|
||||||
|
function buildDirectUrl(apiHost: string, vodPath: string, variant: string): string {
|
||||||
|
const baseUrl = `${getDirectBaseUrl(apiHost)}${vodPath}`;
|
||||||
|
return appendQuery(baseUrl, {
|
||||||
|
variant: variant !== "main" ? variant : undefined,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function buildTranscodeUrl(
|
||||||
|
apiHost: string,
|
||||||
|
config: FrigateConfig | undefined,
|
||||||
|
vodPath: string,
|
||||||
|
variant: string,
|
||||||
|
capabilities: PlaybackCapabilities,
|
||||||
|
): string {
|
||||||
|
const transcodeBase = getTranscodeBaseUrl(apiHost, config);
|
||||||
|
if (!transcodeBase) {
|
||||||
|
return buildDirectUrl(apiHost, vodPath, variant);
|
||||||
|
}
|
||||||
|
|
||||||
|
const profile = getTranscodeProfile(
|
||||||
|
capabilities.estimatedBandwidthBps,
|
||||||
|
capabilities.saveData,
|
||||||
|
);
|
||||||
|
|
||||||
|
return appendQuery(`${transcodeBase}${vodPath}`, {
|
||||||
|
variant,
|
||||||
|
bitrate: profile.bitrate,
|
||||||
|
max_width: profile.maxWidth,
|
||||||
|
max_height: profile.maxHeight,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
export function chooseRecordingPlayback({
|
||||||
|
apiHost,
|
||||||
|
config,
|
||||||
|
recordings,
|
||||||
|
preference,
|
||||||
|
vodPath,
|
||||||
|
capabilities,
|
||||||
|
}: DecisionOptions): RecordingPlaybackDecision {
|
||||||
|
const recordingsByVariant = groupRecordingsByVariant(recordings);
|
||||||
|
const mainRecordings = recordingsByVariant.main ?? [];
|
||||||
|
const subRecordings = recordingsByVariant.sub ?? [];
|
||||||
|
const transcodeAvailable = !!getTranscodeBaseUrl(apiHost, config);
|
||||||
|
const estimatedBandwidthBps =
|
||||||
|
capabilities.estimatedBandwidthBps ?? (capabilities.saveData ? 1_000_000 : 6_000_000);
|
||||||
|
|
||||||
|
const candidates: Record<
|
||||||
|
"main" | "sub",
|
||||||
|
{ recordings: Recording[]; playable: boolean; bitrate?: number }
|
||||||
|
> = {
|
||||||
|
main: {
|
||||||
|
recordings: mainRecordings,
|
||||||
|
playable: canDirectPlayVariant(capabilities, mainRecordings),
|
||||||
|
bitrate: estimateRecordingBitrate(mainRecordings),
|
||||||
|
},
|
||||||
|
sub: {
|
||||||
|
recordings: subRecordings,
|
||||||
|
playable: canDirectPlayVariant(capabilities, subRecordings),
|
||||||
|
bitrate: estimateRecordingBitrate(subRecordings),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
const preferDirect = (variant: "main" | "sub") => {
|
||||||
|
const candidate = candidates[variant];
|
||||||
|
return (
|
||||||
|
candidate.recordings.length > 0 &&
|
||||||
|
candidate.playable &&
|
||||||
|
(!candidate.bitrate || candidate.bitrate <= estimatedBandwidthBps * 0.85)
|
||||||
|
);
|
||||||
|
};
|
||||||
|
|
||||||
|
if (preference === "main" && candidates.main.recordings.length > 0) {
|
||||||
|
return {
|
||||||
|
mode: "direct",
|
||||||
|
variant: "main",
|
||||||
|
url: buildDirectUrl(apiHost, vodPath, "main"),
|
||||||
|
reason: "manual-main",
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (preference === "sub" && candidates.sub.recordings.length > 0) {
|
||||||
|
if (candidates.sub.playable) {
|
||||||
|
return {
|
||||||
|
mode: "direct",
|
||||||
|
variant: "sub",
|
||||||
|
url: buildDirectUrl(apiHost, vodPath, "sub"),
|
||||||
|
reason: "manual-sub",
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
mode: "transcoded",
|
||||||
|
variant: "sub",
|
||||||
|
url: buildTranscodeUrl(apiHost, config, vodPath, "sub", capabilities),
|
||||||
|
reason: "manual-sub-transcoded",
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (preference === "transcoded") {
|
||||||
|
const targetVariant = candidates.sub.recordings.length > 0 ? "sub" : "main";
|
||||||
|
if (!transcodeAvailable) {
|
||||||
|
return {
|
||||||
|
mode: "direct",
|
||||||
|
variant: targetVariant,
|
||||||
|
url: buildDirectUrl(apiHost, vodPath, targetVariant),
|
||||||
|
reason: "manual-transcoded-unavailable",
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
mode: "transcoded",
|
||||||
|
variant: targetVariant,
|
||||||
|
url: buildTranscodeUrl(apiHost, config, vodPath, targetVariant, capabilities),
|
||||||
|
reason: "manual-transcoded",
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (preferDirect("main")) {
|
||||||
|
return {
|
||||||
|
mode: "direct",
|
||||||
|
variant: "main",
|
||||||
|
url: buildDirectUrl(apiHost, vodPath, "main"),
|
||||||
|
reason: "raw-main",
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (preferDirect("sub")) {
|
||||||
|
return {
|
||||||
|
mode: "direct",
|
||||||
|
variant: "sub",
|
||||||
|
url: buildDirectUrl(apiHost, vodPath, "sub"),
|
||||||
|
reason: "raw-sub",
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
const transcodeVariant = candidates.sub.recordings.length > 0 ? "sub" : "main";
|
||||||
|
if (!transcodeAvailable) {
|
||||||
|
return {
|
||||||
|
mode: "direct",
|
||||||
|
variant: transcodeVariant,
|
||||||
|
url: buildDirectUrl(apiHost, vodPath, transcodeVariant),
|
||||||
|
reason: "direct-fallback",
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
mode: "transcoded",
|
||||||
|
variant: transcodeVariant,
|
||||||
|
url: buildTranscodeUrl(apiHost, config, vodPath, transcodeVariant, capabilities),
|
||||||
|
reason: "transcode-fallback",
|
||||||
|
};
|
||||||
|
}
|
||||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue
Block a user