Made-with:

This commit is contained in:
ericj 2026-03-17 10:46:42 -07:00
parent 687fefb343
commit 5560af611a
52 changed files with 17247 additions and 14837 deletions

View File

@ -16,7 +16,9 @@ FROM ${BASE_IMAGE} AS base
ARG PIP_BREAK_SYSTEM_PACKAGES ARG PIP_BREAK_SYSTEM_PACKAGES
ARG BASE_HOOK ARG BASE_HOOK
RUN sh -c "$BASE_HOOK" RUN if [ -n "$BASE_HOOK" ]; then \
printf '%s\n' "$BASE_HOOK" | tr -d '\r' >/tmp/base_hook.sh && sh /tmp/base_hook.sh && rm -f /tmp/base_hook.sh; \
fi
FROM --platform=${BUILDPLATFORM} debian:12 AS base_host FROM --platform=${BUILDPLATFORM} debian:12 AS base_host
ARG PIP_BREAK_SYSTEM_PACKAGES ARG PIP_BREAK_SYSTEM_PACKAGES
@ -25,7 +27,9 @@ FROM ${SLIM_BASE} AS slim-base
ARG PIP_BREAK_SYSTEM_PACKAGES ARG PIP_BREAK_SYSTEM_PACKAGES
ARG BASE_HOOK ARG BASE_HOOK
RUN sh -c "$BASE_HOOK" RUN if [ -n "$BASE_HOOK" ]; then \
printf '%s\n' "$BASE_HOOK" | tr -d '\r' >/tmp/base_hook.sh && sh /tmp/base_hook.sh && rm -f /tmp/base_hook.sh; \
fi
FROM slim-base AS wget FROM slim-base AS wget
ARG DEBIAN_FRONTEND ARG DEBIAN_FRONTEND
@ -40,7 +44,8 @@ ENV CCACHE_DIR /root/.ccache
ENV CCACHE_MAXSIZE 2G ENV CCACHE_MAXSIZE 2G
RUN --mount=type=bind,source=docker/main/build_nginx.sh,target=/deps/build_nginx.sh \ RUN --mount=type=bind,source=docker/main/build_nginx.sh,target=/deps/build_nginx.sh \
/deps/build_nginx.sh tr -d '\r' </deps/build_nginx.sh >/tmp/build_nginx.sh \
&& bash /tmp/build_nginx.sh
FROM wget AS sqlite-vec FROM wget AS sqlite-vec
ARG DEBIAN_FRONTEND ARG DEBIAN_FRONTEND
@ -50,7 +55,8 @@ COPY docker/main/build_sqlite_vec.sh /deps/build_sqlite_vec.sh
RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
--mount=type=bind,source=docker/main/build_sqlite_vec.sh,target=/deps/build_sqlite_vec.sh \ --mount=type=bind,source=docker/main/build_sqlite_vec.sh,target=/deps/build_sqlite_vec.sh \
--mount=type=cache,target=/root/.ccache \ --mount=type=cache,target=/root/.ccache \
/deps/build_sqlite_vec.sh tr -d '\r' </deps/build_sqlite_vec.sh >/tmp/build_sqlite_vec.sh \
&& bash /tmp/build_sqlite_vec.sh
FROM scratch AS go2rtc FROM scratch AS go2rtc
ARG TARGETARCH ARG TARGETARCH
@ -60,7 +66,8 @@ ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9
FROM wget AS tempio FROM wget AS tempio
ARG TARGETARCH ARG TARGETARCH
RUN --mount=type=bind,source=docker/main/install_tempio.sh,target=/deps/install_tempio.sh \ RUN --mount=type=bind,source=docker/main/install_tempio.sh,target=/deps/install_tempio.sh \
/deps/install_tempio.sh tr -d '\r' </deps/install_tempio.sh >/tmp/install_tempio.sh \
&& bash /tmp/install_tempio.sh
#### ####
# #
@ -142,7 +149,8 @@ COPY audio-labelmap.txt .
FROM wget AS s6-overlay FROM wget AS s6-overlay
ARG TARGETARCH ARG TARGETARCH
RUN --mount=type=bind,source=docker/main/install_s6_overlay.sh,target=/deps/install_s6_overlay.sh \ RUN --mount=type=bind,source=docker/main/install_s6_overlay.sh,target=/deps/install_s6_overlay.sh \
/deps/install_s6_overlay.sh tr -d '\r' </deps/install_s6_overlay.sh >/tmp/install_s6_overlay.sh \
&& bash /tmp/install_s6_overlay.sh
FROM base AS wheels FROM base AS wheels
@ -184,7 +192,8 @@ RUN pip3 install -r /requirements.txt
# Build pysqlite3 from source # Build pysqlite3 from source
COPY docker/main/build_pysqlite3.sh /build_pysqlite3.sh COPY docker/main/build_pysqlite3.sh /build_pysqlite3.sh
RUN /build_pysqlite3.sh RUN tr -d '\r' </build_pysqlite3.sh >/tmp/build_pysqlite3.sh \
&& bash /tmp/build_pysqlite3.sh
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt && \ RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt && \
@ -194,7 +203,8 @@ RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt && \
# Install HailoRT & Wheels # Install HailoRT & Wheels
RUN --mount=type=bind,source=docker/main/install_hailort.sh,target=/deps/install_hailort.sh \ RUN --mount=type=bind,source=docker/main/install_hailort.sh,target=/deps/install_hailort.sh \
/deps/install_hailort.sh tr -d '\r' </deps/install_hailort.sh >/tmp/install_hailort.sh \
&& bash /tmp/install_hailort.sh
# Collect deps in a single layer # Collect deps in a single layer
FROM scratch AS deps-rootfs FROM scratch AS deps-rootfs
@ -254,7 +264,8 @@ ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PA
# Install dependencies # Install dependencies
RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \ RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \
/deps/install_deps.sh tr -d '\r' </deps/install_deps.sh >/tmp/install_deps.sh \
&& bash /tmp/install_deps.sh
ENV DEFAULT_FFMPEG_VERSION="7.0" ENV DEFAULT_FFMPEG_VERSION="7.0"
ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:5.0" ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:5.0"
@ -274,13 +285,21 @@ ENV LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/usr/lib/axcl"
# Install MemryX runtime (requires libgomp (OpenMP) in the final docker image) # Install MemryX runtime (requires libgomp (OpenMP) in the final docker image)
RUN --mount=type=bind,source=docker/main/install_memryx.sh,target=/deps/install_memryx.sh \ RUN --mount=type=bind,source=docker/main/install_memryx.sh,target=/deps/install_memryx.sh \
bash -c "bash /deps/install_memryx.sh" tr -d '\r' </deps/install_memryx.sh >/tmp/install_memryx.sh \
&& bash /tmp/install_memryx.sh
COPY --from=deps-rootfs / / COPY --from=deps-rootfs / /
RUN find /etc/s6-overlay/s6-rc.d -type f -exec sed -i 's/\r$//' {} +
RUN find /etc/s6-overlay/s6-rc.d -type f \
\( -name run -o -name up \) \
-exec chmod +x {} +
RUN ldconfig RUN ldconfig
EXPOSE 5000 EXPOSE 5000
EXPOSE 5010
EXPOSE 8554 EXPOSE 8554
EXPOSE 8555/tcp 8555/udp EXPOSE 8555/tcp 8555/udp
@ -347,6 +366,7 @@ FROM scratch AS rootfs
WORKDIR /opt/frigate/ WORKDIR /opt/frigate/
COPY frigate frigate/ COPY frigate frigate/
COPY migrations migrations/ COPY migrations migrations/
COPY transcode_proxy transcode_proxy/
COPY --from=web-build /work/dist/ web/ COPY --from=web-build /work/dist/ web/
# Frigate final container # Frigate final container
@ -354,3 +374,4 @@ FROM deps AS frigate
WORKDIR /opt/frigate/ WORKDIR /opt/frigate/
COPY --from=rootfs / / COPY --from=rootfs / /
RUN pip3 install --no-cache-dir -r /opt/frigate/transcode_proxy/requirements.txt

View File

@ -21,9 +21,32 @@ function set_libva_version() {
export LIBAVFORMAT_VERSION_MAJOR export LIBAVFORMAT_VERSION_MAJOR
} }
function start_transcode_proxy() {
(
export TRANSCODE_PROXY_UPSTREAM="${TRANSCODE_PROXY_UPSTREAM:-http://127.0.0.1:5000}"
export PYTHONPATH="/opt/frigate:${PYTHONPATH:-}"
if [[ -z "${TRANSCODE_PROXY_FFMPEG:-}" ]]; then
TRANSCODE_PROXY_FFMPEG=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
export TRANSCODE_PROXY_FFMPEG
fi
until curl -sf -o /dev/null "${TRANSCODE_PROXY_UPSTREAM}/api/version"; do
sleep 1
done
echo "[INFO] Starting transcode proxy..."
exec python3 -m uvicorn transcode_proxy.main:app \
--host "${TRANSCODE_PROXY_HOST:-0.0.0.0}" \
--port "${TRANSCODE_PROXY_PORT:-5010}"
) &
}
echo "[INFO] Preparing Frigate..." echo "[INFO] Preparing Frigate..."
set_libva_version set_libva_version
start_transcode_proxy
echo "[INFO] Starting Frigate..." echo "[INFO] Starting Frigate..."
cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate" cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate"

View File

@ -4,7 +4,7 @@
set -o errexit -o nounset -o pipefail set -o errexit -o nounset -o pipefail
dirs=(/dev/shm/logs/frigate /dev/shm/logs/go2rtc /dev/shm/logs/nginx /dev/shm/logs/certsync) dirs=(/dev/shm/logs/frigate /dev/shm/logs/go2rtc /dev/shm/logs/nginx /dev/shm/logs/certsync /dev/shm/logs/transcode-proxy)
mkdir -p "${dirs[@]}" mkdir -p "${dirs[@]}"
chown nobody:nogroup "${dirs[@]}" chown nobody:nogroup "${dirs[@]}"

View File

@ -0,0 +1 @@
transcode-proxy

View File

@ -0,0 +1 @@
transcode-proxy-pipeline

View File

@ -0,0 +1,4 @@
#!/command/with-contenv bash
# shellcheck shell=bash
exec logutil-service /dev/shm/logs/transcode-proxy

View File

@ -0,0 +1 @@
longrun

View File

@ -0,0 +1 @@
transcode-proxy-log

View File

@ -0,0 +1,32 @@
#!/command/with-contenv bash
# shellcheck shell=bash
# Start the transcode proxy (in-process with Frigate container)
set -o errexit -o nounset -o pipefail
# Logs should be sent to stdout so that s6 can collect them
echo "[INFO] Starting transcode proxy..."
# Default upstream to nginx internal port when not set
export TRANSCODE_PROXY_UPSTREAM="${TRANSCODE_PROXY_UPSTREAM:-http://127.0.0.1:5000}"
# Use Frigate's FFmpeg when not set
if [ -z "${TRANSCODE_PROXY_FFMPEG:-}" ]; then
export TRANSCODE_PROXY_FFMPEG="$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)"
fi
# Wait for nginx/API to be ready so proxy can reach upstream
until curl -sf -o /dev/null "${TRANSCODE_PROXY_UPSTREAM}/api/version"; do
echo "[INFO] Waiting for upstream ${TRANSCODE_PROXY_UPSTREAM}..."
sleep 1
done
echo "[INFO] Upstream ready, starting transcode proxy on port ${TRANSCODE_PROXY_PORT:-5010}"
export PYTHONPATH="/opt/frigate:${PYTHONPATH:-}"
exec 2>&1
exec python3 -m uvicorn transcode_proxy.main:app \
--host "${TRANSCODE_PROXY_HOST:-0.0.0.0}" \
--port "${TRANSCODE_PROXY_PORT:-5010}"

View File

@ -0,0 +1 @@
longrun

View File

@ -105,6 +105,16 @@ http {
include auth_location.conf; include auth_location.conf;
include base_path.conf; include base_path.conf;
location = /vod-transcoded {
return 302 /vod-transcoded/;
}
location /vod-transcoded/ {
include auth_request.conf;
proxy_pass http://127.0.0.1:5010;
include proxy.conf;
}
location /vod/ { location /vod/ {
include auth_request.conf; include auth_request.conf;
aio threads; aio threads;

View File

@ -25,6 +25,7 @@ RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels
&& pip3 install -U /deps/trt-wheels/*.whl && pip3 install -U /deps/trt-wheels/*.whl
COPY --from=rootfs / / COPY --from=rootfs / /
RUN pip3 install --no-cache-dir -r /opt/frigate/transcode_proxy/requirements.txt
COPY docker/tensorrt/detector/rootfs/etc/ld.so.conf.d /etc/ld.so.conf.d COPY docker/tensorrt/detector/rootfs/etc/ld.so.conf.d /etc/ld.so.conf.d
RUN ldconfig RUN ldconfig

View File

@ -16,7 +16,7 @@ variable "COMPUTE_LEVEL" {
variable "BASE_HOOK" { variable "BASE_HOOK" {
# Ensure an up-to-date python 3.11 is available in jetson images # Ensure an up-to-date python 3.11 is available in jetson images
default = <<EOT default = <<EOT
if grep -iq \"ubuntu\" /etc/os-release; then if grep -iq "ubuntu" /etc/os-release; then
. /etc/os-release . /etc/os-release
# Add the deadsnakes PPA repository # Add the deadsnakes PPA repository

View File

@ -426,6 +426,7 @@ async def recording_clip(
camera_name: str, camera_name: str,
start_ts: float, start_ts: float,
end_ts: float, end_ts: float,
variant: str = Query("main", description="Recording variant to use for playback."),
): ):
def run_download(ffmpeg_cmd: list[str], file_path: str): def run_download(ffmpeg_cmd: list[str], file_path: str):
with sp.Popen( with sp.Popen(
@ -459,6 +460,7 @@ async def recording_clip(
| ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time)) | ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time))
) )
.where(Recordings.camera == camera_name) .where(Recordings.camera == camera_name)
.where(Recordings.variant == variant)
.order_by(Recordings.start_time.asc()) .order_by(Recordings.start_time.asc())
) )
@ -534,13 +536,15 @@ async def vod_ts(
start_ts: float, start_ts: float,
end_ts: float, end_ts: float,
force_discontinuity: bool = False, force_discontinuity: bool = False,
variant: str = "main",
): ):
logger.debug( logger.debug(
"VOD: Generating VOD for %s from %s to %s with force_discontinuity=%s", "VOD: Generating VOD for %s from %s to %s with force_discontinuity=%s variant=%s",
camera_name, camera_name,
start_ts, start_ts,
end_ts, end_ts,
force_discontinuity, force_discontinuity,
variant,
) )
recordings = ( recordings = (
Recordings.select( Recordings.select(
@ -555,6 +559,7 @@ async def vod_ts(
| ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time)) | ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time))
) )
.where(Recordings.camera == camera_name) .where(Recordings.camera == camera_name)
.where(Recordings.variant == variant)
.order_by(Recordings.start_time.asc()) .order_by(Recordings.start_time.asc())
.iterator() .iterator()
) )
@ -644,10 +649,17 @@ async def vod_ts(
dependencies=[Depends(require_camera_access)], dependencies=[Depends(require_camera_access)],
description="Returns an HLS playlist for the specified date-time on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.", description="Returns an HLS playlist for the specified date-time on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
) )
async def vod_hour_no_timezone(year_month: str, day: int, hour: int, camera_name: str): async def vod_hour_no_timezone(
year_month: str, day: int, hour: int, camera_name: str, variant: str = "main"
):
"""VOD for specific hour. Uses the default timezone (UTC).""" """VOD for specific hour. Uses the default timezone (UTC)."""
return await vod_hour( return await vod_hour(
year_month, day, hour, camera_name, get_localzone_name().replace("/", ",") year_month,
day,
hour,
camera_name,
get_localzone_name().replace("/", ","),
variant,
) )
@ -657,7 +669,12 @@ async def vod_hour_no_timezone(year_month: str, day: int, hour: int, camera_name
description="Returns an HLS playlist for the specified date-time (with timezone) on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.", description="Returns an HLS playlist for the specified date-time (with timezone) on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
) )
async def vod_hour( async def vod_hour(
year_month: str, day: int, hour: int, camera_name: str, tz_name: str year_month: str,
day: int,
hour: int,
camera_name: str,
tz_name: str,
variant: str = "main",
): ):
parts = year_month.split("-") parts = year_month.split("-")
start_date = ( start_date = (
@ -668,7 +685,7 @@ async def vod_hour(
start_ts = start_date.timestamp() start_ts = start_date.timestamp()
end_ts = end_date.timestamp() end_ts = end_date.timestamp()
return await vod_ts(camera_name, start_ts, end_ts) return await vod_ts(camera_name, start_ts, end_ts, variant=variant)
@router.get( @router.get(
@ -680,6 +697,7 @@ async def vod_event(
request: Request, request: Request,
event_id: str, event_id: str,
padding: int = Query(0, description="Padding to apply to the vod."), padding: int = Query(0, description="Padding to apply to the vod."),
variant: str = Query("main", description="Recording variant to use for playback."),
): ):
try: try:
event: Event = Event.get(Event.id == event_id) event: Event = Event.get(Event.id == event_id)
@ -700,7 +718,9 @@ async def vod_event(
if event.end_time is None if event.end_time is None
else (event.end_time + padding) else (event.end_time + padding)
) )
vod_response = await vod_ts(event.camera, event.start_time - padding, end_ts) vod_response = await vod_ts(
event.camera, event.start_time - padding, end_ts, variant=variant
)
# If the recordings are not found and the event started more than 5 minutes ago, set has_clip to false # If the recordings are not found and the event started more than 5 minutes ago, set has_clip to false
if ( if (
@ -723,8 +743,11 @@ async def vod_clip(
camera_name: str, camera_name: str,
start_ts: float, start_ts: float,
end_ts: float, end_ts: float,
variant: str = Query("main", description="Recording variant to use for playback."),
): ):
return await vod_ts(camera_name, start_ts, end_ts, force_discontinuity=True) return await vod_ts(
camera_name, start_ts, end_ts, force_discontinuity=True, variant=variant
)
@router.get( @router.get(

View File

@ -229,29 +229,39 @@ async def recordings(
camera_name: str, camera_name: str,
after: float = (datetime.now() - timedelta(hours=1)).timestamp(), after: float = (datetime.now() - timedelta(hours=1)).timestamp(),
before: float = datetime.now().timestamp(), before: float = datetime.now().timestamp(),
variant: str = "main",
): ):
"""Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used""" """Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used"""
recordings = ( query = (
Recordings.select( Recordings.select(
Recordings.id, Recordings.id,
Recordings.camera,
Recordings.start_time, Recordings.start_time,
Recordings.end_time, Recordings.end_time,
Recordings.path,
Recordings.variant,
Recordings.segment_size, Recordings.segment_size,
Recordings.motion, Recordings.motion,
Recordings.objects, Recordings.objects,
Recordings.motion_heatmap, Recordings.motion_heatmap,
Recordings.duration, Recordings.duration,
Recordings.codec_name,
Recordings.width,
Recordings.height,
Recordings.bitrate,
) )
.where( .where(
Recordings.camera == camera_name, Recordings.camera == camera_name,
Recordings.end_time >= after, Recordings.end_time >= after,
Recordings.start_time <= before, Recordings.start_time <= before,
) )
.order_by(Recordings.start_time)
.dicts()
.iterator()
) )
if variant != "all":
query = query.where(Recordings.variant == variant)
recordings = query.order_by(Recordings.start_time).dicts().iterator()
return JSONResponse(content=list(recordings)) return JSONResponse(content=list(recordings))

View File

@ -256,7 +256,13 @@ class CameraConfig(FrigateBaseModel):
if ffmpeg_cmd is None: if ffmpeg_cmd is None:
continue continue
ffmpeg_cmds.append({"roles": ffmpeg_input.roles, "cmd": ffmpeg_cmd}) ffmpeg_cmds.append(
{
"roles": ffmpeg_input.roles,
"cmd": ffmpeg_cmd,
"record_variant": ffmpeg_input.record_variant,
}
)
self._ffmpeg_cmds = ffmpeg_cmds self._ffmpeg_cmds = ffmpeg_cmds
def _get_ffmpeg_cmd(self, ffmpeg_input: CameraInput): def _get_ffmpeg_cmd(self, ffmpeg_input: CameraInput):
@ -281,10 +287,13 @@ class CameraConfig(FrigateBaseModel):
) )
or self.ffmpeg.output_args.record or self.ffmpeg.output_args.record
) )
record_variant = ffmpeg_input.record_variant or "main"
cache_prefix = os.path.join(CACHE_DIR, self.name)
cache_path = f"{cache_prefix}@{record_variant}@{CACHE_SEGMENT_FORMAT}.mp4"
ffmpeg_output_args = ( ffmpeg_output_args = (
record_args record_args
+ [f"{os.path.join(CACHE_DIR, self.name)}@{CACHE_SEGMENT_FORMAT}.mp4"] + [cache_path]
+ ffmpeg_output_args + ffmpeg_output_args
) )

View File

@ -1,9 +1,9 @@
from enum import Enum from enum import Enum
from typing import Union from typing import Union
from pydantic import Field, field_validator from pydantic import Field, field_validator, model_validator
from frigate.const import DEFAULT_FFMPEG_VERSION, INCLUDED_FFMPEG_VERSIONS from frigate.const import DEFAULT_FFMPEG_VERSION, INCLUDED_FFMPEG_VERSIONS, REGEX_CAMERA_NAME
from ..base import FrigateBaseModel from ..base import FrigateBaseModel
from ..env import EnvString from ..env import EnvString
@ -137,6 +137,22 @@ class CameraInput(FrigateBaseModel):
title="Input arguments", title="Input arguments",
description="Input arguments specific to this stream.", description="Input arguments specific to this stream.",
) )
record_variant: str | None = Field(
default=None,
title="Recording variant",
description="Optional recording variant label for record role inputs such as main or sub.",
pattern=REGEX_CAMERA_NAME,
)
@model_validator(mode="after")
def validate_record_variant(self):
if CameraRoleEnum.record in self.roles:
if not self.record_variant:
self.record_variant = "main"
else:
self.record_variant = None
return self
class CameraFfmpegConfig(FfmpegConfig): class CameraFfmpegConfig(FfmpegConfig):
@ -148,12 +164,29 @@ class CameraFfmpegConfig(FfmpegConfig):
@field_validator("inputs") @field_validator("inputs")
@classmethod @classmethod
def validate_roles(cls, v): def validate_roles(cls, v):
roles = [role for input in v for role in input.roles] detect_inputs = 0
audio_inputs = 0
record_variants: set[str] = set()
if len(roles) != len(set(roles)): for camera_input in v:
raise ValueError("Each input role may only be used once.") if CameraRoleEnum.detect in camera_input.roles:
detect_inputs += 1
if "detect" not in roles: if CameraRoleEnum.audio in camera_input.roles:
audio_inputs += 1
if CameraRoleEnum.record in camera_input.roles:
record_variant = camera_input.record_variant or "main"
if record_variant in record_variants:
raise ValueError(
f"Record variant '{record_variant}' may only be used once."
)
record_variants.add(record_variant)
if detect_inputs != 1:
raise ValueError("The detect role is required.") raise ValueError("The detect role is required.")
if audio_inputs > 1:
raise ValueError("Each input role may only be used once.")
return v return v

View File

@ -71,6 +71,7 @@ from .network import NetworkingConfig
from .proxy import ProxyConfig from .proxy import ProxyConfig
from .telemetry import TelemetryConfig from .telemetry import TelemetryConfig
from .tls import TlsConfig from .tls import TlsConfig
from .transcode_proxy import TranscodeProxyConfig
from .ui import UIConfig from .ui import UIConfig
__all__ = ["FrigateConfig"] __all__ = ["FrigateConfig"]
@ -450,6 +451,11 @@ class FrigateConfig(FrigateBaseModel):
title="UI", title="UI",
description="User interface preferences such as timezone, time/date formatting, and units.", description="User interface preferences such as timezone, time/date formatting, and units.",
) )
transcode_proxy: TranscodeProxyConfig = Field(
default_factory=TranscodeProxyConfig,
title="Transcode proxy",
description="Optional proxy for transcoding VOD playback to H.264 on the fly (e.g. for HEVC compatibility).",
)
# Detector config # Detector config
detectors: Dict[str, BaseDetectorConfig] = Field( detectors: Dict[str, BaseDetectorConfig] = Field(

View File

@ -0,0 +1,21 @@
"""Configuration for the VOD transcode proxy (optional playback transcoding)."""
from pydantic import Field
from .base import FrigateBaseModel
__all__ = ["TranscodeProxyConfig"]
class TranscodeProxyConfig(FrigateBaseModel):
"""Settings for the optional transcode proxy used for recording playback."""
enabled: bool = Field(
default=False,
title="Transcode proxy enabled",
description="When enabled, the UI uses the transcode proxy URL for VOD playback so recordings are transcoded to H.264 on the fly (e.g. for HEVC compatibility or lower bitrate).",
)
vod_proxy_url: str = Field(
default="",
title="VOD proxy base URL",
description="Base URL for the transcode proxy (e.g. http://host:5010). When enabled, recording playback requests go to this URL + /vod/... Leave empty if the proxy is mounted at the same host (e.g. /vod-transcoded/ under the same origin).",
)

View File

@ -70,6 +70,7 @@ class Recordings(Model):
id = CharField(null=False, primary_key=True, max_length=30) id = CharField(null=False, primary_key=True, max_length=30)
camera = CharField(index=True, max_length=20) camera = CharField(index=True, max_length=20)
path = CharField(unique=True) path = CharField(unique=True)
variant = CharField(default="main", index=True, max_length=20)
start_time = DateTimeField() start_time = DateTimeField()
end_time = DateTimeField() end_time = DateTimeField()
duration = FloatField() duration = FloatField()
@ -77,6 +78,10 @@ class Recordings(Model):
objects = IntegerField(null=True) objects = IntegerField(null=True)
dBFS = IntegerField(null=True) dBFS = IntegerField(null=True)
segment_size = FloatField(default=0) # this should be stored as MB segment_size = FloatField(default=0) # this should be stored as MB
codec_name = CharField(null=True, max_length=32)
width = IntegerField(null=True)
height = IntegerField(null=True)
bitrate = IntegerField(null=True)
regions = IntegerField(null=True) regions = IntegerField(null=True)
motion_heatmap = JSONField(null=True) # 16x16 grid, 256 values (0-255) motion_heatmap = JSONField(null=True) # 16x16 grid, 256 values (0-255)

View File

@ -101,6 +101,29 @@ class RecordingMaintainer(threading.Thread):
self.end_time_cache: dict[str, Tuple[datetime.datetime, float]] = {} self.end_time_cache: dict[str, Tuple[datetime.datetime, float]] = {}
self.unexpected_cache_files_logged: bool = False self.unexpected_cache_files_logged: bool = False
def _parse_cache_segment(self, cache_name: str) -> Optional[dict[str, Any]]:
basename = os.path.splitext(cache_name)[0]
parts = basename.rsplit("@", maxsplit=2)
if len(parts) == 2:
camera, date = parts
variant = "main"
elif len(parts) == 3:
camera, variant, date = parts
else:
return None
start_time = datetime.datetime.strptime(
date, CACHE_SEGMENT_FORMAT
).astimezone(datetime.timezone.utc)
return {
"camera": camera,
"variant": variant,
"start_time": start_time,
"cache_path": os.path.join(CACHE_DIR, cache_name),
}
async def move_files(self) -> None: async def move_files(self) -> None:
cache_files = [ cache_files = [
d d
@ -113,26 +136,22 @@ class RecordingMaintainer(threading.Thread):
# publish newest cached segment per camera (including in use files) # publish newest cached segment per camera (including in use files)
newest_cache_segments: dict[str, dict[str, Any]] = {} newest_cache_segments: dict[str, dict[str, Any]] = {}
for cache in cache_files: for cache in cache_files:
cache_path = os.path.join(CACHE_DIR, cache) parsed = self._parse_cache_segment(cache)
basename = os.path.splitext(cache)[0] if parsed is None:
try:
camera, date = basename.rsplit("@", maxsplit=1)
except ValueError:
if not self.unexpected_cache_files_logged: if not self.unexpected_cache_files_logged:
logger.warning("Skipping unexpected files in cache") logger.warning("Skipping unexpected files in cache")
self.unexpected_cache_files_logged = True self.unexpected_cache_files_logged = True
continue continue
start_time = datetime.datetime.strptime( camera = parsed["camera"]
date, CACHE_SEGMENT_FORMAT start_time = parsed["start_time"]
).astimezone(datetime.timezone.utc)
if ( if (
camera not in newest_cache_segments camera not in newest_cache_segments
or start_time > newest_cache_segments[camera]["start_time"] or start_time > newest_cache_segments[camera]["start_time"]
): ):
newest_cache_segments[camera] = { newest_cache_segments[camera] = {
"start_time": start_time, "start_time": start_time,
"cache_path": cache_path, "cache_path": parsed["cache_path"],
} }
for camera, newest in newest_cache_segments.items(): for camera, newest in newest_cache_segments.items():
@ -172,27 +191,14 @@ class RecordingMaintainer(threading.Thread):
if cache in files_in_use: if cache in files_in_use:
continue continue
cache_path = os.path.join(CACHE_DIR, cache) parsed = self._parse_cache_segment(cache)
basename = os.path.splitext(cache)[0] if parsed is None:
try:
camera, date = basename.rsplit("@", maxsplit=1)
except ValueError:
if not self.unexpected_cache_files_logged: if not self.unexpected_cache_files_logged:
logger.warning("Skipping unexpected files in cache") logger.warning("Skipping unexpected files in cache")
self.unexpected_cache_files_logged = True self.unexpected_cache_files_logged = True
continue continue
# important that start_time is utc because recordings are stored and compared in utc grouped_recordings[parsed["camera"]].append(parsed)
start_time = datetime.datetime.strptime(
date, CACHE_SEGMENT_FORMAT
).astimezone(datetime.timezone.utc)
grouped_recordings[camera].append(
{
"cache_path": cache_path,
"start_time": start_time,
}
)
# delete all cached files past the most recent MAX_SEGMENTS_IN_CACHE # delete all cached files past the most recent MAX_SEGMENTS_IN_CACHE
keep_count = MAX_SEGMENTS_IN_CACHE keep_count = MAX_SEGMENTS_IN_CACHE
@ -318,6 +324,7 @@ class RecordingMaintainer(threading.Thread):
) -> Optional[Recordings]: ) -> Optional[Recordings]:
cache_path: str = recording["cache_path"] cache_path: str = recording["cache_path"]
start_time: datetime.datetime = recording["start_time"] start_time: datetime.datetime = recording["start_time"]
variant: str = recording.get("variant", "main")
# Just delete files if camera removed or recordings are turned off # Just delete files if camera removed or recordings are turned off
if ( if (
@ -327,8 +334,12 @@ class RecordingMaintainer(threading.Thread):
self.drop_segment(cache_path) self.drop_segment(cache_path)
return None return None
segment_info: dict[str, Any]
if cache_path in self.end_time_cache: if cache_path in self.end_time_cache:
end_time, duration = self.end_time_cache[cache_path] end_time, duration = self.end_time_cache[cache_path]
segment_info = await get_video_properties(
self.config.ffmpeg, cache_path, get_duration=False
)
else: else:
segment_info = await get_video_properties( segment_info = await get_video_properties(
self.config.ffmpeg, cache_path, get_duration=True self.config.ffmpeg, cache_path, get_duration=True
@ -400,7 +411,14 @@ class RecordingMaintainer(threading.Thread):
else RetainModeEnum.motion else RetainModeEnum.motion
) )
return await self.move_segment( return await self.move_segment(
camera, start_time, end_time, duration, cache_path, record_mode camera,
variant,
start_time,
end_time,
duration,
cache_path,
record_mode,
segment_info,
) )
# we fell through the continuous / motion check, so we need to check the review items # we fell through the continuous / motion check, so we need to check the review items
@ -436,11 +454,13 @@ class RecordingMaintainer(threading.Thread):
# move from cache to recordings immediately # move from cache to recordings immediately
return await self.move_segment( return await self.move_segment(
camera, camera,
variant,
start_time, start_time,
end_time, end_time,
duration, duration,
cache_path, cache_path,
record_mode, record_mode,
segment_info,
) )
# if it doesn't overlap with an review item, go ahead and drop the segment # if it doesn't overlap with an review item, go ahead and drop the segment
# if it ends more than the configured pre_capture for the camera # if it ends more than the configured pre_capture for the camera
@ -570,11 +590,13 @@ class RecordingMaintainer(threading.Thread):
async def move_segment( async def move_segment(
self, self,
camera: str, camera: str,
variant: str,
start_time: datetime.datetime, start_time: datetime.datetime,
end_time: datetime.datetime, end_time: datetime.datetime,
duration: float, duration: float,
cache_path: str, cache_path: str,
store_mode: RetainModeEnum, store_mode: RetainModeEnum,
media_info: Optional[dict[str, Any]] = None,
) -> Optional[Recordings]: ) -> Optional[Recordings]:
segment_info = self.segment_stats(camera, start_time, end_time) segment_info = self.segment_stats(camera, start_time, end_time)
@ -588,6 +610,7 @@ class RecordingMaintainer(threading.Thread):
RECORD_DIR, RECORD_DIR,
start_time.strftime("%Y-%m-%d/%H"), start_time.strftime("%Y-%m-%d/%H"),
camera, camera,
variant,
) )
if not os.path.exists(directory): if not os.path.exists(directory):
@ -646,6 +669,7 @@ class RecordingMaintainer(threading.Thread):
Recordings.id.name: f"{start_time.timestamp()}-{rand_id}", Recordings.id.name: f"{start_time.timestamp()}-{rand_id}",
Recordings.camera.name: camera, Recordings.camera.name: camera,
Recordings.path.name: file_path, Recordings.path.name: file_path,
Recordings.variant.name: variant,
Recordings.start_time.name: start_time.timestamp(), Recordings.start_time.name: start_time.timestamp(),
Recordings.end_time.name: end_time.timestamp(), Recordings.end_time.name: end_time.timestamp(),
Recordings.duration.name: duration, Recordings.duration.name: duration,
@ -655,6 +679,16 @@ class RecordingMaintainer(threading.Thread):
Recordings.regions.name: segment_info.region_count, Recordings.regions.name: segment_info.region_count,
Recordings.dBFS.name: segment_info.average_dBFS, Recordings.dBFS.name: segment_info.average_dBFS,
Recordings.segment_size.name: segment_size, Recordings.segment_size.name: segment_size,
Recordings.codec_name.name: (
media_info.get("codec_name") if media_info else None
),
Recordings.width.name: media_info.get("width") if media_info else None,
Recordings.height.name: media_info.get("height") if media_info else None,
Recordings.bitrate.name: (
int((segment_size * pow(2, 20) * 8) / duration)
if duration > 0 and segment_size > 0
else None
),
Recordings.motion_heatmap.name: segment_info.motion_heatmap, Recordings.motion_heatmap.name: segment_info.motion_heatmap,
} }
except Exception as e: except Exception as e:

View File

@ -44,6 +44,59 @@ class TestHttpMedia(BaseTestHttp):
self.app.dependency_overrides.clear() self.app.dependency_overrides.clear()
super().tearDown() super().tearDown()
def test_camera_recordings_variant_filter(self):
start_ts = datetime(2024, 3, 9, 12, 0, 0, tzinfo=timezone.utc).timestamp()
end_ts = start_ts + 3600
with AuthTestClient(self.app) as client:
Recordings.insert(
id="recording_main",
path="/media/recordings/front/main.mp4",
camera="front_door",
variant="main",
start_time=start_ts,
end_time=end_ts,
duration=3600,
motion=100,
objects=5,
codec_name="h264",
width=1920,
height=1080,
bitrate=4_000_000,
).execute()
Recordings.insert(
id="recording_sub",
path="/media/recordings/front/sub.mp4",
camera="front_door",
variant="sub",
start_time=start_ts,
end_time=end_ts,
duration=3600,
motion=100,
objects=5,
codec_name="h264",
width=640,
height=360,
bitrate=512_000,
).execute()
default_response = client.get(
"/front_door/recordings",
params={"after": start_ts, "before": end_ts},
)
assert default_response.status_code == 200
default_recordings = default_response.json()
assert len(default_recordings) == 1
assert default_recordings[0]["variant"] == "main"
all_response = client.get(
"/front_door/recordings",
params={"after": start_ts, "before": end_ts, "variant": "all"},
)
assert all_response.status_code == 200
variants = {recording["variant"] for recording in all_response.json()}
assert variants == {"main", "sub"}
def test_recordings_summary_across_dst_spring_forward(self): def test_recordings_summary_across_dst_spring_forward(self):
""" """
Test recordings summary across spring DST transition (spring forward). Test recordings summary across spring DST transition (spring forward).

View File

@ -14,6 +14,18 @@ from frigate.record.maintainer import RecordingMaintainer # noqa: E402
class TestMaintainer(unittest.IsolatedAsyncioTestCase): class TestMaintainer(unittest.IsolatedAsyncioTestCase):
async def test_parse_cache_segment_supports_variant(self):
config = MagicMock(spec=FrigateConfig)
config.cameras = {}
stop_event = MagicMock()
maintainer = RecordingMaintainer(config, stop_event)
parsed = maintainer._parse_cache_segment("front@sub@20210101000000+0000.mp4")
self.assertIsNotNone(parsed)
self.assertEqual("front", parsed["camera"])
self.assertEqual("sub", parsed["variant"])
async def test_move_files_survives_bad_filename(self): async def test_move_files_survives_bad_filename(self):
config = MagicMock(spec=FrigateConfig) config = MagicMock(spec=FrigateConfig)
config.cameras = {} config.cameras = {}

View File

@ -820,6 +820,7 @@ async def get_video_properties(
result.update({"width": width, "height": height}) result.update({"width": width, "height": height})
if fourcc: if fourcc:
result["fourcc"] = fourcc result["fourcc"] = fourcc
result["codec_name"] = fourcc
if get_duration: if get_duration:
result["duration"] = duration result["duration"] = duration

View File

@ -0,0 +1,38 @@
"""Peewee migrations -- 036_add_recording_variants.py."""
import peewee as pw
from frigate.models import Recordings
SQL = pw.SQL
def migrate(migrator, database, fake=False, **kwargs):
    """Add recording-variant metadata columns and a supporting index.

    Columns are only added when missing so the migration is safe to re-run
    against a partially-migrated database.
    """
    table_info = database.execute_sql('PRAGMA table_info("recordings")').fetchall()
    existing_columns = {row[1] for row in table_info}

    # Declarative spec of every column this migration introduces.
    candidate_fields = {
        "variant": pw.CharField(default="main", max_length=20),
        "codec_name": pw.CharField(null=True, max_length=32),
        "width": pw.IntegerField(null=True),
        "height": pw.IntegerField(null=True),
        "bitrate": pw.IntegerField(null=True),
    }
    fields_to_add = {
        name: field
        for name, field in candidate_fields.items()
        if name not in existing_columns
    }

    if fields_to_add:
        migrator.add_fields(Recordings, **fields_to_add)

    # IF NOT EXISTS makes the index creation idempotent as well.
    migrator.sql(
        'CREATE INDEX IF NOT EXISTS "recordings_camera_variant_start_time_end_time" ON "recordings" ("camera", "variant", "start_time" DESC, "end_time" DESC)'
    )
def rollback(migrator, database, fake=False, **kwargs):
    """Drop every column added by this migration."""
    added_columns = ["variant", "codec_name", "width", "height", "bitrate"]
    migrator.remove_fields(Recordings, added_columns)

83
scripts/README.md Normal file
View File

@ -0,0 +1,83 @@
# Scripts
## Transcode benchmarks
Proof-of-concept benchmarks for **real-time VOD transcoding**: transcode a video file with FFmpeg (optionally with hardware acceleration) and measure time and throughput. Used to de-risk the real-time VOD transcoding feature (segment-level transcode + cache): we need ~10s segments to transcode in well under 10s (ideally <2s) so timeline scrubbing stays responsive.
### Python (recommended)
From the repo root:
```bash
# Full file, CPU
python scripts/transcode_benchmark.py path/to/recording.mp4
# First 10 seconds only (simulates one HLS segment)
python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10
# 10s segment with NVIDIA HW accel
python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10 --hwaccel nvidia
# Simulate scrubbing: start 60s in, transcode 10s (VAAPI)
python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10 --seek 60 --hwaccel vaapi
# Intel QSV H.265 (preset-intel-qsv-h265)
python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10 --hwaccel qsv-h265
# Custom FFmpeg binary (e.g. Frigate container)
python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10 --ffmpeg /usr/lib/ffmpeg/7/bin/ffmpeg
```
Options:
- `--duration SEC` Transcode only this many seconds (default: full file). Use 10 to simulate one HLS segment.
- `--seek SEC` Start at this position (fast seek before `-i`). Simulates scrubbing into the file.
- `--hwaccel cpu|nvidia|vaapi|qsv-h265` Matches Frigate presets: libx264, h264_nvenc, h264_vaapi, preset-intel-qsv-h265 (hevc_qsv).
- `--vaapi-device` VAAPI device (default: `/dev/dri/renderD128`).
- `--qsv-device` Intel QSV device: on Linux defaults to `/dev/dri/renderD129` if present (else `renderD128`, else `0`). With two GPUs, the second node is often the Intel iGPU. Override if you get “No VA display found” (e.g. try the other node).
- `--output PATH` Write output here (default: temp file, deleted).
- `--keep-output` Keep the temp output file.
Output: real time, speed (× realtime), output size. The script suggests whether the speed is good for ~10s segment transcode.
### Shell
Quick one-liners without Python:
```bash
chmod +x scripts/transcode_benchmark.sh
./scripts/transcode_benchmark.sh path/to/recording.mp4
./scripts/transcode_benchmark.sh path/to/recording.mp4 10
./scripts/transcode_benchmark.sh path/to/recording.mp4 10 nvidia
```
Arguments: `INPUT [DURATION_SEC] [cpu|nvidia|vaapi|qsv-h265]`. Optional env: `FFMPEG`, `FFPROBE`, `VAAPI_DEVICE`, `QSV_DEVICE`.
### Interpreting results
- **Speed ≥ 5× realtime** — A 10s segment transcodes in ~2s or less; good for on-demand segment transcode with cache.
- **Speed 1–5× realtime** — Marginal; a segment may take several seconds; transcode-ahead or caching helps.
- **Speed < 1× realtime** — Too slow for real-time; consider stronger HW or lower resolution/bitrate.
Run with a real Frigate recording (or any H.264/HEVC MP4) and try both `--duration 10` and full file to see segment vs full transcode cost.
### Troubleshooting `qsv-h265` (“No VA display found”)
Intel QSV (`qsv-h265`) only works on **Intel GPUs** with a working **Intel VA-API** stack. If both `/dev/dri/renderD128` and `renderD129` fail with “No VA display found” or “Device creation failed: -22”, then:
1. **Check which GPUs you have** With two cards, both may be non-Intel (e.g. NVIDIA + AMD). QSV is Intel-only. Use `lspci -k | grep -A3 VGA` to see adapters and drivers.
2. **Check VA-API** Run `vainfo` or `vainfo --display drm --device /dev/dri/renderD128` (then `renderD129`). If it errors or shows no Intel driver, QSV wont work. On Intel you typically need `intel-media-driver` (newer) or `intel-vaapi-driver` (i965, older).
3. **Permissions** Ensure your user is in the `render` (and often `video`) group: `groups`; add with `sudo usermod -aG render $USER` and log in again.
4. **Use another HW accel** If you have an **AMD** GPU, use `vaapi` (H.264). If you have **NVIDIA**, use `nvidia`. Otherwise use `cpu`.
5. **Frigate Docker uses QSV but host benchmark fails** The container has the Intel VA/QSV stack and device access; the host may not. Run the benchmark **inside the same environment** (e.g. inside the Frigate container):
```bash
# Copy script and a sample recording into the container (adjust container name)
docker cp scripts/transcode_benchmark.sh frigate:/tmp/
docker cp /path/to/59.24.mp4 frigate:/tmp/
docker exec -it frigate bash -c 'chmod +x /tmp/transcode_benchmark.sh && /tmp/transcode_benchmark.sh /tmp/59.24.mp4 10 qsv-h265'
```
The script auto-detects FFmpeg under `/usr/lib/ffmpeg/*/bin` when `ffmpeg` isnt on PATH (Frigate container). If it doesnt, set `FFMPEG` and `FFPROBE` explicitly, e.g. `docker exec ... env FFMPEG=/usr/lib/ffmpeg/7.0/bin/ffmpeg FFPROBE=/usr/lib/ffmpeg/7.0/bin/ffprobe /tmp/transcode_benchmark.sh ...`.

View File

@ -0,0 +1,289 @@
#!/usr/bin/env python3
"""
Proof-of-concept benchmark: transcode a video file with FFmpeg (optionally with
hardware acceleration) and report timing and throughput.
Used to de-risk real-time VOD transcoding: we need ~10s segments to transcode
in well under 10s (ideally <2s) so scrubbing stays responsive.
Usage:
python scripts/transcode_benchmark.py path/to/video.mp4
python scripts/transcode_benchmark.py path/to/video.mp4 --duration 10 --hwaccel nvidia
python scripts/transcode_benchmark.py path/to/video.mp4 --duration 10 --seek 60 --hwaccel vaapi
Output: real time, speed (x realtime), output size. Aligns with Frigate export/timelapse
HW presets (preset-nvidia, preset-vaapi, libx264 default).
"""
import argparse
import os
import subprocess
import sys
import tempfile
import time
from pathlib import Path
from typing import Optional
def get_ffmpeg_command(
    ffmpeg_path: str,
    input_path: str,
    output_path: str,
    *,
    duration_sec: Optional[float] = None,
    seek_sec: float = 0,
    hwaccel: str = "cpu",
    gpu_device: str = "/dev/dri/renderD128",
    qsv_device: str = "0",
) -> list[str]:
    """Build argv for FFmpeg transcode (H.264 or HEVC, no audio). Matches Frigate timelapse-style encode.

    Args:
        ffmpeg_path: FFmpeg binary to invoke.
        input_path: Source video file.
        output_path: Destination MP4 path.
        duration_sec: If positive, limit the transcode to this many seconds (-t).
        seek_sec: If positive, fast-seek before the input (-ss before -i).
        hwaccel: One of "cpu", "nvidia", "vaapi", "qsv-h265".
        gpu_device: VAAPI render node.
        qsv_device: Intel QSV device (path on Linux, "0" on Windows).

    Returns:
        Complete argv list, starting with ffmpeg_path.
    """
    argv: list[str] = [ffmpeg_path, "-hide_banner", "-y", "-loglevel", "warning", "-stats"]

    # Fast seek: -ss before -i jumps to the nearest keyframe, then decodes.
    if seek_sec > 0:
        argv += ["-ss", str(seek_sec)]

    # Decode-side (input) flags per hardware acceleration mode; cpu adds none.
    decode_flags = {
        "nvidia": [
            "-hwaccel", "cuda",
            "-hwaccel_output_format", "cuda",
            "-extra_hw_frames", "8",
        ],
        "vaapi": [
            "-hwaccel", "vaapi",
            "-hwaccel_device", gpu_device,
            "-hwaccel_output_format", "vaapi",
        ],
        # preset-intel-qsv-h265: load_plugin for HEVC decode, QSV device for decode+encode
        "qsv-h265": [
            "-load_plugin", "hevc_hw",
            "-hwaccel", "qsv",
            "-qsv_device", qsv_device,
            "-hwaccel_output_format", "qsv",
        ],
    }
    argv += decode_flags.get(hwaccel, [])

    argv += ["-i", input_path]
    if duration_sec is not None and duration_sec > 0:
        argv += ["-t", str(duration_sec)]
    argv.append("-an")

    # Encode-side flags; the cpu default mirrors Frigate's libx264 settings.
    encode_flags = {
        "nvidia": ["-c:v", "h264_nvenc"],
        # VAAPI encode needs frames in vaapi format; decoder outputs vaapi above.
        "vaapi": ["-c:v", "h264_vaapi"],
        # Use CQP explicitly; profile/level can be unsupported on some QSV runtimes.
        "qsv-h265": ["-c:v", "hevc_qsv", "-global_quality", "23"],
    }
    argv += encode_flags.get(
        hwaccel,
        ["-c:v", "libx264", "-preset:v", "ultrafast", "-tune:v", "zerolatency"],
    )

    argv += ["-f", "mp4", "-movflags", "+faststart", output_path]
    return argv
def get_video_duration_sec(ffprobe_path: str, input_path: str) -> Optional[float]:
    """Probe the container duration in seconds via ffprobe; None on any failure."""
    argv = [
        ffprobe_path,
        "-v", "error",
        "-show_entries", "format=duration",
        "-of", "default=noprint_wrappers=1:nokey=1",
        input_path,
    ]
    try:
        proc = subprocess.run(argv, capture_output=True, text=True, timeout=10)
    except (subprocess.TimeoutExpired, FileNotFoundError):
        # Missing binary or a hung probe both mean "duration unknown".
        return None
    if proc.returncode != 0:
        return None
    text = proc.stdout.strip()
    if not text:
        return None
    try:
        return float(text)
    except ValueError:
        return None
def main() -> int:
    """CLI entry point: parse args, run one FFmpeg transcode, report timing.

    Returns a process exit code (0 success, 1 on any error).
    """
    parser = argparse.ArgumentParser(
        description="Benchmark FFmpeg transcode (H.264) with optional HW accel."
    )
    parser.add_argument(
        "input",
        type=Path,
        help="Input video file (e.g. recording segment)",
    )
    parser.add_argument(
        "--duration",
        type=float,
        default=None,
        metavar="SEC",
        help="Transcode only this many seconds (default: full file). Simulates segment length.",
    )
    parser.add_argument(
        "--seek",
        type=float,
        default=0,
        metavar="SEC",
        help="Start at this position (before -i for fast seek). Simulates scrubbing into file.",
    )
    parser.add_argument(
        "--hwaccel",
        choices=("cpu", "nvidia", "vaapi", "qsv-h265"),
        default="cpu",
        help="HW accel: cpu (libx264), nvidia (h264_nvenc), vaapi (h264_vaapi), qsv-h265 (preset-intel-qsv-h265, hevc_qsv).",
    )
    parser.add_argument(
        "--vaapi-device",
        default="/dev/dri/renderD128",
        help="VAAPI device (default: /dev/dri/renderD128).",
    )
    # Default is computed at import time from which render nodes exist on this host.
    parser.add_argument(
        "--qsv-device",
        default=(
            "/dev/dri/renderD129"
            if os.path.exists("/dev/dri/renderD129")
            else "/dev/dri/renderD128"
            if os.path.exists("/dev/dri/renderD128")
            else "0"
        ),
        help="Intel QSV device: path (e.g. /dev/dri/renderD129 or renderD128 on Linux) or 0 (Windows). With two GPUs, try renderD129 if renderD128 fails. Used for --hwaccel qsv-h265.",
    )
    parser.add_argument(
        "--ffmpeg",
        default="ffmpeg",
        metavar="PATH",
        help="FFmpeg binary (default: ffmpeg in PATH).",
    )
    parser.add_argument(
        "--ffprobe",
        default="ffprobe",
        metavar="PATH",
        help="FFprobe binary (default: ffprobe in PATH).",
    )
    parser.add_argument(
        "--output",
        type=Path,
        default=None,
        help="Output file (default: temp file, deleted after).",
    )
    parser.add_argument(
        "--keep-output",
        action="store_true",
        help="Keep output file when using default temp path.",
    )
    args = parser.parse_args()

    input_path = args.input.resolve()
    if not input_path.is_file():
        print(f"Error: input file not found: {input_path}", file=sys.stderr)
        return 1

    # effective_duration only drives the speed (x realtime) calculation below;
    # the -t flag passed to FFmpeg uses args.duration directly (None = full file).
    effective_duration = args.duration
    if effective_duration is None:
        duration_from_probe = get_video_duration_sec(str(args.ffprobe), str(input_path))
        if duration_from_probe is not None:
            effective_duration = duration_from_probe - args.seek
            if effective_duration <= 0:
                print("Error: seek >= file duration", file=sys.stderr)
                return 1
        else:
            print("Warning: could not probe duration; reporting real time only.", file=sys.stderr)

    # Default to a throwaway temp output unless the caller asked for a path.
    use_temp = args.output is None
    if use_temp:
        fd, out_path = tempfile.mkstemp(suffix=".mp4")
        os.close(fd)
        output_path = Path(out_path)
    else:
        output_path = args.output.resolve()

    cmd = get_ffmpeg_command(
        args.ffmpeg,
        str(input_path),
        str(output_path),
        duration_sec=args.duration,
        seek_sec=args.seek,
        hwaccel=args.hwaccel,
        gpu_device=args.vaapi_device,
        qsv_device=args.qsv_device,
    )

    print(f"Input: {input_path}")
    print(f"Output: {output_path}")
    print(f"HW: {args.hwaccel}")
    if args.duration is not None:
        print(f"Limit: {args.duration}s")
    if args.seek > 0:
        print(f"Seek: {args.seek}s")
    print(f"Run: {' '.join(cmd)}")
    print()

    # Wall-clock timing around the whole FFmpeg run; clean up the temp file on failure.
    start = time.perf_counter()
    try:
        subprocess.run(cmd, check=True, timeout=3600)
    except subprocess.CalledProcessError as e:
        print(f"FFmpeg failed: {e}", file=sys.stderr)
        if use_temp and output_path.exists():
            output_path.unlink()
        return 1
    except subprocess.TimeoutExpired:
        print("FFmpeg timed out.", file=sys.stderr)
        if use_temp and output_path.exists():
            output_path.unlink()
        return 1
    elapsed = time.perf_counter() - start

    size_bytes = output_path.stat().st_size if output_path.exists() else 0

    print("--- Results ---")
    print(f"Real time: {elapsed:.2f}s")
    if effective_duration is not None and effective_duration > 0:
        speed = effective_duration / elapsed
        print(f"Video duration: {effective_duration:.2f}s")
        print(f"Speed: {speed:.2f}x realtime")
        # Heuristic verdict for ~10s HLS segment transcode responsiveness.
        if args.duration and args.duration <= 15:
            if speed >= 5:
                print("(Good for ~10s segment transcode: well under 2s.)")
            elif speed >= 1:
                print("(Marginal: segment may take several seconds.)")
            else:
                print("(Slow: segment transcode would exceed segment length.)")
    print(f"Output size: {size_bytes / (1024*1024):.2f} MiB")

    if use_temp:
        if args.keep_output:
            print(f"(Output kept: {output_path})")
        else:
            output_path.unlink(missing_ok=True)
    return 0
if __name__ == "__main__":
sys.exit(main())

View File

@ -0,0 +1,101 @@
#!/usr/bin/env bash
# Proof-of-concept: run FFmpeg transcode and report real time.
# Usage:
# ./scripts/transcode_benchmark.sh path/to/video.mp4
# ./scripts/transcode_benchmark.sh path/to/video.mp4 10 # first 10 seconds only
# ./scripts/transcode_benchmark.sh path/to/video.mp4 10 nvidia
#
# Optional: DURATION (seconds), HWACCEL (cpu|nvidia|vaapi|qsv-h265). Default: full file, cpu.
# Requires: ffmpeg, ffprobe. Output: temp file, then deleted. Reports real time and speed.
set -e

INPUT="${1:?Usage: $0 <input.mp4> [duration_sec] [cpu|nvidia|vaapi|qsv-h265]}"
DURATION="${2:-}"
HWACCEL="${3:-cpu}"

# On Linux, QSV needs a DRM render node. With two GPUs, renderD128 is often non-Intel and renderD129 the Intel iGPU; prefer 129 when both exist so QSV finds VA.
if [[ -z "${QSV_DEVICE:-}" ]]; then
  if [[ -e /dev/dri/renderD129 ]]; then
    QSV_DEVICE="/dev/dri/renderD129"
  elif [[ -e /dev/dri/renderD128 ]]; then
    QSV_DEVICE="/dev/dri/renderD128"
  else
    QSV_DEVICE="0"
  fi
fi

# Frigate container has ffmpeg under /usr/lib/ffmpeg/<ver>/bin, not on PATH
if [[ -z "${FFMPEG:-}" ]]; then
  if command -v ffmpeg &>/dev/null; then
    FFMPEG="ffmpeg"
  elif [[ -d /usr/lib/ffmpeg ]] && FFMPEG_CANDIDATE=$(find /usr/lib/ffmpeg -path '*/bin/ffmpeg' -type f 2>/dev/null | head -1); [[ -n "${FFMPEG_CANDIDATE:-}" ]]; then
    FFMPEG="$FFMPEG_CANDIDATE"
  else
    FFMPEG="ffmpeg"
  fi
fi
# ffprobe usually lives next to ffmpeg; fall back to PATH lookup otherwise.
FFPROBE="${FFPROBE:-$(dirname "$FFMPEG")/ffprobe}"
if [[ ! -x "$FFPROBE" ]]; then
  FFPROBE="ffprobe"
fi

# Temp output is always removed on exit (success or failure) via the trap.
OUTPUT=$(mktemp -u).mp4
cleanup() { rm -f "$OUTPUT"; }
trap cleanup EXIT

# Build base decode/input args
INPUT_ARGS=(-hide_banner -y -loglevel warning -stats -i "$INPUT")
if [[ -n "$DURATION" && "$DURATION" =~ ^[0-9]+\.?[0-9]*$ ]]; then
  INPUT_ARGS+=(-t "$DURATION")
fi

# PRE = decode-side hwaccel flags (must precede -i); ENC = encoder selection.
case "$HWACCEL" in
  nvidia)
    PRE=( -hwaccel cuda -hwaccel_output_format cuda -extra_hw_frames 8 )
    ENC=(-c:v h264_nvenc)
    ;;
  vaapi)
    PRE=( -hwaccel vaapi -hwaccel_device "${VAAPI_DEVICE:-/dev/dri/renderD128}" -hwaccel_output_format vaapi )
    ENC=(-c:v h264_vaapi)
    ;;
  qsv-h265)
    PRE=( -load_plugin hevc_hw -hwaccel qsv -qsv_device "$QSV_DEVICE" -hwaccel_output_format qsv )
    # Use CQP explicitly; -profile:v/-level can be unsupported on some QSV runtimes
    ENC=(-c:v hevc_qsv -global_quality 23)
    ;;
  *)
    PRE=()
    ENC=(-c:v libx264 -preset:v ultrafast -tune:v zerolatency)
    ;;
esac

echo "Input: $INPUT"
echo "Output: $OUTPUT (temp)"
echo "HW: $HWACCEL"
[[ -n "$DURATION" ]] && echo "Limit: ${DURATION}s"
# QSV is Intel-only and needs a working Intel VA-API stack; if you see 'No VA display found', see scripts/README.md troubleshooting.
[[ "$HWACCEL" = "qsv-h265" ]] && echo "QSV device: $QSV_DEVICE"
echo ""

# Get duration for speed calculation (if not limiting, use full file length)
if [[ -n "$DURATION" ]]; then
  DUR_SEC="$DURATION"
else
  DUR_SEC=$("${FFPROBE:-ffprobe}" -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 "$INPUT" 2>/dev/null || true)
fi

# Use $SECONDS (bash) so we don't rely on date %N or bc in minimal containers
START=$SECONDS
"$FFMPEG" "${PRE[@]}" "${INPUT_ARGS[@]}" -an "${ENC[@]}" -f mp4 -movflags +faststart "$OUTPUT"
ELAPSED=$((SECONDS - START))
# Avoid division by zero for sub-second transcodes ($SECONDS has 1s resolution).
[[ "$ELAPSED" -eq 0 ]] && ELAPSED=1

# stat -c is GNU, stat -f is BSD/macOS; try both.
SIZE=$(stat -c%s "$OUTPUT" 2>/dev/null || stat -f%z "$OUTPUT" 2>/dev/null || echo 0)
SIZE_MB=$(awk "BEGIN {printf \"%.2f\", $SIZE/1048576}" 2>/dev/null || echo "$((SIZE / 1048576))")

echo "--- Results ---"
echo "Real time: ${ELAPSED}s"
if [[ -n "$DUR_SEC" && "$DUR_SEC" =~ ^[0-9]+\.?[0-9]*$ ]]; then
  SPEED=$(awk "BEGIN {printf \"%.2f\", $DUR_SEC/$ELAPSED}" 2>/dev/null || echo "?")
  echo "Duration: ${DUR_SEC}s"
  echo "Speed: ${SPEED}x realtime"
fi
echo "Output size: ${SIZE_MB} MiB"

View File

@ -0,0 +1,69 @@
# Dev workflow: frigate-dev (single image with transcode proxy)
Use **frigate-dev** so your working Docker setup keeps using the stable image. You switch between stable and dev by changing the image in compose and restarting. The transcode proxy runs **inside** the Frigate container; there is no separate proxy image.
## Image names
- **frigate-dev** Frigate image built from this repo (includes transcode proxy, config + UI for transcode_proxy).
- Your normal setup keeps using **ghcr.io/blakeblackshear/frigate:stable-tensorrt** (or whatever you use today).
## Start / stop (switch between stable and dev)
You cant run both stacks at once (same ports). Use one compose file and swap the image.
**Stop everything:**
```bash
cd ~/docker-compose # or wherever your compose file is
docker compose down
```
**Run dev stack (Frigate with in-container transcode proxy):**
- In `docker-compose.yml`, set the frigate service to `image: frigate-dev` and publish port 5010 if you use transcode_proxy.
```bash
docker compose up -d
```
**Switch back to stable:**
- Stop: `docker compose down`
- In `docker-compose.yml`, set frigate back to `image: ghcr.io/blakeblackshear/frigate:stable-tensorrt`.
```bash
docker compose up -d
```
**Useful commands:**
- `docker compose down` stop and remove containers.
- `docker compose up -d` start in the background.
- `docker compose ps` see whats running.
- `docker compose logs -f frigate` follow Frigate logs.
## Building (Ubuntu server recommended)
Frigates image **is not** “just Python” it has a **compile phase** (nginx, sqlite-vec, etc.). Building is done with Docker and can take a while.
**Where to build:** On the **Ubuntu server** where you run Frigate. That way you get the right architecture and avoid Windows/Linux cross-build issues. Sync the repo from your Windows machine via git (clone or push from Windows to a repo and pull on the server, or copy the repo onto the server).
**On the Ubuntu server:**
1. Clone (or pull) the Frigate repo with this code.
2. **Build Frigate (TensorRT variant, same as stable-tensorrt):**
```bash
cd /path/to/frigate
make version
make local-trt
docker tag frigate:latest-tensorrt frigate-dev
```
(`make local-trt` uses buildx; first time may be slow.) The resulting image includes the transcode proxy; no separate proxy image is built.
**If you prefer to build on Windows:** You can use Docker buildx to build for `linux/amd64` and push to a registry, then pull `frigate-dev` on the Ubuntu server. The Frigate build is heavy and may be slower or more fragile on Windows; building on the server is simpler.
## One-time setup on the server
```bash
# Clone or copy the repo, then:
cd /path/to/frigate
make version
make local-trt
docker tag frigate:latest-tensorrt frigate-dev
```
Then in your compose use `image: frigate-dev`, publish port 5010 if you use the transcode proxy, and set `transcode_proxy` in Frigate config as in the main README.

55
transcode_proxy/README.md Normal file
View File

@ -0,0 +1,55 @@
# Frigate VOD Transcode Proxy
Optional proxy that runs **inside the Frigate container** and rewrites VOD HLS playback to an H.264 transport-stream rendition on the fly. Use it when recordings are HEVC (or high bitrate) and you want compatible or lower-bitrate playback.
## How it works
- **Manifest requests** (e.g. `.../master.m3u8` and `.../index-v1.m3u8`): Fetched from upstream and rewritten so the browser sees a proxy-owned H.264 HLS rendition.
- **Segment requests**: The rewritten media playlist points to proxy-owned `.transcoded.ts` segment URLs. Those requests fetch the upstream source segment, transcode it to H.264 MPEG-TS with FFmpeg, cache it in memory (LRU, configurable size), then serve it.
- **Init fragments**: The rewritten media playlist removes upstream `#EXT-X-MAP` usage, so the browser no longer depends on upstream fragmented MP4 init files for transcoded playback.
The proxy is an s6-managed service in the same Docker image as Frigate. It binds to port **5010** inside the container and starts after nginx is ready.
## Configuration
Environment variables (optional; defaults work when running in the same container):
| Variable | Default | Description |
|----------|---------|-------------|
| `TRANSCODE_PROXY_UPSTREAM` | `http://127.0.0.1:5000` | Upstream Frigate VOD base URL (nginx internal port when in-container). |
| `TRANSCODE_PROXY_PATH_PREFIX` | (empty) | If the proxy is mounted at a path (e.g. `/vod-transcoded`), set this so the proxy strips it when forwarding. |
| `TRANSCODE_PROXY_HOST` | `0.0.0.0` | Bind host. |
| `TRANSCODE_PROXY_PORT` | `5010` | Bind port. |
| `TRANSCODE_PROXY_CACHE_MB` | `500` | Max in-memory cache size (MB). |
| `TRANSCODE_PROXY_FFMPEG` | (system) | FFmpeg binary path; uses Frigates FFmpeg when not set. |
| `TRANSCODE_PROXY_H264_BITRATE` | `128k` | H.264 bitrate for transcoded segments. |
| `TRANSCODE_PROXY_MAX_WIDTH` | `640` | Max output width for transcoded playback; aspect ratio is preserved and smaller sources are not upscaled. |
| `TRANSCODE_PROXY_MAX_HEIGHT` | `480` | Max output height for transcoded playback; aspect ratio is preserved and smaller sources are not upscaled. |
## Enabling in Frigate
1. Build Frigate from this repo (e.g. `frigate-dev`) so the image includes the proxy and config/UI support.
2. Expose the proxy either internally through Frigate nginx (recommended, e.g. `/vod-transcoded`) or by publishing port **5010** for direct access.
3. In Frigate config (YAML), add:
```yaml
transcode_proxy:
enabled: true
vod_proxy_url: "http://YOUR_FRIGATE_HOST:5010" # same host as Frigate, port 5010
```
4. Restart Frigate. The UI will use the proxy for recording playback when enabled.
If Frigate is behind a reverse proxy and you expose the transcode service at a path (e.g. `https://frigate.example.com/vod-transcoded`), set `TRANSCODE_PROXY_PATH_PREFIX=/vod-transcoded` in the container environment and use that full URL as `vod_proxy_url`.
## Running (single container)
The proxy runs automatically inside the Frigate container. No separate container or image is needed. For same-origin playback, keep the service internal and route it through Frigate nginx on the normal UI origin.
See **transcode_proxy/DEV_WORKFLOW.md** for building the dev image (e.g. `frigate-dev`) and switching between stable and dev.
## Endpoints
- `GET /vod/.../master.m3u8` Rewritten HLS master playlist for the transcoded rendition.
- `GET /vod/.../index*.m3u8` Rewritten HLS media playlist that points at proxy-owned transcoded transport-stream segments.
- `GET /vod/.../*.transcoded.ts` Transcoded H.264 MPEG-TS segments.
- `GET /cache` Cache stats (size, entry count).
- `GET /health` Health check.

View File

@ -0,0 +1 @@
"""Transcode proxy: sits in front of Frigate VOD and transcodes segments on the fly to H.264."""

View File

@ -0,0 +1,5 @@
"""Run the transcode proxy: python -m transcode_proxy."""
from transcode_proxy.main import run
if __name__ == "__main__":
run()

47
transcode_proxy/cache.py Normal file
View File

@ -0,0 +1,47 @@
"""In-memory LRU cache for transcoded segments (byte-size limited)."""
import logging
import threading
from collections import OrderedDict
from typing import Optional
logger = logging.getLogger(__name__)
class ByteLRUCache:
    """Thread-safe LRU cache that evicts by total byte size.

    Keys are strings, values are raw segment bytes. `get` refreshes recency;
    `set` evicts least-recently-used entries until the new value fits.
    """

    def __init__(self, max_bytes: int):
        self._max_bytes = max_bytes
        self._current_bytes = 0
        # OrderedDict insertion order doubles as LRU order (oldest first).
        self._order: OrderedDict[str, bytes] = OrderedDict()
        self._lock = threading.Lock()

    def get(self, key: str) -> Optional[bytes]:
        """Return cached bytes for key (refreshing recency), or None."""
        with self._lock:
            data = self._order.pop(key, None)
            if data is not None:
                self._order[key] = data  # move to end (most recent)
                return data
            return None

    def set(self, key: str, value: bytes) -> None:
        """Insert/replace key; evict LRU entries until value fits.

        Values larger than the cache maximum are not cached at all.
        """
        size = len(value)
        if size > self._max_bytes:
            logger.warning("Segment larger than cache max (%s bytes), not caching", size)
            return
        with self._lock:
            # Bug fix: when replacing an existing key, release its old bytes
            # first; otherwise _current_bytes over-counts forever and forces
            # premature eviction of unrelated entries.
            previous = self._order.pop(key, None)
            if previous is not None:
                self._current_bytes -= len(previous)
            while self._current_bytes + size > self._max_bytes and self._order:
                evicted_key = next(iter(self._order))
                evicted = self._order.pop(evicted_key)
                self._current_bytes -= len(evicted)
                logger.debug("Evicted %s from transcode cache", evicted_key)
            self._order[key] = value
            self._current_bytes += size

    def size_bytes(self) -> int:
        """Current total size of cached values, in bytes."""
        with self._lock:
            return self._current_bytes

    def count(self) -> int:
        """Number of cached entries."""
        with self._lock:
            return len(self._order)

44
transcode_proxy/config.py Normal file
View File

@ -0,0 +1,44 @@
"""Configuration from environment."""
import os
from dataclasses import dataclass, field
def _getenv(name: str, default: str) -> str:
    """Read an environment variable with a fallback default."""
    return os.environ.get(name, default)


@dataclass
class Config:
    """Proxy configuration, resolved from TRANSCODE_PROXY_* environment variables."""

    # Upstream Frigate VOD base URL (e.g. http://nginx:80 or http://127.0.0.1:5001)
    upstream_base: str = field(
        default_factory=lambda: _getenv("TRANSCODE_PROXY_UPSTREAM", "http://127.0.0.1:80")
    )
    # Optional path prefix the proxy is mounted at (e.g. /vod-transcoded); stripped when forwarding
    path_prefix: str = field(
        default_factory=lambda: _getenv("TRANSCODE_PROXY_PATH_PREFIX", "").rstrip("/")
    )
    # Host/port to bind
    host: str = field(default_factory=lambda: _getenv("TRANSCODE_PROXY_HOST", "0.0.0.0"))
    port: int = field(
        default_factory=lambda: int(_getenv("TRANSCODE_PROXY_PORT", "5010"))
    )
    # In-memory cache max size in bytes (env var is megabytes)
    cache_max_bytes: int = field(
        default_factory=lambda: int(_getenv("TRANSCODE_PROXY_CACHE_MB", "500")) * 1024 * 1024
    )
    # FFmpeg binary
    ffmpeg_path: str = field(
        default_factory=lambda: _getenv("TRANSCODE_PROXY_FFMPEG", "ffmpeg")
    )
    # H.264 bitrate for transcoded segments
    h264_bitrate: str = field(
        default_factory=lambda: _getenv("TRANSCODE_PROXY_H264_BITRATE", "128k")
    )
    # Max output size for transcoded playback; preserves aspect ratio and will not upscale
    max_width: int = field(
        default_factory=lambda: int(_getenv("TRANSCODE_PROXY_MAX_WIDTH", "640"))
    )
    max_height: int = field(
        default_factory=lambda: int(_getenv("TRANSCODE_PROXY_MAX_HEIGHT", "480"))
    )


# Module-level singleton used by the rest of the proxy.
config = Config()

View File

@ -0,0 +1,24 @@
# Example: Frigate with in-container transcode proxy (single image).
#
# 1. Build Frigate from this repo (on Ubuntu recommended):
# make version && make local-trt && docker tag frigate:latest-tensorrt frigate-dev
#
# 2. Use image: frigate-dev and publish port 5010 for the transcode proxy.
# 3. In Frigate config (config.yml), set:
# transcode_proxy:
# enabled: true
# vod_proxy_url: "http://YOUR_HOST:5010"
services:
frigate:
container_name: frigate
restart: unless-stopped
image: frigate-dev
# ... your existing frigate config (gpus, shm_size, devices, volumes) ...
ports:
- "5000:5000" # or 8971:8971 depending on your setup
- "5010:5010" # transcode proxy (only needed if transcode_proxy.enabled is true)
# Optional: override proxy defaults
# environment:
# TRANSCODE_PROXY_PORT: "5010"
# TRANSCODE_PROXY_CACHE_MB: "500"

419
transcode_proxy/main.py Normal file
View File

@ -0,0 +1,419 @@
"""FastAPI app: proxy VOD requests, transcode segments on the fly."""
import logging
import re
from collections.abc import AsyncIterator
from typing import Optional
import httpx
from fastapi import FastAPI, Request, Response
from fastapi.responses import StreamingResponse
from transcode_proxy.cache import ByteLRUCache
from transcode_proxy.config import config
from transcode_proxy.transcode import (
TranscodeError,
stream_transcode_segment_to_h264_ts,
)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
app = FastAPI(title="Frigate VOD Transcode Proxy", version="0.1.0")
cache = ByteLRUCache(config.cache_max_bytes)
# Segment extensions that the upstream VOD may expose.
SEGMENT_EXTENSIONS = (".m4s", ".mp4", ".ts")
FORWARD_HEADERS = ("cookie", "authorization", "referer")
TRANSCODED_SEGMENT_SUFFIX = ".transcoded.ts"
H264_CODEC = "avc1.64001f"
LOCAL_QUERY_KEYS = {"bitrate", "max_width", "max_height"}
def _upstream_path(path: str) -> Optional[str]:
    """Map an incoming request path to the upstream VOD path.

    Strips the configured path prefix (when set) and only lets ``vod`` /
    ``vod-transcoded`` paths through; any other path maps to ``None``.
    """
    relative = path.lstrip("/")
    prefix = config.path_prefix.strip("/") if config.path_prefix else ""
    if prefix and relative.startswith(prefix + "/"):
        relative = relative[len(prefix) + 1 :]
    if relative == "vod" or relative.startswith("vod/"):
        return "/" + relative
    if relative == "vod-transcoded":
        return "/vod"
    if relative.startswith("vod-transcoded/"):
        return "/" + relative[len("vod-transcoded/") :]
    return None
def _is_segment(path: str) -> bool:
    """True when the path names a media segment (transcoded or source)."""
    trimmed = path.rstrip("/")
    if trimmed.endswith(TRANSCODED_SEGMENT_SUFFIX):
        return True
    return any(trimmed.endswith(extension) for extension in SEGMENT_EXTENSIONS)
def _is_init_path(path: str) -> bool:
return bool(re.search(r"/init.*\.mp4$", path))
def _is_master_playlist(path: str) -> bool:
return path.endswith("/master.m3u8") or path.endswith("master.m3u8")
def _init_upstream_path(segment_path: str) -> Optional[str]:
"""Infer the matching init fragment for an fMP4 media fragment path."""
match = re.search(r"/seg-\d+(?P<suffix>.*)\.m4s$", segment_path)
if not match:
return None
suffix = match.group("suffix")
return re.sub(r"/seg-\d+.*\.m4s$", f"/init{suffix}.mp4", segment_path)
async def _fetch_upstream_bytes(
    client: httpx.AsyncClient, url: str, headers: dict[str, str]
) -> Optional[bytes]:
    """GET ``url`` and return the response body, or ``None`` on any failure.

    Delegates to ``_proxy_upstream_response`` so the fetch/raise/log logic
    lives in one place instead of being duplicated (the two originals were
    identical except for returning ``.content`` vs the response object).
    """
    upstream_resp = await _proxy_upstream_response(client, url, headers)
    return None if upstream_resp is None else upstream_resp.content
async def _fetch_source_init_bytes(
    client: httpx.AsyncClient,
    init_path: str,
    query: str,
    headers: dict[str, str],
) -> Optional[bytes]:
    """Fetch (and cache) the source fMP4 init fragment for a segment."""
    base = config.upstream_base.rstrip("/")
    init_url = f"{base}{init_path}?{query}" if query else base + init_path
    cache_key = f"source-init:{init_url}"
    cached = cache.get(cache_key)
    if cached is not None:
        return cached
    fetched = await _fetch_upstream_bytes(client, init_url, headers)
    if fetched is not None:
        cache.set(cache_key, fetched)
    return fetched
async def _stream_source_segment_bytes(
    source_url: str,
    headers: dict[str, str],
    init_bytes: Optional[bytes] = None,
) -> AsyncIterator[bytes]:
    """Yield the source segment bytes, prepending the init fragment if given."""
    if init_bytes is not None:
        yield init_bytes
    async with httpx.AsyncClient(timeout=60.0) as client:
        async with client.stream("GET", source_url, headers=headers) as resp:
            resp.raise_for_status()
            async for piece in resp.aiter_bytes():
                if piece:
                    yield piece
def _proxy_segment_uri(entry: str) -> str:
    """Rewrite a playlist segment URI to its transcoded counterpart."""
    return entry + TRANSCODED_SEGMENT_SUFFIX
def _source_segment_path(path: str) -> str:
    """Strip the transcoded suffix to recover the source segment path."""
    # str.removesuffix only strips when the suffix is present, exactly
    # matching the original endswith/slice pair.
    return path.removesuffix(TRANSCODED_SEGMENT_SUFFIX)
def _resolution_for_transcode(
width: int, height: int, max_width: int, max_height: int
) -> tuple[int, int]:
if width <= 0 or height <= 0:
return (max_width, max_height)
max_width = max(max_width, 2)
max_height = max(max_height, 2)
scale = min(max_width / width, max_height / height, 1.0)
out_width = max(2, int(width * scale))
out_height = max(2, int(height * scale))
if out_width % 2:
out_width -= 1
if out_height % 2:
out_height -= 1
return (max(out_width, 2), max(out_height, 2))
def _bandwidth_bits(bitrate: str) -> int:
match = re.fullmatch(r"(?P<value>\d+(?:\.\d+)?)(?P<suffix>[kKmMgG]?)", bitrate.strip())
if not match:
return 2_000_000
value = float(match.group("value"))
suffix = match.group("suffix").upper()
multiplier = {
"": 1,
"K": 1_000,
"M": 1_000_000,
"G": 1_000_000_000,
}[suffix]
return int(value * multiplier)
def _transcode_request_profile(request: Request) -> tuple[str, int, int, str]:
    """Extract transcode settings from the request query string.

    Returns ``(bitrate, max_width, max_height, upstream_query)`` where
    ``upstream_query`` is the original query string minus the proxy-local
    keys. NOTE(review): values are re-joined without URL re-encoding —
    confirm the upstream accepts them verbatim.
    """
    params = request.query_params
    bitrate = params.get("bitrate", config.h264_bitrate)
    max_width = int(params.get("max_width", config.max_width))
    max_height = int(params.get("max_height", config.max_height))
    forwarded = [
        f"{key}={value}"
        for key, value in params.multi_items()
        if key not in LOCAL_QUERY_KEYS
    ]
    return bitrate, max_width, max_height, "&".join(forwarded)
def _rewrite_master_playlist(
    upstream_bytes: bytes, bitrate: str, max_width: int, max_height: int
) -> bytes:
    """Rewrite the upstream master playlist to advertise one H.264 variant.

    Keeps the first variant's URI but replaces its attributes with the
    bandwidth/codec (and, when the source resolution is advertised, the
    output resolution) the transcoder will actually produce. Falls back to
    the unmodified upstream manifest when no variant can be parsed.
    """
    entries = [
        stripped
        for stripped in (
            raw.strip()
            for raw in upstream_bytes.decode("utf-8", errors="replace").splitlines()
        )
        if stripped
    ]
    variant_attrs: Optional[str] = None
    variant_uri: Optional[str] = None
    for position, entry in enumerate(entries):
        if not entry.startswith("#EXT-X-STREAM-INF:"):
            continue
        variant_attrs = entry
        # First non-tag line after the STREAM-INF tag is the variant URI.
        variant_uri = next(
            (later for later in entries[position + 1 :] if not later.startswith("#")),
            None,
        )
        break
    if variant_attrs is None or variant_uri is None:
        logger.warning("Unable to parse master playlist, returning upstream manifest")
        return upstream_bytes
    attrs = [
        f"BANDWIDTH={max(_bandwidth_bits(bitrate), 1)}",
        f'CODECS="{H264_CODEC}"',
    ]
    source_resolution = re.search(r"RESOLUTION=(\d+)x(\d+)", variant_attrs)
    if source_resolution:
        out_width, out_height = _resolution_for_transcode(
            int(source_resolution.group(1)),
            int(source_resolution.group(2)),
            max_width,
            max_height,
        )
        attrs.insert(1, f"RESOLUTION={out_width}x{out_height}")
    manifest = [
        "#EXTM3U",
        "#EXT-X-STREAM-INF:" + ",".join(attrs),
        variant_uri,
        "",
    ]
    return "\n".join(manifest).encode()
def _rewrite_media_playlist(upstream_bytes: bytes) -> bytes:
    """Rewrite a media playlist so every segment URI targets the transcoder.

    Drops ``#EXT-X-MAP`` tags (the transcoded TS output needs no init
    fragment) and inserts a discontinuity marker before every segment after
    the first, since each segment is transcoded independently.
    """
    rewritten: list[str] = []
    seen_segments = 0
    for raw_line in upstream_bytes.decode("utf-8", errors="replace").splitlines():
        entry = raw_line.strip()
        if entry.startswith("#EXT-X-MAP:"):
            continue
        if entry.startswith("#EXTINF:") and seen_segments > 0:
            rewritten.append("#EXT-X-DISCONTINUITY")
        if entry and not entry.startswith("#"):
            rewritten.append(_proxy_segment_uri(entry))
            seen_segments += 1
            continue
        rewritten.append(raw_line)
    if rewritten and rewritten[-1] != "":
        rewritten.append("")
    return "\n".join(rewritten).encode()
async def _proxy_upstream_response(
    client: httpx.AsyncClient, url: str, headers: dict[str, str]
) -> Optional[httpx.Response]:
    """GET ``url`` and return the response, or ``None`` on any failure."""
    try:
        response = await client.get(url, headers=headers)
        response.raise_for_status()
    except Exception as e:
        logger.warning("Upstream fetch failed %s: %s", url, e)
        return None
    return response
async def _transcoded_segment_response(
    source_url: str,
    cache_key: str,
    headers: dict[str, str],
    init_bytes: Optional[bytes] = None,
    bitrate: Optional[str] = None,
    max_width: Optional[int] = None,
    max_height: Optional[int] = None,
) -> Response:
    """Stream a freshly transcoded segment back to the client.

    Starts an FFmpeg transcode over the streamed source segment, waits for
    the first output chunk so startup failures become a clean 502 instead of
    a truncated 200, then streams the remainder. On successful completion
    the full transcoded output is stored in the cache under ``cache_key``.

    Args:
        source_url: Upstream URL of the source media segment.
        cache_key: Key under which the complete output is cached.
        headers: Headers forwarded to the upstream fetch.
        init_bytes: Optional fMP4 init fragment prepended to the source.
        bitrate: Per-request bitrate override (falsy -> configured default).
        max_width: Per-request width bound (falsy -> configured default).
        max_height: Per-request height bound (falsy -> configured default).
    """
    stream = await stream_transcode_segment_to_h264_ts(
        _stream_source_segment_bytes(source_url, headers, init_bytes),
        config.ffmpeg_path,
        bitrate or config.h264_bitrate,
        max_width or config.max_width,
        max_height or config.max_height,
    )
    try:
        # Block for the first chunk: an immediate FFmpeg failure can still be
        # reported with a proper status code at this point.
        first_chunk = await stream.first_chunk()
    except TranscodeError as e:
        await stream.aclose()
        logger.warning("Transcode stream failed %s: %s", source_url, e)
        return Response(status_code=502, content=b"Transcode failed")
    async def body() -> AsyncIterator[bytes]:
        try:
            async for chunk in stream.iter_chunks(first_chunk):
                yield chunk
        except TranscodeError as e:
            logger.warning("Transcode stream failed %s: %s", source_url, e)
            raise
        else:
            # Cache only after the whole stream completed without error.
            cache.set(cache_key, stream.output_bytes)
    return StreamingResponse(
        body(),
        media_type="video/mp2t",
        headers={"Cache-Control": "private, max-age=300"},
    )
@app.get("/cache")
async def cache_info() -> dict:
    """Return cache size and entry count (for debugging).

    Reads the size once so ``size_bytes`` and ``size_mb`` always report the
    same snapshot, even if the cache changes between the two reads.
    """
    size_bytes = cache.size_bytes()
    return {
        "size_bytes": size_bytes,
        "size_mb": round(size_bytes / (1024 * 1024), 2),
        "entries": cache.count(),
    }
@app.get("/health")
async def health() -> dict:
    """Liveness probe: always reports the service as up."""
    return {"status": "ok"}
@app.get("/{full_path:path}")
async def vod_proxy(request: Request, full_path: str) -> Response:
    """Handle /vod/... or /vod-transcoded/... (when path_prefix is set).

    Routing:
      * ``*.transcoded.ts`` -> transcode the matching source segment
        (served from cache when possible).
      * master playlists    -> rewritten to advertise the transcoded variant.
      * other ``*.m3u8``    -> segment URIs rewritten to ``*.transcoded.ts``.
      * anything else       -> proxied through unchanged.
    """
    path = "/" + full_path.lstrip("/")
    upstream_path = _upstream_path(path)
    # Only VOD paths are proxied; everything else is rejected outright.
    if upstream_path is None or not (
        upstream_path == "/vod" or upstream_path.startswith("/vod/")
    ):
        return Response(status_code=404, content=b"Not found")
    bitrate, max_width, max_height, upstream_query = _transcode_request_profile(request)
    upstream_url = f"{config.upstream_base.rstrip('/')}{upstream_path}"
    if upstream_query:
        upstream_url += f"?{upstream_query}"
    # Forward only the allowlisted request headers upstream.
    headers = {
        k: v for k, v in request.headers.items() if k.lower() in FORWARD_HEADERS
    }
    if upstream_path.endswith(TRANSCODED_SEGMENT_SUFFIX):
        # Transcoded-segment request: the cache key includes the transcode
        # profile so different bitrates/sizes don't collide.
        cache_key = f"{upstream_url}|{bitrate}|{max_width}x{max_height}"
        cached = cache.get(cache_key)
        if cached is not None:
            return Response(
                content=cached,
                media_type="video/mp2t",
                headers={"Cache-Control": "private, max-age=300"},
            )
        source_path = _source_segment_path(upstream_path)
        source_url = f"{config.upstream_base.rstrip('/')}{source_path}"
        if upstream_query:
            source_url += f"?{upstream_query}"
        init_bytes: Optional[bytes] = None
        if source_path.endswith(".m4s"):
            # fMP4 fragments are not decodable on their own: fetch the init
            # fragment so it can be prepended to the FFmpeg input stream.
            init_path = _init_upstream_path(source_path)
            if init_path is None:
                return Response(status_code=502, content=b"Init segment inference failed")
            async with httpx.AsyncClient(timeout=30.0) as client:
                init_bytes = await _fetch_source_init_bytes(
                    client, init_path, upstream_query, headers
                )
            if init_bytes is None:
                return Response(status_code=502, content=b"Init segment fetch failed")
        return await _transcoded_segment_response(
            source_url=source_url,
            cache_key=cache_key,
            headers=headers,
            init_bytes=init_bytes,
            bitrate=bitrate,
            max_width=max_width,
            max_height=max_height,
        )
    async with httpx.AsyncClient(timeout=30.0) as client:
        if _is_master_playlist(upstream_path):
            # Master playlist: advertise a single transcoded H.264 variant.
            upstream_resp = await _proxy_upstream_response(client, upstream_url, headers)
            if upstream_resp is None:
                return Response(status_code=502, content=b"Upstream fetch failed")
            return Response(
                content=_rewrite_master_playlist(
                    upstream_resp.content, bitrate, max_width, max_height
                ),
                media_type="application/vnd.apple.mpegurl",
                headers={"Cache-Control": "no-store"},
            )
        if upstream_path.endswith(".m3u8"):
            # Media playlist: point each segment URI at the transcoder.
            upstream_resp = await _proxy_upstream_response(client, upstream_url, headers)
            if upstream_resp is None:
                return Response(status_code=502, content=b"Upstream fetch failed")
            return Response(
                content=_rewrite_media_playlist(upstream_resp.content),
                media_type="application/vnd.apple.mpegurl",
                headers={"Cache-Control": "no-store"},
            )
        # Everything else (init fragments, source segments, ...) passes
        # through unchanged.
        upstream_resp = await _proxy_upstream_response(client, upstream_url, headers)
        if upstream_resp is None:
            return Response(status_code=502, content=b"Upstream fetch failed")
        return Response(
            content=upstream_resp.content,
            media_type=upstream_resp.headers.get("content-type", "application/octet-stream"),
            headers={"Cache-Control": "no-store"},
        )
def run() -> None:
    """Run the proxy under uvicorn with the configured host and port."""
    # Imported lazily so importing this module doesn't hard-require uvicorn.
    import uvicorn
    uvicorn.run(
        "transcode_proxy.main:app",
        host=config.host,
        port=config.port,
        log_level="info",
    )
# Allow running the proxy directly (e.g. ``python -m transcode_proxy.main``).
if __name__ == "__main__":
    run()

View File

@ -0,0 +1,5 @@
# Dependencies for running the transcode proxy standalone (e.g. in a separate container).
# Frigate's main container may already have these; the proxy can share the same env.
fastapi>=0.100.0
uvicorn>=0.22.0
httpx>=0.24.0

View File

@ -0,0 +1,256 @@
"""Transcode media segments to H.264 transport stream bytes using FFmpeg."""
import asyncio
import logging
import subprocess
from collections.abc import AsyncIterable, AsyncIterator
from typing import Optional
logger = logging.getLogger(__name__)
class TranscodeError(RuntimeError):
    """Raised when FFmpeg cannot produce a valid transcoded segment.

    The message carries the best available diagnostic: the source-stream
    error when input feeding failed, otherwise FFmpeg's stderr output.
    """
def _build_scale_filter(max_width: int, max_height: int) -> Optional[str]:
if max_width <= 0 or max_height <= 0:
return None
return (
f"scale=w={max_width}:h={max_height}:"
"force_original_aspect_ratio=decrease:"
"force_divisible_by=2"
)
def _build_ffmpeg_cmd(
    ffmpeg_path: str,
    bitrate: str,
    max_width: int,
    max_height: int,
) -> list[str]:
    """Build the FFmpeg command line for a segment -> H.264 MPEG-TS transcode.

    Reads the source from stdin and writes MPEG-TS to stdout; audio is
    dropped. When a size bound is configured, a scale filter is inserted
    among the output options (immediately before ``-pix_fmt``, matching the
    original ``cmd[7:7]`` insertion point).
    """
    cmd = [
        ffmpeg_path,
        "-hide_banner",
        "-loglevel",
        "error",
        "-i",
        "pipe:0",
        "-an",
        "-pix_fmt",
        "yuv420p",
        "-c:v",
        "libx264",
        "-preset",
        "fast",
        "-profile:v",
        "high",
        "-level:v",
        "3.1",
        "-b:v",
        bitrate,
        "-maxrate",
        bitrate,
        "-bufsize",
        bitrate,
        "-muxdelay",
        "0",
        "-muxpreload",
        "0",
        "-f",
        "mpegts",
        "-mpegts_flags",
        "+initial_discontinuity",
        "pipe:1",
    ]
    scale_filter = _build_scale_filter(max_width, max_height)
    if scale_filter:
        # Anchor on the "-pix_fmt" option instead of a magic list index so
        # the insertion point survives edits to the argument list above.
        insert_at = cmd.index("-pix_fmt")
        cmd[insert_at:insert_at] = ["-vf", scale_filter]
    return cmd
class H264TSStream:
    """Manage a streaming FFmpeg transcode process.

    Wraps an ``asyncio`` subprocess: one background task feeds source bytes
    to FFmpeg's stdin, another drains stderr (so that pipe can never fill up
    and stall FFmpeg), while the caller pulls transcoded MPEG-TS bytes from
    stdout via ``first_chunk``/``iter_chunks``. Every stdout byte is also
    accumulated so ``output_bytes`` can be cached after a successful run.
    """
    def __init__(self, process: asyncio.subprocess.Process):
        # The running FFmpeg process created by ``start``.
        self._process = process
        # Accumulated stderr output, used to build error messages.
        self._stderr = bytearray()
        # Accumulated stdout output (the transcoded TS bytes).
        self._output = bytearray()
        # Error raised while feeding stdin, if any.
        self._input_error: Exception | None = None
        # Guards against double cleanup in ``aclose``.
        self._closed = False
        # Background helper tasks, assigned by ``start``.
        self._stdin_task: asyncio.Task[None] | None = None
        self._stderr_task: asyncio.Task[None] | None = None
    @classmethod
    async def start(
        cls,
        source_chunks: AsyncIterable[bytes],
        ffmpeg_path: str,
        bitrate: str = "2M",
        max_width: int = 640,
        max_height: int = 480,
    ) -> "H264TSStream":
        """Spawn FFmpeg and kick off the stdin-feed and stderr-drain tasks."""
        process = await asyncio.create_subprocess_exec(
            *_build_ffmpeg_cmd(ffmpeg_path, bitrate, max_width, max_height),
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        stream = cls(process)
        stream._stdin_task = asyncio.create_task(stream._feed_stdin(source_chunks))
        stream._stderr_task = asyncio.create_task(stream._drain_stderr())
        return stream
    async def _feed_stdin(self, source_chunks: AsyncIterable[bytes]) -> None:
        """Copy the source chunks into FFmpeg's stdin, then close it (EOF)."""
        assert self._process.stdin is not None
        try:
            async for chunk in source_chunks:
                if not chunk:
                    continue
                self._process.stdin.write(chunk)
                await self._process.stdin.drain()
        except (BrokenPipeError, ConnectionResetError) as exc:
            # FFmpeg exited early; remember why for ``_error_message``.
            self._input_error = exc
        except Exception as exc:  # pragma: no cover - depends on upstream/network failures
            self._input_error = exc
        finally:
            # Always close stdin so FFmpeg sees EOF and can finish encoding.
            stdin = self._process.stdin
            if stdin is not None and not stdin.is_closing():
                stdin.close()
                try:
                    await stdin.wait_closed()
                except Exception:
                    pass
    async def _drain_stderr(self) -> None:
        """Continuously read stderr so FFmpeg never blocks on a full pipe."""
        assert self._process.stderr is not None
        while True:
            chunk = await self._process.stderr.read(8192)
            if not chunk:
                break
            self._stderr.extend(chunk)
    async def _read_stdout_chunk(self) -> bytes:
        """Read one stdout chunk, mirroring it into the output buffer."""
        assert self._process.stdout is not None
        chunk = await self._process.stdout.read(65536)
        if chunk:
            self._output.extend(chunk)
        return chunk
    def _error_message(self) -> str:
        """Best available failure description: input error, stderr, or generic."""
        if self._input_error is not None:
            return f"Source stream failed: {self._input_error}"
        if self._stderr:
            return self._stderr.decode(errors="replace")
        return "unknown FFmpeg error"
    async def _ensure_success(self) -> bytes:
        """Await the helper tasks and process; raise TranscodeError on failure."""
        if self._stdin_task is not None:
            await self._stdin_task
        if self._stderr_task is not None:
            await self._stderr_task
        returncode = await self._process.wait()
        if returncode != 0:
            raise TranscodeError(self._error_message())
        return bytes(self._output)
    async def first_chunk(self) -> bytes:
        """Return the first stdout chunk, or raise if FFmpeg produced nothing.

        Blocking on the first chunk lets callers turn FFmpeg startup failures
        into a proper error response before any body bytes are sent.
        """
        chunk = await self._read_stdout_chunk()
        if chunk:
            return chunk
        # No output at all: surface the process failure if there was one;
        # otherwise report the empty output explicitly.
        try:
            await self._ensure_success()
        finally:
            self._closed = True
        raise TranscodeError("FFmpeg produced no output")
    async def iter_chunks(self, first_chunk: bytes) -> AsyncIterator[bytes]:
        """Yield ``first_chunk`` then the remaining stdout chunks.

        Raises TranscodeError (via ``_ensure_success``) when FFmpeg exits
        non-zero; always releases the process in ``aclose`` afterwards.
        """
        try:
            yield first_chunk
            while True:
                chunk = await self._read_stdout_chunk()
                if not chunk:
                    break
                yield chunk
            await self._ensure_success()
        finally:
            await self.aclose()
    async def aclose(self) -> None:
        """Kill the process if still running and cancel the helper tasks."""
        if self._closed:
            return
        self._closed = True
        if self._process.returncode is None:
            self._process.kill()
            await self._process.wait()
        for task in (self._stdin_task, self._stderr_task):
            if task is None or task.done():
                continue
            task.cancel()
            try:
                await task
            except asyncio.CancelledError:
                pass
    @property
    def output_bytes(self) -> bytes:
        """All stdout bytes accumulated so far (complete only after success)."""
        return bytes(self._output)
async def stream_transcode_segment_to_h264_ts(
    source_chunks: AsyncIterable[bytes],
    ffmpeg_path: str,
    bitrate: str = "2M",
    max_width: int = 640,
    max_height: int = 480,
) -> H264TSStream:
    """Start an FFmpeg process that streams H.264 MPEG-TS output.

    Thin convenience wrapper around ``H264TSStream.start``; the returned
    stream exposes ``first_chunk``/``iter_chunks`` for consumption.
    """
    return await H264TSStream.start(
        source_chunks,
        ffmpeg_path,
        bitrate=bitrate,
        max_width=max_width,
        max_height=max_height,
    )
def transcode_segment_to_h264_ts(
    segment_bytes: bytes,
    ffmpeg_path: str,
    bitrate: str = "2M",
    max_width: int = 640,
    max_height: int = 480,
) -> Optional[bytes]:
    """Decode a segment and re-encode it as H.264 MPEG-TS bytes.

    Runs FFmpeg synchronously over the in-memory segment; returns the
    transcoded bytes, or ``None`` on any failure or a 60s timeout.
    """
    cmd = _build_ffmpeg_cmd(ffmpeg_path, bitrate, max_width, max_height)
    try:
        completed = subprocess.run(
            cmd,
            input=segment_bytes,
            capture_output=True,
            timeout=60,
        )
    except subprocess.TimeoutExpired:
        logger.warning("FFmpeg transcode timed out")
        return None
    except Exception as e:
        logger.warning("FFmpeg transcode error: %s", e)
        return None
    if completed.returncode == 0:
        return completed.stdout
    logger.warning(
        "FFmpeg transcode failed: %s",
        completed.stderr.decode(errors="replace") if completed.stderr else "unknown",
    )
    return None

View File

@ -35,6 +35,7 @@ import { GenericVideoPlayer } from "../player/GenericVideoPlayer";
import { useTranslation } from "react-i18next"; import { useTranslation } from "react-i18next";
import { ExportCase } from "@/types/export"; import { ExportCase } from "@/types/export";
import { CustomTimeSelector } from "./CustomTimeSelector"; import { CustomTimeSelector } from "./CustomTimeSelector";
import useRecordingPlaybackSource from "@/hooks/use-recording-playback-source";
const EXPORT_OPTIONS = [ const EXPORT_OPTIONS = [
"1", "1",
@ -428,11 +429,22 @@ export function ExportPreviewDialog({
setShowPreview, setShowPreview,
}: ExportPreviewDialogProps) { }: ExportPreviewDialogProps) {
const { t } = useTranslation(["components/dialog"]); const { t } = useTranslation(["components/dialog"]);
const vodPath = range
? `/vod/${camera}/start/${range.after}/end/${range.before}/index.m3u8`
: `/vod/${camera}/start/0/end/0/index.m3u8`;
const playbackSource = useRecordingPlaybackSource({
camera,
after: range?.after ?? 0,
before: range?.before ?? 0,
vodPath,
enabled: !!range,
});
if (!range) { if (!range) {
return null; return null;
} }
const source = `${baseUrl}vod/${camera}/start/${range.after}/end/${range.before}/index.m3u8`; const source = playbackSource ?? `${baseUrl}${vodPath}`;
return ( return (
<Dialog open={showPreview} onOpenChange={setShowPreview}> <Dialog open={showPreview} onOpenChange={setShowPreview}>

View File

@ -80,6 +80,7 @@ import {
DrawerTitle, DrawerTitle,
DrawerTrigger, DrawerTrigger,
} from "@/components/ui/drawer"; } from "@/components/ui/drawer";
import useRecordingPlaybackSource from "@/hooks/use-recording-playback-source";
import { LuInfo } from "react-icons/lu"; import { LuInfo } from "react-icons/lu";
import { TooltipPortal } from "@radix-ui/react-tooltip"; import { TooltipPortal } from "@radix-ui/react-tooltip";
import { FaPencilAlt } from "react-icons/fa"; import { FaPencilAlt } from "react-icons/fa";
@ -1866,8 +1867,16 @@ export function VideoTab({ search }: VideoTabProps) {
const endTime = (search.end_time ?? Date.now() / 1000) + REVIEW_PADDING; const endTime = (search.end_time ?? Date.now() / 1000) + REVIEW_PADDING;
return `start/${startTime}/end/${endTime}`; return `start/${startTime}/end/${endTime}`;
}, [search]); }, [search]);
const startTime = search.start_time - REVIEW_PADDING;
const source = `${baseUrl}vod/${search.camera}/${clipTimeRange}/index.m3u8`; const endTime = (search.end_time ?? Date.now() / 1000) + REVIEW_PADDING;
const vodPath = `/vod/${search.camera}/${clipTimeRange}/index.m3u8`;
const playbackSource = useRecordingPlaybackSource({
camera: search.camera,
after: startTime,
before: endTime,
vodPath,
});
const source = playbackSource ?? `${baseUrl}${vodPath}`;
return ( return (
<> <>

View File

@ -41,6 +41,7 @@ import ImageLoadingIndicator from "@/components/indicators/ImageLoadingIndicator
import ObjectTrackOverlay from "../ObjectTrackOverlay"; import ObjectTrackOverlay from "../ObjectTrackOverlay";
import { useIsAdmin } from "@/hooks/use-is-admin"; import { useIsAdmin } from "@/hooks/use-is-admin";
import { VideoResolutionType } from "@/types/live"; import { VideoResolutionType } from "@/types/live";
import useRecordingPlaybackSource from "@/hooks/use-recording-playback-source";
type TrackingDetailsProps = { type TrackingDetailsProps = {
className?: string; className?: string;
@ -513,25 +514,36 @@ export function TrackingDetails({
setBlueLineHeightPx(bluePx); setBlueLineHeightPx(bluePx);
}, [eventSequence, timelineSize.width, timelineSize.height, effectiveTime]); }, [eventSequence, timelineSize.width, timelineSize.height, effectiveTime]);
const videoSource = useMemo(() => { const videoWindow = useMemo(() => {
// event.start_time and event.end_time are in DETECT stream time
// Convert to record stream time, then create video clip with padding.
// Use sourceOffsetRef (stable per event) so the HLS player doesn't
// reload while the user is dragging the annotation offset slider.
const sourceOffset = sourceOffsetRef.current; const sourceOffset = sourceOffsetRef.current;
const eventStartRec = event.start_time + sourceOffset / 1000; const eventStartRec = event.start_time + sourceOffset / 1000;
const eventEndRec = const eventEndRec =
(event.end_time ?? Date.now() / 1000) + sourceOffset / 1000; (event.end_time ?? Date.now() / 1000) + sourceOffset / 1000;
const startTime = eventStartRec - REVIEW_PADDING; const startTime = eventStartRec - REVIEW_PADDING;
const endTime = eventEndRec + REVIEW_PADDING; const endTime = eventEndRec + REVIEW_PADDING;
const playlist = `${baseUrl}vod/clip/${event.camera}/start/${startTime}/end/${endTime}/index.m3u8`;
return {
startTime,
endTime,
vodPath: `/vod/clip/${event.camera}/start/${startTime}/end/${endTime}/index.m3u8`,
};
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [event]);
const playbackSource = useRecordingPlaybackSource({
camera: event.camera,
after: videoWindow.startTime,
before: videoWindow.endTime,
vodPath: videoWindow.vodPath,
});
const videoSource = useMemo(() => {
const playlist = playbackSource ?? `${baseUrl}${videoWindow.vodPath}`;
return { return {
playlist, playlist,
startPosition: 0, startPosition: 0,
}; };
// eslint-disable-next-line react-hooks/exhaustive-deps // eslint-disable-next-line react-hooks/exhaustive-deps
}, [event]); }, [playbackSource, videoWindow]);
// Determine camera aspect ratio category // Determine camera aspect ratio category
const cameraAspect = useMemo(() => { const cameraAspect = useMemo(() => {

View File

@ -9,7 +9,10 @@ import {
import { useApiHost } from "@/api"; import { useApiHost } from "@/api";
import useSWR from "swr"; import useSWR from "swr";
import { FrigateConfig } from "@/types/frigateConfig"; import { FrigateConfig } from "@/types/frigateConfig";
import { Recording } from "@/types/record"; import {
Recording,
RecordingPlaybackPreference,
} from "@/types/record";
import { Preview } from "@/types/preview"; import { Preview } from "@/types/preview";
import PreviewPlayer, { PreviewController } from "../PreviewPlayer"; import PreviewPlayer, { PreviewController } from "../PreviewPlayer";
import { DynamicVideoController } from "./DynamicVideoController"; import { DynamicVideoController } from "./DynamicVideoController";
@ -21,11 +24,21 @@ import { VideoResolutionType } from "@/types/live";
import axios from "axios"; import axios from "axios";
import { cn } from "@/lib/utils"; import { cn } from "@/lib/utils";
import { useTranslation } from "react-i18next"; import { useTranslation } from "react-i18next";
import { useUserPersistence } from "@/hooks/use-user-persistence";
import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";
import { chooseRecordingPlayback } from "@/utils/recordingPlayback";
import { import {
calculateInpointOffset, calculateInpointOffset,
calculateSeekPosition, calculateSeekPosition,
} from "@/utils/videoUtil"; } from "@/utils/videoUtil";
import { isFirefox } from "react-device-detect"; import { isFirefox } from "react-device-detect";
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from "@/components/ui/select";
/** /**
* Dynamically switches between video playback and scrubbing preview player. * Dynamically switches between video playback and scrubbing preview player.
@ -121,6 +134,11 @@ export default function DynamicVideoPlayer({
const [isLoading, setIsLoading] = useState(false); const [isLoading, setIsLoading] = useState(false);
const [isBuffering, setIsBuffering] = useState(false); const [isBuffering, setIsBuffering] = useState(false);
const [loadingTimeout, setLoadingTimeout] = useState<NodeJS.Timeout>(); const [loadingTimeout, setLoadingTimeout] = useState<NodeJS.Timeout>();
const [playbackPreference, setPlaybackPreference] =
useUserPersistence<RecordingPlaybackPreference>(
`${camera}-recording-playback-v2`,
"sub",
);
// Don't set source until recordings load - we need accurate startPosition // Don't set source until recordings load - we need accurate startPosition
// to avoid hls.js clamping to video end when startPosition exceeds duration // to avoid hls.js clamping to video end when startPosition exceeds duration
@ -190,10 +208,29 @@ export default function DynamicVideoPlayer({
}), }),
[timeRange], [timeRange],
); );
const { data: recordings } = useSWR<Recording[]>( const { data: allRecordings } = useSWR<Recording[]>(
[`${camera}/recordings`, recordingParams], [`${camera}/recordings`, { ...recordingParams, variant: "all" }],
{ revalidateOnFocus: false }, { revalidateOnFocus: false },
); );
const recordings = useMemo(() => {
if (!allRecordings?.length) {
return allRecordings;
}
const mainRecordings = allRecordings.filter(
(recording) => (recording.variant || "main") === "main",
);
return mainRecordings.length > 0 ? mainRecordings : allRecordings;
}, [allRecordings]);
const codecNames = useMemo(
() =>
Array.from(
new Set((allRecordings ?? []).map((recording) => recording.codec_name)),
),
[allRecordings],
);
const playbackCapabilities = usePlaybackCapabilities(codecNames);
useEffect(() => { useEffect(() => {
if (!recordings?.length) { if (!recordings?.length) {
@ -219,13 +256,34 @@ export default function DynamicVideoPlayer({
); );
} }
const vodPath = `/vod/${camera}/start/${recordingParams.after}/end/${recordingParams.before}/master.m3u8`;
const decision = chooseRecordingPlayback({
apiHost,
config,
recordings: allRecordings ?? recordings,
preference: playbackPreference ?? "sub",
vodPath,
capabilities: playbackCapabilities,
});
setSource({ setSource({
playlist: `${apiHost}vod/${camera}/start/${recordingParams.after}/end/${recordingParams.before}/master.m3u8`, playlist: decision.url,
startPosition, startPosition,
}); });
// eslint-disable-next-line react-hooks/exhaustive-deps // eslint-disable-next-line react-hooks/exhaustive-deps
}, [recordings]); }, [
apiHost,
camera,
recordingParams.after,
recordingParams.before,
allRecordings,
recordings,
startTimestamp,
playbackPreference,
playbackCapabilities,
config?.transcode_proxy?.enabled,
config?.transcode_proxy?.vod_proxy_url,
]);
useEffect(() => { useEffect(() => {
if (!controller || !recordings?.length) { if (!controller || !recordings?.length) {
@ -324,6 +382,26 @@ export default function DynamicVideoPlayer({
transformedOverlay={transformedOverlay} transformedOverlay={transformedOverlay}
/> />
)} )}
{!isScrubbing && source && (
<div className="absolute right-3 top-3 z-50">
<Select
value={playbackPreference ?? "sub"}
onValueChange={(value) =>
setPlaybackPreference(value as RecordingPlaybackPreference)
}
>
<SelectTrigger className="h-8 w-32 bg-background/90 text-xs backdrop-blur">
<SelectValue />
</SelectTrigger>
<SelectContent>
<SelectItem value="auto">Auto</SelectItem>
<SelectItem value="main">Main</SelectItem>
<SelectItem value="sub">Sub</SelectItem>
<SelectItem value="transcoded">Transcoded</SelectItem>
</SelectContent>
</Select>
</div>
)}
<PreviewPlayer <PreviewPlayer
className={cn( className={cn(
className, className,

View File

@ -0,0 +1,77 @@
import { useMemo } from "react";
import {
getCodecMimeTypes,
normalizeCodecName,
PlaybackCapabilities,
} from "@/utils/recordingPlayback";
// Subset of the (non-standard) Network Information API that this hook reads
// from navigator.connection and its vendor-prefixed variants.
type NavigatorConnection = {
  downlink?: number;
  effectiveType?: string;
  rtt?: number;
  saveData?: boolean;
};
// Ambient declarations for browser APIs the TypeScript DOM lib doesn't
// cover: vendor-prefixed connection objects and Safari's ManagedMediaSource.
declare global {
  interface Navigator {
    connection?: NavigatorConnection;
    mozConnection?: NavigatorConnection;
    webkitConnection?: NavigatorConnection;
  }
  interface Window {
    ManagedMediaSource?: typeof MediaSource;
  }
}
/** Whether the current browser can play the given MIME type. */
function canPlayMimeType(mimeType?: string): boolean {
  // No MIME type or no browser environment: nothing can be played.
  if (!mimeType || typeof window === "undefined") {
    return false;
  }
  const mseSupported =
    window.ManagedMediaSource?.isTypeSupported(mimeType) ||
    window.MediaSource?.isTypeSupported(mimeType);
  if (mseSupported) {
    return true;
  }
  // Fall back to probing a detached <video> element.
  return document.createElement("video").canPlayType(mimeType) !== "";
}
/** Whether at least one of the candidate MIME types is playable. */
function canPlayAnyMimeType(mimeTypes: string[]): boolean {
  for (const mimeType of mimeTypes) {
    if (canPlayMimeType(mimeType)) {
      return true;
    }
  }
  return false;
}
/**
 * Probe the browser for playback capabilities: estimated bandwidth,
 * Save-Data preference, and per-codec playback support for the given
 * codec names.
 */
export default function usePlaybackCapabilities(codecNames: Array<string | null | undefined>) {
  return useMemo<PlaybackCapabilities>(() => {
    // SSR: report no capabilities at all.
    if (typeof window === "undefined") {
      return { estimatedBandwidthBps: undefined, saveData: false, supports: {} };
    }
    const connection =
      navigator.connection ?? navigator.mozConnection ?? navigator.webkitConnection;
    const supports: Record<string, boolean> = {};
    for (const codecName of codecNames) {
      const normalized = normalizeCodecName(codecName);
      if (normalized && !(normalized in supports)) {
        supports[normalized] = canPlayAnyMimeType(getCodecMimeTypes(normalized));
      }
    }
    const downlinkMbps = connection?.downlink;
    const estimatedBandwidthBps =
      typeof downlinkMbps === "number" && downlinkMbps > 0
        ? downlinkMbps * 1_000_000
        : undefined;
    return {
      estimatedBandwidthBps,
      saveData: connection?.saveData === true,
      supports,
    };
  }, [codecNames]);
}

View File

@ -0,0 +1,72 @@
import { useApiHost } from "@/api";
import useSWR from "swr";
import { FrigateConfig } from "@/types/frigateConfig";
import {
Recording,
RecordingPlaybackPreference,
} from "@/types/record";
import { useMemo } from "react";
import { useUserPersistence } from "@/hooks/use-user-persistence";
import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";
import { chooseRecordingPlayback } from "@/utils/recordingPlayback";
type RecordingPlaybackSourceOptions = {
camera: string;
after: number;
before: number;
vodPath: string;
preference?: RecordingPlaybackPreference;
enabled?: boolean;
};
/**
 * Resolve the playback URL for a recording window, honoring the caller's
 * (or the stored per-camera) playback preference and the browser's
 * playback capabilities. Returns undefined until recordings are loaded.
 */
export default function useRecordingPlaybackSource({
  camera,
  after,
  before,
  vodPath,
  preference,
  enabled = true,
}: RecordingPlaybackSourceOptions) {
  const apiHost = useApiHost();
  const { data: config } = useSWR<FrigateConfig>("config");
  const [storedPreference] = useUserPersistence<RecordingPlaybackPreference>(
    `${camera}-recording-playback-v2`,
    "sub",
  );
  const { data: recordings } = useSWR<Recording[]>(
    enabled ? [`${camera}/recordings`, { after, before, variant: "all" }] : null,
    { revalidateOnFocus: false },
  );
  // Unique codec names across the loaded recordings.
  const codecNames = useMemo(() => {
    const unique = new Set((recordings ?? []).map((recording) => recording.codec_name));
    return [...unique];
  }, [recordings]);
  const capabilities = usePlaybackCapabilities(codecNames);
  return useMemo(() => {
    if (!recordings?.length) {
      return undefined;
    }
    const decision = chooseRecordingPlayback({
      apiHost,
      config,
      recordings,
      preference: preference ?? storedPreference ?? "sub",
      vodPath,
      capabilities,
    });
    return decision.url;
  }, [
    apiHost,
    capabilities,
    config,
    preference,
    recordings,
    storedPreference,
    vodPath,
  ]);
}

View File

@ -540,6 +540,11 @@ export interface FrigateConfig {
logout_url?: string; logout_url?: string;
}; };
transcode_proxy?: {
enabled: boolean;
vod_proxy_url: string;
};
record: { record: {
enabled: boolean; enabled: boolean;
enabled_in_config: boolean | null; enabled_in_config: boolean | null;

View File

@ -7,12 +7,17 @@ export type Recording = {
start_time: number; start_time: number;
end_time: number; end_time: number;
path: string; path: string;
variant?: string;
segment_size: number; segment_size: number;
duration: number; duration: number;
motion: number; motion: number;
objects: number; objects: number;
motion_heatmap?: Record<string, number> | null; motion_heatmap?: Record<string, number> | null;
dBFS: number; dBFS: number;
codec_name?: string | null;
width?: number | null;
height?: number | null;
bitrate?: number | null;
}; };
export type RecordingSegment = { export type RecordingSegment = {
@ -44,6 +49,12 @@ export type RecordingStartingPoint = {
export type RecordingPlayerError = "stalled" | "startup"; export type RecordingPlayerError = "stalled" | "startup";
export type RecordingPlaybackPreference =
| "auto"
| "main"
| "sub"
| "transcoded";
export const ASPECT_VERTICAL_LAYOUT = 1.5; export const ASPECT_VERTICAL_LAYOUT = 1.5;
export const ASPECT_PORTRAIT_LAYOUT = 1.333; export const ASPECT_PORTRAIT_LAYOUT = 1.333;
export const ASPECT_WIDE_LAYOUT = 2; export const ASPECT_WIDE_LAYOUT = 2;

View File

@ -0,0 +1,44 @@
// Labels that typically denote a low-bandwidth (secondary) stream.
const LOW_BANDWIDTH_PATTERN = /\b(sub|low|mobile|small|sd|lowres|low-res)\b/i;
// Labels that typically denote a high-bandwidth (primary) stream.
const HIGH_BANDWIDTH_PATTERN = /\b(main|high|hd|full|primary)\b/i;

/**
 * Scores a stream label against the current bandwidth preference:
 * 3 = matches the preferred tier, 2 = neutral, 1 = matches the opposite tier.
 * The preferred pattern is tested first, so a label matching both tiers
 * counts as preferred.
 */
function rankStreamLabel(label: string, preferLowBandwidth: boolean): number {
  const preferred = preferLowBandwidth
    ? LOW_BANDWIDTH_PATTERN
    : HIGH_BANDWIDTH_PATTERN;
  const dispreferred = preferLowBandwidth
    ? HIGH_BANDWIDTH_PATTERN
    : LOW_BANDWIDTH_PATTERN;
  if (preferred.test(label)) {
    return 3;
  }
  if (dispreferred.test(label)) {
    return 1;
  }
  return 2;
}

/**
 * Picks the live stream URL best suited to the client's bandwidth.
 *
 * @param streams - Map of stream label -> stream URL.
 * @param estimatedBandwidthBps - Downlink estimate in bits/second; when
 *   absent (or 0) bandwidth is treated as unknown/adequate.
 * @param saveData - When true, always prefer the low-bandwidth tier.
 * @returns The chosen stream URL, or "" when no streams are configured.
 */
export function chooseAutoLiveStream(
  streams: Record<string, string>,
  estimatedBandwidthBps?: number,
  saveData = false,
): string {
  const entries = Object.entries(streams || {});
  if (entries.length === 0) {
    return "";
  }
  const preferLowBandwidth =
    saveData || !!(estimatedBandwidthBps && estimatedBandwidthBps <= 3_000_000);
  // Single O(n) scan instead of sorting: each label is ranked exactly once
  // (the sort re-ran the regexes on every comparison). Strict ">" keeps the
  // first entry among ties, matching the previous stable-sort behavior.
  let best = entries[0];
  let bestRank = rankStreamLabel(best[0], preferLowBandwidth);
  for (let i = 1; i < entries.length; i++) {
    const rank = rankStreamLabel(entries[i][0], preferLowBandwidth);
    if (rank > bestRank) {
      best = entries[i];
      bestRank = rank;
    }
  }
  return best[1];
}

View File

@ -0,0 +1,324 @@
import { FrigateConfig } from "@/types/frigateConfig";
import {
Recording,
RecordingPlaybackPreference,
} from "@/types/record";
/** Client-side playback capabilities and network conditions. */
export type PlaybackCapabilities = {
  // Downlink estimate in bits/second; undefined when unknown.
  estimatedBandwidthBps?: number;
  // True when reduced data usage is requested (presumably the browser's
  // Save-Data hint — TODO confirm against usePlaybackCapabilities).
  saveData: boolean;
  // Normalized codec name -> whether this browser can direct-play it.
  supports: Record<string, boolean>;
};
/** Outcome of deciding how to play back a recording span. */
export type RecordingPlaybackDecision = {
  // "direct" streams stored segments as-is; "transcoded" routes via the proxy.
  mode: "direct" | "transcoded";
  // Recording variant the URL targets (e.g. "main" or "sub").
  variant: string;
  // Fully built playback URL, including any query parameters.
  url: string;
  // Short machine-readable tag explaining why this decision was made.
  reason: string;
};
/** Inputs to chooseRecordingPlayback. */
type DecisionOptions = {
  // Base API host the direct VOD URLs are built from.
  apiHost: string;
  // Frigate config; supplies transcode_proxy settings when present.
  config?: FrigateConfig;
  // Recording segments (all variants) in the requested time window.
  recordings: Recording[];
  // Effective playback preference ("auto" | "main" | "sub" | "transcoded").
  preference: RecordingPlaybackPreference;
  // VOD path appended to the chosen base URL.
  vodPath: string;
  // Browser capability/network probe results.
  capabilities: PlaybackCapabilities;
};
// Representative MIME strings used to probe browser support for each codec
// family. Several ffmpeg codec names alias the same family (h264/avc1,
// hevc/h265/hev1/hvc1, av1/av01, vp9/vp09), so the candidate lists are
// defined once and shared instead of copy-pasted per alias.
const H264_MIME_TYPES = [
  'video/mp4; codecs="avc1.42E01E"',
  'video/mp4; codecs="avc1.64001F"',
];
const HEVC_MIME_TYPES = [
  'video/mp4; codecs="hev1.1.6.L120.90"',
  'video/mp4; codecs="hvc1.1.6.L120.90"',
  'video/mp4; codecs="hev1.1.6.L93.B0"',
  'video/mp4; codecs="hvc1.1.6.L93.B0"',
];
const AV1_MIME_TYPES = ['video/mp4; codecs="av01.0.05M.08"'];
const VP9_MIME_TYPES = ['video/mp4; codecs="vp09.00.10.08"'];

// Normalized codec name -> MIME candidates. Alias keys intentionally share
// the same (treat-as-readonly) array instance.
const CODEC_SAMPLES: Record<string, string[]> = {
  h264: H264_MIME_TYPES,
  avc1: H264_MIME_TYPES,
  hevc: HEVC_MIME_TYPES,
  h265: HEVC_MIME_TYPES,
  hev1: HEVC_MIME_TYPES,
  hvc1: HEVC_MIME_TYPES,
  av1: AV1_MIME_TYPES,
  av01: AV1_MIME_TYPES,
  vp9: VP9_MIME_TYPES,
  vp09: VP9_MIME_TYPES,
};
// Drops a single trailing "/" so joined paths don't produce "//".
function trimTrailingSlash(value: string): string {
  return value.endsWith("/") ? value.slice(0, -1) : value;
}
// Appends the truthy entries of `params` to `url` as a query string,
// reusing "&" when the URL already carries a query. Falsy values
// (undefined, "") are skipped; an all-falsy map returns `url` unchanged.
function appendQuery(
  url: string,
  params: Record<string, string | undefined>,
): string {
  const pairs: [string, string][] = [];
  for (const [key, value] of Object.entries(params)) {
    if (value) {
      pairs.push([key, value]);
    }
  }
  if (pairs.length === 0) {
    return url;
  }
  const separator = url.includes("?") ? "&" : "?";
  return `${url}${separator}${new URLSearchParams(pairs).toString()}`;
}
// Arithmetic mean of `values`; undefined for an empty list.
function average(values: number[]): number | undefined {
  if (values.length === 0) {
    return undefined;
  }
  let total = 0;
  for (const value of values) {
    total += value;
  }
  return total / values.length;
}
// Lowercases and trims a codec name; null/undefined/blank collapse to undefined.
export function normalizeCodecName(
  codecName?: string | null,
): string | undefined {
  if (codecName == null) {
    return undefined;
  }
  const normalized = codecName.toLowerCase().trim();
  return normalized.length > 0 ? normalized : undefined;
}
// MIME candidate strings for a (possibly raw) codec name; [] when the codec
// is unknown or the name is blank.
export function getCodecMimeTypes(codecName?: string | null): string[] {
  const key = normalizeCodecName(codecName);
  return key ? CODEC_SAMPLES[key] ?? [] : [];
}
// Estimates the average bitrate (bits/second) of a set of recordings.
// Explicit per-segment `bitrate` values win; otherwise the rate is derived
// from segment_size (assumed MB — TODO confirm units against the API) and
// duration. Returns undefined when nothing usable is present.
export function estimateRecordingBitrate(
  recordings: Recording[],
): number | undefined {
  const explicitRates: number[] = [];
  for (const recording of recordings) {
    if (typeof recording.bitrate === "number" && recording.bitrate > 0) {
      explicitRates.push(recording.bitrate);
    }
  }
  if (explicitRates.length > 0) {
    return average(explicitRates);
  }

  const derivedRates: number[] = [];
  for (const recording of recordings) {
    // Skip segments missing either size or duration (0/undefined).
    if (!recording.segment_size || !recording.duration) {
      continue;
    }
    const bits = recording.segment_size * 1024 * 1024 * 8;
    const rate = bits / recording.duration;
    if (rate > 0) {
      derivedRates.push(rate);
    }
  }
  return average(derivedRates);
}
// Buckets recordings by their `variant` field; segments with a missing or
// empty variant fall into "main".
export function groupRecordingsByVariant(
  recordings: Recording[],
): Record<string, Recording[]> {
  const grouped: Record<string, Recording[]> = {};
  for (const recording of recordings) {
    const key = recording.variant || "main";
    (grouped[key] ??= []).push(recording);
  }
  return grouped;
}
// True when this variant's codec (taken from its first segment) is known and
// the browser capability probe reported support for it.
function canDirectPlayVariant(
  capabilities: PlaybackCapabilities,
  recordings: Recording[],
): boolean {
  const [first] = recordings;
  const codec = normalizeCodecName(first?.codec_name);
  return codec !== undefined && capabilities.supports[codec] === true;
}
// Base URL for direct playback: the API host without its trailing slash so
// that appending vodPath never yields "//".
function getDirectBaseUrl(apiHost: string): string {
  return apiHost.endsWith("/") ? apiHost.slice(0, -1) : apiHost;
}
// Base URL for transcoded playback, or undefined when the transcode proxy is
// disabled/unconfigured. A configured vod_proxy_url wins; otherwise the
// built-in /vod-transcoded route on the API host is used.
function getTranscodeBaseUrl(
  apiHost: string,
  config?: FrigateConfig,
): string | undefined {
  if (!config?.transcode_proxy?.enabled) {
    return undefined;
  }
  // Bug fix: use the *trimmed* URL. Previously the trimmed value was only
  // used for the truthiness check and the raw (possibly whitespace-padded)
  // config string was returned, leaking spaces into built URLs.
  const proxyUrl = config.transcode_proxy.vod_proxy_url?.trim();
  if (proxyUrl) {
    return trimTrailingSlash(proxyUrl);
  }
  return `${trimTrailingSlash(apiHost)}/vod-transcoded`;
}
// Maps the bandwidth estimate (and Save-Data flag) to a transcode profile.
// A falsy estimate (undefined/0/NaN) means "unknown" and gets the top tier
// unless saveData forces the lowest one.
function getTranscodeProfile(estimatedBandwidthBps?: number, saveData = false) {
  // Collapse falsy estimates to undefined so each tier test reads cleanly.
  const bandwidth = estimatedBandwidthBps || undefined;
  if (saveData || (bandwidth !== undefined && bandwidth <= 1_500_000)) {
    return { bitrate: "512k", maxWidth: "640", maxHeight: "360" };
  }
  if (bandwidth !== undefined && bandwidth <= 3_000_000) {
    return { bitrate: "1200k", maxWidth: "960", maxHeight: "540" };
  }
  return { bitrate: "2500k", maxWidth: "1280", maxHeight: "720" };
}
// Direct playback URL; the "variant" query parameter is only added for
// non-main variants (main is the server default).
function buildDirectUrl(
  apiHost: string,
  vodPath: string,
  variant: string,
): string {
  const url = `${getDirectBaseUrl(apiHost)}${vodPath}`;
  const variantParam = variant === "main" ? undefined : variant;
  return appendQuery(url, { variant: variantParam });
}
function buildTranscodeUrl(
apiHost: string,
config: FrigateConfig | undefined,
vodPath: string,
variant: string,
capabilities: PlaybackCapabilities,
): string {
const transcodeBase = getTranscodeBaseUrl(apiHost, config);
if (!transcodeBase) {
return buildDirectUrl(apiHost, vodPath, variant);
}
const profile = getTranscodeProfile(
capabilities.estimatedBandwidthBps,
capabilities.saveData,
);
return appendQuery(`${transcodeBase}${vodPath}`, {
variant,
bitrate: profile.bitrate,
max_width: profile.maxWidth,
max_height: profile.maxHeight,
});
}
export function chooseRecordingPlayback({
apiHost,
config,
recordings,
preference,
vodPath,
capabilities,
}: DecisionOptions): RecordingPlaybackDecision {
const recordingsByVariant = groupRecordingsByVariant(recordings);
const mainRecordings = recordingsByVariant.main ?? [];
const subRecordings = recordingsByVariant.sub ?? [];
const transcodeAvailable = !!getTranscodeBaseUrl(apiHost, config);
const estimatedBandwidthBps =
capabilities.estimatedBandwidthBps ?? (capabilities.saveData ? 1_000_000 : 6_000_000);
const candidates: Record<
"main" | "sub",
{ recordings: Recording[]; playable: boolean; bitrate?: number }
> = {
main: {
recordings: mainRecordings,
playable: canDirectPlayVariant(capabilities, mainRecordings),
bitrate: estimateRecordingBitrate(mainRecordings),
},
sub: {
recordings: subRecordings,
playable: canDirectPlayVariant(capabilities, subRecordings),
bitrate: estimateRecordingBitrate(subRecordings),
},
};
const preferDirect = (variant: "main" | "sub") => {
const candidate = candidates[variant];
return (
candidate.recordings.length > 0 &&
candidate.playable &&
(!candidate.bitrate || candidate.bitrate <= estimatedBandwidthBps * 0.85)
);
};
if (preference === "main" && candidates.main.recordings.length > 0) {
return {
mode: "direct",
variant: "main",
url: buildDirectUrl(apiHost, vodPath, "main"),
reason: "manual-main",
};
}
if (preference === "sub" && candidates.sub.recordings.length > 0) {
if (candidates.sub.playable) {
return {
mode: "direct",
variant: "sub",
url: buildDirectUrl(apiHost, vodPath, "sub"),
reason: "manual-sub",
};
}
return {
mode: "transcoded",
variant: "sub",
url: buildTranscodeUrl(apiHost, config, vodPath, "sub", capabilities),
reason: "manual-sub-transcoded",
};
}
if (preference === "transcoded") {
const targetVariant = candidates.sub.recordings.length > 0 ? "sub" : "main";
if (!transcodeAvailable) {
return {
mode: "direct",
variant: targetVariant,
url: buildDirectUrl(apiHost, vodPath, targetVariant),
reason: "manual-transcoded-unavailable",
};
}
return {
mode: "transcoded",
variant: targetVariant,
url: buildTranscodeUrl(apiHost, config, vodPath, targetVariant, capabilities),
reason: "manual-transcoded",
};
}
if (preferDirect("main")) {
return {
mode: "direct",
variant: "main",
url: buildDirectUrl(apiHost, vodPath, "main"),
reason: "raw-main",
};
}
if (preferDirect("sub")) {
return {
mode: "direct",
variant: "sub",
url: buildDirectUrl(apiHost, vodPath, "sub"),
reason: "raw-sub",
};
}
const transcodeVariant = candidates.sub.recordings.length > 0 ? "sub" : "main";
if (!transcodeAvailable) {
return {
mode: "direct",
variant: transcodeVariant,
url: buildDirectUrl(apiHost, vodPath, transcodeVariant),
reason: "direct-fallback",
};
}
return {
mode: "transcoded",
variant: transcodeVariant,
url: buildTranscodeUrl(apiHost, config, vodPath, transcodeVariant, capabilities),
reason: "transcode-fallback",
};
}

View File

@ -50,6 +50,8 @@ import { Toaster } from "@/components/ui/sonner";
import LiveContextMenu from "@/components/menu/LiveContextMenu"; import LiveContextMenu from "@/components/menu/LiveContextMenu";
import { useStreamingSettings } from "@/context/streaming-settings-provider"; import { useStreamingSettings } from "@/context/streaming-settings-provider";
import { useTranslation } from "react-i18next"; import { useTranslation } from "react-i18next";
import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";
import { chooseAutoLiveStream } from "@/utils/liveStreamSelection";
type DraggableGridLayoutProps = { type DraggableGridLayoutProps = {
cameras: CameraConfig[]; cameras: CameraConfig[];
@ -96,6 +98,7 @@ export default function DraggableGridLayout({
streamMetadata, streamMetadata,
}: DraggableGridLayoutProps) { }: DraggableGridLayoutProps) {
const { t } = useTranslation(["views/live"]); const { t } = useTranslation(["views/live"]);
const playbackCapabilities = usePlaybackCapabilities([]);
const { data: config } = useSWR<FrigateConfig>("config"); const { data: config } = useSWR<FrigateConfig>("config");
const birdseyeConfig = useMemo(() => config?.birdseye, [config]); const birdseyeConfig = useMemo(() => config?.birdseye, [config]);
@ -588,7 +591,11 @@ export default function DraggableGridLayout({
grow = "aspect-video"; grow = "aspect-video";
} }
const availableStreams = camera.live.streams || {}; const availableStreams = camera.live.streams || {};
const firstStreamEntry = Object.values(availableStreams)[0] || ""; const firstStreamEntry = chooseAutoLiveStream(
availableStreams,
playbackCapabilities.estimatedBandwidthBps,
playbackCapabilities.saveData,
);
const streamNameFromSettings = const streamNameFromSettings =
currentGroupStreamingSettings?.[camera.name]?.streamName || ""; currentGroupStreamingSettings?.[camera.name]?.streamName || "";

View File

@ -122,6 +122,8 @@ import {
SnapshotResult, SnapshotResult,
} from "@/utils/snapshotUtil"; } from "@/utils/snapshotUtil";
import ActivityIndicator from "@/components/indicators/activity-indicator"; import ActivityIndicator from "@/components/indicators/activity-indicator";
import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";
import { chooseAutoLiveStream } from "@/utils/liveStreamSelection";
type LiveCameraViewProps = { type LiveCameraViewProps = {
config?: FrigateConfig; config?: FrigateConfig;
@ -144,13 +146,23 @@ export default function LiveCameraView({
const containerRef = useRef<HTMLDivElement>(null); const containerRef = useRef<HTMLDivElement>(null);
const [{ width: windowWidth, height: windowHeight }] = const [{ width: windowWidth, height: windowHeight }] =
useResizeObserver(window); useResizeObserver(window);
const playbackCapabilities = usePlaybackCapabilities([]);
const autoStreamName = useMemo(
() =>
chooseAutoLiveStream(
camera.live.streams,
playbackCapabilities.estimatedBandwidthBps,
playbackCapabilities.saveData,
),
[camera.live.streams, playbackCapabilities],
);
// supported features // supported features
const [streamName, setStreamName, streamNameLoaded] = const [streamName, setStreamName, streamNameLoaded] =
useUserPersistence<string>( useUserPersistence<string>(
`${camera.name}-stream`, `${camera.name}-stream`,
Object.values(camera.live.streams)[0], autoStreamName || Object.values(camera.live.streams)[0],
); );
const isRestreamed = useMemo( const isRestreamed = useMemo(

View File

@ -55,6 +55,8 @@ import { EmptyCard } from "@/components/card/EmptyCard";
import { BsFillCameraVideoOffFill } from "react-icons/bs"; import { BsFillCameraVideoOffFill } from "react-icons/bs";
import { AuthContext } from "@/context/auth-context"; import { AuthContext } from "@/context/auth-context";
import { useIsAdmin } from "@/hooks/use-is-admin"; import { useIsAdmin } from "@/hooks/use-is-admin";
import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";
import { chooseAutoLiveStream } from "@/utils/liveStreamSelection";
type LiveDashboardViewProps = { type LiveDashboardViewProps = {
cameras: CameraConfig[]; cameras: CameraConfig[];
@ -190,6 +192,7 @@ export default function LiveDashboardView({
}, [visibilityListener]); }, [visibilityListener]);
const [visibleCameras, setVisibleCameras] = useState<string[]>([]); const [visibleCameras, setVisibleCameras] = useState<string[]>([]);
const playbackCapabilities = usePlaybackCapabilities([]);
const visibleCameraObserver = useRef<IntersectionObserver | null>(null); const visibleCameraObserver = useRef<IntersectionObserver | null>(null);
useEffect(() => { useEffect(() => {
const visibleCameras = new Set<string>(); const visibleCameras = new Set<string>();
@ -260,12 +263,16 @@ export default function LiveDashboardView({
const streamName = streamExists const streamName = streamExists
? streamNameFromSettings ? streamNameFromSettings
: Object.values(availableStreams)[0] || ""; : chooseAutoLiveStream(
availableStreams,
playbackCapabilities.estimatedBandwidthBps,
playbackCapabilities.saveData,
);
streams[camera.name] = streamName; streams[camera.name] = streamName;
}); });
return streams; return streams;
}, [cameras, currentGroupStreamingSettings]); }, [cameras, currentGroupStreamingSettings, playbackCapabilities]);
const { const {
preferredLiveModes, preferredLiveModes,