mirror of
https://github.com/blakeblackshear/frigate.git
synced 2026-05-01 19:17:41 +03:00
Made-with:
This commit is contained in:
parent
687fefb343
commit
5560af611a
@ -16,7 +16,9 @@ FROM ${BASE_IMAGE} AS base
|
||||
ARG PIP_BREAK_SYSTEM_PACKAGES
|
||||
ARG BASE_HOOK
|
||||
|
||||
RUN sh -c "$BASE_HOOK"
|
||||
RUN if [ -n "$BASE_HOOK" ]; then \
|
||||
printf '%s\n' "$BASE_HOOK" | tr -d '\r' >/tmp/base_hook.sh && sh /tmp/base_hook.sh && rm -f /tmp/base_hook.sh; \
|
||||
fi
|
||||
|
||||
FROM --platform=${BUILDPLATFORM} debian:12 AS base_host
|
||||
ARG PIP_BREAK_SYSTEM_PACKAGES
|
||||
@ -25,7 +27,9 @@ FROM ${SLIM_BASE} AS slim-base
|
||||
ARG PIP_BREAK_SYSTEM_PACKAGES
|
||||
ARG BASE_HOOK
|
||||
|
||||
RUN sh -c "$BASE_HOOK"
|
||||
RUN if [ -n "$BASE_HOOK" ]; then \
|
||||
printf '%s\n' "$BASE_HOOK" | tr -d '\r' >/tmp/base_hook.sh && sh /tmp/base_hook.sh && rm -f /tmp/base_hook.sh; \
|
||||
fi
|
||||
|
||||
FROM slim-base AS wget
|
||||
ARG DEBIAN_FRONTEND
|
||||
@ -40,7 +44,8 @@ ENV CCACHE_DIR /root/.ccache
|
||||
ENV CCACHE_MAXSIZE 2G
|
||||
|
||||
RUN --mount=type=bind,source=docker/main/build_nginx.sh,target=/deps/build_nginx.sh \
|
||||
/deps/build_nginx.sh
|
||||
tr -d '\r' </deps/build_nginx.sh >/tmp/build_nginx.sh \
|
||||
&& bash /tmp/build_nginx.sh
|
||||
|
||||
FROM wget AS sqlite-vec
|
||||
ARG DEBIAN_FRONTEND
|
||||
@ -50,7 +55,8 @@ COPY docker/main/build_sqlite_vec.sh /deps/build_sqlite_vec.sh
|
||||
RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
|
||||
--mount=type=bind,source=docker/main/build_sqlite_vec.sh,target=/deps/build_sqlite_vec.sh \
|
||||
--mount=type=cache,target=/root/.ccache \
|
||||
/deps/build_sqlite_vec.sh
|
||||
tr -d '\r' </deps/build_sqlite_vec.sh >/tmp/build_sqlite_vec.sh \
|
||||
&& bash /tmp/build_sqlite_vec.sh
|
||||
|
||||
FROM scratch AS go2rtc
|
||||
ARG TARGETARCH
|
||||
@ -60,7 +66,8 @@ ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9
|
||||
FROM wget AS tempio
|
||||
ARG TARGETARCH
|
||||
RUN --mount=type=bind,source=docker/main/install_tempio.sh,target=/deps/install_tempio.sh \
|
||||
/deps/install_tempio.sh
|
||||
tr -d '\r' </deps/install_tempio.sh >/tmp/install_tempio.sh \
|
||||
&& bash /tmp/install_tempio.sh
|
||||
|
||||
####
|
||||
#
|
||||
@ -142,7 +149,8 @@ COPY audio-labelmap.txt .
|
||||
FROM wget AS s6-overlay
|
||||
ARG TARGETARCH
|
||||
RUN --mount=type=bind,source=docker/main/install_s6_overlay.sh,target=/deps/install_s6_overlay.sh \
|
||||
/deps/install_s6_overlay.sh
|
||||
tr -d '\r' </deps/install_s6_overlay.sh >/tmp/install_s6_overlay.sh \
|
||||
&& bash /tmp/install_s6_overlay.sh
|
||||
|
||||
|
||||
FROM base AS wheels
|
||||
@ -184,7 +192,8 @@ RUN pip3 install -r /requirements.txt
|
||||
|
||||
# Build pysqlite3 from source
|
||||
COPY docker/main/build_pysqlite3.sh /build_pysqlite3.sh
|
||||
RUN /build_pysqlite3.sh
|
||||
RUN tr -d '\r' </build_pysqlite3.sh >/tmp/build_pysqlite3.sh \
|
||||
&& bash /tmp/build_pysqlite3.sh
|
||||
|
||||
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
|
||||
RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt && \
|
||||
@ -194,7 +203,8 @@ RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt && \
|
||||
|
||||
# Install HailoRT & Wheels
|
||||
RUN --mount=type=bind,source=docker/main/install_hailort.sh,target=/deps/install_hailort.sh \
|
||||
/deps/install_hailort.sh
|
||||
tr -d '\r' </deps/install_hailort.sh >/tmp/install_hailort.sh \
|
||||
&& bash /tmp/install_hailort.sh
|
||||
|
||||
# Collect deps in a single layer
|
||||
FROM scratch AS deps-rootfs
|
||||
@ -254,7 +264,8 @@ ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PA
|
||||
|
||||
# Install dependencies
|
||||
RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \
|
||||
/deps/install_deps.sh
|
||||
tr -d '\r' </deps/install_deps.sh >/tmp/install_deps.sh \
|
||||
&& bash /tmp/install_deps.sh
|
||||
|
||||
ENV DEFAULT_FFMPEG_VERSION="7.0"
|
||||
ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:5.0"
|
||||
@ -274,13 +285,21 @@ ENV LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/usr/lib/axcl"
|
||||
|
||||
# Install MemryX runtime (requires libgomp (OpenMP) in the final docker image)
|
||||
RUN --mount=type=bind,source=docker/main/install_memryx.sh,target=/deps/install_memryx.sh \
|
||||
bash -c "bash /deps/install_memryx.sh"
|
||||
tr -d '\r' </deps/install_memryx.sh >/tmp/install_memryx.sh \
|
||||
&& bash /tmp/install_memryx.sh
|
||||
|
||||
COPY --from=deps-rootfs / /
|
||||
|
||||
RUN find /etc/s6-overlay/s6-rc.d -type f -exec sed -i 's/\r$//' {} +
|
||||
|
||||
RUN find /etc/s6-overlay/s6-rc.d -type f \
|
||||
\( -name run -o -name up \) \
|
||||
-exec chmod +x {} +
|
||||
|
||||
RUN ldconfig
|
||||
|
||||
EXPOSE 5000
|
||||
EXPOSE 5010
|
||||
EXPOSE 8554
|
||||
EXPOSE 8555/tcp 8555/udp
|
||||
|
||||
@ -347,6 +366,7 @@ FROM scratch AS rootfs
|
||||
WORKDIR /opt/frigate/
|
||||
COPY frigate frigate/
|
||||
COPY migrations migrations/
|
||||
COPY transcode_proxy transcode_proxy/
|
||||
COPY --from=web-build /work/dist/ web/
|
||||
|
||||
# Frigate final container
|
||||
@ -354,3 +374,4 @@ FROM deps AS frigate
|
||||
|
||||
WORKDIR /opt/frigate/
|
||||
COPY --from=rootfs / /
|
||||
RUN pip3 install --no-cache-dir -r /opt/frigate/transcode_proxy/requirements.txt
|
||||
|
||||
@ -21,9 +21,32 @@ function set_libva_version() {
|
||||
export LIBAVFORMAT_VERSION_MAJOR
|
||||
}
|
||||
|
||||
function start_transcode_proxy() {
|
||||
(
|
||||
export TRANSCODE_PROXY_UPSTREAM="${TRANSCODE_PROXY_UPSTREAM:-http://127.0.0.1:5000}"
|
||||
export PYTHONPATH="/opt/frigate:${PYTHONPATH:-}"
|
||||
|
||||
if [[ -z "${TRANSCODE_PROXY_FFMPEG:-}" ]]; then
|
||||
TRANSCODE_PROXY_FFMPEG=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
|
||||
export TRANSCODE_PROXY_FFMPEG
|
||||
fi
|
||||
|
||||
until curl -sf -o /dev/null "${TRANSCODE_PROXY_UPSTREAM}/api/version"; do
|
||||
sleep 1
|
||||
done
|
||||
|
||||
echo "[INFO] Starting transcode proxy..."
|
||||
exec python3 -m uvicorn transcode_proxy.main:app \
|
||||
--host "${TRANSCODE_PROXY_HOST:-0.0.0.0}" \
|
||||
--port "${TRANSCODE_PROXY_PORT:-5010}"
|
||||
) &
|
||||
}
|
||||
|
||||
echo "[INFO] Preparing Frigate..."
|
||||
set_libva_version
|
||||
|
||||
start_transcode_proxy
|
||||
|
||||
echo "[INFO] Starting Frigate..."
|
||||
|
||||
cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate"
|
||||
|
||||
@ -4,7 +4,7 @@
|
||||
|
||||
set -o errexit -o nounset -o pipefail
|
||||
|
||||
dirs=(/dev/shm/logs/frigate /dev/shm/logs/go2rtc /dev/shm/logs/nginx /dev/shm/logs/certsync)
|
||||
dirs=(/dev/shm/logs/frigate /dev/shm/logs/go2rtc /dev/shm/logs/nginx /dev/shm/logs/certsync /dev/shm/logs/transcode-proxy)
|
||||
|
||||
mkdir -p "${dirs[@]}"
|
||||
chown nobody:nogroup "${dirs[@]}"
|
||||
|
||||
@ -0,0 +1 @@
|
||||
transcode-proxy
|
||||
@ -0,0 +1 @@
|
||||
transcode-proxy-pipeline
|
||||
@ -0,0 +1,4 @@
|
||||
#!/command/with-contenv bash
|
||||
# shellcheck shell=bash
|
||||
|
||||
exec logutil-service /dev/shm/logs/transcode-proxy
|
||||
@ -0,0 +1 @@
|
||||
longrun
|
||||
@ -0,0 +1 @@
|
||||
|
||||
@ -0,0 +1 @@
|
||||
transcode-proxy-log
|
||||
@ -0,0 +1,32 @@
|
||||
#!/command/with-contenv bash
|
||||
# shellcheck shell=bash
|
||||
# Start the transcode proxy (in-process with Frigate container)
|
||||
|
||||
set -o errexit -o nounset -o pipefail
|
||||
|
||||
# Logs should be sent to stdout so that s6 can collect them
|
||||
|
||||
echo "[INFO] Starting transcode proxy..."
|
||||
|
||||
# Default upstream to nginx internal port when not set
|
||||
export TRANSCODE_PROXY_UPSTREAM="${TRANSCODE_PROXY_UPSTREAM:-http://127.0.0.1:5000}"
|
||||
|
||||
# Use Frigate's FFmpeg when not set
|
||||
if [ -z "${TRANSCODE_PROXY_FFMPEG:-}" ]; then
|
||||
export TRANSCODE_PROXY_FFMPEG="$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)"
|
||||
fi
|
||||
|
||||
# Wait for nginx/API to be ready so proxy can reach upstream
|
||||
until curl -sf -o /dev/null "${TRANSCODE_PROXY_UPSTREAM}/api/version"; do
|
||||
echo "[INFO] Waiting for upstream ${TRANSCODE_PROXY_UPSTREAM}..."
|
||||
sleep 1
|
||||
done
|
||||
|
||||
echo "[INFO] Upstream ready, starting transcode proxy on port ${TRANSCODE_PROXY_PORT:-5010}"
|
||||
|
||||
export PYTHONPATH="/opt/frigate:${PYTHONPATH:-}"
|
||||
|
||||
exec 2>&1
|
||||
exec python3 -m uvicorn transcode_proxy.main:app \
|
||||
--host "${TRANSCODE_PROXY_HOST:-0.0.0.0}" \
|
||||
--port "${TRANSCODE_PROXY_PORT:-5010}"
|
||||
@ -0,0 +1 @@
|
||||
longrun
|
||||
@ -105,6 +105,16 @@ http {
|
||||
include auth_location.conf;
|
||||
include base_path.conf;
|
||||
|
||||
location = /vod-transcoded {
|
||||
return 302 /vod-transcoded/;
|
||||
}
|
||||
|
||||
location /vod-transcoded/ {
|
||||
include auth_request.conf;
|
||||
proxy_pass http://127.0.0.1:5010;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location /vod/ {
|
||||
include auth_request.conf;
|
||||
aio threads;
|
||||
|
||||
@ -25,6 +25,7 @@ RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels
|
||||
&& pip3 install -U /deps/trt-wheels/*.whl
|
||||
|
||||
COPY --from=rootfs / /
|
||||
RUN pip3 install --no-cache-dir -r /opt/frigate/transcode_proxy/requirements.txt
|
||||
COPY docker/tensorrt/detector/rootfs/etc/ld.so.conf.d /etc/ld.so.conf.d
|
||||
RUN ldconfig
|
||||
|
||||
|
||||
@ -16,7 +16,7 @@ variable "COMPUTE_LEVEL" {
|
||||
variable "BASE_HOOK" {
|
||||
# Ensure an up-to-date python 3.11 is available in jetson images
|
||||
default = <<EOT
|
||||
if grep -iq \"ubuntu\" /etc/os-release; then
|
||||
if grep -iq "ubuntu" /etc/os-release; then
|
||||
. /etc/os-release
|
||||
|
||||
# Add the deadsnakes PPA repository
|
||||
|
||||
@ -426,6 +426,7 @@ async def recording_clip(
|
||||
camera_name: str,
|
||||
start_ts: float,
|
||||
end_ts: float,
|
||||
variant: str = Query("main", description="Recording variant to use for playback."),
|
||||
):
|
||||
def run_download(ffmpeg_cmd: list[str], file_path: str):
|
||||
with sp.Popen(
|
||||
@ -459,6 +460,7 @@ async def recording_clip(
|
||||
| ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time))
|
||||
)
|
||||
.where(Recordings.camera == camera_name)
|
||||
.where(Recordings.variant == variant)
|
||||
.order_by(Recordings.start_time.asc())
|
||||
)
|
||||
|
||||
@ -534,13 +536,15 @@ async def vod_ts(
|
||||
start_ts: float,
|
||||
end_ts: float,
|
||||
force_discontinuity: bool = False,
|
||||
variant: str = "main",
|
||||
):
|
||||
logger.debug(
|
||||
"VOD: Generating VOD for %s from %s to %s with force_discontinuity=%s",
|
||||
"VOD: Generating VOD for %s from %s to %s with force_discontinuity=%s variant=%s",
|
||||
camera_name,
|
||||
start_ts,
|
||||
end_ts,
|
||||
force_discontinuity,
|
||||
variant,
|
||||
)
|
||||
recordings = (
|
||||
Recordings.select(
|
||||
@ -555,6 +559,7 @@ async def vod_ts(
|
||||
| ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time))
|
||||
)
|
||||
.where(Recordings.camera == camera_name)
|
||||
.where(Recordings.variant == variant)
|
||||
.order_by(Recordings.start_time.asc())
|
||||
.iterator()
|
||||
)
|
||||
@ -644,10 +649,17 @@ async def vod_ts(
|
||||
dependencies=[Depends(require_camera_access)],
|
||||
description="Returns an HLS playlist for the specified date-time on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
|
||||
)
|
||||
async def vod_hour_no_timezone(year_month: str, day: int, hour: int, camera_name: str):
|
||||
async def vod_hour_no_timezone(
|
||||
year_month: str, day: int, hour: int, camera_name: str, variant: str = "main"
|
||||
):
|
||||
"""VOD for specific hour. Uses the default timezone (UTC)."""
|
||||
return await vod_hour(
|
||||
year_month, day, hour, camera_name, get_localzone_name().replace("/", ",")
|
||||
year_month,
|
||||
day,
|
||||
hour,
|
||||
camera_name,
|
||||
get_localzone_name().replace("/", ","),
|
||||
variant,
|
||||
)
|
||||
|
||||
|
||||
@ -657,7 +669,12 @@ async def vod_hour_no_timezone(year_month: str, day: int, hour: int, camera_name
|
||||
description="Returns an HLS playlist for the specified date-time (with timezone) on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
|
||||
)
|
||||
async def vod_hour(
|
||||
year_month: str, day: int, hour: int, camera_name: str, tz_name: str
|
||||
year_month: str,
|
||||
day: int,
|
||||
hour: int,
|
||||
camera_name: str,
|
||||
tz_name: str,
|
||||
variant: str = "main",
|
||||
):
|
||||
parts = year_month.split("-")
|
||||
start_date = (
|
||||
@ -668,7 +685,7 @@ async def vod_hour(
|
||||
start_ts = start_date.timestamp()
|
||||
end_ts = end_date.timestamp()
|
||||
|
||||
return await vod_ts(camera_name, start_ts, end_ts)
|
||||
return await vod_ts(camera_name, start_ts, end_ts, variant=variant)
|
||||
|
||||
|
||||
@router.get(
|
||||
@ -680,6 +697,7 @@ async def vod_event(
|
||||
request: Request,
|
||||
event_id: str,
|
||||
padding: int = Query(0, description="Padding to apply to the vod."),
|
||||
variant: str = Query("main", description="Recording variant to use for playback."),
|
||||
):
|
||||
try:
|
||||
event: Event = Event.get(Event.id == event_id)
|
||||
@ -700,7 +718,9 @@ async def vod_event(
|
||||
if event.end_time is None
|
||||
else (event.end_time + padding)
|
||||
)
|
||||
vod_response = await vod_ts(event.camera, event.start_time - padding, end_ts)
|
||||
vod_response = await vod_ts(
|
||||
event.camera, event.start_time - padding, end_ts, variant=variant
|
||||
)
|
||||
|
||||
# If the recordings are not found and the event started more than 5 minutes ago, set has_clip to false
|
||||
if (
|
||||
@ -723,8 +743,11 @@ async def vod_clip(
|
||||
camera_name: str,
|
||||
start_ts: float,
|
||||
end_ts: float,
|
||||
variant: str = Query("main", description="Recording variant to use for playback."),
|
||||
):
|
||||
return await vod_ts(camera_name, start_ts, end_ts, force_discontinuity=True)
|
||||
return await vod_ts(
|
||||
camera_name, start_ts, end_ts, force_discontinuity=True, variant=variant
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
|
||||
@ -229,29 +229,39 @@ async def recordings(
|
||||
camera_name: str,
|
||||
after: float = (datetime.now() - timedelta(hours=1)).timestamp(),
|
||||
before: float = datetime.now().timestamp(),
|
||||
variant: str = "main",
|
||||
):
|
||||
"""Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used"""
|
||||
recordings = (
|
||||
query = (
|
||||
Recordings.select(
|
||||
Recordings.id,
|
||||
Recordings.camera,
|
||||
Recordings.start_time,
|
||||
Recordings.end_time,
|
||||
Recordings.path,
|
||||
Recordings.variant,
|
||||
Recordings.segment_size,
|
||||
Recordings.motion,
|
||||
Recordings.objects,
|
||||
Recordings.motion_heatmap,
|
||||
Recordings.duration,
|
||||
Recordings.codec_name,
|
||||
Recordings.width,
|
||||
Recordings.height,
|
||||
Recordings.bitrate,
|
||||
)
|
||||
.where(
|
||||
Recordings.camera == camera_name,
|
||||
Recordings.end_time >= after,
|
||||
Recordings.start_time <= before,
|
||||
)
|
||||
.order_by(Recordings.start_time)
|
||||
.dicts()
|
||||
.iterator()
|
||||
)
|
||||
|
||||
if variant != "all":
|
||||
query = query.where(Recordings.variant == variant)
|
||||
|
||||
recordings = query.order_by(Recordings.start_time).dicts().iterator()
|
||||
|
||||
return JSONResponse(content=list(recordings))
|
||||
|
||||
|
||||
|
||||
@ -256,7 +256,13 @@ class CameraConfig(FrigateBaseModel):
|
||||
if ffmpeg_cmd is None:
|
||||
continue
|
||||
|
||||
ffmpeg_cmds.append({"roles": ffmpeg_input.roles, "cmd": ffmpeg_cmd})
|
||||
ffmpeg_cmds.append(
|
||||
{
|
||||
"roles": ffmpeg_input.roles,
|
||||
"cmd": ffmpeg_cmd,
|
||||
"record_variant": ffmpeg_input.record_variant,
|
||||
}
|
||||
)
|
||||
self._ffmpeg_cmds = ffmpeg_cmds
|
||||
|
||||
def _get_ffmpeg_cmd(self, ffmpeg_input: CameraInput):
|
||||
@ -281,10 +287,13 @@ class CameraConfig(FrigateBaseModel):
|
||||
)
|
||||
or self.ffmpeg.output_args.record
|
||||
)
|
||||
record_variant = ffmpeg_input.record_variant or "main"
|
||||
cache_prefix = os.path.join(CACHE_DIR, self.name)
|
||||
cache_path = f"{cache_prefix}@{record_variant}@{CACHE_SEGMENT_FORMAT}.mp4"
|
||||
|
||||
ffmpeg_output_args = (
|
||||
record_args
|
||||
+ [f"{os.path.join(CACHE_DIR, self.name)}@{CACHE_SEGMENT_FORMAT}.mp4"]
|
||||
+ [cache_path]
|
||||
+ ffmpeg_output_args
|
||||
)
|
||||
|
||||
|
||||
@ -1,9 +1,9 @@
|
||||
from enum import Enum
|
||||
from typing import Union
|
||||
|
||||
from pydantic import Field, field_validator
|
||||
from pydantic import Field, field_validator, model_validator
|
||||
|
||||
from frigate.const import DEFAULT_FFMPEG_VERSION, INCLUDED_FFMPEG_VERSIONS
|
||||
from frigate.const import DEFAULT_FFMPEG_VERSION, INCLUDED_FFMPEG_VERSIONS, REGEX_CAMERA_NAME
|
||||
|
||||
from ..base import FrigateBaseModel
|
||||
from ..env import EnvString
|
||||
@ -137,6 +137,22 @@ class CameraInput(FrigateBaseModel):
|
||||
title="Input arguments",
|
||||
description="Input arguments specific to this stream.",
|
||||
)
|
||||
record_variant: str | None = Field(
|
||||
default=None,
|
||||
title="Recording variant",
|
||||
description="Optional recording variant label for record role inputs such as main or sub.",
|
||||
pattern=REGEX_CAMERA_NAME,
|
||||
)
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_record_variant(self):
|
||||
if CameraRoleEnum.record in self.roles:
|
||||
if not self.record_variant:
|
||||
self.record_variant = "main"
|
||||
else:
|
||||
self.record_variant = None
|
||||
|
||||
return self
|
||||
|
||||
|
||||
class CameraFfmpegConfig(FfmpegConfig):
|
||||
@ -148,12 +164,29 @@ class CameraFfmpegConfig(FfmpegConfig):
|
||||
@field_validator("inputs")
|
||||
@classmethod
|
||||
def validate_roles(cls, v):
|
||||
roles = [role for input in v for role in input.roles]
|
||||
detect_inputs = 0
|
||||
audio_inputs = 0
|
||||
record_variants: set[str] = set()
|
||||
|
||||
if len(roles) != len(set(roles)):
|
||||
raise ValueError("Each input role may only be used once.")
|
||||
for camera_input in v:
|
||||
if CameraRoleEnum.detect in camera_input.roles:
|
||||
detect_inputs += 1
|
||||
|
||||
if "detect" not in roles:
|
||||
if CameraRoleEnum.audio in camera_input.roles:
|
||||
audio_inputs += 1
|
||||
|
||||
if CameraRoleEnum.record in camera_input.roles:
|
||||
record_variant = camera_input.record_variant or "main"
|
||||
if record_variant in record_variants:
|
||||
raise ValueError(
|
||||
f"Record variant '{record_variant}' may only be used once."
|
||||
)
|
||||
record_variants.add(record_variant)
|
||||
|
||||
if detect_inputs != 1:
|
||||
raise ValueError("The detect role is required.")
|
||||
|
||||
if audio_inputs > 1:
|
||||
raise ValueError("Each input role may only be used once.")
|
||||
|
||||
return v
|
||||
|
||||
@ -71,6 +71,7 @@ from .network import NetworkingConfig
|
||||
from .proxy import ProxyConfig
|
||||
from .telemetry import TelemetryConfig
|
||||
from .tls import TlsConfig
|
||||
from .transcode_proxy import TranscodeProxyConfig
|
||||
from .ui import UIConfig
|
||||
|
||||
__all__ = ["FrigateConfig"]
|
||||
@ -450,6 +451,11 @@ class FrigateConfig(FrigateBaseModel):
|
||||
title="UI",
|
||||
description="User interface preferences such as timezone, time/date formatting, and units.",
|
||||
)
|
||||
transcode_proxy: TranscodeProxyConfig = Field(
|
||||
default_factory=TranscodeProxyConfig,
|
||||
title="Transcode proxy",
|
||||
description="Optional proxy for transcoding VOD playback to H.264 on the fly (e.g. for HEVC compatibility).",
|
||||
)
|
||||
|
||||
# Detector config
|
||||
detectors: Dict[str, BaseDetectorConfig] = Field(
|
||||
|
||||
21
frigate/config/transcode_proxy.py
Normal file
21
frigate/config/transcode_proxy.py
Normal file
@ -0,0 +1,21 @@
|
||||
"""Configuration for the VOD transcode proxy (optional playback transcoding)."""
|
||||
from pydantic import Field
|
||||
|
||||
from .base import FrigateBaseModel
|
||||
|
||||
__all__ = ["TranscodeProxyConfig"]
|
||||
|
||||
|
||||
class TranscodeProxyConfig(FrigateBaseModel):
|
||||
"""Settings for the optional transcode proxy used for recording playback."""
|
||||
|
||||
enabled: bool = Field(
|
||||
default=False,
|
||||
title="Transcode proxy enabled",
|
||||
description="When enabled, the UI uses the transcode proxy URL for VOD playback so recordings are transcoded to H.264 on the fly (e.g. for HEVC compatibility or lower bitrate).",
|
||||
)
|
||||
vod_proxy_url: str = Field(
|
||||
default="",
|
||||
title="VOD proxy base URL",
|
||||
description="Base URL for the transcode proxy (e.g. http://host:5010). When enabled, recording playback requests go to this URL + /vod/... Leave empty if the proxy is mounted at the same host (e.g. /vod-transcoded/ under the same origin).",
|
||||
)
|
||||
@ -70,6 +70,7 @@ class Recordings(Model):
|
||||
id = CharField(null=False, primary_key=True, max_length=30)
|
||||
camera = CharField(index=True, max_length=20)
|
||||
path = CharField(unique=True)
|
||||
variant = CharField(default="main", index=True, max_length=20)
|
||||
start_time = DateTimeField()
|
||||
end_time = DateTimeField()
|
||||
duration = FloatField()
|
||||
@ -77,6 +78,10 @@ class Recordings(Model):
|
||||
objects = IntegerField(null=True)
|
||||
dBFS = IntegerField(null=True)
|
||||
segment_size = FloatField(default=0) # this should be stored as MB
|
||||
codec_name = CharField(null=True, max_length=32)
|
||||
width = IntegerField(null=True)
|
||||
height = IntegerField(null=True)
|
||||
bitrate = IntegerField(null=True)
|
||||
regions = IntegerField(null=True)
|
||||
motion_heatmap = JSONField(null=True) # 16x16 grid, 256 values (0-255)
|
||||
|
||||
|
||||
@ -101,6 +101,29 @@ class RecordingMaintainer(threading.Thread):
|
||||
self.end_time_cache: dict[str, Tuple[datetime.datetime, float]] = {}
|
||||
self.unexpected_cache_files_logged: bool = False
|
||||
|
||||
def _parse_cache_segment(self, cache_name: str) -> Optional[dict[str, Any]]:
|
||||
basename = os.path.splitext(cache_name)[0]
|
||||
parts = basename.rsplit("@", maxsplit=2)
|
||||
|
||||
if len(parts) == 2:
|
||||
camera, date = parts
|
||||
variant = "main"
|
||||
elif len(parts) == 3:
|
||||
camera, variant, date = parts
|
||||
else:
|
||||
return None
|
||||
|
||||
start_time = datetime.datetime.strptime(
|
||||
date, CACHE_SEGMENT_FORMAT
|
||||
).astimezone(datetime.timezone.utc)
|
||||
|
||||
return {
|
||||
"camera": camera,
|
||||
"variant": variant,
|
||||
"start_time": start_time,
|
||||
"cache_path": os.path.join(CACHE_DIR, cache_name),
|
||||
}
|
||||
|
||||
async def move_files(self) -> None:
|
||||
cache_files = [
|
||||
d
|
||||
@ -113,26 +136,22 @@ class RecordingMaintainer(threading.Thread):
|
||||
# publish newest cached segment per camera (including in use files)
|
||||
newest_cache_segments: dict[str, dict[str, Any]] = {}
|
||||
for cache in cache_files:
|
||||
cache_path = os.path.join(CACHE_DIR, cache)
|
||||
basename = os.path.splitext(cache)[0]
|
||||
try:
|
||||
camera, date = basename.rsplit("@", maxsplit=1)
|
||||
except ValueError:
|
||||
parsed = self._parse_cache_segment(cache)
|
||||
if parsed is None:
|
||||
if not self.unexpected_cache_files_logged:
|
||||
logger.warning("Skipping unexpected files in cache")
|
||||
self.unexpected_cache_files_logged = True
|
||||
continue
|
||||
|
||||
start_time = datetime.datetime.strptime(
|
||||
date, CACHE_SEGMENT_FORMAT
|
||||
).astimezone(datetime.timezone.utc)
|
||||
camera = parsed["camera"]
|
||||
start_time = parsed["start_time"]
|
||||
if (
|
||||
camera not in newest_cache_segments
|
||||
or start_time > newest_cache_segments[camera]["start_time"]
|
||||
):
|
||||
newest_cache_segments[camera] = {
|
||||
"start_time": start_time,
|
||||
"cache_path": cache_path,
|
||||
"cache_path": parsed["cache_path"],
|
||||
}
|
||||
|
||||
for camera, newest in newest_cache_segments.items():
|
||||
@ -172,27 +191,14 @@ class RecordingMaintainer(threading.Thread):
|
||||
if cache in files_in_use:
|
||||
continue
|
||||
|
||||
cache_path = os.path.join(CACHE_DIR, cache)
|
||||
basename = os.path.splitext(cache)[0]
|
||||
try:
|
||||
camera, date = basename.rsplit("@", maxsplit=1)
|
||||
except ValueError:
|
||||
parsed = self._parse_cache_segment(cache)
|
||||
if parsed is None:
|
||||
if not self.unexpected_cache_files_logged:
|
||||
logger.warning("Skipping unexpected files in cache")
|
||||
self.unexpected_cache_files_logged = True
|
||||
continue
|
||||
|
||||
# important that start_time is utc because recordings are stored and compared in utc
|
||||
start_time = datetime.datetime.strptime(
|
||||
date, CACHE_SEGMENT_FORMAT
|
||||
).astimezone(datetime.timezone.utc)
|
||||
|
||||
grouped_recordings[camera].append(
|
||||
{
|
||||
"cache_path": cache_path,
|
||||
"start_time": start_time,
|
||||
}
|
||||
)
|
||||
grouped_recordings[parsed["camera"]].append(parsed)
|
||||
|
||||
# delete all cached files past the most recent MAX_SEGMENTS_IN_CACHE
|
||||
keep_count = MAX_SEGMENTS_IN_CACHE
|
||||
@ -318,6 +324,7 @@ class RecordingMaintainer(threading.Thread):
|
||||
) -> Optional[Recordings]:
|
||||
cache_path: str = recording["cache_path"]
|
||||
start_time: datetime.datetime = recording["start_time"]
|
||||
variant: str = recording.get("variant", "main")
|
||||
|
||||
# Just delete files if camera removed or recordings are turned off
|
||||
if (
|
||||
@ -327,8 +334,12 @@ class RecordingMaintainer(threading.Thread):
|
||||
self.drop_segment(cache_path)
|
||||
return None
|
||||
|
||||
segment_info: dict[str, Any]
|
||||
if cache_path in self.end_time_cache:
|
||||
end_time, duration = self.end_time_cache[cache_path]
|
||||
segment_info = await get_video_properties(
|
||||
self.config.ffmpeg, cache_path, get_duration=False
|
||||
)
|
||||
else:
|
||||
segment_info = await get_video_properties(
|
||||
self.config.ffmpeg, cache_path, get_duration=True
|
||||
@ -400,7 +411,14 @@ class RecordingMaintainer(threading.Thread):
|
||||
else RetainModeEnum.motion
|
||||
)
|
||||
return await self.move_segment(
|
||||
camera, start_time, end_time, duration, cache_path, record_mode
|
||||
camera,
|
||||
variant,
|
||||
start_time,
|
||||
end_time,
|
||||
duration,
|
||||
cache_path,
|
||||
record_mode,
|
||||
segment_info,
|
||||
)
|
||||
|
||||
# we fell through the continuous / motion check, so we need to check the review items
|
||||
@ -436,11 +454,13 @@ class RecordingMaintainer(threading.Thread):
|
||||
# move from cache to recordings immediately
|
||||
return await self.move_segment(
|
||||
camera,
|
||||
variant,
|
||||
start_time,
|
||||
end_time,
|
||||
duration,
|
||||
cache_path,
|
||||
record_mode,
|
||||
segment_info,
|
||||
)
|
||||
# if it doesn't overlap with an review item, go ahead and drop the segment
|
||||
# if it ends more than the configured pre_capture for the camera
|
||||
@ -570,11 +590,13 @@ class RecordingMaintainer(threading.Thread):
|
||||
async def move_segment(
|
||||
self,
|
||||
camera: str,
|
||||
variant: str,
|
||||
start_time: datetime.datetime,
|
||||
end_time: datetime.datetime,
|
||||
duration: float,
|
||||
cache_path: str,
|
||||
store_mode: RetainModeEnum,
|
||||
media_info: Optional[dict[str, Any]] = None,
|
||||
) -> Optional[Recordings]:
|
||||
segment_info = self.segment_stats(camera, start_time, end_time)
|
||||
|
||||
@ -588,6 +610,7 @@ class RecordingMaintainer(threading.Thread):
|
||||
RECORD_DIR,
|
||||
start_time.strftime("%Y-%m-%d/%H"),
|
||||
camera,
|
||||
variant,
|
||||
)
|
||||
|
||||
if not os.path.exists(directory):
|
||||
@ -646,6 +669,7 @@ class RecordingMaintainer(threading.Thread):
|
||||
Recordings.id.name: f"{start_time.timestamp()}-{rand_id}",
|
||||
Recordings.camera.name: camera,
|
||||
Recordings.path.name: file_path,
|
||||
Recordings.variant.name: variant,
|
||||
Recordings.start_time.name: start_time.timestamp(),
|
||||
Recordings.end_time.name: end_time.timestamp(),
|
||||
Recordings.duration.name: duration,
|
||||
@ -655,6 +679,16 @@ class RecordingMaintainer(threading.Thread):
|
||||
Recordings.regions.name: segment_info.region_count,
|
||||
Recordings.dBFS.name: segment_info.average_dBFS,
|
||||
Recordings.segment_size.name: segment_size,
|
||||
Recordings.codec_name.name: (
|
||||
media_info.get("codec_name") if media_info else None
|
||||
),
|
||||
Recordings.width.name: media_info.get("width") if media_info else None,
|
||||
Recordings.height.name: media_info.get("height") if media_info else None,
|
||||
Recordings.bitrate.name: (
|
||||
int((segment_size * pow(2, 20) * 8) / duration)
|
||||
if duration > 0 and segment_size > 0
|
||||
else None
|
||||
),
|
||||
Recordings.motion_heatmap.name: segment_info.motion_heatmap,
|
||||
}
|
||||
except Exception as e:
|
||||
|
||||
@ -44,6 +44,59 @@ class TestHttpMedia(BaseTestHttp):
|
||||
self.app.dependency_overrides.clear()
|
||||
super().tearDown()
|
||||
|
||||
def test_camera_recordings_variant_filter(self):
|
||||
start_ts = datetime(2024, 3, 9, 12, 0, 0, tzinfo=timezone.utc).timestamp()
|
||||
end_ts = start_ts + 3600
|
||||
|
||||
with AuthTestClient(self.app) as client:
|
||||
Recordings.insert(
|
||||
id="recording_main",
|
||||
path="/media/recordings/front/main.mp4",
|
||||
camera="front_door",
|
||||
variant="main",
|
||||
start_time=start_ts,
|
||||
end_time=end_ts,
|
||||
duration=3600,
|
||||
motion=100,
|
||||
objects=5,
|
||||
codec_name="h264",
|
||||
width=1920,
|
||||
height=1080,
|
||||
bitrate=4_000_000,
|
||||
).execute()
|
||||
Recordings.insert(
|
||||
id="recording_sub",
|
||||
path="/media/recordings/front/sub.mp4",
|
||||
camera="front_door",
|
||||
variant="sub",
|
||||
start_time=start_ts,
|
||||
end_time=end_ts,
|
||||
duration=3600,
|
||||
motion=100,
|
||||
objects=5,
|
||||
codec_name="h264",
|
||||
width=640,
|
||||
height=360,
|
||||
bitrate=512_000,
|
||||
).execute()
|
||||
|
||||
default_response = client.get(
|
||||
"/front_door/recordings",
|
||||
params={"after": start_ts, "before": end_ts},
|
||||
)
|
||||
assert default_response.status_code == 200
|
||||
default_recordings = default_response.json()
|
||||
assert len(default_recordings) == 1
|
||||
assert default_recordings[0]["variant"] == "main"
|
||||
|
||||
all_response = client.get(
|
||||
"/front_door/recordings",
|
||||
params={"after": start_ts, "before": end_ts, "variant": "all"},
|
||||
)
|
||||
assert all_response.status_code == 200
|
||||
variants = {recording["variant"] for recording in all_response.json()}
|
||||
assert variants == {"main", "sub"}
|
||||
|
||||
def test_recordings_summary_across_dst_spring_forward(self):
|
||||
"""
|
||||
Test recordings summary across spring DST transition (spring forward).
|
||||
|
||||
@ -14,6 +14,18 @@ from frigate.record.maintainer import RecordingMaintainer # noqa: E402
|
||||
|
||||
|
||||
class TestMaintainer(unittest.IsolatedAsyncioTestCase):
|
||||
async def test_parse_cache_segment_supports_variant(self):
|
||||
config = MagicMock(spec=FrigateConfig)
|
||||
config.cameras = {}
|
||||
stop_event = MagicMock()
|
||||
|
||||
maintainer = RecordingMaintainer(config, stop_event)
|
||||
parsed = maintainer._parse_cache_segment("front@sub@20210101000000+0000.mp4")
|
||||
|
||||
self.assertIsNotNone(parsed)
|
||||
self.assertEqual("front", parsed["camera"])
|
||||
self.assertEqual("sub", parsed["variant"])
|
||||
|
||||
async def test_move_files_survives_bad_filename(self):
|
||||
config = MagicMock(spec=FrigateConfig)
|
||||
config.cameras = {}
|
||||
|
||||
@ -820,6 +820,7 @@ async def get_video_properties(
|
||||
result.update({"width": width, "height": height})
|
||||
if fourcc:
|
||||
result["fourcc"] = fourcc
|
||||
result["codec_name"] = fourcc
|
||||
if get_duration:
|
||||
result["duration"] = duration
|
||||
|
||||
|
||||
38
migrations/036_add_recording_variants.py
Normal file
38
migrations/036_add_recording_variants.py
Normal file
@ -0,0 +1,38 @@
|
||||
"""Peewee migrations -- 036_add_recording_variants.py."""
|
||||
|
||||
import peewee as pw
|
||||
|
||||
from frigate.models import Recordings
|
||||
|
||||
SQL = pw.SQL
|
||||
|
||||
|
||||
def migrate(migrator, database, fake=False, **kwargs):
    """Add per-variant recording columns and a variant-aware lookup index.

    Idempotent: inspects the live schema first and only adds columns that
    are not already present, so re-running the migration is safe.
    """
    current_columns = {
        row[1]
        for row in database.execute_sql('PRAGMA table_info("recordings")').fetchall()
    }

    # Full set of columns this migration introduces.
    wanted = {
        "variant": pw.CharField(default="main", max_length=20),
        "codec_name": pw.CharField(null=True, max_length=32),
        "width": pw.IntegerField(null=True),
        "height": pw.IntegerField(null=True),
        "bitrate": pw.IntegerField(null=True),
    }
    missing = {name: field for name, field in wanted.items() if name not in current_columns}

    if missing:
        migrator.add_fields(Recordings, **missing)

    # Index creation is already guarded by IF NOT EXISTS.
    migrator.sql(
        'CREATE INDEX IF NOT EXISTS "recordings_camera_variant_start_time_end_time" ON "recordings" ("camera", "variant", "start_time" DESC, "end_time" DESC)'
    )
|
||||
|
||||
|
||||
def rollback(migrator, database, fake=False, **kwargs):
    """Reverse migration 036: drop the variant index, then the added columns.

    Fix: migrate() creates the "recordings_camera_variant_start_time_end_time"
    index, but the original rollback never dropped it, leaving a stale index
    behind (and breaking a subsequent re-apply on engines without
    IF NOT EXISTS support). Drop it before removing the columns it covers.
    """
    migrator.sql(
        'DROP INDEX IF EXISTS "recordings_camera_variant_start_time_end_time"'
    )
    migrator.remove_fields(
        Recordings, ["variant", "codec_name", "width", "height", "bitrate"]
    )
|
||||
83
scripts/README.md
Normal file
83
scripts/README.md
Normal file
@ -0,0 +1,83 @@
|
||||
# Scripts
|
||||
|
||||
## Transcode benchmarks
|
||||
|
||||
Proof-of-concept benchmarks for **real-time VOD transcoding**: transcode a video file with FFmpeg (optionally with hardware acceleration) and measure time and throughput. Used to de-risk the real-time VOD transcoding feature (segment-level transcode + cache): we need ~10s segments to transcode in well under 10s (ideally <2s) so timeline scrubbing stays responsive.
|
||||
|
||||
### Python (recommended)
|
||||
|
||||
From the repo root:
|
||||
|
||||
```bash
|
||||
# Full file, CPU
|
||||
python scripts/transcode_benchmark.py path/to/recording.mp4
|
||||
|
||||
# First 10 seconds only (simulates one HLS segment)
|
||||
python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10
|
||||
|
||||
# 10s segment with NVIDIA HW accel
|
||||
python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10 --hwaccel nvidia
|
||||
|
||||
# Simulate scrubbing: start 60s in, transcode 10s (VAAPI)
|
||||
python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10 --seek 60 --hwaccel vaapi
|
||||
|
||||
# Intel QSV H.265 (preset-intel-qsv-h265)
|
||||
python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10 --hwaccel qsv-h265
|
||||
|
||||
# Custom FFmpeg binary (e.g. Frigate container)
|
||||
python scripts/transcode_benchmark.py path/to/recording.mp4 --duration 10 --ffmpeg /usr/lib/ffmpeg/7/bin/ffmpeg
|
||||
```
|
||||
|
||||
Options:
|
||||
|
||||
- `--duration SEC` – Transcode only this many seconds (default: full file). Use 10 to simulate one HLS segment.
|
||||
- `--seek SEC` – Start at this position (fast seek before `-i`). Simulates scrubbing into the file.
|
||||
- `--hwaccel cpu|nvidia|vaapi|qsv-h265` – Matches Frigate presets: libx264, h264_nvenc, h264_vaapi, preset-intel-qsv-h265 (hevc_qsv).
|
||||
- `--vaapi-device` – VAAPI device (default: `/dev/dri/renderD128`).
|
||||
- `--qsv-device` – Intel QSV device: on Linux defaults to `/dev/dri/renderD129` if present (else `renderD128`, else `0`). With two GPUs, the second node is often the Intel iGPU. Override if you get “No VA display found” (e.g. try the other node).
|
||||
- `--output PATH` – Write output here (default: temp file, deleted).
|
||||
- `--keep-output` – Keep the temp output file.
|
||||
|
||||
Output: real time, speed (× realtime), output size. The script suggests whether the speed is good for ~10s segment transcode.
|
||||
|
||||
### Shell
|
||||
|
||||
Quick one-liners without Python:
|
||||
|
||||
```bash
|
||||
chmod +x scripts/transcode_benchmark.sh
|
||||
|
||||
./scripts/transcode_benchmark.sh path/to/recording.mp4
|
||||
./scripts/transcode_benchmark.sh path/to/recording.mp4 10
|
||||
./scripts/transcode_benchmark.sh path/to/recording.mp4 10 nvidia
|
||||
```
|
||||
|
||||
Arguments: `INPUT [DURATION_SEC] [cpu|nvidia|vaapi|qsv-h265]`. Optional env: `FFMPEG`, `FFPROBE`, `VAAPI_DEVICE`, `QSV_DEVICE`.
|
||||
|
||||
### Interpreting results
|
||||
|
||||
- **Speed ≥ 5× realtime** – A 10s segment transcodes in ~2s or less; good for on-demand segment transcode with cache.
|
||||
- **Speed 1–5×** – Marginal; segment may take several seconds; transcode-ahead or caching helps.
|
||||
- **Speed < 1×** – Too slow for real-time; consider stronger HW or lower resolution/bitrate.
|
||||
|
||||
Run with a real Frigate recording (or any H.264/HEVC MP4) and try both `--duration 10` and full file to see segment vs full transcode cost.
|
||||
|
||||
### Troubleshooting `qsv-h265` (“No VA display found”)
|
||||
|
||||
Intel QSV (`qsv-h265`) only works on **Intel GPUs** with a working **Intel VA-API** stack. If both `/dev/dri/renderD128` and `renderD129` fail with “No VA display found” or “Device creation failed: -22”, then:
|
||||
|
||||
1. **Check which GPUs you have** – With two cards, both may be non-Intel (e.g. NVIDIA + AMD). QSV is Intel-only. Use `lspci -k | grep -A3 VGA` to see adapters and drivers.
|
||||
2. **Check VA-API** – Run `vainfo` or `vainfo --display drm --device /dev/dri/renderD128` (then `renderD129`). If it errors or shows no Intel driver, QSV won’t work. On Intel you typically need `intel-media-driver` (newer) or `intel-vaapi-driver` (i965, older).
|
||||
3. **Permissions** – Ensure your user is in the `render` (and often `video`) group: `groups`; add with `sudo usermod -aG render $USER` and log in again.
|
||||
4. **Use another HW accel** – If you have an **AMD** GPU, use `vaapi` (H.264). If you have **NVIDIA**, use `nvidia`. Otherwise use `cpu`.
|
||||
|
||||
5. **Frigate Docker uses QSV but host benchmark fails** – The container has the Intel VA/QSV stack and device access; the host may not. Run the benchmark **inside the same environment** (e.g. inside the Frigate container):
|
||||
|
||||
```bash
|
||||
# Copy script and a sample recording into the container (adjust container name)
|
||||
docker cp scripts/transcode_benchmark.sh frigate:/tmp/
|
||||
docker cp /path/to/59.24.mp4 frigate:/tmp/
|
||||
docker exec -it frigate bash -c 'chmod +x /tmp/transcode_benchmark.sh && /tmp/transcode_benchmark.sh /tmp/59.24.mp4 10 qsv-h265'
|
||||
```
|
||||
|
||||
The script auto-detects FFmpeg under `/usr/lib/ffmpeg/*/bin` when `ffmpeg` isn’t on PATH (Frigate container). If it doesn’t, set `FFMPEG` and `FFPROBE` explicitly, e.g. `docker exec ... env FFMPEG=/usr/lib/ffmpeg/7.0/bin/ffmpeg FFPROBE=/usr/lib/ffmpeg/7.0/bin/ffprobe /tmp/transcode_benchmark.sh ...`.
|
||||
289
scripts/transcode_benchmark.py
Normal file
289
scripts/transcode_benchmark.py
Normal file
@ -0,0 +1,289 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Proof-of-concept benchmark: transcode a video file with FFmpeg (optionally with
|
||||
hardware acceleration) and report timing and throughput.
|
||||
|
||||
Used to de-risk real-time VOD transcoding: we need ~10s segments to transcode
|
||||
in well under 10s (ideally <2s) so scrubbing stays responsive.
|
||||
|
||||
Usage:
|
||||
python scripts/transcode_benchmark.py path/to/video.mp4
|
||||
python scripts/transcode_benchmark.py path/to/video.mp4 --duration 10 --hwaccel nvidia
|
||||
python scripts/transcode_benchmark.py path/to/video.mp4 --duration 10 --seek 60 --hwaccel vaapi
|
||||
|
||||
Output: real time, speed (x realtime), output size. Aligns with Frigate export/timelapse
|
||||
HW presets (preset-nvidia, preset-vaapi, libx264 default).
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
|
||||
def get_ffmpeg_command(
    ffmpeg_path: str,
    input_path: str,
    output_path: str,
    *,
    duration_sec: Optional[float] = None,
    seek_sec: float = 0,
    hwaccel: str = "cpu",
    gpu_device: str = "/dev/dri/renderD128",
    qsv_device: str = "0",
) -> list[str]:
    """Build argv for an FFmpeg transcode (H.264 or HEVC, audio stripped).

    Matches a Frigate timelapse-style encode. Decoder/encoder flags are keyed
    by the requested hwaccel; any unrecognized value falls back to libx264.
    """
    # Hardware decode flags inserted before -i, per accelerator.
    decode_flags = {
        "nvidia": ["-hwaccel", "cuda", "-hwaccel_output_format", "cuda", "-extra_hw_frames", "8"],
        "vaapi": ["-hwaccel", "vaapi", "-hwaccel_device", gpu_device, "-hwaccel_output_format", "vaapi"],
        # preset-intel-qsv-h265: load_plugin for HEVC decode, QSV device for decode+encode
        "qsv-h265": ["-load_plugin", "hevc_hw", "-hwaccel", "qsv", "-qsv_device", qsv_device, "-hwaccel_output_format", "qsv"],
    }
    # Encoder selection; CQP for QSV since profile/level can be unsupported on some runtimes.
    encode_flags = {
        "nvidia": ["-c:v", "h264_nvenc"],
        "vaapi": ["-c:v", "h264_vaapi"],
        "qsv-h265": ["-c:v", "hevc_qsv", "-global_quality", "23"],
    }
    cpu_encoder = ["-c:v", "libx264", "-preset:v", "ultrafast", "-tune:v", "zerolatency"]

    argv = [ffmpeg_path, "-hide_banner", "-y", "-loglevel", "warning", "-stats"]

    # -ss before -i gives a fast (keyframe-based) seek.
    if seek_sec > 0:
        argv += ["-ss", str(seek_sec)]

    argv += decode_flags.get(hwaccel, [])
    argv += ["-i", input_path]

    if duration_sec is not None and duration_sec > 0:
        argv += ["-t", str(duration_sec)]

    argv.append("-an")
    argv += encode_flags.get(hwaccel, cpu_encoder)
    argv += ["-f", "mp4", "-movflags", "+faststart", output_path]
    return argv
|
||||
|
||||
|
||||
def get_video_duration_sec(ffprobe_path: str, input_path: str) -> Optional[float]:
    """Return the container duration in seconds, or None on any probe failure.

    Swallowed failures: ffprobe missing or not executable (OSError), probe
    timeout, non-zero exit, and unparsable output.
    """
    try:
        out = subprocess.run(
            [
                ffprobe_path,
                "-v",
                "error",
                "-show_entries",
                "format=duration",
                "-of",
                "default=noprint_wrappers=1:nokey=1",
                input_path,
            ],
            capture_output=True,
            text=True,
            timeout=10,
        )
        if out.returncode == 0 and out.stdout.strip():
            return float(out.stdout.strip())
    except (subprocess.TimeoutExpired, ValueError, OSError):
        # Fix: the original caught only FileNotFoundError, so a probe binary
        # that exists but is not executable (PermissionError, also an OSError)
        # crashed this "return None on failure" helper. OSError is a superset
        # of FileNotFoundError, so this stays backward compatible.
        pass
    return None
|
||||
|
||||
|
||||
def main() -> int:
    """CLI entry point: parse args, run one FFmpeg transcode, report timing.

    Returns a process exit code (0 success, 1 on bad input / FFmpeg failure
    or timeout).
    """
    parser = argparse.ArgumentParser(
        description="Benchmark FFmpeg transcode (H.264) with optional HW accel."
    )
    parser.add_argument(
        "input",
        type=Path,
        help="Input video file (e.g. recording segment)",
    )
    parser.add_argument(
        "--duration",
        type=float,
        default=None,
        metavar="SEC",
        help="Transcode only this many seconds (default: full file). Simulates segment length.",
    )
    parser.add_argument(
        "--seek",
        type=float,
        default=0,
        metavar="SEC",
        help="Start at this position (before -i for fast seek). Simulates scrubbing into file.",
    )
    parser.add_argument(
        "--hwaccel",
        choices=("cpu", "nvidia", "vaapi", "qsv-h265"),
        default="cpu",
        help="HW accel: cpu (libx264), nvidia (h264_nvenc), vaapi (h264_vaapi), qsv-h265 (preset-intel-qsv-h265, hevc_qsv).",
    )
    parser.add_argument(
        "--vaapi-device",
        default="/dev/dri/renderD128",
        help="VAAPI device (default: /dev/dri/renderD128).",
    )
    parser.add_argument(
        "--qsv-device",
        # Default is resolved at import time: prefer renderD129 (on dual-GPU
        # boxes often the Intel iGPU), then renderD128, then "0" (Windows).
        default=(
            "/dev/dri/renderD129"
            if os.path.exists("/dev/dri/renderD129")
            else "/dev/dri/renderD128"
            if os.path.exists("/dev/dri/renderD128")
            else "0"
        ),
        help="Intel QSV device: path (e.g. /dev/dri/renderD129 or renderD128 on Linux) or 0 (Windows). With two GPUs, try renderD129 if renderD128 fails. Used for --hwaccel qsv-h265.",
    )
    parser.add_argument(
        "--ffmpeg",
        default="ffmpeg",
        metavar="PATH",
        help="FFmpeg binary (default: ffmpeg in PATH).",
    )
    parser.add_argument(
        "--ffprobe",
        default="ffprobe",
        metavar="PATH",
        help="FFprobe binary (default: ffprobe in PATH).",
    )
    parser.add_argument(
        "--output",
        type=Path,
        default=None,
        help="Output file (default: temp file, deleted after).",
    )
    parser.add_argument(
        "--keep-output",
        action="store_true",
        help="Keep output file when using default temp path.",
    )
    args = parser.parse_args()

    input_path = args.input.resolve()
    if not input_path.is_file():
        print(f"Error: input file not found: {input_path}", file=sys.stderr)
        return 1

    # Duration used for the speed (x realtime) calculation: explicit --duration
    # wins; otherwise probe the file and subtract the seek offset.
    effective_duration = args.duration
    if effective_duration is None:
        duration_from_probe = get_video_duration_sec(str(args.ffprobe), str(input_path))
        if duration_from_probe is not None:
            effective_duration = duration_from_probe - args.seek
            if effective_duration <= 0:
                print("Error: seek >= file duration", file=sys.stderr)
                return 1
        else:
            # Probe failed: still run the benchmark, just without a speed figure.
            print("Warning: could not probe duration; reporting real time only.", file=sys.stderr)

    # No --output: write to a temp file (deleted later unless --keep-output).
    use_temp = args.output is None
    if use_temp:
        fd, out_path = tempfile.mkstemp(suffix=".mp4")
        os.close(fd)  # FFmpeg opens the path itself; only the name is needed.
        output_path = Path(out_path)
    else:
        output_path = args.output.resolve()

    cmd = get_ffmpeg_command(
        args.ffmpeg,
        str(input_path),
        str(output_path),
        duration_sec=args.duration,
        seek_sec=args.seek,
        hwaccel=args.hwaccel,
        gpu_device=args.vaapi_device,
        qsv_device=args.qsv_device,
    )

    # Echo the run configuration so results can be reproduced from the log.
    print(f"Input: {input_path}")
    print(f"Output: {output_path}")
    print(f"HW: {args.hwaccel}")
    if args.duration is not None:
        print(f"Limit: {args.duration}s")
    if args.seek > 0:
        print(f"Seek: {args.seek}s")
    print(f"Run: {' '.join(cmd)}")
    print()

    # Time only the FFmpeg run itself; 1h timeout guards against hangs.
    start = time.perf_counter()
    try:
        subprocess.run(cmd, check=True, timeout=3600)
    except subprocess.CalledProcessError as e:
        print(f"FFmpeg failed: {e}", file=sys.stderr)
        if use_temp and output_path.exists():
            output_path.unlink()
        return 1
    except subprocess.TimeoutExpired:
        print("FFmpeg timed out.", file=sys.stderr)
        if use_temp and output_path.exists():
            output_path.unlink()
        return 1
    elapsed = time.perf_counter() - start

    size_bytes = output_path.stat().st_size if output_path.exists() else 0

    print("--- Results ---")
    print(f"Real time: {elapsed:.2f}s")
    if effective_duration is not None and effective_duration > 0:
        # Speed > 1 means faster than realtime.
        speed = effective_duration / elapsed
        print(f"Video duration: {effective_duration:.2f}s")
        print(f"Speed: {speed:.2f}x realtime")
        # Advice thresholds only make sense for short segment-sized runs.
        if args.duration and args.duration <= 15:
            if speed >= 5:
                print("(Good for ~10s segment transcode: well under 2s.)")
            elif speed >= 1:
                print("(Marginal: segment may take several seconds.)")
            else:
                print("(Slow: segment transcode would exceed segment length.)")
    print(f"Output size: {size_bytes / (1024*1024):.2f} MiB")

    # Temp outputs are deleted unless the user asked to keep them.
    if use_temp:
        if args.keep_output:
            print(f"(Output kept: {output_path})")
        else:
            output_path.unlink(missing_ok=True)

    return 0


if __name__ == "__main__":
    sys.exit(main())
|
||||
101
scripts/transcode_benchmark.sh
Normal file
101
scripts/transcode_benchmark.sh
Normal file
@ -0,0 +1,101 @@
|
||||
#!/usr/bin/env bash
# Proof-of-concept: run FFmpeg transcode and report real time.
# Usage:
#   ./scripts/transcode_benchmark.sh path/to/video.mp4
#   ./scripts/transcode_benchmark.sh path/to/video.mp4 10        # first 10 seconds only
#   ./scripts/transcode_benchmark.sh path/to/video.mp4 10 nvidia
#
# Positional args: INPUT [DURATION (seconds)] [HWACCEL (cpu|nvidia|vaapi|qsv-h265)].
# Defaults: full file, cpu. Optional env overrides: FFMPEG, FFPROBE, VAAPI_DEVICE, QSV_DEVICE.
# Requires: ffmpeg, ffprobe. Output: temp file, then deleted. Reports real time and speed.

set -e
INPUT="${1:?Usage: $0 <input.mp4> [duration_sec] [cpu|nvidia|vaapi|qsv-h265]}"
DURATION="${2:-}"
HWACCEL="${3:-cpu}"
# On Linux, QSV needs a DRM render node. With two GPUs, renderD128 is often
# non-Intel and renderD129 the Intel iGPU; prefer 129 when both exist so QSV
# finds VA. "0" is the Windows-style device index fallback.
if [[ -z "${QSV_DEVICE:-}" ]]; then
  if [[ -e /dev/dri/renderD129 ]]; then
    QSV_DEVICE="/dev/dri/renderD129"
  elif [[ -e /dev/dri/renderD128 ]]; then
    QSV_DEVICE="/dev/dri/renderD128"
  else
    QSV_DEVICE="0"
  fi
fi
# Frigate container has ffmpeg under /usr/lib/ffmpeg/<ver>/bin, not on PATH:
# prefer PATH ffmpeg, else the first container-style binary found, else let
# "ffmpeg" fail later with a clear error.
if [[ -z "${FFMPEG:-}" ]]; then
  if command -v ffmpeg &>/dev/null; then
    FFMPEG="ffmpeg"
  elif [[ -d /usr/lib/ffmpeg ]] && FFMPEG_CANDIDATE=$(find /usr/lib/ffmpeg -path '*/bin/ffmpeg' -type f 2>/dev/null | head -1); [[ -n "${FFMPEG_CANDIDATE:-}" ]]; then
    FFMPEG="$FFMPEG_CANDIDATE"
  else
    FFMPEG="ffmpeg"
  fi
fi
# ffprobe: assume it sits next to the chosen ffmpeg; fall back to PATH.
FFPROBE="${FFPROBE:-$(dirname "$FFMPEG")/ffprobe}"
if [[ ! -x "$FFPROBE" ]]; then
  FFPROBE="ffprobe"
fi
OUTPUT=$(mktemp -u).mp4

# Always remove the temp output, even on error (set -e) or Ctrl-C.
cleanup() { rm -f "$OUTPUT"; }
trap cleanup EXIT

# Build base decode/input args; -t limits the transcode to DURATION seconds
# (only when DURATION is a plain number).
INPUT_ARGS=(-hide_banner -y -loglevel warning -stats -i "$INPUT")
if [[ -n "$DURATION" && "$DURATION" =~ ^[0-9]+\.?[0-9]*$ ]]; then
  INPUT_ARGS+=(-t "$DURATION")
fi

# PRE = hardware decode flags (before -i when expanded below); ENC = encoder flags.
case "$HWACCEL" in
  nvidia)
    PRE=( -hwaccel cuda -hwaccel_output_format cuda -extra_hw_frames 8 )
    ENC=(-c:v h264_nvenc)
    ;;
  vaapi)
    PRE=( -hwaccel vaapi -hwaccel_device "${VAAPI_DEVICE:-/dev/dri/renderD128}" -hwaccel_output_format vaapi )
    ENC=(-c:v h264_vaapi)
    ;;
  qsv-h265)
    PRE=( -load_plugin hevc_hw -hwaccel qsv -qsv_device "$QSV_DEVICE" -hwaccel_output_format qsv )
    # Use CQP explicitly; -profile:v/-level can be unsupported on some QSV runtimes
    ENC=(-c:v hevc_qsv -global_quality 23)
    ;;
  *)
    # cpu (and any unrecognized value): software libx264 encode.
    PRE=()
    ENC=(-c:v libx264 -preset:v ultrafast -tune:v zerolatency)
    ;;
esac

echo "Input: $INPUT"
echo "Output: $OUTPUT (temp)"
echo "HW: $HWACCEL"
[[ -n "$DURATION" ]] && echo "Limit: ${DURATION}s"
# QSV is Intel-only and needs a working Intel VA-API stack; if you see 'No VA display found', see scripts/README.md troubleshooting.
[[ "$HWACCEL" = "qsv-h265" ]] && echo "QSV device: $QSV_DEVICE"
echo ""

# Get duration for speed calculation (if not limiting, use full file length).
# Probe failures leave DUR_SEC empty and the speed line is skipped below.
if [[ -n "$DURATION" ]]; then
  DUR_SEC="$DURATION"
else
  DUR_SEC=$("${FFPROBE:-ffprobe}" -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 "$INPUT" 2>/dev/null || true)
fi

# Use $SECONDS (bash) so we don't rely on date %N or bc in minimal containers.
START=$SECONDS
"$FFMPEG" "${PRE[@]}" "${INPUT_ARGS[@]}" -an "${ENC[@]}" -f mp4 -movflags +faststart "$OUTPUT"
ELAPSED=$((SECONDS - START))
# Whole-second resolution: clamp to 1s to avoid divide-by-zero in the speed calc.
[[ "$ELAPSED" -eq 0 ]] && ELAPSED=1

# stat -c is GNU, stat -f is BSD/macOS; fall back to 0 if both fail.
SIZE=$(stat -c%s "$OUTPUT" 2>/dev/null || stat -f%z "$OUTPUT" 2>/dev/null || echo 0)
SIZE_MB=$(awk "BEGIN {printf \"%.2f\", $SIZE/1048576}" 2>/dev/null || echo "$((SIZE / 1048576))")

echo "--- Results ---"
echo "Real time: ${ELAPSED}s"
if [[ -n "$DUR_SEC" && "$DUR_SEC" =~ ^[0-9]+\.?[0-9]*$ ]]; then
  SPEED=$(awk "BEGIN {printf \"%.2f\", $DUR_SEC/$ELAPSED}" 2>/dev/null || echo "?")
  echo "Duration: ${DUR_SEC}s"
  echo "Speed: ${SPEED}x realtime"
fi
echo "Output size: ${SIZE_MB} MiB"
||||
69
transcode_proxy/DEV_WORKFLOW.md
Normal file
69
transcode_proxy/DEV_WORKFLOW.md
Normal file
@ -0,0 +1,69 @@
|
||||
# Dev workflow: frigate-dev (single image with transcode proxy)
|
||||
|
||||
Use **frigate-dev** so your working Docker setup keeps using the stable image. You switch between stable and dev by changing the image in compose and restarting. The transcode proxy runs **inside** the Frigate container; there is no separate proxy image.
|
||||
|
||||
## Image names
|
||||
|
||||
- **frigate-dev** – Frigate image built from this repo (includes transcode proxy, config + UI for transcode_proxy).
|
||||
- Your normal setup keeps using **ghcr.io/blakeblackshear/frigate:stable-tensorrt** (or whatever you use today).
|
||||
|
||||
## Start / stop (switch between stable and dev)
|
||||
|
||||
You can’t run both stacks at once (same ports). Use one compose file and swap the image.
|
||||
|
||||
**Stop everything:**
|
||||
```bash
|
||||
cd ~/docker-compose # or wherever your compose file is
|
||||
docker compose down
|
||||
```
|
||||
|
||||
**Run dev stack (Frigate with in-container transcode proxy):**
|
||||
- In `docker-compose.yml`, set the frigate service to `image: frigate-dev` and publish port 5010 if you use transcode_proxy.
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
**Switch back to stable:**
|
||||
- Stop: `docker compose down`
|
||||
- In `docker-compose.yml`, set frigate back to `image: ghcr.io/blakeblackshear/frigate:stable-tensorrt`.
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
**Useful commands:**
|
||||
- `docker compose down` – stop and remove containers.
|
||||
- `docker compose up -d` – start in the background.
|
||||
- `docker compose ps` – see what’s running.
|
||||
- `docker compose logs -f frigate` – follow Frigate logs.
|
||||
|
||||
## Building (Ubuntu server recommended)
|
||||
|
||||
Frigate’s image **is not** “just Python” – it has a **compile phase** (nginx, sqlite-vec, etc.). Building is done with Docker and can take a while.
|
||||
|
||||
**Where to build:** On the **Ubuntu server** where you run Frigate. That way you get the right architecture and avoid Windows/Linux cross-build issues. Sync the repo from your Windows machine via git (clone or push from Windows to a repo and pull on the server, or copy the repo onto the server).
|
||||
|
||||
**On the Ubuntu server:**
|
||||
|
||||
1. Clone (or pull) the Frigate repo with this code.
|
||||
2. **Build Frigate (TensorRT variant, same as stable-tensorrt):**
|
||||
```bash
|
||||
cd /path/to/frigate
|
||||
make version
|
||||
make local-trt
|
||||
docker tag frigate:latest-tensorrt frigate-dev
|
||||
```
|
||||
(`make local-trt` uses buildx; first time may be slow.) The resulting image includes the transcode proxy; no separate proxy image is built.
|
||||
|
||||
**If you prefer to build on Windows:** You can use Docker buildx to build for `linux/amd64` and push to a registry, then pull `frigate-dev` on the Ubuntu server. The Frigate build is heavy and may be slower or more fragile on Windows; building on the server is simpler.
|
||||
|
||||
## One-time setup on the server
|
||||
|
||||
```bash
|
||||
# Clone or copy the repo, then:
|
||||
cd /path/to/frigate
|
||||
make version
|
||||
make local-trt
|
||||
docker tag frigate:latest-tensorrt frigate-dev
|
||||
```
|
||||
|
||||
Then in your compose use `image: frigate-dev`, publish port 5010 if you use the transcode proxy, and set `transcode_proxy` in Frigate config as in the main README.
|
||||
55
transcode_proxy/README.md
Normal file
55
transcode_proxy/README.md
Normal file
@ -0,0 +1,55 @@
|
||||
# Frigate VOD Transcode Proxy
|
||||
|
||||
Optional proxy that runs **inside the Frigate container** and rewrites VOD HLS playback to an H.264 transport-stream rendition on the fly. Use it when recordings are HEVC (or high bitrate) and you want compatible or lower-bitrate playback.
|
||||
|
||||
## How it works
|
||||
|
||||
- **Manifest requests** (e.g. `.../master.m3u8` and `.../index-v1.m3u8`): Fetched from upstream and rewritten so the browser sees a proxy-owned H.264 HLS rendition.
|
||||
- **Segment requests**: The rewritten media playlist points to proxy-owned `.transcoded.ts` segment URLs. Those requests fetch the upstream source segment, transcode it to H.264 MPEG-TS with FFmpeg, cache it in memory (LRU, configurable size), then serve it.
|
||||
- **Init fragments**: The rewritten media playlist removes upstream `#EXT-X-MAP` usage, so the browser no longer depends on upstream fragmented MP4 init files for transcoded playback.
|
||||
|
||||
The proxy is an s6-managed service in the same Docker image as Frigate. It binds to port **5010** inside the container and starts after nginx is ready.
|
||||
|
||||
## Configuration
|
||||
|
||||
Environment variables (optional; defaults work when running in the same container):
|
||||
|
||||
| Variable | Default | Description |
|
||||
|----------|---------|-------------|
|
||||
| `TRANSCODE_PROXY_UPSTREAM` | `http://127.0.0.1:5000` | Upstream Frigate VOD base URL (nginx internal port when in-container). |
|
||||
| `TRANSCODE_PROXY_PATH_PREFIX` | (empty) | If the proxy is mounted at a path (e.g. `/vod-transcoded`), set this so the proxy strips it when forwarding. |
|
||||
| `TRANSCODE_PROXY_HOST` | `0.0.0.0` | Bind host. |
|
||||
| `TRANSCODE_PROXY_PORT` | `5010` | Bind port. |
|
||||
| `TRANSCODE_PROXY_CACHE_MB` | `500` | Max in-memory cache size (MB). |
|
||||
| `TRANSCODE_PROXY_FFMPEG` | (system) | FFmpeg binary path; uses Frigate’s FFmpeg when not set. |
|
||||
| `TRANSCODE_PROXY_H264_BITRATE` | `128k` | H.264 bitrate for transcoded segments. |
|
||||
| `TRANSCODE_PROXY_MAX_WIDTH` | `640` | Max output width for transcoded playback; aspect ratio is preserved and smaller sources are not upscaled. |
|
||||
| `TRANSCODE_PROXY_MAX_HEIGHT` | `480` | Max output height for transcoded playback; aspect ratio is preserved and smaller sources are not upscaled. |
|
||||
|
||||
## Enabling in Frigate
|
||||
|
||||
1. Build Frigate from this repo (e.g. `frigate-dev`) so the image includes the proxy and config/UI support.
|
||||
2. Expose the proxy either internally through Frigate nginx (recommended, e.g. `/vod-transcoded`) or by publishing port **5010** for direct access.
|
||||
3. In Frigate config (YAML), add:
|
||||
```yaml
|
||||
transcode_proxy:
|
||||
enabled: true
|
||||
vod_proxy_url: "http://YOUR_FRIGATE_HOST:5010" # same host as Frigate, port 5010
|
||||
```
|
||||
4. Restart Frigate. The UI will use the proxy for recording playback when enabled.
|
||||
|
||||
If Frigate is behind a reverse proxy and you expose the transcode service at a path (e.g. `https://frigate.example.com/vod-transcoded`), set `TRANSCODE_PROXY_PATH_PREFIX=/vod-transcoded` in the container environment and use that full URL as `vod_proxy_url`.
|
||||
|
||||
## Running (single container)
|
||||
|
||||
The proxy runs automatically inside the Frigate container. No separate container or image is needed. For same-origin playback, keep the service internal and route it through Frigate nginx on the normal UI origin.
|
||||
|
||||
See **transcode_proxy/DEV_WORKFLOW.md** for building the dev image (e.g. `frigate-dev`) and switching between stable and dev.
|
||||
|
||||
## Endpoints
|
||||
|
||||
- `GET /vod/.../master.m3u8` – Rewritten HLS master playlist for the transcoded rendition.
|
||||
- `GET /vod/.../index*.m3u8` – Rewritten HLS media playlist that points at proxy-owned transcoded transport-stream segments.
|
||||
- `GET /vod/.../*.transcoded.ts` – Transcoded H.264 MPEG-TS segments.
|
||||
- `GET /cache` – Cache stats (size, entry count).
|
||||
- `GET /health` – Health check.
|
||||
1
transcode_proxy/__init__.py
Normal file
1
transcode_proxy/__init__.py
Normal file
@ -0,0 +1 @@
|
||||
"""Transcode proxy: sits in front of Frigate VOD and transcodes segments on the fly to H.264."""
|
||||
5
transcode_proxy/__main__.py
Normal file
5
transcode_proxy/__main__.py
Normal file
@ -0,0 +1,5 @@
|
||||
"""Run the transcode proxy: python -m transcode_proxy."""
|
||||
from transcode_proxy.main import run
|
||||
|
||||
if __name__ == "__main__":
|
||||
run()
|
||||
47
transcode_proxy/cache.py
Normal file
47
transcode_proxy/cache.py
Normal file
@ -0,0 +1,47 @@
|
||||
"""In-memory LRU cache for transcoded segments (byte-size limited)."""
|
||||
import logging
|
||||
import threading
|
||||
from collections import OrderedDict
|
||||
from typing import Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ByteLRUCache:
|
||||
"""LRU cache that evicts by total byte size."""
|
||||
|
||||
def __init__(self, max_bytes: int):
|
||||
self._max_bytes = max_bytes
|
||||
self._current_bytes = 0
|
||||
self._order: OrderedDict[str, bytes] = OrderedDict()
|
||||
self._lock = threading.Lock()
|
||||
|
||||
def get(self, key: str) -> Optional[bytes]:
|
||||
with self._lock:
|
||||
data = self._order.pop(key, None)
|
||||
if data is not None:
|
||||
self._order[key] = data # move to end (most recent)
|
||||
return data
|
||||
return None
|
||||
|
||||
def set(self, key: str, value: bytes) -> None:
|
||||
size = len(value)
|
||||
if size > self._max_bytes:
|
||||
logger.warning("Segment larger than cache max (%s bytes), not caching", size)
|
||||
return
|
||||
with self._lock:
|
||||
while self._current_bytes + size > self._max_bytes and self._order:
|
||||
evicted_key = next(iter(self._order))
|
||||
evicted = self._order.pop(evicted_key)
|
||||
self._current_bytes -= len(evicted)
|
||||
logger.debug("Evicted %s from transcode cache", evicted_key)
|
||||
self._order[key] = value
|
||||
self._current_bytes += size
|
||||
|
||||
def size_bytes(self) -> int:
|
||||
with self._lock:
|
||||
return self._current_bytes
|
||||
|
||||
def count(self) -> int:
|
||||
with self._lock:
|
||||
return len(self._order)
|
||||
44
transcode_proxy/config.py
Normal file
44
transcode_proxy/config.py
Normal file
@ -0,0 +1,44 @@
|
||||
"""Configuration from environment."""
|
||||
import os
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
|
||||
def _env(name: str, fallback: str) -> str:
    """Read an environment variable, used lazily at Config() construction time."""
    return os.environ.get(name, fallback)


@dataclass
class Config:
    """Proxy configuration, resolved from the environment when instantiated."""

    # Upstream Frigate VOD base URL (e.g. http://nginx:80 or http://127.0.0.1:5001)
    upstream_base: str = field(
        default_factory=lambda: _env("TRANSCODE_PROXY_UPSTREAM", "http://127.0.0.1:80")
    )
    # Optional path prefix the proxy is mounted at (e.g. /vod-transcoded); strip when forwarding
    path_prefix: str = field(
        default_factory=lambda: _env("TRANSCODE_PROXY_PATH_PREFIX", "").rstrip("/")
    )
    # Host/port to bind
    host: str = field(default_factory=lambda: _env("TRANSCODE_PROXY_HOST", "0.0.0.0"))
    port: int = field(
        default_factory=lambda: int(_env("TRANSCODE_PROXY_PORT", "5010"))
    )
    # In-memory cache max size in bytes (env value is in MB)
    cache_max_bytes: int = field(
        default_factory=lambda: int(_env("TRANSCODE_PROXY_CACHE_MB", "500")) * 1024 * 1024
    )
    # FFmpeg binary
    ffmpeg_path: str = field(
        default_factory=lambda: _env("TRANSCODE_PROXY_FFMPEG", "ffmpeg")
    )
    # H.264 bitrate for transcoded segments
    h264_bitrate: str = field(
        default_factory=lambda: _env("TRANSCODE_PROXY_H264_BITRATE", "128k")
    )
    # Max output size for transcoded playback; preserves aspect ratio and will not upscale
    max_width: int = field(
        default_factory=lambda: int(_env("TRANSCODE_PROXY_MAX_WIDTH", "640"))
    )
    max_height: int = field(
        default_factory=lambda: int(_env("TRANSCODE_PROXY_MAX_HEIGHT", "480"))
    )


config = Config()
|
||||
24
transcode_proxy/docker-compose.example.yml
Normal file
24
transcode_proxy/docker-compose.example.yml
Normal file
@ -0,0 +1,24 @@
|
||||
# Example: Frigate with in-container transcode proxy (single image).
|
||||
#
|
||||
# 1. Build Frigate from this repo (on Ubuntu recommended):
|
||||
# make version && make local-trt && docker tag frigate:latest-tensorrt frigate-dev
|
||||
#
|
||||
# 2. Use image: frigate-dev and publish port 5010 for the transcode proxy.
|
||||
# 3. In Frigate config (config.yml), set:
|
||||
# transcode_proxy:
|
||||
# enabled: true
|
||||
# vod_proxy_url: "http://YOUR_HOST:5010"
|
||||
|
||||
services:
|
||||
frigate:
|
||||
container_name: frigate
|
||||
restart: unless-stopped
|
||||
image: frigate-dev
|
||||
# ... your existing frigate config (gpus, shm_size, devices, volumes) ...
|
||||
ports:
|
||||
- "5000:5000" # or 8971:8971 depending on your setup
|
||||
- "5010:5010" # transcode proxy (only needed if transcode_proxy.enabled is true)
|
||||
# Optional: override proxy defaults
|
||||
# environment:
|
||||
# TRANSCODE_PROXY_PORT: "5010"
|
||||
# TRANSCODE_PROXY_CACHE_MB: "500"
|
||||
419
transcode_proxy/main.py
Normal file
419
transcode_proxy/main.py
Normal file
@ -0,0 +1,419 @@
|
||||
"""FastAPI app: proxy VOD requests, transcode segments on the fly."""
|
||||
import logging
|
||||
import re
|
||||
from collections.abc import AsyncIterator
|
||||
from typing import Optional
|
||||
|
||||
import httpx
|
||||
from fastapi import FastAPI, Request, Response
|
||||
from fastapi.responses import StreamingResponse
|
||||
from transcode_proxy.cache import ByteLRUCache
|
||||
from transcode_proxy.config import config
|
||||
from transcode_proxy.transcode import (
|
||||
TranscodeError,
|
||||
stream_transcode_segment_to_h264_ts,
|
||||
)
|
||||
|
||||
# Root logger configuration so proxy messages are visible under uvicorn.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI(title="Frigate VOD Transcode Proxy", version="0.1.0")
# Shared in-memory LRU cache for init fragments and finished transcodes,
# bounded by config.cache_max_bytes.
cache = ByteLRUCache(config.cache_max_bytes)

# Segment extensions that the upstream VOD may expose.
SEGMENT_EXTENSIONS = (".m4s", ".mp4", ".ts")
# Client request headers forwarded to the upstream (auth/session related only).
FORWARD_HEADERS = ("cookie", "authorization", "referer")
# Suffix appended to playlist segment URIs to mark them as "transcode me".
TRANSCODED_SEGMENT_SUFFIX = ".transcoded.ts"
# CODECS attribute advertised in the rewritten master playlist
# (H.264 High profile, level 3.1 — matches the FFmpeg encode settings).
H264_CODEC = "avc1.64001f"
# Query keys consumed by the proxy itself; never forwarded upstream.
LOCAL_QUERY_KEYS = {"bitrate", "max_width", "max_height"}
|
||||
|
||||
|
||||
def _upstream_path(path: str) -> Optional[str]:
    """Map a request path to the upstream VOD path, or None when not allowed.

    Strips the configured mount prefix, then only lets ``/vod...`` (and the
    ``/vod-transcoded`` alias) through to the upstream server.
    """
    relative = path.lstrip("/")

    if config.path_prefix:
        mount = config.path_prefix.strip("/") + "/"
        if relative.startswith(mount):
            relative = relative[len(mount):]

    if relative == "vod" or relative.startswith("vod/"):
        return f"/{relative}"

    if relative == "vod-transcoded":
        # Bare alias maps to the VOD root.
        return "/vod"

    alias = "vod-transcoded/"
    if relative.startswith(alias):
        return f"/{relative[len(alias):]}"

    return None
|
||||
|
||||
|
||||
def _is_segment(path: str) -> bool:
    """True when the path looks like a media segment (raw or proxy-tagged)."""
    trimmed = path.rstrip("/")
    # str.endswith accepts a tuple of candidate suffixes.
    return trimmed.endswith((TRANSCODED_SEGMENT_SUFFIX,) + SEGMENT_EXTENSIONS)
|
||||
|
||||
|
||||
def _is_init_path(path: str) -> bool:
|
||||
return bool(re.search(r"/init.*\.mp4$", path))
|
||||
|
||||
|
||||
def _is_master_playlist(path: str) -> bool:
|
||||
return path.endswith("/master.m3u8") or path.endswith("master.m3u8")
|
||||
|
||||
|
||||
def _init_upstream_path(segment_path: str) -> Optional[str]:
|
||||
"""Infer the matching init fragment for an fMP4 media fragment path."""
|
||||
match = re.search(r"/seg-\d+(?P<suffix>.*)\.m4s$", segment_path)
|
||||
if not match:
|
||||
return None
|
||||
suffix = match.group("suffix")
|
||||
return re.sub(r"/seg-\d+.*\.m4s$", f"/init{suffix}.mp4", segment_path)
|
||||
|
||||
|
||||
async def _fetch_upstream_bytes(
    client: httpx.AsyncClient, url: str, headers: dict[str, str]
) -> Optional[bytes]:
    """GET the URL and return the response body, or None on any failure.

    Failures (connection errors, non-2xx statuses) are logged, not raised.
    """
    try:
        response = await client.get(url, headers=headers)
        response.raise_for_status()
    except Exception as e:
        logger.warning("Upstream fetch failed %s: %s", url, e)
        return None
    return response.content
|
||||
|
||||
|
||||
async def _fetch_source_init_bytes(
    client: httpx.AsyncClient,
    init_path: str,
    query: str,
    headers: dict[str, str],
) -> Optional[bytes]:
    """Fetch (and memoize) the fMP4 init fragment for a media fragment.

    Successful fetches are cached under a "source-init:" key so repeated
    segment requests from one recording reuse the same init bytes.
    """
    base = config.upstream_base.rstrip("/")
    init_url = f"{base}{init_path}?{query}" if query else f"{base}{init_path}"

    cache_key = f"source-init:{init_url}"
    hit = cache.get(cache_key)
    if hit is not None:
        return hit

    fetched = await _fetch_upstream_bytes(client, init_url, headers)
    if fetched is not None:
        cache.set(cache_key, fetched)
    return fetched
|
||||
|
||||
|
||||
async def _stream_source_segment_bytes(
    source_url: str,
    headers: dict[str, str],
    init_bytes: Optional[bytes] = None,
) -> AsyncIterator[bytes]:
    """Yield the init fragment (if given) followed by the upstream segment body.

    Splicing the init bytes in front lets FFmpeg decode a bare fMP4 media
    fragment from a single continuous stdin stream.
    """
    if init_bytes is not None:
        yield init_bytes

    async with httpx.AsyncClient(timeout=60.0) as client:
        async with client.stream("GET", source_url, headers=headers) as response:
            response.raise_for_status()
            async for piece in response.aiter_bytes():
                if not piece:
                    continue
                yield piece
|
||||
|
||||
|
||||
def _proxy_segment_uri(entry: str) -> str:
    """Tag a playlist segment URI so the proxy knows to transcode it."""
    return entry + TRANSCODED_SEGMENT_SUFFIX
|
||||
|
||||
|
||||
def _source_segment_path(path: str) -> str:
    """Undo _proxy_segment_uri: strip the transcode marker when present."""
    # removesuffix returns the string unchanged when the suffix is absent.
    return path.removesuffix(TRANSCODED_SEGMENT_SUFFIX)
|
||||
|
||||
|
||||
def _resolution_for_transcode(
|
||||
width: int, height: int, max_width: int, max_height: int
|
||||
) -> tuple[int, int]:
|
||||
if width <= 0 or height <= 0:
|
||||
return (max_width, max_height)
|
||||
|
||||
max_width = max(max_width, 2)
|
||||
max_height = max(max_height, 2)
|
||||
scale = min(max_width / width, max_height / height, 1.0)
|
||||
out_width = max(2, int(width * scale))
|
||||
out_height = max(2, int(height * scale))
|
||||
|
||||
if out_width % 2:
|
||||
out_width -= 1
|
||||
if out_height % 2:
|
||||
out_height -= 1
|
||||
|
||||
return (max(out_width, 2), max(out_height, 2))
|
||||
|
||||
|
||||
def _bandwidth_bits(bitrate: str) -> int:
|
||||
match = re.fullmatch(r"(?P<value>\d+(?:\.\d+)?)(?P<suffix>[kKmMgG]?)", bitrate.strip())
|
||||
if not match:
|
||||
return 2_000_000
|
||||
|
||||
value = float(match.group("value"))
|
||||
suffix = match.group("suffix").upper()
|
||||
multiplier = {
|
||||
"": 1,
|
||||
"K": 1_000,
|
||||
"M": 1_000_000,
|
||||
"G": 1_000_000_000,
|
||||
}[suffix]
|
||||
return int(value * multiplier)
|
||||
|
||||
|
||||
def _transcode_request_profile(request: Request) -> tuple[str, int, int, str]:
    """Extract the transcode settings and the query string to forward upstream.

    Returns (bitrate, max_width, max_height, upstream_query). The proxy's own
    knobs (LOCAL_QUERY_KEYS) are consumed here and removed from the forwarded
    query string.
    """
    from urllib.parse import urlencode

    bitrate = request.query_params.get("bitrate", config.h264_bitrate)
    max_width = int(request.query_params.get("max_width", config.max_width))
    max_height = int(request.query_params.get("max_height", config.max_height))
    # Fix: re-encode forwarded params with urlencode so values containing
    # '&', '=', spaces, etc. survive the round trip — query_params yields
    # already-decoded values, and the previous raw f-string join emitted
    # them verbatim, corrupting the upstream query string.
    upstream_query = urlencode(
        [
            (key, value)
            for key, value in request.query_params.multi_items()
            if key not in LOCAL_QUERY_KEYS
        ]
    )
    return bitrate, max_width, max_height, upstream_query
|
||||
|
||||
|
||||
def _rewrite_master_playlist(
    upstream_bytes: bytes, bitrate: str, max_width: int, max_height: int
) -> bytes:
    """Rewrite the upstream HLS master playlist to advertise the transcoded
    variant (H.264 at the proxy's bitrate/resolution) instead of the source.

    Only the first #EXT-X-STREAM-INF variant is kept. If the playlist cannot
    be parsed, the upstream bytes are returned unchanged.
    """
    playlist = upstream_bytes.decode("utf-8", errors="replace")
    lines = [line.strip() for line in playlist.splitlines() if line.strip()]
    child_uri: Optional[str] = None
    stream_inf_line: Optional[str] = None

    # Locate the first variant tag and the first non-comment line after it
    # (that line is the variant's media playlist URI).
    for idx, line in enumerate(lines):
        if line.startswith("#EXT-X-STREAM-INF:"):
            stream_inf_line = line
            for child_line in lines[idx + 1 :]:
                if child_line and not child_line.startswith("#"):
                    child_uri = child_line
                    break
            break

    if child_uri is None or stream_inf_line is None:
        logger.warning("Unable to parse master playlist, returning upstream manifest")
        return upstream_bytes

    # Advertise the *output* characteristics of the transcode, not the source.
    attrs = [
        f'BANDWIDTH={max(_bandwidth_bits(bitrate), 1)}',
        f'CODECS="{H264_CODEC}"',
    ]

    # Carry over the source resolution, scaled down to the transcode bounds,
    # when the upstream variant declares one.
    resolution_match = re.search(r"RESOLUTION=(\d+)x(\d+)", stream_inf_line)
    if resolution_match:
        width = int(resolution_match.group(1))
        height = int(resolution_match.group(2))
        out_width, out_height = _resolution_for_transcode(
            width, height, max_width, max_height
        )
        attrs.insert(1, f"RESOLUTION={out_width}x{out_height}")

    rewritten = [
        "#EXTM3U",
        "#EXT-X-STREAM-INF:" + ",".join(attrs),
        child_uri,
        "",
    ]
    return "\n".join(rewritten).encode()
|
||||
|
||||
|
||||
def _rewrite_media_playlist(upstream_bytes: bytes) -> bytes:
    """Rewrite a media playlist so every segment URI targets the transcoder.

    - Drops #EXT-X-MAP lines (the init fragment is spliced in server-side).
    - Inserts #EXT-X-DISCONTINUITY before every segment after the first,
      since each segment is transcoded as an independent MPEG-TS stream.
    - Tags each segment URI with the transcode suffix.
    """
    rewritten: list[str] = []
    segments_seen = 0

    for raw_line in upstream_bytes.decode("utf-8", errors="replace").splitlines():
        line = raw_line.strip()

        if line.startswith("#EXT-X-MAP:"):
            continue

        if line.startswith("#EXTINF:") and segments_seen > 0:
            rewritten.append("#EXT-X-DISCONTINUITY")

        if line and not line.startswith("#"):
            # Segment URI: emit the tagged (stripped) form.
            rewritten.append(_proxy_segment_uri(line))
            segments_seen += 1
        else:
            # Tags/comments/blank lines pass through with original whitespace.
            rewritten.append(raw_line)

    # Ensure the playlist ends with a trailing newline.
    if rewritten and rewritten[-1] != "":
        rewritten.append("")

    return "\n".join(rewritten).encode()
|
||||
|
||||
|
||||
async def _proxy_upstream_response(
    client: httpx.AsyncClient, url: str, headers: dict[str, str]
) -> Optional[httpx.Response]:
    """GET the URL, returning the full response or None on any failure.

    Failures (connection errors, non-2xx statuses) are logged, not raised.
    """
    try:
        response = await client.get(url, headers=headers)
        response.raise_for_status()
    except Exception as e:
        logger.warning("Upstream fetch failed %s: %s", url, e)
        return None
    return response
|
||||
|
||||
|
||||
async def _transcoded_segment_response(
    source_url: str,
    cache_key: str,
    headers: dict[str, str],
    init_bytes: Optional[bytes] = None,
    bitrate: Optional[str] = None,
    max_width: Optional[int] = None,
    max_height: Optional[int] = None,
) -> Response:
    """Stream a freshly transcoded segment back to the client.

    Feeds the upstream segment (optionally prefixed with its fMP4 init
    fragment) into FFmpeg and streams the MPEG-TS output. On a fully
    successful stream, the complete output is stored under ``cache_key``.
    Returns 502 when FFmpeg fails before producing any output.
    """
    stream = await stream_transcode_segment_to_h264_ts(
        _stream_source_segment_bytes(source_url, headers, init_bytes),
        config.ffmpeg_path,
        bitrate or config.h264_bitrate,
        max_width or config.max_width,
        max_height or config.max_height,
    )

    # Wait for the first output chunk before committing to a 200 response, so
    # early FFmpeg failures can still surface to the client as a 502.
    try:
        first_chunk = await stream.first_chunk()
    except TranscodeError as e:
        await stream.aclose()
        logger.warning("Transcode stream failed %s: %s", source_url, e)
        return Response(status_code=502, content=b"Transcode failed")

    async def body() -> AsyncIterator[bytes]:
        try:
            async for chunk in stream.iter_chunks(first_chunk):
                yield chunk
        except TranscodeError as e:
            # Headers were already sent; all we can do is abort the stream.
            logger.warning("Transcode stream failed %s: %s", source_url, e)
            raise
        else:
            # Only cache output from a cleanly completed transcode.
            cache.set(cache_key, stream.output_bytes)

    return StreamingResponse(
        body(),
        media_type="video/mp2t",
        headers={"Cache-Control": "private, max-age=300"},
    )
|
||||
|
||||
|
||||
@app.get("/cache")
async def cache_info() -> dict:
    """Return cache size and entry count (for debugging)."""
    size = cache.size_bytes()
    return {
        "size_bytes": size,
        "size_mb": round(size / (1024 * 1024), 2),
        "entries": cache.count(),
    }
|
||||
|
||||
|
||||
@app.get("/health")
async def health() -> dict:
    """Liveness probe: always reports ok while the app is serving."""
    return dict(status="ok")
|
||||
|
||||
|
||||
@app.get("/{full_path:path}")
async def vod_proxy(request: Request, full_path: str) -> Response:
    """Handle /vod/... or /vod-transcoded/... (when path_prefix is set).

    Routing, in order:
    - paths outside /vod -> 404
    - *.transcoded.ts    -> serve from cache or transcode the source segment
    - master.m3u8        -> rewrite to advertise the transcoded variant
    - other *.m3u8       -> rewrite segment URIs to the transcode marker
    - everything else    -> pass through unchanged
    """
    path = "/" + full_path.lstrip("/")
    upstream_path = _upstream_path(path)
    # Only VOD paths are ever forwarded upstream.
    if upstream_path is None or not (
        upstream_path == "/vod" or upstream_path.startswith("/vod/")
    ):
        return Response(status_code=404, content=b"Not found")
    bitrate, max_width, max_height, upstream_query = _transcode_request_profile(request)
    upstream_url = f"{config.upstream_base.rstrip('/')}{upstream_path}"
    if upstream_query:
        upstream_url += f"?{upstream_query}"

    # Forward only auth/session-related headers (see FORWARD_HEADERS).
    headers = {
        k: v for k, v in request.headers.items() if k.lower() in FORWARD_HEADERS
    }

    if upstream_path.endswith(TRANSCODED_SEGMENT_SUFFIX):
        # Cache key includes the transcode profile so different bitrates or
        # sizes of the same segment don't collide.
        cache_key = f"{upstream_url}|{bitrate}|{max_width}x{max_height}"
        cached = cache.get(cache_key)
        if cached is not None:
            return Response(
                content=cached,
                media_type="video/mp2t",
                headers={"Cache-Control": "private, max-age=300"},
            )

        # Strip the .transcoded.ts marker to get the real upstream segment.
        source_path = _source_segment_path(upstream_path)
        source_url = f"{config.upstream_base.rstrip('/')}{source_path}"
        if upstream_query:
            source_url += f"?{upstream_query}"

        # fMP4 fragments need their init fragment prepended for FFmpeg to
        # decode them; plain .ts/.mp4 segments are self-contained.
        init_bytes: Optional[bytes] = None
        if source_path.endswith(".m4s"):
            init_path = _init_upstream_path(source_path)
            if init_path is None:
                return Response(status_code=502, content=b"Init segment inference failed")

            async with httpx.AsyncClient(timeout=30.0) as client:
                init_bytes = await _fetch_source_init_bytes(
                    client, init_path, upstream_query, headers
                )

            if init_bytes is None:
                return Response(status_code=502, content=b"Init segment fetch failed")

        return await _transcoded_segment_response(
            source_url=source_url,
            cache_key=cache_key,
            headers=headers,
            init_bytes=init_bytes,
            bitrate=bitrate,
            max_width=max_width,
            max_height=max_height,
        )

    async with httpx.AsyncClient(timeout=30.0) as client:
        # Master playlist: advertise the transcoded variant's codec/bitrate.
        if _is_master_playlist(upstream_path):
            upstream_resp = await _proxy_upstream_response(client, upstream_url, headers)
            if upstream_resp is None:
                return Response(status_code=502, content=b"Upstream fetch failed")

            return Response(
                content=_rewrite_master_playlist(
                    upstream_resp.content, bitrate, max_width, max_height
                ),
                media_type="application/vnd.apple.mpegurl",
                headers={"Cache-Control": "no-store"},
            )

        # Media playlist: retarget segment URIs at the transcoder.
        if upstream_path.endswith(".m3u8"):
            upstream_resp = await _proxy_upstream_response(client, upstream_url, headers)
            if upstream_resp is None:
                return Response(status_code=502, content=b"Upstream fetch failed")

            return Response(
                content=_rewrite_media_playlist(upstream_resp.content),
                media_type="application/vnd.apple.mpegurl",
                headers={"Cache-Control": "no-store"},
            )

        # Anything else (e.g. untagged segments, init fragments) passes through.
        upstream_resp = await _proxy_upstream_response(client, upstream_url, headers)
        if upstream_resp is None:
            return Response(status_code=502, content=b"Upstream fetch failed")

        return Response(
            content=upstream_resp.content,
            media_type=upstream_resp.headers.get("content-type", "application/octet-stream"),
            headers={"Cache-Control": "no-store"},
        )
|
||||
|
||||
|
||||
def run() -> None:
    """Launch the proxy under uvicorn using the configured bind address."""
    # Imported lazily so the module stays importable without uvicorn installed.
    import uvicorn

    options = {
        "host": config.host,
        "port": config.port,
        "log_level": "info",
    }
    uvicorn.run("transcode_proxy.main:app", **options)
|
||||
|
||||
|
||||
# Allow direct execution (e.g. `python -m transcode_proxy.main`).
if __name__ == "__main__":
    run()
|
||||
5
transcode_proxy/requirements.txt
Normal file
5
transcode_proxy/requirements.txt
Normal file
@ -0,0 +1,5 @@
|
||||
# Dependencies for running the transcode proxy standalone (e.g. in a separate container).
|
||||
# Frigate's main container may already have these; the proxy can share the same env.
|
||||
fastapi>=0.100.0
|
||||
uvicorn>=0.22.0
|
||||
httpx>=0.24.0
|
||||
256
transcode_proxy/transcode.py
Normal file
256
transcode_proxy/transcode.py
Normal file
@ -0,0 +1,256 @@
|
||||
"""Transcode media segments to H.264 transport stream bytes using FFmpeg."""
|
||||
import asyncio
|
||||
import logging
|
||||
import subprocess
|
||||
from collections.abc import AsyncIterable, AsyncIterator
|
||||
from typing import Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Distinct error type so callers can turn transcode failures into HTTP 502s
# without catching unrelated RuntimeErrors.
class TranscodeError(RuntimeError):
    """Raised when FFmpeg cannot produce a valid transcoded segment."""
||||
|
||||
|
||||
def _build_scale_filter(max_width: int, max_height: int) -> Optional[str]:
|
||||
if max_width <= 0 or max_height <= 0:
|
||||
return None
|
||||
|
||||
return (
|
||||
f"scale=w={max_width}:h={max_height}:"
|
||||
"force_original_aspect_ratio=decrease:"
|
||||
"force_divisible_by=2"
|
||||
)
|
||||
|
||||
|
||||
def _build_ffmpeg_cmd(
    ffmpeg_path: str,
    bitrate: str,
    max_width: int,
    max_height: int,
) -> list[str]:
    """Build the FFmpeg argv: segment on stdin -> H.264 MPEG-TS on stdout.

    Output is video-only (audio dropped), rate-controlled at ``bitrate``,
    with an optional downscale filter bounded by max_width x max_height.
    """
    # Input options plus audio drop.
    cmd = [
        ffmpeg_path,
        "-hide_banner",
        "-loglevel", "error",
        "-i", "pipe:0",
        "-an",
    ]

    # Fix: add the filter while building the list instead of splicing at the
    # magic index `cmd[7:7]`, which silently lands in the wrong place if the
    # surrounding options are ever reordered. Resulting argv is identical.
    scale_filter = _build_scale_filter(max_width, max_height)
    if scale_filter:
        cmd += ["-vf", scale_filter]

    cmd += [
        # Encoder settings matching the advertised CODECS (High @ 3.1).
        "-pix_fmt", "yuv420p",
        "-c:v", "libx264",
        "-preset", "fast",
        "-profile:v", "high",
        "-level:v", "3.1",
        "-b:v", bitrate,
        "-maxrate", bitrate,
        "-bufsize", bitrate,
        # Minimize muxing latency for streamed playback.
        "-muxdelay", "0",
        "-muxpreload", "0",
        "-f", "mpegts",
        "-mpegts_flags", "+initial_discontinuity",
        "pipe:1",
    ]
    return cmd
|
||||
|
||||
|
||||
class H264TSStream:
    """Manage a streaming FFmpeg transcode process.

    Lifecycle: ``start`` launches FFmpeg plus two background tasks (one feeds
    stdin from the source iterator, one drains stderr); the caller then pulls
    MPEG-TS output via ``first_chunk``/``iter_chunks``. All output is also
    accumulated in memory (``output_bytes``) so it can be cached afterwards.
    """

    def __init__(self, process: asyncio.subprocess.Process):
        self._process = process
        # Captured stderr, used to build error messages on non-zero exit.
        self._stderr = bytearray()
        # Full accumulated stdout (the transcoded TS bytes).
        self._output = bytearray()
        # First failure seen while feeding stdin, if any.
        self._input_error: Exception | None = None
        self._closed = False
        self._stdin_task: asyncio.Task[None] | None = None
        self._stderr_task: asyncio.Task[None] | None = None

    @classmethod
    async def start(
        cls,
        source_chunks: AsyncIterable[bytes],
        ffmpeg_path: str,
        bitrate: str = "2M",
        max_width: int = 640,
        max_height: int = 480,
    ) -> "H264TSStream":
        # Spawn FFmpeg reading the source from stdin and writing TS to stdout.
        process = await asyncio.create_subprocess_exec(
            *_build_ffmpeg_cmd(ffmpeg_path, bitrate, max_width, max_height),
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        stream = cls(process)
        stream._stdin_task = asyncio.create_task(stream._feed_stdin(source_chunks))
        stream._stderr_task = asyncio.create_task(stream._drain_stderr())
        return stream

    async def _feed_stdin(self, source_chunks: AsyncIterable[bytes]) -> None:
        # Pump source bytes into FFmpeg. Failures are recorded rather than
        # raised so they can be reported later from _ensure_success.
        assert self._process.stdin is not None

        try:
            async for chunk in source_chunks:
                if not chunk:
                    continue
                self._process.stdin.write(chunk)
                await self._process.stdin.drain()
        except (BrokenPipeError, ConnectionResetError) as exc:
            # FFmpeg exited early (e.g. undecodable input); surfaced later.
            self._input_error = exc
        except Exception as exc:  # pragma: no cover - depends on upstream/network failures
            self._input_error = exc
        finally:
            # Always close stdin so FFmpeg sees EOF and can flush its output.
            stdin = self._process.stdin
            if stdin is not None and not stdin.is_closing():
                stdin.close()
                try:
                    await stdin.wait_closed()
                except Exception:
                    pass

    async def _drain_stderr(self) -> None:
        # Drain stderr continuously to avoid deadlocking FFmpeg on a full pipe.
        assert self._process.stderr is not None

        while True:
            chunk = await self._process.stderr.read(8192)
            if not chunk:
                break
            self._stderr.extend(chunk)

    async def _read_stdout_chunk(self) -> bytes:
        # Read one chunk of transcoded output, mirroring it into self._output.
        assert self._process.stdout is not None
        chunk = await self._process.stdout.read(65536)
        if chunk:
            self._output.extend(chunk)
        return chunk

    def _error_message(self) -> str:
        # Prefer the input-side failure; fall back to FFmpeg's own stderr.
        if self._input_error is not None:
            return f"Source stream failed: {self._input_error}"
        if self._stderr:
            return self._stderr.decode(errors="replace")
        return "unknown FFmpeg error"

    async def _ensure_success(self) -> bytes:
        # Join the background tasks, then fail if FFmpeg exited non-zero.
        if self._stdin_task is not None:
            await self._stdin_task
        if self._stderr_task is not None:
            await self._stderr_task

        returncode = await self._process.wait()
        if returncode != 0:
            raise TranscodeError(self._error_message())

        return bytes(self._output)

    async def first_chunk(self) -> bytes:
        """Return the first output chunk, or raise TranscodeError.

        Waiting for the first chunk lets the caller decide the HTTP status
        before any response body is streamed.
        """
        chunk = await self._read_stdout_chunk()
        if chunk:
            return chunk

        # No output at all: reap the process to build a useful error message.
        try:
            await self._ensure_success()
        finally:
            self._closed = True

        raise TranscodeError("FFmpeg produced no output")

    async def iter_chunks(self, first_chunk: bytes) -> AsyncIterator[bytes]:
        """Yield ``first_chunk`` then the rest of FFmpeg's output.

        Raises TranscodeError if FFmpeg exits non-zero; always closes the
        process when iteration ends (normally or via caller abort).
        """
        try:
            yield first_chunk
            while True:
                chunk = await self._read_stdout_chunk()
                if not chunk:
                    break
                yield chunk

            await self._ensure_success()
        finally:
            await self.aclose()

    async def aclose(self) -> None:
        """Kill the process (if still running) and cancel background tasks."""
        if self._closed:
            return

        self._closed = True

        if self._process.returncode is None:
            self._process.kill()
            await self._process.wait()

        for task in (self._stdin_task, self._stderr_task):
            if task is None or task.done():
                continue
            task.cancel()
            try:
                await task
            except asyncio.CancelledError:
                pass

    @property
    def output_bytes(self) -> bytes:
        # Everything FFmpeg has written to stdout so far.
        return bytes(self._output)
|
||||
|
||||
|
||||
async def stream_transcode_segment_to_h264_ts(
    source_chunks: AsyncIterable[bytes],
    ffmpeg_path: str,
    bitrate: str = "2M",
    max_width: int = 640,
    max_height: int = 480,
) -> H264TSStream:
    """Start an FFmpeg process that streams H.264 MPEG-TS output.

    Thin convenience wrapper around H264TSStream.start.
    """
    return await H264TSStream.start(
        source_chunks,
        ffmpeg_path,
        bitrate=bitrate,
        max_width=max_width,
        max_height=max_height,
    )
|
||||
|
||||
|
||||
def transcode_segment_to_h264_ts(
    segment_bytes: bytes,
    ffmpeg_path: str,
    bitrate: str = "2M",
    max_width: int = 640,
    max_height: int = 480,
) -> Optional[bytes]:
    """Decode a segment and re-encode it as H.264 MPEG-TS bytes.

    Blocking one-shot FFmpeg run; returns the transport-stream bytes, or
    None on failure or timeout (logged, never raised).
    """
    cmd = _build_ffmpeg_cmd(ffmpeg_path, bitrate, max_width, max_height)
    try:
        completed = subprocess.run(
            cmd,
            input=segment_bytes,
            capture_output=True,
            timeout=60,
        )
    except subprocess.TimeoutExpired:
        logger.warning("FFmpeg transcode timed out")
        return None
    except Exception as e:
        logger.warning("FFmpeg transcode error: %s", e)
        return None

    if completed.returncode == 0:
        return completed.stdout

    logger.warning(
        "FFmpeg transcode failed: %s",
        completed.stderr.decode(errors="replace") if completed.stderr else "unknown",
    )
    return None
|
||||
@ -35,6 +35,7 @@ import { GenericVideoPlayer } from "../player/GenericVideoPlayer";
|
||||
import { useTranslation } from "react-i18next";
|
||||
import { ExportCase } from "@/types/export";
|
||||
import { CustomTimeSelector } from "./CustomTimeSelector";
|
||||
import useRecordingPlaybackSource from "@/hooks/use-recording-playback-source";
|
||||
|
||||
const EXPORT_OPTIONS = [
|
||||
"1",
|
||||
@ -428,11 +429,22 @@ export function ExportPreviewDialog({
|
||||
setShowPreview,
|
||||
}: ExportPreviewDialogProps) {
|
||||
const { t } = useTranslation(["components/dialog"]);
|
||||
const vodPath = range
|
||||
? `/vod/${camera}/start/${range.after}/end/${range.before}/index.m3u8`
|
||||
: `/vod/${camera}/start/0/end/0/index.m3u8`;
|
||||
const playbackSource = useRecordingPlaybackSource({
|
||||
camera,
|
||||
after: range?.after ?? 0,
|
||||
before: range?.before ?? 0,
|
||||
vodPath,
|
||||
enabled: !!range,
|
||||
});
|
||||
|
||||
if (!range) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const source = `${baseUrl}vod/${camera}/start/${range.after}/end/${range.before}/index.m3u8`;
|
||||
const source = playbackSource ?? `${baseUrl}${vodPath}`;
|
||||
|
||||
return (
|
||||
<Dialog open={showPreview} onOpenChange={setShowPreview}>
|
||||
|
||||
@ -80,6 +80,7 @@ import {
|
||||
DrawerTitle,
|
||||
DrawerTrigger,
|
||||
} from "@/components/ui/drawer";
|
||||
import useRecordingPlaybackSource from "@/hooks/use-recording-playback-source";
|
||||
import { LuInfo } from "react-icons/lu";
|
||||
import { TooltipPortal } from "@radix-ui/react-tooltip";
|
||||
import { FaPencilAlt } from "react-icons/fa";
|
||||
@ -1866,8 +1867,16 @@ export function VideoTab({ search }: VideoTabProps) {
|
||||
const endTime = (search.end_time ?? Date.now() / 1000) + REVIEW_PADDING;
|
||||
return `start/${startTime}/end/${endTime}`;
|
||||
}, [search]);
|
||||
|
||||
const source = `${baseUrl}vod/${search.camera}/${clipTimeRange}/index.m3u8`;
|
||||
const startTime = search.start_time - REVIEW_PADDING;
|
||||
const endTime = (search.end_time ?? Date.now() / 1000) + REVIEW_PADDING;
|
||||
const vodPath = `/vod/${search.camera}/${clipTimeRange}/index.m3u8`;
|
||||
const playbackSource = useRecordingPlaybackSource({
|
||||
camera: search.camera,
|
||||
after: startTime,
|
||||
before: endTime,
|
||||
vodPath,
|
||||
});
|
||||
const source = playbackSource ?? `${baseUrl}${vodPath}`;
|
||||
|
||||
return (
|
||||
<>
|
||||
|
||||
@ -41,6 +41,7 @@ import ImageLoadingIndicator from "@/components/indicators/ImageLoadingIndicator
|
||||
import ObjectTrackOverlay from "../ObjectTrackOverlay";
|
||||
import { useIsAdmin } from "@/hooks/use-is-admin";
|
||||
import { VideoResolutionType } from "@/types/live";
|
||||
import useRecordingPlaybackSource from "@/hooks/use-recording-playback-source";
|
||||
|
||||
type TrackingDetailsProps = {
|
||||
className?: string;
|
||||
@ -513,25 +514,36 @@ export function TrackingDetails({
|
||||
setBlueLineHeightPx(bluePx);
|
||||
}, [eventSequence, timelineSize.width, timelineSize.height, effectiveTime]);
|
||||
|
||||
const videoSource = useMemo(() => {
|
||||
// event.start_time and event.end_time are in DETECT stream time
|
||||
// Convert to record stream time, then create video clip with padding.
|
||||
// Use sourceOffsetRef (stable per event) so the HLS player doesn't
|
||||
// reload while the user is dragging the annotation offset slider.
|
||||
const videoWindow = useMemo(() => {
|
||||
const sourceOffset = sourceOffsetRef.current;
|
||||
const eventStartRec = event.start_time + sourceOffset / 1000;
|
||||
const eventEndRec =
|
||||
(event.end_time ?? Date.now() / 1000) + sourceOffset / 1000;
|
||||
const startTime = eventStartRec - REVIEW_PADDING;
|
||||
const endTime = eventEndRec + REVIEW_PADDING;
|
||||
const playlist = `${baseUrl}vod/clip/${event.camera}/start/${startTime}/end/${endTime}/index.m3u8`;
|
||||
|
||||
return {
|
||||
startTime,
|
||||
endTime,
|
||||
vodPath: `/vod/clip/${event.camera}/start/${startTime}/end/${endTime}/index.m3u8`,
|
||||
};
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, [event]);
|
||||
const playbackSource = useRecordingPlaybackSource({
|
||||
camera: event.camera,
|
||||
after: videoWindow.startTime,
|
||||
before: videoWindow.endTime,
|
||||
vodPath: videoWindow.vodPath,
|
||||
});
|
||||
const videoSource = useMemo(() => {
|
||||
const playlist = playbackSource ?? `${baseUrl}${videoWindow.vodPath}`;
|
||||
|
||||
return {
|
||||
playlist,
|
||||
startPosition: 0,
|
||||
};
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, [event]);
|
||||
}, [playbackSource, videoWindow]);
|
||||
|
||||
// Determine camera aspect ratio category
|
||||
const cameraAspect = useMemo(() => {
|
||||
|
||||
@ -9,7 +9,10 @@ import {
|
||||
import { useApiHost } from "@/api";
|
||||
import useSWR from "swr";
|
||||
import { FrigateConfig } from "@/types/frigateConfig";
|
||||
import { Recording } from "@/types/record";
|
||||
import {
|
||||
Recording,
|
||||
RecordingPlaybackPreference,
|
||||
} from "@/types/record";
|
||||
import { Preview } from "@/types/preview";
|
||||
import PreviewPlayer, { PreviewController } from "../PreviewPlayer";
|
||||
import { DynamicVideoController } from "./DynamicVideoController";
|
||||
@ -21,11 +24,21 @@ import { VideoResolutionType } from "@/types/live";
|
||||
import axios from "axios";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { useTranslation } from "react-i18next";
|
||||
import { useUserPersistence } from "@/hooks/use-user-persistence";
|
||||
import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";
|
||||
import { chooseRecordingPlayback } from "@/utils/recordingPlayback";
|
||||
import {
|
||||
calculateInpointOffset,
|
||||
calculateSeekPosition,
|
||||
} from "@/utils/videoUtil";
|
||||
import { isFirefox } from "react-device-detect";
|
||||
import {
|
||||
Select,
|
||||
SelectContent,
|
||||
SelectItem,
|
||||
SelectTrigger,
|
||||
SelectValue,
|
||||
} from "@/components/ui/select";
|
||||
|
||||
/**
|
||||
* Dynamically switches between video playback and scrubbing preview player.
|
||||
@ -121,6 +134,11 @@ export default function DynamicVideoPlayer({
|
||||
const [isLoading, setIsLoading] = useState(false);
|
||||
const [isBuffering, setIsBuffering] = useState(false);
|
||||
const [loadingTimeout, setLoadingTimeout] = useState<NodeJS.Timeout>();
|
||||
const [playbackPreference, setPlaybackPreference] =
|
||||
useUserPersistence<RecordingPlaybackPreference>(
|
||||
`${camera}-recording-playback-v2`,
|
||||
"sub",
|
||||
);
|
||||
|
||||
// Don't set source until recordings load - we need accurate startPosition
|
||||
// to avoid hls.js clamping to video end when startPosition exceeds duration
|
||||
@ -190,10 +208,29 @@ export default function DynamicVideoPlayer({
|
||||
}),
|
||||
[timeRange],
|
||||
);
|
||||
const { data: recordings } = useSWR<Recording[]>(
|
||||
[`${camera}/recordings`, recordingParams],
|
||||
const { data: allRecordings } = useSWR<Recording[]>(
|
||||
[`${camera}/recordings`, { ...recordingParams, variant: "all" }],
|
||||
{ revalidateOnFocus: false },
|
||||
);
|
||||
const recordings = useMemo(() => {
|
||||
if (!allRecordings?.length) {
|
||||
return allRecordings;
|
||||
}
|
||||
|
||||
const mainRecordings = allRecordings.filter(
|
||||
(recording) => (recording.variant || "main") === "main",
|
||||
);
|
||||
|
||||
return mainRecordings.length > 0 ? mainRecordings : allRecordings;
|
||||
}, [allRecordings]);
|
||||
const codecNames = useMemo(
|
||||
() =>
|
||||
Array.from(
|
||||
new Set((allRecordings ?? []).map((recording) => recording.codec_name)),
|
||||
),
|
||||
[allRecordings],
|
||||
);
|
||||
const playbackCapabilities = usePlaybackCapabilities(codecNames);
|
||||
|
||||
useEffect(() => {
|
||||
if (!recordings?.length) {
|
||||
@ -219,13 +256,34 @@ export default function DynamicVideoPlayer({
|
||||
);
|
||||
}
|
||||
|
||||
const vodPath = `/vod/${camera}/start/${recordingParams.after}/end/${recordingParams.before}/master.m3u8`;
|
||||
const decision = chooseRecordingPlayback({
|
||||
apiHost,
|
||||
config,
|
||||
recordings: allRecordings ?? recordings,
|
||||
preference: playbackPreference ?? "sub",
|
||||
vodPath,
|
||||
capabilities: playbackCapabilities,
|
||||
});
|
||||
setSource({
|
||||
playlist: `${apiHost}vod/${camera}/start/${recordingParams.after}/end/${recordingParams.before}/master.m3u8`,
|
||||
playlist: decision.url,
|
||||
startPosition,
|
||||
});
|
||||
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, [recordings]);
|
||||
}, [
|
||||
apiHost,
|
||||
camera,
|
||||
recordingParams.after,
|
||||
recordingParams.before,
|
||||
allRecordings,
|
||||
recordings,
|
||||
startTimestamp,
|
||||
playbackPreference,
|
||||
playbackCapabilities,
|
||||
config?.transcode_proxy?.enabled,
|
||||
config?.transcode_proxy?.vod_proxy_url,
|
||||
]);
|
||||
|
||||
useEffect(() => {
|
||||
if (!controller || !recordings?.length) {
|
||||
@ -324,6 +382,26 @@ export default function DynamicVideoPlayer({
|
||||
transformedOverlay={transformedOverlay}
|
||||
/>
|
||||
)}
|
||||
{!isScrubbing && source && (
|
||||
<div className="absolute right-3 top-3 z-50">
|
||||
<Select
|
||||
value={playbackPreference ?? "sub"}
|
||||
onValueChange={(value) =>
|
||||
setPlaybackPreference(value as RecordingPlaybackPreference)
|
||||
}
|
||||
>
|
||||
<SelectTrigger className="h-8 w-32 bg-background/90 text-xs backdrop-blur">
|
||||
<SelectValue />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
<SelectItem value="auto">Auto</SelectItem>
|
||||
<SelectItem value="main">Main</SelectItem>
|
||||
<SelectItem value="sub">Sub</SelectItem>
|
||||
<SelectItem value="transcoded">Transcoded</SelectItem>
|
||||
</SelectContent>
|
||||
</Select>
|
||||
</div>
|
||||
)}
|
||||
<PreviewPlayer
|
||||
className={cn(
|
||||
className,
|
||||
|
||||
77
web/src/hooks/use-playback-capabilities.ts
Normal file
77
web/src/hooks/use-playback-capabilities.ts
Normal file
@ -0,0 +1,77 @@
|
||||
import { useMemo } from "react";
|
||||
import {
|
||||
getCodecMimeTypes,
|
||||
normalizeCodecName,
|
||||
PlaybackCapabilities,
|
||||
} from "@/utils/recordingPlayback";
|
||||
|
||||
type NavigatorConnection = {
|
||||
downlink?: number;
|
||||
effectiveType?: string;
|
||||
rtt?: number;
|
||||
saveData?: boolean;
|
||||
};
|
||||
|
||||
declare global {
|
||||
interface Navigator {
|
||||
connection?: NavigatorConnection;
|
||||
mozConnection?: NavigatorConnection;
|
||||
webkitConnection?: NavigatorConnection;
|
||||
}
|
||||
|
||||
interface Window {
|
||||
ManagedMediaSource?: typeof MediaSource;
|
||||
}
|
||||
}
|
||||
|
||||
function canPlayMimeType(mimeType?: string): boolean {
|
||||
if (!mimeType || typeof window === "undefined") {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (window.ManagedMediaSource?.isTypeSupported(mimeType)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (window.MediaSource?.isTypeSupported(mimeType)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
const video = document.createElement("video");
|
||||
return video.canPlayType(mimeType) !== "";
|
||||
}
|
||||
|
||||
function canPlayAnyMimeType(mimeTypes: string[]): boolean {
|
||||
return mimeTypes.some((mimeType) => canPlayMimeType(mimeType));
|
||||
}
|
||||
|
||||
export default function usePlaybackCapabilities(codecNames: Array<string | null | undefined>) {
|
||||
return useMemo<PlaybackCapabilities>(() => {
|
||||
if (typeof window === "undefined") {
|
||||
return { estimatedBandwidthBps: undefined, saveData: false, supports: {} };
|
||||
}
|
||||
|
||||
const connection =
|
||||
navigator.connection ?? navigator.mozConnection ?? navigator.webkitConnection;
|
||||
const supports: Record<string, boolean> = {};
|
||||
|
||||
codecNames.forEach((codecName) => {
|
||||
const normalized = normalizeCodecName(codecName);
|
||||
if (!normalized || normalized in supports) {
|
||||
return;
|
||||
}
|
||||
|
||||
supports[normalized] = canPlayAnyMimeType(getCodecMimeTypes(normalized));
|
||||
});
|
||||
|
||||
const downlinkMbps = connection?.downlink;
|
||||
return {
|
||||
estimatedBandwidthBps:
|
||||
typeof downlinkMbps === "number" && downlinkMbps > 0
|
||||
? downlinkMbps * 1_000_000
|
||||
: undefined,
|
||||
saveData: connection?.saveData === true,
|
||||
supports,
|
||||
};
|
||||
}, [codecNames]);
|
||||
}
|
||||
72
web/src/hooks/use-recording-playback-source.ts
Normal file
72
web/src/hooks/use-recording-playback-source.ts
Normal file
@ -0,0 +1,72 @@
|
||||
import { useApiHost } from "@/api";
|
||||
import useSWR from "swr";
|
||||
import { FrigateConfig } from "@/types/frigateConfig";
|
||||
import {
|
||||
Recording,
|
||||
RecordingPlaybackPreference,
|
||||
} from "@/types/record";
|
||||
import { useMemo } from "react";
|
||||
import { useUserPersistence } from "@/hooks/use-user-persistence";
|
||||
import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";
|
||||
import { chooseRecordingPlayback } from "@/utils/recordingPlayback";
|
||||
|
||||
type RecordingPlaybackSourceOptions = {
|
||||
camera: string;
|
||||
after: number;
|
||||
before: number;
|
||||
vodPath: string;
|
||||
preference?: RecordingPlaybackPreference;
|
||||
enabled?: boolean;
|
||||
};
|
||||
|
||||
export default function useRecordingPlaybackSource({
|
||||
camera,
|
||||
after,
|
||||
before,
|
||||
vodPath,
|
||||
preference,
|
||||
enabled = true,
|
||||
}: RecordingPlaybackSourceOptions) {
|
||||
const apiHost = useApiHost();
|
||||
const { data: config } = useSWR<FrigateConfig>("config");
|
||||
const [storedPreference] = useUserPersistence<RecordingPlaybackPreference>(
|
||||
`${camera}-recording-playback-v2`,
|
||||
"sub",
|
||||
);
|
||||
const { data: recordings } = useSWR<Recording[]>(
|
||||
enabled ? [`${camera}/recordings`, { after, before, variant: "all" }] : null,
|
||||
{ revalidateOnFocus: false },
|
||||
);
|
||||
|
||||
const codecNames = useMemo(
|
||||
() =>
|
||||
Array.from(
|
||||
new Set((recordings ?? []).map((recording) => recording.codec_name)),
|
||||
),
|
||||
[recordings],
|
||||
);
|
||||
const capabilities = usePlaybackCapabilities(codecNames);
|
||||
|
||||
return useMemo(() => {
|
||||
if (!recordings?.length) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
return chooseRecordingPlayback({
|
||||
apiHost,
|
||||
config,
|
||||
recordings,
|
||||
preference: preference ?? storedPreference ?? "sub",
|
||||
vodPath,
|
||||
capabilities,
|
||||
}).url;
|
||||
}, [
|
||||
apiHost,
|
||||
capabilities,
|
||||
config,
|
||||
preference,
|
||||
recordings,
|
||||
storedPreference,
|
||||
vodPath,
|
||||
]);
|
||||
}
|
||||
@ -540,6 +540,11 @@ export interface FrigateConfig {
|
||||
logout_url?: string;
|
||||
};
|
||||
|
||||
transcode_proxy?: {
|
||||
enabled: boolean;
|
||||
vod_proxy_url: string;
|
||||
};
|
||||
|
||||
record: {
|
||||
enabled: boolean;
|
||||
enabled_in_config: boolean | null;
|
||||
|
||||
@ -7,12 +7,17 @@ export type Recording = {
|
||||
start_time: number;
|
||||
end_time: number;
|
||||
path: string;
|
||||
variant?: string;
|
||||
segment_size: number;
|
||||
duration: number;
|
||||
motion: number;
|
||||
objects: number;
|
||||
motion_heatmap?: Record<string, number> | null;
|
||||
dBFS: number;
|
||||
codec_name?: string | null;
|
||||
width?: number | null;
|
||||
height?: number | null;
|
||||
bitrate?: number | null;
|
||||
};
|
||||
|
||||
export type RecordingSegment = {
|
||||
@ -44,6 +49,12 @@ export type RecordingStartingPoint = {
|
||||
|
||||
export type RecordingPlayerError = "stalled" | "startup";
|
||||
|
||||
export type RecordingPlaybackPreference =
|
||||
| "auto"
|
||||
| "main"
|
||||
| "sub"
|
||||
| "transcoded";
|
||||
|
||||
export const ASPECT_VERTICAL_LAYOUT = 1.5;
|
||||
export const ASPECT_PORTRAIT_LAYOUT = 1.333;
|
||||
export const ASPECT_WIDE_LAYOUT = 2;
|
||||
|
||||
44
web/src/utils/liveStreamSelection.ts
Normal file
44
web/src/utils/liveStreamSelection.ts
Normal file
@ -0,0 +1,44 @@
|
||||
const LOW_BANDWIDTH_PATTERN = /\b(sub|low|mobile|small|sd|lowres|low-res)\b/i;
|
||||
const HIGH_BANDWIDTH_PATTERN = /\b(main|high|hd|full|primary)\b/i;
|
||||
|
||||
function rankStreamLabel(label: string, preferLowBandwidth: boolean): number {
|
||||
if (preferLowBandwidth && LOW_BANDWIDTH_PATTERN.test(label)) {
|
||||
return 3;
|
||||
}
|
||||
|
||||
if (!preferLowBandwidth && HIGH_BANDWIDTH_PATTERN.test(label)) {
|
||||
return 3;
|
||||
}
|
||||
|
||||
if (preferLowBandwidth && HIGH_BANDWIDTH_PATTERN.test(label)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (!preferLowBandwidth && LOW_BANDWIDTH_PATTERN.test(label)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 2;
|
||||
}
|
||||
|
||||
export function chooseAutoLiveStream(
|
||||
streams: Record<string, string>,
|
||||
estimatedBandwidthBps?: number,
|
||||
saveData = false,
|
||||
): string {
|
||||
const entries = Object.entries(streams || {});
|
||||
if (entries.length === 0) {
|
||||
return "";
|
||||
}
|
||||
|
||||
const preferLowBandwidth =
|
||||
saveData || !!(estimatedBandwidthBps && estimatedBandwidthBps <= 3_000_000);
|
||||
|
||||
return [...entries]
|
||||
.sort(([leftLabel], [rightLabel]) => {
|
||||
return (
|
||||
rankStreamLabel(rightLabel, preferLowBandwidth) -
|
||||
rankStreamLabel(leftLabel, preferLowBandwidth)
|
||||
);
|
||||
})[0][1];
|
||||
}
|
||||
324
web/src/utils/recordingPlayback.ts
Normal file
324
web/src/utils/recordingPlayback.ts
Normal file
@ -0,0 +1,324 @@
|
||||
import { FrigateConfig } from "@/types/frigateConfig";
|
||||
import {
|
||||
Recording,
|
||||
RecordingPlaybackPreference,
|
||||
} from "@/types/record";
|
||||
|
||||
export type PlaybackCapabilities = {
|
||||
estimatedBandwidthBps?: number;
|
||||
saveData: boolean;
|
||||
supports: Record<string, boolean>;
|
||||
};
|
||||
|
||||
export type RecordingPlaybackDecision = {
|
||||
mode: "direct" | "transcoded";
|
||||
variant: string;
|
||||
url: string;
|
||||
reason: string;
|
||||
};
|
||||
|
||||
type DecisionOptions = {
|
||||
apiHost: string;
|
||||
config?: FrigateConfig;
|
||||
recordings: Recording[];
|
||||
preference: RecordingPlaybackPreference;
|
||||
vodPath: string;
|
||||
capabilities: PlaybackCapabilities;
|
||||
};
|
||||
|
||||
const CODEC_SAMPLES: Record<string, string[]> = {
|
||||
h264: ['video/mp4; codecs="avc1.42E01E"', 'video/mp4; codecs="avc1.64001F"'],
|
||||
avc1: ['video/mp4; codecs="avc1.42E01E"', 'video/mp4; codecs="avc1.64001F"'],
|
||||
hevc: [
|
||||
'video/mp4; codecs="hev1.1.6.L120.90"',
|
||||
'video/mp4; codecs="hvc1.1.6.L120.90"',
|
||||
'video/mp4; codecs="hev1.1.6.L93.B0"',
|
||||
'video/mp4; codecs="hvc1.1.6.L93.B0"',
|
||||
],
|
||||
h265: [
|
||||
'video/mp4; codecs="hev1.1.6.L120.90"',
|
||||
'video/mp4; codecs="hvc1.1.6.L120.90"',
|
||||
'video/mp4; codecs="hev1.1.6.L93.B0"',
|
||||
'video/mp4; codecs="hvc1.1.6.L93.B0"',
|
||||
],
|
||||
hev1: [
|
||||
'video/mp4; codecs="hev1.1.6.L120.90"',
|
||||
'video/mp4; codecs="hvc1.1.6.L120.90"',
|
||||
'video/mp4; codecs="hev1.1.6.L93.B0"',
|
||||
'video/mp4; codecs="hvc1.1.6.L93.B0"',
|
||||
],
|
||||
hvc1: [
|
||||
'video/mp4; codecs="hev1.1.6.L120.90"',
|
||||
'video/mp4; codecs="hvc1.1.6.L120.90"',
|
||||
'video/mp4; codecs="hev1.1.6.L93.B0"',
|
||||
'video/mp4; codecs="hvc1.1.6.L93.B0"',
|
||||
],
|
||||
av1: ['video/mp4; codecs="av01.0.05M.08"'],
|
||||
av01: ['video/mp4; codecs="av01.0.05M.08"'],
|
||||
vp9: ['video/mp4; codecs="vp09.00.10.08"'],
|
||||
vp09: ['video/mp4; codecs="vp09.00.10.08"'],
|
||||
};
|
||||
|
||||
function trimTrailingSlash(value: string): string {
|
||||
return value.replace(/\/$/, "");
|
||||
}
|
||||
|
||||
function appendQuery(url: string, params: Record<string, string | undefined>): string {
|
||||
const entries = Object.entries(params).filter(([, value]) => value);
|
||||
if (entries.length === 0) {
|
||||
return url;
|
||||
}
|
||||
|
||||
const search = new URLSearchParams(entries as [string, string][]);
|
||||
return `${url}${url.includes("?") ? "&" : "?"}${search.toString()}`;
|
||||
}
|
||||
|
||||
function average(values: number[]): number | undefined {
|
||||
if (!values.length) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
return values.reduce((sum, value) => sum + value, 0) / values.length;
|
||||
}
|
||||
|
||||
export function normalizeCodecName(codecName?: string | null): string | undefined {
|
||||
return codecName?.toLowerCase().trim() || undefined;
|
||||
}
|
||||
|
||||
export function getCodecMimeTypes(codecName?: string | null): string[] {
|
||||
const normalized = normalizeCodecName(codecName);
|
||||
if (!normalized) {
|
||||
return [];
|
||||
}
|
||||
|
||||
return CODEC_SAMPLES[normalized] ?? [];
|
||||
}
|
||||
|
||||
export function estimateRecordingBitrate(recordings: Recording[]): number | undefined {
|
||||
const explicit = recordings
|
||||
.map((recording) => recording.bitrate)
|
||||
.filter((value): value is number => typeof value === "number" && value > 0);
|
||||
|
||||
if (explicit.length > 0) {
|
||||
return average(explicit);
|
||||
}
|
||||
|
||||
const derived = recordings
|
||||
.map((recording) => {
|
||||
if (!recording.segment_size || !recording.duration) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
return (recording.segment_size * 1024 * 1024 * 8) / recording.duration;
|
||||
})
|
||||
.filter((value): value is number => typeof value === "number" && value > 0);
|
||||
|
||||
return average(derived);
|
||||
}
|
||||
|
||||
export function groupRecordingsByVariant(
|
||||
recordings: Recording[],
|
||||
): Record<string, Recording[]> {
|
||||
return recordings.reduce<Record<string, Recording[]>>((acc, recording) => {
|
||||
const variant = recording.variant || "main";
|
||||
if (!acc[variant]) {
|
||||
acc[variant] = [];
|
||||
}
|
||||
acc[variant].push(recording);
|
||||
return acc;
|
||||
}, {});
|
||||
}
|
||||
|
||||
function canDirectPlayVariant(
|
||||
capabilities: PlaybackCapabilities,
|
||||
recordings: Recording[],
|
||||
): boolean {
|
||||
const codecName = normalizeCodecName(recordings[0]?.codec_name);
|
||||
if (!codecName) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return capabilities.supports[codecName] === true;
|
||||
}
|
||||
|
||||
function getDirectBaseUrl(apiHost: string): string {
|
||||
return trimTrailingSlash(apiHost);
|
||||
}
|
||||
|
||||
function getTranscodeBaseUrl(apiHost: string, config?: FrigateConfig): string | undefined {
|
||||
if (!config?.transcode_proxy?.enabled) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
if (config.transcode_proxy.vod_proxy_url?.trim()) {
|
||||
return trimTrailingSlash(config.transcode_proxy.vod_proxy_url);
|
||||
}
|
||||
|
||||
return `${trimTrailingSlash(apiHost)}/vod-transcoded`;
|
||||
}
|
||||
|
||||
function getTranscodeProfile(estimatedBandwidthBps?: number, saveData = false) {
|
||||
if (saveData || (estimatedBandwidthBps && estimatedBandwidthBps <= 1_500_000)) {
|
||||
return { bitrate: "512k", maxWidth: "640", maxHeight: "360" };
|
||||
}
|
||||
|
||||
if (estimatedBandwidthBps && estimatedBandwidthBps <= 3_000_000) {
|
||||
return { bitrate: "1200k", maxWidth: "960", maxHeight: "540" };
|
||||
}
|
||||
|
||||
return { bitrate: "2500k", maxWidth: "1280", maxHeight: "720" };
|
||||
}
|
||||
|
||||
function buildDirectUrl(apiHost: string, vodPath: string, variant: string): string {
|
||||
const baseUrl = `${getDirectBaseUrl(apiHost)}${vodPath}`;
|
||||
return appendQuery(baseUrl, {
|
||||
variant: variant !== "main" ? variant : undefined,
|
||||
});
|
||||
}
|
||||
|
||||
function buildTranscodeUrl(
|
||||
apiHost: string,
|
||||
config: FrigateConfig | undefined,
|
||||
vodPath: string,
|
||||
variant: string,
|
||||
capabilities: PlaybackCapabilities,
|
||||
): string {
|
||||
const transcodeBase = getTranscodeBaseUrl(apiHost, config);
|
||||
if (!transcodeBase) {
|
||||
return buildDirectUrl(apiHost, vodPath, variant);
|
||||
}
|
||||
|
||||
const profile = getTranscodeProfile(
|
||||
capabilities.estimatedBandwidthBps,
|
||||
capabilities.saveData,
|
||||
);
|
||||
|
||||
return appendQuery(`${transcodeBase}${vodPath}`, {
|
||||
variant,
|
||||
bitrate: profile.bitrate,
|
||||
max_width: profile.maxWidth,
|
||||
max_height: profile.maxHeight,
|
||||
});
|
||||
}
|
||||
|
||||
export function chooseRecordingPlayback({
|
||||
apiHost,
|
||||
config,
|
||||
recordings,
|
||||
preference,
|
||||
vodPath,
|
||||
capabilities,
|
||||
}: DecisionOptions): RecordingPlaybackDecision {
|
||||
const recordingsByVariant = groupRecordingsByVariant(recordings);
|
||||
const mainRecordings = recordingsByVariant.main ?? [];
|
||||
const subRecordings = recordingsByVariant.sub ?? [];
|
||||
const transcodeAvailable = !!getTranscodeBaseUrl(apiHost, config);
|
||||
const estimatedBandwidthBps =
|
||||
capabilities.estimatedBandwidthBps ?? (capabilities.saveData ? 1_000_000 : 6_000_000);
|
||||
|
||||
const candidates: Record<
|
||||
"main" | "sub",
|
||||
{ recordings: Recording[]; playable: boolean; bitrate?: number }
|
||||
> = {
|
||||
main: {
|
||||
recordings: mainRecordings,
|
||||
playable: canDirectPlayVariant(capabilities, mainRecordings),
|
||||
bitrate: estimateRecordingBitrate(mainRecordings),
|
||||
},
|
||||
sub: {
|
||||
recordings: subRecordings,
|
||||
playable: canDirectPlayVariant(capabilities, subRecordings),
|
||||
bitrate: estimateRecordingBitrate(subRecordings),
|
||||
},
|
||||
};
|
||||
|
||||
const preferDirect = (variant: "main" | "sub") => {
|
||||
const candidate = candidates[variant];
|
||||
return (
|
||||
candidate.recordings.length > 0 &&
|
||||
candidate.playable &&
|
||||
(!candidate.bitrate || candidate.bitrate <= estimatedBandwidthBps * 0.85)
|
||||
);
|
||||
};
|
||||
|
||||
if (preference === "main" && candidates.main.recordings.length > 0) {
|
||||
return {
|
||||
mode: "direct",
|
||||
variant: "main",
|
||||
url: buildDirectUrl(apiHost, vodPath, "main"),
|
||||
reason: "manual-main",
|
||||
};
|
||||
}
|
||||
|
||||
if (preference === "sub" && candidates.sub.recordings.length > 0) {
|
||||
if (candidates.sub.playable) {
|
||||
return {
|
||||
mode: "direct",
|
||||
variant: "sub",
|
||||
url: buildDirectUrl(apiHost, vodPath, "sub"),
|
||||
reason: "manual-sub",
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
mode: "transcoded",
|
||||
variant: "sub",
|
||||
url: buildTranscodeUrl(apiHost, config, vodPath, "sub", capabilities),
|
||||
reason: "manual-sub-transcoded",
|
||||
};
|
||||
}
|
||||
|
||||
if (preference === "transcoded") {
|
||||
const targetVariant = candidates.sub.recordings.length > 0 ? "sub" : "main";
|
||||
if (!transcodeAvailable) {
|
||||
return {
|
||||
mode: "direct",
|
||||
variant: targetVariant,
|
||||
url: buildDirectUrl(apiHost, vodPath, targetVariant),
|
||||
reason: "manual-transcoded-unavailable",
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
mode: "transcoded",
|
||||
variant: targetVariant,
|
||||
url: buildTranscodeUrl(apiHost, config, vodPath, targetVariant, capabilities),
|
||||
reason: "manual-transcoded",
|
||||
};
|
||||
}
|
||||
|
||||
if (preferDirect("main")) {
|
||||
return {
|
||||
mode: "direct",
|
||||
variant: "main",
|
||||
url: buildDirectUrl(apiHost, vodPath, "main"),
|
||||
reason: "raw-main",
|
||||
};
|
||||
}
|
||||
|
||||
if (preferDirect("sub")) {
|
||||
return {
|
||||
mode: "direct",
|
||||
variant: "sub",
|
||||
url: buildDirectUrl(apiHost, vodPath, "sub"),
|
||||
reason: "raw-sub",
|
||||
};
|
||||
}
|
||||
|
||||
const transcodeVariant = candidates.sub.recordings.length > 0 ? "sub" : "main";
|
||||
if (!transcodeAvailable) {
|
||||
return {
|
||||
mode: "direct",
|
||||
variant: transcodeVariant,
|
||||
url: buildDirectUrl(apiHost, vodPath, transcodeVariant),
|
||||
reason: "direct-fallback",
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
mode: "transcoded",
|
||||
variant: transcodeVariant,
|
||||
url: buildTranscodeUrl(apiHost, config, vodPath, transcodeVariant, capabilities),
|
||||
reason: "transcode-fallback",
|
||||
};
|
||||
}
|
||||
@ -50,6 +50,8 @@ import { Toaster } from "@/components/ui/sonner";
|
||||
import LiveContextMenu from "@/components/menu/LiveContextMenu";
|
||||
import { useStreamingSettings } from "@/context/streaming-settings-provider";
|
||||
import { useTranslation } from "react-i18next";
|
||||
import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";
|
||||
import { chooseAutoLiveStream } from "@/utils/liveStreamSelection";
|
||||
|
||||
type DraggableGridLayoutProps = {
|
||||
cameras: CameraConfig[];
|
||||
@ -96,6 +98,7 @@ export default function DraggableGridLayout({
|
||||
streamMetadata,
|
||||
}: DraggableGridLayoutProps) {
|
||||
const { t } = useTranslation(["views/live"]);
|
||||
const playbackCapabilities = usePlaybackCapabilities([]);
|
||||
const { data: config } = useSWR<FrigateConfig>("config");
|
||||
const birdseyeConfig = useMemo(() => config?.birdseye, [config]);
|
||||
|
||||
@ -588,7 +591,11 @@ export default function DraggableGridLayout({
|
||||
grow = "aspect-video";
|
||||
}
|
||||
const availableStreams = camera.live.streams || {};
|
||||
const firstStreamEntry = Object.values(availableStreams)[0] || "";
|
||||
const firstStreamEntry = chooseAutoLiveStream(
|
||||
availableStreams,
|
||||
playbackCapabilities.estimatedBandwidthBps,
|
||||
playbackCapabilities.saveData,
|
||||
);
|
||||
|
||||
const streamNameFromSettings =
|
||||
currentGroupStreamingSettings?.[camera.name]?.streamName || "";
|
||||
|
||||
@ -122,6 +122,8 @@ import {
|
||||
SnapshotResult,
|
||||
} from "@/utils/snapshotUtil";
|
||||
import ActivityIndicator from "@/components/indicators/activity-indicator";
|
||||
import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";
|
||||
import { chooseAutoLiveStream } from "@/utils/liveStreamSelection";
|
||||
|
||||
type LiveCameraViewProps = {
|
||||
config?: FrigateConfig;
|
||||
@ -144,13 +146,23 @@ export default function LiveCameraView({
|
||||
const containerRef = useRef<HTMLDivElement>(null);
|
||||
const [{ width: windowWidth, height: windowHeight }] =
|
||||
useResizeObserver(window);
|
||||
const playbackCapabilities = usePlaybackCapabilities([]);
|
||||
const autoStreamName = useMemo(
|
||||
() =>
|
||||
chooseAutoLiveStream(
|
||||
camera.live.streams,
|
||||
playbackCapabilities.estimatedBandwidthBps,
|
||||
playbackCapabilities.saveData,
|
||||
),
|
||||
[camera.live.streams, playbackCapabilities],
|
||||
);
|
||||
|
||||
// supported features
|
||||
|
||||
const [streamName, setStreamName, streamNameLoaded] =
|
||||
useUserPersistence<string>(
|
||||
`${camera.name}-stream`,
|
||||
Object.values(camera.live.streams)[0],
|
||||
autoStreamName || Object.values(camera.live.streams)[0],
|
||||
);
|
||||
|
||||
const isRestreamed = useMemo(
|
||||
|
||||
@ -55,6 +55,8 @@ import { EmptyCard } from "@/components/card/EmptyCard";
|
||||
import { BsFillCameraVideoOffFill } from "react-icons/bs";
|
||||
import { AuthContext } from "@/context/auth-context";
|
||||
import { useIsAdmin } from "@/hooks/use-is-admin";
|
||||
import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";
|
||||
import { chooseAutoLiveStream } from "@/utils/liveStreamSelection";
|
||||
|
||||
type LiveDashboardViewProps = {
|
||||
cameras: CameraConfig[];
|
||||
@ -190,6 +192,7 @@ export default function LiveDashboardView({
|
||||
}, [visibilityListener]);
|
||||
|
||||
const [visibleCameras, setVisibleCameras] = useState<string[]>([]);
|
||||
const playbackCapabilities = usePlaybackCapabilities([]);
|
||||
const visibleCameraObserver = useRef<IntersectionObserver | null>(null);
|
||||
useEffect(() => {
|
||||
const visibleCameras = new Set<string>();
|
||||
@ -260,12 +263,16 @@ export default function LiveDashboardView({
|
||||
|
||||
const streamName = streamExists
|
||||
? streamNameFromSettings
|
||||
: Object.values(availableStreams)[0] || "";
|
||||
: chooseAutoLiveStream(
|
||||
availableStreams,
|
||||
playbackCapabilities.estimatedBandwidthBps,
|
||||
playbackCapabilities.saveData,
|
||||
);
|
||||
|
||||
streams[camera.name] = streamName;
|
||||
});
|
||||
return streams;
|
||||
}, [cameras, currentGroupStreamingSettings]);
|
||||
}, [cameras, currentGroupStreamingSettings, playbackCapabilities]);
|
||||
|
||||
const {
|
||||
preferredLiveModes,
|
||||
|
||||
Loading…
Reference in New Issue
Block a user