Added substream support, dynamic substream creation, and playback methods for recordings

This change adds first-class adaptive recording playback using main and sub recording variants. Frigate can now store multiple recording variants per camera, expose those variants through the recordings API, and serve variant-specific VOD playlists through routes such as /vod/variant/sub/....

The UI now combines the available recording variants with the browser's playback capabilities to choose an appropriate playback source, and exposes a user-selectable preference: Auto, Main, or Sub. This applies across timeline playback, export preview, and object detail playback.

The backend also includes a fallback path for sub playback: when a native sub recording is not available for a requested time range, Frigate can generate a lower-resolution sub recording from the main segment, store it under the standard sub variant, and mark it with transcoded_from_main.
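
For illustration, this is roughly the shape of the ffmpeg invocation the fallback uses (see the new frigate/record/subvariant.py below); the paths here are placeholders, and the real code picks NVENC or software encoders based on detected hardware:

import subprocess

main_path = "/media/frigate/recordings/2026-04-29/19/front_door/main/00.00.mp4"  # placeholder
sub_path = main_path.replace("/main/", "/sub/")

subprocess.run(
    [
        "ffmpeg", "-hide_banner", "-y",
        "-i", main_path,
        # cap the generated variant at 640x360 without upscaling or distortion
        "-vf", "scale='min(640,iw)':'min(360,ih)':force_original_aspect_ratio=decrease",
        # software H.264 profile used when no GPU encoder is detected
        "-c:v", "libx264", "-preset:v", "ultrafast", "-tune:v", "zerolatency",
        "-b:v", "350k", "-maxrate", "350k", "-bufsize", "700k",
        "-an",  # generated sub segments carry no audio
        sub_path,
    ],
    check=True,
)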

Additional changes include recording metadata for codec, resolution, bitrate, and variant; database migrations for recording variants and generated-sub tracking; tests for variant VOD selection and fallback behavior; improved storage graph sorting; and a small MQTT TLS guard so tls_insecure is only applied when TLS is configured.
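
A quick way to exercise the new playlist routes (a sketch: the base URL and timestamps are placeholders, and deployments behind a proxy may need an /api prefix or authentication):

import requests

FRIGATE = "http://frigate.local:5000"  # placeholder
camera, start_ts, end_ts = "front_door", 1714440000.0, 1714440060.0

# Existing route, still defaulting to the main variant:
main = requests.get(f"{FRIGATE}/vod/{camera}/start/{start_ts}/end/{end_ts}/master.m3u8")

# New variant route; for "sub" this falls back to transcoding from the main
# segment when no native sub recording covers the window:
sub = requests.get(
    f"{FRIGATE}/vod/variant/sub/{camera}/start/{start_ts}/end/{end_ts}/master.m3u8"
)
print(main.status_code, sub.status_code)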

Substream Configuration Examples
Record the main stream as the normal full-resolution recording and also record the camera substream as the sub variant:

cameras:
  front_door:
    ffmpeg:
      inputs:
        - path: rtsp://user:password@192.168.1.10:554/main
          roles:
            - record
          record_variant: main
        - path: rtsp://user:password@192.168.1.10:554/sub
          roles:
            - detect
            - record
          record_variant: sub
    detect:
      width: 640
      height: 360
      fps: 5
    record:
      enabled: true
Using go2rtc restreams:

go2rtc:
  streams:
    front_door:
      - rtsp://user:password@192.168.1.10:554/main
    front_door_sub:
      - rtsp://user:password@192.168.1.10:554/sub
cameras:
  front_door:
    ffmpeg:
      inputs:
        - path: rtsp://127.0.0.1:8554/front_door
          input_args: preset-rtsp-restream
          roles:
            - record
          record_variant: main
        - path: rtsp://127.0.0.1:8554/front_door_sub
          input_args: preset-rtsp-restream
          roles:
            - detect
            - record
          record_variant: sub
    detect:
      width: 640
      height: 360
      fps: 5
    record:
      enabled: true
If record_variant is omitted on a record input, it defaults to main. Each camera can only use a given recording variant once, so the main and sub recording inputs should use distinct variant names.
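
To check which variants actually exist for a camera over a time window, the recordings API accepts the variant=all filter used by this change's tests (a sketch; host, path prefix, and timestamps are placeholders):

import requests

FRIGATE = "http://frigate.local:5000"  # placeholder
resp = requests.get(
    f"{FRIGATE}/api/front_door/recordings",
    params={"after": 1714440000, "before": 1714443600, "variant": "all"},
)
for rec in resp.json():
    # transcoded_from_main flags sub segments generated from the main recording
    print(rec["variant"], rec["transcoded_from_main"], rec["path"])
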
Author: 3ricj
Date: 2026-04-29 19:05:59 -07:00
Parent: 5560af611a
Commit: e7684eddbf
23 changed files with 1421 additions and 454 deletions

.gitignore

@@ -16,6 +16,8 @@ models
 frigate/version.py
 web/build
 web/node_modules
+node_modules
+**/.vite
 web/coverage
 web/.env
 core

frigate/api/media.py

@@ -44,6 +44,7 @@ from frigate.const import (
 )
 from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
 from frigate.output.preview import get_most_recent_preview_frame
+from frigate.record.subvariant import ensure_subvariant_for_recording
 from frigate.track.object_processing import TrackedObjectProcessor
 from frigate.util.file import get_event_thumbnail_bytes
 from frigate.util.image import get_image_from_recording

@@ -53,6 +54,73 @@ logger = logging.getLogger(__name__)

 router = APIRouter(tags=[Tags.media])


+def _resolve_vod_variant(path_variant: str | None, variant: str) -> str:
+    return path_variant or variant
+
+
+async def _resolve_sub_vod_recordings(
+    config: FrigateConfig, recordings_query
+) -> list[Recordings]:
+    native_sub_recordings = list(
+        recordings_query.where(Recordings.variant == "sub")
+        .order_by(Recordings.start_time.asc())
+        .iterator()
+    )
+    main_recordings = list(
+        recordings_query.where(Recordings.variant == "main")
+        .order_by(Recordings.start_time.asc())
+        .iterator()
+    )
+
+    if not main_recordings:
+        return native_sub_recordings
+
+    def overlaps(left: Recordings, right: Recordings) -> bool:
+        return left.start_time < right.end_time and left.end_time > right.start_time
+
+    main_windows = {(recording.start_time, recording.end_time) for recording in main_recordings}
+    filtered_native_sub_recordings = []
+    for recording in native_sub_recordings:
+        has_exact_main_window = (recording.start_time, recording.end_time) in main_windows
+        has_overlapping_sub_neighbor = any(
+            other.path != recording.path and overlaps(recording, other)
+            for other in native_sub_recordings
+        )
+
+        # If a sub row exactly mirrors a main segment while another overlapping
+        # sub row already exists, prefer the native sub timeline and ignore the
+        # exact-match segment that was likely synthesized from main.
+        if has_exact_main_window and has_overlapping_sub_neighbor:
+            continue
+
+        filtered_native_sub_recordings.append(recording)
+
+    resolved_recordings = list(filtered_native_sub_recordings)
+    for main_recording in main_recordings:
+        if any(
+            overlaps(main_recording, sub_recording)
+            for sub_recording in filtered_native_sub_recordings
+        ):
+            continue
+
+        recording = await ensure_subvariant_for_recording(config, main_recording)
+        if recording is not None:
+            resolved_recordings.append(recording)
+
+    deduped_recordings = {}
+    for recording in resolved_recordings:
+        deduped_recordings[(recording.path, recording.start_time, recording.end_time)] = (
+            recording
+        )
+
+    return sorted(
+        deduped_recordings.values(),
+        key=lambda recording: (recording.start_time, recording.end_time, recording.path),
+    )
+
+
 @router.get("/{camera_name}", dependencies=[Depends(require_camera_access)])
 async def mjpeg_feed(
     request: Request,
@@ -526,40 +594,47 @@ async def recording_clip(
     )


+@router.get(
+    "/vod/variant/{path_variant}/{camera_name}/start/{start_ts}/end/{end_ts}",
+    dependencies=[Depends(require_camera_access)],
+    description="Returns an HLS playlist for the specified timestamp-range on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
+)
 @router.get(
     "/vod/{camera_name}/start/{start_ts}/end/{end_ts}",
     dependencies=[Depends(require_camera_access)],
     description="Returns an HLS playlist for the specified timestamp-range on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
 )
 async def vod_ts(
+    request: Request,
     camera_name: str,
     start_ts: float,
     end_ts: float,
     force_discontinuity: bool = False,
-    variant: str = "main",
+    path_variant: str | None = None,
+    variant: str = Query("main", description="Recording variant to use for playback."),
 ):
+    selected_variant = _resolve_vod_variant(path_variant, variant)
     logger.debug(
         "VOD: Generating VOD for %s from %s to %s with force_discontinuity=%s variant=%s",
         camera_name,
         start_ts,
         end_ts,
         force_discontinuity,
-        variant,
+        selected_variant,
     )
-    recordings = (
-        Recordings.select(
-            Recordings.path,
-            Recordings.duration,
-            Recordings.end_time,
-            Recordings.start_time,
-        )
-        .where(
-            Recordings.start_time.between(start_ts, end_ts)
-            | Recordings.end_time.between(start_ts, end_ts)
-            | ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time))
-        )
-        .where(Recordings.camera == camera_name)
-        .where(Recordings.variant == variant)
-        .order_by(Recordings.start_time.asc())
-        .iterator()
-    )
+    recordings_query = Recordings.select().where(
+        Recordings.start_time.between(start_ts, end_ts)
+        | Recordings.end_time.between(start_ts, end_ts)
+        | ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time))
+    ).where(Recordings.camera == camera_name)
+
+    if selected_variant == "sub":
+        recordings = await _resolve_sub_vod_recordings(
+            request.app.frigate_config, recordings_query
+        )
+    else:
+        recordings = (
+            recordings_query.where(Recordings.variant == selected_variant)
+            .order_by(Recordings.start_time.asc())
+            .iterator()
+        )

@@ -571,14 +646,6 @@ async def vod_ts
     recording: Recordings
     for recording in recordings:
-        logger.debug(
-            "VOD: processing recording: %s start=%s end=%s duration=%s",
-            recording.path,
-            recording.start_time,
-            recording.end_time,
-            recording.duration,
-        )
-
         clip = {"type": "source", "path": recording.path}
         duration = int(recording.duration * 1000)

@@ -587,11 +654,6 @@
             inpoint = int((start_ts - recording.start_time) * 1000)
             clip["clipFrom"] = inpoint
             duration -= inpoint
-            logger.debug(
-                "VOD: applied clipFrom %sms to %s",
-                inpoint,
-                recording.path,
-            )

         # adjust end if recording.end_time is after end_ts
         if recording.end_time > end_ts:

@@ -599,23 +661,12 @@
             if duration < min_duration_ms:
                 # skip if the clip has no valid duration (too short to contain frames)
-                logger.debug(
-                    "VOD: skipping recording %s - resulting duration %sms too short",
-                    recording.path,
-                    duration,
-                )
                 continue

             if min_duration_ms <= duration < max_duration_ms:
                 clip["keyFrameDurations"] = [duration]

             clips.append(clip)
             durations.append(duration)
-            logger.debug(
-                "VOD: added clip %s duration_ms=%s clipFrom=%s",
-                recording.path,
-                duration,
-                clip.get("clipFrom"),
-            )
         else:
             logger.warning(f"Recording clip is missing or empty: {recording.path}")
@@ -644,37 +695,57 @@
     )


+@router.get(
+    "/vod/variant/{path_variant}/{year_month}/{day}/{hour}/{camera_name}",
+    dependencies=[Depends(require_camera_access)],
+    description="Returns an HLS playlist for the specified date-time on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
+)
 @router.get(
     "/vod/{year_month}/{day}/{hour}/{camera_name}",
     dependencies=[Depends(require_camera_access)],
     description="Returns an HLS playlist for the specified date-time on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
 )
 async def vod_hour_no_timezone(
-    year_month: str, day: int, hour: int, camera_name: str, variant: str = "main"
+    request: Request,
+    year_month: str,
+    day: int,
+    hour: int,
+    camera_name: str,
+    path_variant: str | None = None,
+    variant: str = Query("main", description="Recording variant to use for playback."),
 ):
     """VOD for specific hour. Uses the default timezone (UTC)."""
     return await vod_hour(
+        request,
         year_month,
         day,
         hour,
         camera_name,
         get_localzone_name().replace("/", ","),
+        path_variant,
         variant,
     )


+@router.get(
+    "/vod/variant/{path_variant}/{year_month}/{day}/{hour}/{camera_name}/{tz_name}",
+    dependencies=[Depends(require_camera_access)],
+    description="Returns an HLS playlist for the specified date-time (with timezone) on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
+)
 @router.get(
     "/vod/{year_month}/{day}/{hour}/{camera_name}/{tz_name}",
     dependencies=[Depends(require_camera_access)],
     description="Returns an HLS playlist for the specified date-time (with timezone) on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
 )
 async def vod_hour(
+    request: Request,
     year_month: str,
     day: int,
     hour: int,
     camera_name: str,
     tz_name: str,
-    variant: str = "main",
+    path_variant: str | None = None,
+    variant: str = Query("main", description="Recording variant to use for playback."),
 ):
     parts = year_month.split("-")
     start_date = (

@@ -685,9 +756,21 @@ async def vod_hour(
     start_ts = start_date.timestamp()
     end_ts = end_date.timestamp()

-    return await vod_ts(camera_name, start_ts, end_ts, variant=variant)
+    return await vod_ts(
+        request,
+        camera_name,
+        start_ts,
+        end_ts,
+        path_variant=path_variant,
+        variant=variant,
+    )


+@router.get(
+    "/vod/variant/{path_variant}/event/{event_id}",
+    dependencies=[Depends(allow_any_authenticated())],
+    description="Returns an HLS playlist for the specified object. Append /master.m3u8 or /index.m3u8 for HLS playback.",
+)
 @router.get(
     "/vod/event/{event_id}",
     dependencies=[Depends(allow_any_authenticated())],

@@ -697,6 +780,7 @@ async def vod_event(
     request: Request,
     event_id: str,
     padding: int = Query(0, description="Padding to apply to the vod."),
+    path_variant: str | None = None,
     variant: str = Query("main", description="Recording variant to use for playback."),
 ):
     try:

@@ -719,7 +803,12 @@ async def vod_event(
         else (event.end_time + padding)
     )
     vod_response = await vod_ts(
-        event.camera, event.start_time - padding, end_ts, variant=variant
+        request,
+        event.camera,
+        event.start_time - padding,
+        end_ts,
+        path_variant=path_variant,
+        variant=variant,
     )

     # If the recordings are not found and the event started more than 5 minutes ago, set has_clip to false

@@ -734,19 +823,32 @@ async def vod_event(
     return vod_response


+@router.get(
+    "/vod/variant/{path_variant}/clip/{camera_name}/start/{start_ts}/end/{end_ts}",
+    dependencies=[Depends(require_camera_access)],
+    description="Returns an HLS playlist for a timestamp range with HLS discontinuity enabled. Append /master.m3u8 or /index.m3u8 for HLS playback.",
+)
 @router.get(
     "/vod/clip/{camera_name}/start/{start_ts}/end/{end_ts}",
     dependencies=[Depends(require_camera_access)],
     description="Returns an HLS playlist for a timestamp range with HLS discontinuity enabled. Append /master.m3u8 or /index.m3u8 for HLS playback.",
 )
 async def vod_clip(
+    request: Request,
     camera_name: str,
     start_ts: float,
     end_ts: float,
+    path_variant: str | None = None,
     variant: str = Query("main", description="Recording variant to use for playback."),
 ):
     return await vod_ts(
-        camera_name, start_ts, end_ts, force_discontinuity=True, variant=variant
+        request,
+        camera_name,
+        start_ts,
+        end_ts,
+        force_discontinuity=True,
+        path_variant=path_variant,
+        variant=variant,
     )

View File

@@ -240,6 +240,7 @@ async def recordings(
         Recordings.end_time,
         Recordings.path,
         Recordings.variant,
+        Recordings.transcoded_from_main,
         Recordings.segment_size,
         Recordings.motion,
         Recordings.objects,

frigate/comms/mqtt.py

@@ -297,7 +297,9 @@ class MqttClient(Communicator):
             f"{self.mqtt_config.topic_prefix}/restart", self.on_mqtt_command
         )

-        if self.mqtt_config.tls_ca_certs is not None:
+        tls_configured = self.mqtt_config.tls_ca_certs is not None
+
+        if tls_configured:
             if (
                 self.mqtt_config.tls_client_cert is not None
                 and self.mqtt_config.tls_client_key is not None

@@ -309,7 +311,7 @@ class MqttClient(Communicator):
                 )
             else:
                 self.client.tls_set(self.mqtt_config.tls_ca_certs)
-        if self.mqtt_config.tls_insecure is not None:
+        if self.mqtt_config.tls_insecure is not None and tls_configured:
             self.client.tls_insecure_set(self.mqtt_config.tls_insecure)
         if self.mqtt_config.user is not None:
             self.client.username_pw_set(

frigate/models.py

@@ -71,6 +71,7 @@ class Recordings(Model):
     camera = CharField(index=True, max_length=20)
     path = CharField(unique=True)
     variant = CharField(default="main", index=True, max_length=20)
+    transcoded_from_main = BooleanField(default=False)
     start_time = DateTimeField()
     end_time = DateTimeField()
     duration = FloatField()

frigate/record/maintainer.py

@@ -670,6 +670,7 @@ class RecordingMaintainer(threading.Thread):
                 Recordings.camera.name: camera,
                 Recordings.path.name: file_path,
                 Recordings.variant.name: variant,
+                Recordings.transcoded_from_main.name: False,
                 Recordings.start_time.name: start_time.timestamp(),
                 Recordings.end_time.name: end_time.timestamp(),
                 Recordings.duration.name: duration,

frigate/record/subvariant.py (new file)

@@ -0,0 +1,303 @@
import asyncio
import logging
import os
from typing import Optional

from peewee import DoesNotExist

from frigate.config import FrigateConfig
from frigate.const import RECORD_DIR, FFMPEG_HWACCEL_NVIDIA
from frigate.models import Recordings
from frigate.util.services import get_video_properties, auto_detect_hwaccel

logger = logging.getLogger(__name__)

_subvariant_locks: dict[str, asyncio.Lock] = {}

SUB_VARIANT = "sub"


def _get_lock(key: str) -> asyncio.Lock:
    lock = _subvariant_locks.get(key)

    if lock is None:
        lock = asyncio.Lock()
        _subvariant_locks[key] = lock

    return lock


def _sub_path_for_main_path(main_path: str) -> str:
    # main: /media/frigate/recordings/YYYY-MM-DD/HH/camera/main/MM.SS.mp4
    # generated sub fallback: /media/frigate/recordings/YYYY-MM-DD/HH/camera/sub/MM.SS.mp4
    parts = main_path.split(os.sep)

    try:
        idx = parts.index("main")
    except ValueError:
        # Fallback: just mirror under /sub/ next to main file
        directory, filename = os.path.split(main_path)
        return os.path.join(directory + "_sub", filename)

    parts[idx] = SUB_VARIANT
    return os.sep.join(parts)


def _camera_name_for_recording(main_recording: Recordings) -> Optional[str]:
    if main_recording.camera:
        return main_recording.camera

    parts = main_recording.path.split(os.sep)

    try:
        idx = parts.index("main")
    except ValueError:
        return None

    if idx > 0:
        return parts[idx - 1]

    return None


def _codec_matches_family(codec_name: Optional[str], desired_family: str) -> bool:
    normalized = _normalize_codec_family(codec_name)
    return bool(normalized and normalized == desired_family)


def _normalize_codec_family(codec_name: Optional[str]) -> Optional[str]:
    if not codec_name:
        return None

    normalized = codec_name.lower().strip()

    if normalized in ("h264", "avc1"):
        return "h264"

    if normalized in ("h265", "hevc", "hev1", "hvc1"):
        return "hevc"

    return normalized


async def _existing_subvariant_matches(
    config: FrigateConfig, path: str, desired_family: str, codec_name: Optional[str]
) -> bool:
    if not os.path.exists(path):
        return False

    if _codec_matches_family(codec_name, desired_family):
        actual_codec = codec_name
    else:
        media_info = await get_video_properties(config.ffmpeg, path)
        actual_codec = media_info.get("codec_name")

    return _codec_matches_family(actual_codec, desired_family)


def _select_hw_profile(config: FrigateConfig, desired_codec_family: str) -> list[str]:
    """Return ffmpeg args that generate a standard `sub` fallback recording."""
    # Target bitrate: ~35% of original when known, otherwise a safe default.
    target_bitrate = "350k"

    # Try to detect decode hwaccel that implies GPU type.
    detected = auto_detect_hwaccel()

    if desired_codec_family == "hevc":
        if detected == FFMPEG_HWACCEL_NVIDIA:
            return [
                "-c:v",
                "hevc_nvenc",
                "-b:v",
                target_bitrate,
                "-maxrate",
                target_bitrate,
                "-bufsize",
                "700k",
            ]

        return [
            "-c:v",
            "libx265",
            "-preset",
            "ultrafast",
            "-x265-params",
            "log-level=error",
            "-b:v",
            target_bitrate,
            "-maxrate",
            target_bitrate,
            "-bufsize",
            "700k",
        ]

    if detected == FFMPEG_HWACCEL_NVIDIA:
        return [
            "-c:v",
            "h264_nvenc",
            "-b:v",
            target_bitrate,
            "-maxrate",
            target_bitrate,
            "-bufsize",
            "700k",
        ]

    return [
        "-c:v",
        "libx264",
        "-preset:v",
        "ultrafast",
        "-tune:v",
        "zerolatency",
        "-b:v",
        target_bitrate,
        "-maxrate",
        target_bitrate,
        "-bufsize",
        "700k",
    ]


async def ensure_subvariant_for_recording(
    config: FrigateConfig,
    main_recording: Recordings,
    target_codec_family: Optional[str] = None,
) -> Optional[Recordings]:
    """Ensure a standard `sub` file and Recordings row exist for a main recording.

    Returns the `sub` Recordings row or None on failure.
    """
    if main_recording.variant == SUB_VARIANT and os.path.exists(main_recording.path):
        return main_recording

    camera_name = _camera_name_for_recording(main_recording)

    if not camera_name:
        logger.error("Unable to determine camera for recording %s", main_recording.path)
        return None

    desired_codec_family = (
        target_codec_family
        or _normalize_codec_family(main_recording.codec_name)
        or "h264"
    )

    sub_path = _sub_path_for_main_path(main_recording.path)

    # If a DB row already exists and the file is present, return it immediately.
    try:
        existing = Recordings.get(
            (Recordings.camera == camera_name)
            & (Recordings.variant == SUB_VARIANT)
            & (Recordings.start_time == main_recording.start_time)
        )

        if await _existing_subvariant_matches(
            config, existing.path, desired_codec_family, existing.codec_name
        ):
            return existing
    except DoesNotExist:
        existing = None

    lock_key = f"{camera_name}:{main_recording.start_time}:sub"
    lock = _get_lock(lock_key)

    async with lock:
        # Double-check inside the lock.
        try:
            existing = Recordings.get(
                (Recordings.camera == camera_name)
                & (Recordings.variant == SUB_VARIANT)
                & (Recordings.start_time == main_recording.start_time)
            )

            if await _existing_subvariant_matches(
                config, existing.path, desired_codec_family, existing.codec_name
            ):
                return existing
        except DoesNotExist:
            existing = None

        if existing and existing.path:
            sub_path = existing.path

        # Ensure directory exists.
        sub_dir = os.path.dirname(sub_path)
        os.makedirs(sub_dir, exist_ok=True)

        # Decide encoder profile.
        extra_args = _select_hw_profile(config, desired_codec_family)

        ffmpeg_bin = config.ffmpeg.ffmpeg_path

        cmd = [
            ffmpeg_bin,
            "-hide_banner",
            "-y",
            "-i",
            main_recording.path,
            "-vf",
            "scale='min(640,iw)':'min(360,ih)':force_original_aspect_ratio=decrease",
        ] + extra_args + [
            "-an",
            sub_path,
        ]

        logger.info(
            "Generating sub fallback for %s at %s -> %s",
            camera_name,
            main_recording.path,
            sub_path,
        )

        proc = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.DEVNULL,
            stderr=asyncio.subprocess.PIPE,
        )
        _, stderr = await proc.communicate()

        if proc.returncode != 0:
            logger.error(
                "Sub fallback generation failed for %s: %s",
                main_recording.path,
                stderr.decode(errors="ignore"),
            )
            return None

        # Probe the new file for metadata and size.
        media_info = await get_video_properties(config.ffmpeg, sub_path, get_duration=True)

        try:
            segment_size_mb = round(float(os.path.getsize(sub_path)) / (1024 * 1024), 2)
        except OSError:
            segment_size_mb = 0.0

        record_id = (
            existing.id
            if existing is not None
            else f"{camera_name}-{main_recording.start_time}-{SUB_VARIANT}"
        )

        # Upsert a Recordings row for the standard sub fallback.
        data = {
            Recordings.id.name: record_id,
            Recordings.camera.name: camera_name,
            Recordings.path.name: sub_path,
            Recordings.variant.name: SUB_VARIANT,
            Recordings.transcoded_from_main.name: True,
            Recordings.start_time.name: main_recording.start_time,
            Recordings.end_time.name: main_recording.end_time,
            Recordings.duration.name: main_recording.duration,
            Recordings.motion.name: main_recording.motion,
            Recordings.objects.name: main_recording.objects,
            Recordings.regions.name: main_recording.regions,
            Recordings.dBFS.name: main_recording.dBFS,
            Recordings.segment_size.name: segment_size_mb,
            Recordings.codec_name.name: media_info.get("codec_name"),
            Recordings.width.name: media_info.get("width"),
            Recordings.height.name: media_info.get("height"),
            Recordings.bitrate.name: (
                int((segment_size_mb * (1024 ** 2) * 8) / main_recording.duration)
                if main_recording.duration and segment_size_mb > 0
                else None
            ),
            Recordings.motion_heatmap.name: main_recording.motion_heatmap,
        }

        Recordings.insert(data).on_conflict_replace().execute()

        return Recordings.get(Recordings.id == data[Recordings.id.name])
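
For reference, a hedged usage sketch of the new helper: given a main-variant Recordings row, it returns (creating if needed) the matching sub row. It assumes an async context and an already-loaded FrigateConfig, whose construction differs by entrypoint:

import asyncio

from frigate.models import Recordings
from frigate.record.subvariant import ensure_subvariant_for_recording


async def demo(config) -> None:  # config: an already-loaded FrigateConfig
    main_row = Recordings.get(
        (Recordings.camera == "front_door") & (Recordings.variant == "main")
    )
    sub_row = await ensure_subvariant_for_recording(config, main_row)
    if sub_row is not None:
        print(sub_row.path, sub_row.transcoded_from_main)

# asyncio.run(demo(config)) once a FrigateConfig instance is available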

View File

@ -1,6 +1,7 @@
"""Unit tests for recordings/media API endpoints.""" """Unit tests for recordings/media API endpoints."""
from datetime import datetime, timezone from datetime import datetime, timezone
from unittest.mock import AsyncMock, patch
import pytz import pytz
from fastapi import Request from fastapi import Request
@ -88,14 +89,309 @@ class TestHttpMedia(BaseTestHttp):
default_recordings = default_response.json() default_recordings = default_response.json()
assert len(default_recordings) == 1 assert len(default_recordings) == 1
assert default_recordings[0]["variant"] == "main" assert default_recordings[0]["variant"] == "main"
assert default_recordings[0]["transcoded_from_main"] is False
all_response = client.get( all_response = client.get(
"/front_door/recordings", "/front_door/recordings",
params={"after": start_ts, "before": end_ts, "variant": "all"}, params={"after": start_ts, "before": end_ts, "variant": "all"},
) )
assert all_response.status_code == 200 assert all_response.status_code == 200
variants = {recording["variant"] for recording in all_response.json()} all_recordings = all_response.json()
variants = {recording["variant"] for recording in all_recordings}
assert variants == {"main", "sub"} assert variants == {"main", "sub"}
assert all(recording["transcoded_from_main"] is False for recording in all_recordings)
def test_camera_recordings_exposes_transcoded_from_main(self):
start_ts = datetime(2024, 3, 9, 12, 0, 0, tzinfo=timezone.utc).timestamp()
end_ts = start_ts + 10
with AuthTestClient(self.app) as client:
Recordings.insert(
id="generated_sub_recording",
path="/media/recordings/front/generated-sub.mp4",
camera="front_door",
variant="sub",
transcoded_from_main=True,
start_time=start_ts,
end_time=end_ts,
duration=10,
motion=100,
objects=5,
codec_name="hevc",
width=640,
height=360,
).execute()
response = client.get(
"/front_door/recordings",
params={"after": start_ts, "before": end_ts, "variant": "all"},
)
assert response.status_code == 200
recordings = response.json()
assert len(recordings) == 1
assert recordings[0]["variant"] == "sub"
assert recordings[0]["transcoded_from_main"] is True
def test_vod_variant_path_uses_requested_variant(self):
start_ts = datetime(2024, 3, 9, 12, 0, 0, tzinfo=timezone.utc).timestamp()
end_ts = start_ts + 10
with AuthTestClient(self.app) as client:
Recordings.insert(
id="vod_recording_main",
path="/media/recordings/front_door/main.mp4",
camera="front_door",
variant="main",
start_time=start_ts,
end_time=end_ts,
duration=10,
motion=100,
objects=5,
).execute()
Recordings.insert(
id="vod_recording_sub",
path="/media/recordings/front_door/sub.mp4",
camera="front_door",
variant="sub",
start_time=start_ts,
end_time=end_ts,
duration=10,
motion=100,
objects=5,
).execute()
response = client.get(
f"/vod/variant/sub/front_door/start/{start_ts}/end/{end_ts}"
)
assert response.status_code == 200
clips = response.json()["sequences"][0]["clips"]
assert [clip["path"] for clip in clips] == [
"/media/recordings/front_door/sub.mp4"
]
def test_vod_variant_path_uses_overlapping_native_sub_without_generation(self):
main_start_ts = datetime(
2024, 3, 9, 12, 0, 9, tzinfo=timezone.utc
).timestamp()
main_end_ts = main_start_ts + 9
native_sub_start_ts = main_start_ts - 1
native_sub_end_ts = main_end_ts - 1
with AuthTestClient(self.app) as client:
Recordings.insert(
id="vod_recording_main_offset",
path="/media/recordings/front_door/main-offset.mp4",
camera="front_door",
variant="main",
start_time=main_start_ts,
end_time=main_end_ts,
duration=9,
motion=100,
objects=5,
codec_name="hevc",
width=1920,
height=1080,
).execute()
Recordings.insert(
id="vod_recording_sub_offset",
path="/media/recordings/front_door/sub-offset.mp4",
camera="front_door",
variant="sub",
start_time=native_sub_start_ts,
end_time=native_sub_end_ts,
duration=9,
motion=100,
objects=5,
codec_name="hevc",
width=640,
height=480,
).execute()
with patch(
"frigate.api.media.ensure_subvariant_for_recording",
new=AsyncMock(),
) as ensure_subvariant:
response = client.get(
f"/vod/variant/sub/front_door/start/{main_start_ts}/end/{main_end_ts}"
)
assert response.status_code == 200
clips = response.json()["sequences"][0]["clips"]
assert [clip["path"] for clip in clips] == [
"/media/recordings/front_door/sub-offset.mp4"
]
ensure_subvariant.assert_not_awaited()
def test_vod_variant_path_generates_standard_sub_when_missing(self):
start_ts = datetime(2024, 3, 9, 12, 0, 0, tzinfo=timezone.utc).timestamp()
end_ts = start_ts + 10
generated_sub = Recordings(
id="generated_standard_sub",
path="/media/recordings/front_door/sub.mp4",
camera="front_door",
variant="sub",
start_time=start_ts,
end_time=end_ts,
duration=10,
motion=100,
objects=5,
codec_name="h264",
)
with AuthTestClient(self.app) as client:
Recordings.insert(
id="vod_recording_main_missing_sub",
path="/media/recordings/front_door/main.mp4",
camera="front_door",
variant="main",
start_time=start_ts,
end_time=end_ts,
duration=10,
motion=100,
objects=5,
codec_name="h264",
).execute()
with patch(
"frigate.api.media.ensure_subvariant_for_recording",
new=AsyncMock(return_value=generated_sub),
) as ensure_subvariant:
response = client.get(
f"/vod/variant/sub/front_door/start/{start_ts}/end/{end_ts}"
)
assert response.status_code == 200
clips = response.json()["sequences"][0]["clips"]
assert [clip["path"] for clip in clips] == [
"/media/recordings/front_door/sub.mp4"
]
ensure_subvariant.assert_awaited_once()
def test_vod_variant_path_filters_exact_match_generated_sub_when_native_overlap_exists(self):
main_start_ts = datetime(
2024, 3, 9, 12, 0, 9, tzinfo=timezone.utc
).timestamp()
main_end_ts = main_start_ts + 9
native_sub_start_ts = main_start_ts - 1
native_sub_end_ts = main_end_ts - 1
with AuthTestClient(self.app) as client:
Recordings.insert(
id="vod_recording_main_generated_conflict",
path="/media/recordings/front_door/main-generated-conflict.mp4",
camera="front_door",
variant="main",
start_time=main_start_ts,
end_time=main_end_ts,
duration=9,
motion=100,
objects=5,
codec_name="hevc",
width=1920,
height=1080,
).execute()
Recordings.insert(
id="vod_recording_sub_native_overlap",
path="/media/recordings/front_door/sub-native-overlap.mp4",
camera="front_door",
variant="sub",
start_time=native_sub_start_ts,
end_time=native_sub_end_ts,
duration=9,
motion=100,
objects=5,
codec_name="hevc",
width=640,
height=480,
).execute()
Recordings.insert(
id="vod_recording_sub_generated_like",
path="/media/recordings/front_door/sub-generated-like.mp4",
camera="front_door",
variant="sub",
start_time=main_start_ts,
end_time=main_end_ts,
duration=9,
motion=100,
objects=5,
codec_name="hevc",
width=640,
height=360,
).execute()
with patch(
"frigate.api.media.ensure_subvariant_for_recording",
new=AsyncMock(),
) as ensure_subvariant:
response = client.get(
f"/vod/variant/sub/front_door/start/{main_start_ts}/end/{main_end_ts}"
)
assert response.status_code == 200
clips = response.json()["sequences"][0]["clips"]
assert [clip["path"] for clip in clips] == [
"/media/recordings/front_door/sub-native-overlap.mp4"
]
ensure_subvariant.assert_not_awaited()
def test_vod_variant_path_ignores_legacy_sub_h264_rows(self):
start_ts = datetime(2024, 3, 9, 12, 0, 0, tzinfo=timezone.utc).timestamp()
end_ts = start_ts + 10
generated_sub = Recordings(
id="standard_sub_fallback",
path="/media/recordings/front_door/sub.mp4",
camera="front_door",
variant="sub",
start_time=start_ts,
end_time=end_ts,
duration=10,
motion=100,
objects=5,
codec_name="h264",
)
with AuthTestClient(self.app) as client:
Recordings.insert(
id="vod_recording_main_with_legacy",
path="/media/recordings/front_door/main.mp4",
camera="front_door",
variant="main",
start_time=start_ts,
end_time=end_ts,
duration=10,
motion=100,
objects=5,
codec_name="h264",
).execute()
Recordings.insert(
id="legacy_sub_h264_row",
path="/media/recordings/front_door/sub_h264.mp4",
camera="front_door",
variant="sub_h264",
start_time=start_ts,
end_time=end_ts,
duration=10,
motion=100,
objects=5,
codec_name="h264",
).execute()
with patch(
"frigate.api.media.ensure_subvariant_for_recording",
new=AsyncMock(return_value=generated_sub),
) as ensure_subvariant:
response = client.get(
f"/vod/variant/sub/front_door/start/{start_ts}/end/{end_ts}"
)
assert response.status_code == 200
clips = response.json()["sequences"][0]["clips"]
assert [clip["path"] for clip in clips] == [
"/media/recordings/front_door/sub.mp4"
]
ensure_subvariant.assert_awaited_once()
def test_recordings_summary_across_dst_spring_forward(self): def test_recordings_summary_across_dst_spring_forward(self):
""" """

migrations/037_add_recordings_transcoded_from_main.py (new file)

@@ -0,0 +1,29 @@
"""Peewee migrations -- 037_add_recordings_transcoded_from_main.py."""

from peewee import OperationalError


def migrate(migrator, database, fake=False, **kwargs):
    try:
        database.execute_sql(
            """
            ALTER TABLE "recordings"
            ADD COLUMN "transcoded_from_main" INTEGER NOT NULL DEFAULT 0
            """
        )
    except OperationalError as exc:
        if "duplicate column name" not in str(exc).lower():
            raise

    database.execute_sql(
        """
        UPDATE recordings
        SET transcoded_from_main = 1
        WHERE variant = 'sub_h264'
           OR (variant = 'sub' AND id LIKE '%-sub')
        """
    )


def rollback(migrator, database, fake=False, **kwargs):
    pass

View File

@@ -2,6 +2,11 @@
   "noRecordingsFoundForThisTime": "No recordings found for this time",
   "noPreviewFound": "No Preview Found",
   "noPreviewFoundFor": "No Preview Found for {{cameraName}}",
+  "playbackPreference": {
+    "auto": "Auto",
+    "main": "Main",
+    "sub": "Sub"
+  },
   "submitFrigatePlus": {
     "title": "Submit this frame to Frigate+?",
     "submit": "Submit"

View File

@@ -148,6 +148,11 @@
     "storageUsed": "Storage",
     "percentageOfTotalUsed": "Percentage of Total",
     "bandwidth": "Bandwidth",
+    "sort": {
+      "camera": "Sort by camera",
+      "storage": "Sort by storage",
+      "bandwidth": "Sort by bandwidth"
+    },
     "unused": {
       "title": "Unused",
       "tips": "This value may not accurately represent the free space available to Frigate if you have other files stored on your drive beyond Frigate's recordings. Frigate does not track storage usage outside of its recordings."

View File

@@ -1,7 +1,8 @@
 import { useTheme } from "@/context/theme-provider";
 import { generateColors } from "@/utils/colorUtil";
-import { useCallback, useEffect, useMemo } from "react";
+import { useCallback, useEffect, useMemo, useState } from "react";
 import Chart from "react-apexcharts";
+import { ArrowDown, ArrowUp, ChevronsUpDown } from "lucide-react";
 import {
   Table,
   TableBody,

@@ -39,6 +40,24 @@ type CombinedStorageGraphProps = {
   cameraStorage: CameraStorage;
   totalStorage: TotalStorage;
 };

+type StorageSeries = {
+  name: string;
+  data: number[];
+  usage: number;
+  bandwidth: number;
+  color: string;
+};
+
+type SortKey = "camera" | "usage" | "bandwidth";
+type SortDirection = "asc" | "desc";
+
+const defaultSortDirections: Record<SortKey, SortDirection> = {
+  camera: "asc",
+  usage: "desc",
+  bandwidth: "desc",
+};
+
 export function CombinedStorageGraph({
   graphId,
   cameraStorage,
@@ -47,29 +66,96 @@ export function CombinedStorageGraph({
   const { t } = useTranslation(["views/system"]);
   const { theme, systemTheme } = useTheme();

+  const [sortConfig, setSortConfig] = useState<{
+    key: SortKey;
+    direction: SortDirection;
+  }>({
+    key: "camera",
+    direction: defaultSortDirections.camera,
+  });
+
-  const entities = Object.keys(cameraStorage);
+  const entities = useMemo(() => Object.keys(cameraStorage), [cameraStorage]);
+
+  const handleSort = useCallback((key: SortKey) => {
+    setSortConfig((currentSort) => {
+      if (currentSort.key == key) {
+        return {
+          key,
+          direction: currentSort.direction == "asc" ? "desc" : "asc",
+        };
+      }
+
+      return { key, direction: defaultSortDirections[key] };
+    });
+  }, []);
+
+  const getAriaSort = useCallback(
+    (key: SortKey) => {
+      if (sortConfig.key != key) {
+        return "none";
+      }
+
+      return sortConfig.direction == "asc" ? "ascending" : "descending";
+    },
+    [sortConfig],
+  );
+
+  const getSortIcon = useCallback(
+    (key: SortKey) => {
+      if (sortConfig.key != key) {
+        return <ChevronsUpDown className="size-3.5 opacity-50" />;
+      }
+
+      return sortConfig.direction == "asc" ? (
+        <ArrowUp className="size-3.5" />
+      ) : (
+        <ArrowDown className="size-3.5" />
+      );
+    },
+    [sortConfig],
+  );
+
+  const series = useMemo<StorageSeries[]>(() => {
     const colors = generateColors(entities.length);
-  const series = entities.map((entity, index) => ({
-    name: entity,
-    data: [(cameraStorage[entity].usage / totalStorage.total) * 100],
-    usage: cameraStorage[entity].usage,
-    bandwidth: cameraStorage[entity].bandwidth,
-    color: colors[index], // Assign the corresponding color
-  }));
-
-  // Add the unused percentage to the series
-  series.push({
+    const cameraSeries = entities.map((entity, index) => ({
+      name: entity,
+      data: [(cameraStorage[entity].usage / totalStorage.total) * 100],
+      usage: cameraStorage[entity].usage,
+      bandwidth: cameraStorage[entity].bandwidth,
+      color: colors[index],
+    }));
+
+    cameraSeries.sort((left, right) => {
+      let comparison = 0;
+
+      if (sortConfig.key == "camera") {
+        comparison = left.name
+          .replaceAll("_", " ")
+          .localeCompare(right.name.replaceAll("_", " "), undefined, {
+            numeric: true,
+            sensitivity: "base",
+          });
+      } else {
+        comparison = left[sortConfig.key] - right[sortConfig.key];
+      }
+
+      return sortConfig.direction == "asc" ? comparison : -comparison;
+    });
+
+    return [
+      ...cameraSeries,
+      {
         name: "Other",
         data: [
-      ((totalStorage.used - totalStorage.camera) / totalStorage.total) * 100,
+          ((totalStorage.used - totalStorage.camera) / totalStorage.total) *
+            100,
         ],
         usage: totalStorage.used - totalStorage.camera,
         bandwidth: 0,
         color: (systemTheme || theme) == "dark" ? "#606060" : "#D5D5D5",
-  });
-  series.push({
+      },
+      {
         name: "Unused",
         data: [
           ((totalStorage.total - totalStorage.used) / totalStorage.total) * 100,

@@ -77,7 +163,9 @@ export function CombinedStorageGraph({
         usage: totalStorage.total - totalStorage.used,
         bandwidth: 0,
         color: (systemTheme || theme) == "dark" ? "#404040" : "#E5E5E5",
-  });
+      },
+    ];
+  }, [cameraStorage, entities, sortConfig, systemTheme, theme, totalStorage]);

   const options = useMemo(() => {
     return {
@@ -185,6 +273,21 @@ export function CombinedStorageGraph({
     [t],
   );

+  const getSortHeader = useCallback(
+    (key: SortKey, label: string, ariaLabel: string) => (
+      <button
+        type="button"
+        className="flex items-center gap-1 text-left hover:text-primary focus:outline-none focus-visible:ring-2 focus-visible:ring-selected"
+        aria-label={ariaLabel}
+        onClick={() => handleSort(key)}
+      >
+        <span>{label}</span>
+        {getSortIcon(key)}
+      </button>
+    ),
+    [getSortIcon, handleSort],
+  );
+
   return (
     <div className="flex w-full flex-col gap-2.5">
       <div className="flex w-full items-center justify-between gap-1">

@@ -205,12 +308,30 @@ export function CombinedStorageGraph({
           <Table>
             <TableHeader>
               <TableRow>
-                <TableHead>{t("storage.cameraStorage.camera")}</TableHead>
-                <TableHead>{t("storage.cameraStorage.storageUsed")}</TableHead>
+                <TableHead aria-sort={getAriaSort("camera")}>
+                  {getSortHeader(
+                    "camera",
+                    t("storage.cameraStorage.camera"),
+                    t("storage.cameraStorage.sort.camera"),
+                  )}
+                </TableHead>
+                <TableHead aria-sort={getAriaSort("usage")}>
+                  {getSortHeader(
+                    "usage",
+                    t("storage.cameraStorage.storageUsed"),
+                    t("storage.cameraStorage.sort.storage"),
+                  )}
+                </TableHead>
                 <TableHead>
                   {t("storage.cameraStorage.percentageOfTotalUsed")}
                 </TableHead>
-                <TableHead>{t("storage.cameraStorage.bandwidth")}</TableHead>
+                <TableHead aria-sort={getAriaSort("bandwidth")}>
+                  {getSortHeader(
+                    "bandwidth",
+                    t("storage.cameraStorage.bandwidth"),
+                    t("storage.cameraStorage.sort.bandwidth"),
+                  )}
+                </TableHead>
               </TableRow>
             </TableHeader>
             <TableBody>

View File

@@ -29,13 +29,14 @@ import {
 import { isDesktop, isMobile } from "react-device-detect";
 import { Drawer, DrawerContent, DrawerTrigger } from "../ui/drawer";
 import SaveExportOverlay from "./SaveExportOverlay";
-import { baseUrl } from "@/api/baseUrl";
 import { cn } from "@/lib/utils";
 import { GenericVideoPlayer } from "../player/GenericVideoPlayer";
 import { useTranslation } from "react-i18next";
 import { ExportCase } from "@/types/export";
 import { CustomTimeSelector } from "./CustomTimeSelector";
 import useRecordingPlaybackSource from "@/hooks/use-recording-playback-source";
+import RecordingPlaybackPreferenceSelect from "../player/RecordingPlaybackPreferenceSelect";
+import ActivityIndicator from "../indicators/activity-indicator";

 const EXPORT_OPTIONS = [
   "1",

@@ -444,8 +445,6 @@ export function ExportPreviewDialog({
     return null;
   }

-  const source = playbackSource ?? `${baseUrl}${vodPath}`;
-
   return (
     <Dialog open={showPreview} onOpenChange={setShowPreview}>
       <DialogContent

@@ -462,7 +461,21 @@ export function ExportPreviewDialog({
             {t("export.fromTimeline.previewExport")}
           </DialogDescription>
         </DialogHeader>
-        <GenericVideoPlayer source={source} />
+        {playbackSource ? (
+          <GenericVideoPlayer source={playbackSource.url}>
+            <div className="absolute right-3 top-3 z-50">
+              <RecordingPlaybackPreferenceSelect
+                className="h-8 w-32 bg-background/90 text-xs backdrop-blur"
+                value={playbackSource.preference}
+                onValueChange={playbackSource.setPreference}
+              />
+            </div>
+          </GenericVideoPlayer>
+        ) : (
+          <div className="flex aspect-video items-center justify-center">
+            <ActivityIndicator />
+          </div>
+        )}
       </DialogContent>
     </Dialog>
   );

View File

@@ -55,7 +55,6 @@ import {
   TooltipContent,
   TooltipTrigger,
 } from "@/components/ui/tooltip";
-import { REVIEW_PADDING } from "@/types/review";
 import { capitalizeAll } from "@/utils/stringUtil";
 import useGlobalMutation from "@/hooks/use-global-mutate";
 import DetailActionsMenu from "./DetailActionsMenu";

@@ -68,7 +67,6 @@ import {
 import { TransformComponent, TransformWrapper } from "react-zoom-pan-pinch";
 import useImageLoaded from "@/hooks/use-image-loaded";
 import ImageLoadingIndicator from "@/components/indicators/ImageLoadingIndicator";
-import { GenericVideoPlayer } from "@/components/player/GenericVideoPlayer";
 import {
   Popover,
   PopoverContent,

@@ -80,7 +78,6 @@ import {
   DrawerTitle,
   DrawerTrigger,
 } from "@/components/ui/drawer";
-import useRecordingPlaybackSource from "@/hooks/use-recording-playback-source";
 import { LuInfo } from "react-icons/lu";
 import { TooltipPortal } from "@radix-ui/react-tooltip";
 import { FaPencilAlt } from "react-icons/fa";

@@ -1857,31 +1854,3 @@ export function ObjectSnapshotTab({
   );
 }
-
-type VideoTabProps = {
-  search: SearchResult;
-};
-
-export function VideoTab({ search }: VideoTabProps) {
-  const clipTimeRange = useMemo(() => {
-    const startTime = search.start_time - REVIEW_PADDING;
-    const endTime = (search.end_time ?? Date.now() / 1000) + REVIEW_PADDING;
-    return `start/${startTime}/end/${endTime}`;
-  }, [search]);
-
-  const startTime = search.start_time - REVIEW_PADDING;
-  const endTime = (search.end_time ?? Date.now() / 1000) + REVIEW_PADDING;
-  const vodPath = `/vod/${search.camera}/${clipTimeRange}/index.m3u8`;
-
-  const playbackSource = useRecordingPlaybackSource({
-    camera: search.camera,
-    after: startTime,
-    before: endTime,
-    vodPath,
-  });
-  const source = playbackSource ?? `${baseUrl}${vodPath}`;
-
-  return (
-    <>
-      <span tabIndex={0} className="sr-only" />
-      <GenericVideoPlayer source={source} />
-    </>
-  );
-}

View File

@@ -8,10 +8,9 @@ import { TrackingDetailsSequence } from "@/types/timeline";
 import { FrigateConfig } from "@/types/frigateConfig";
 import { formatUnixTimestampToDateTime } from "@/utils/dateUtil";
 import { getIconForLabel } from "@/utils/iconUtil";
-import { LuCircle, LuFolderX } from "react-icons/lu";
+import { LuCircle } from "react-icons/lu";
 import { cn } from "@/lib/utils";
 import HlsVideoPlayer from "@/components/player/HlsVideoPlayer";
-import { baseUrl } from "@/api/baseUrl";
 import { REVIEW_PADDING } from "@/types/review";
 import {
   ASPECT_PORTRAIT_LAYOUT,

@@ -35,13 +34,11 @@ import { HiDotsHorizontal } from "react-icons/hi";
 import axios from "axios";
 import { toast } from "sonner";
 import { useDetailStream } from "@/context/detail-stream-context";
-import { isDesktop, isIOS, isMobileOnly, isSafari } from "react-device-detect";
-import { useApiHost } from "@/api";
-import ImageLoadingIndicator from "@/components/indicators/ImageLoadingIndicator";
-import ObjectTrackOverlay from "../ObjectTrackOverlay";
+import { isDesktop, isMobileOnly } from "react-device-detect";
 import { useIsAdmin } from "@/hooks/use-is-admin";
 import { VideoResolutionType } from "@/types/live";
 import useRecordingPlaybackSource from "@/hooks/use-recording-playback-source";
+import RecordingPlaybackPreferenceSelect from "@/components/player/RecordingPlaybackPreferenceSelect";

 type TrackingDetailsProps = {
   className?: string;

@@ -59,19 +56,9 @@ export function TrackingDetails({
 }: TrackingDetailsProps) {
   const videoRef = useRef<HTMLVideoElement | null>(null);
   const { t } = useTranslation(["views/explore"]);
-  const apiHost = useApiHost();
-  const imgRef = useRef<HTMLImageElement | null>(null);
-  const [imgLoaded, setImgLoaded] = useState(false);
   const [isVideoLoading, setIsVideoLoading] = useState(true);
-  const [displaySource, _setDisplaySource] = useState<"video" | "image">(
-    "video",
-  );
   const { setSelectedObjectIds, annotationOffset } = useDetailStream();

-  // manualOverride holds a record-stream timestamp explicitly chosen by the
-  // user (eg, clicking a lifecycle row). When null we display `currentTime`.
-  const [manualOverride, setManualOverride] = useState<number | null>(null);
-
   // Capture the annotation offset used for building the video source URL.
   // This only updates when the event changes, NOT on every slider drag,
   // so the HLS player doesn't reload while the user is adjusting the offset.

@@ -251,13 +238,9 @@ export function TrackingDetails({
     });
   });

-  // Use manualOverride (set when seeking in image mode) if present so
-  // lifecycle rows and overlays follow image-mode seeks. Otherwise fall
-  // back to currentTime used for video mode.
   const effectiveTime = useMemo(() => {
-    const displayedRecordTime = manualOverride ?? currentTime;
-    return displayedRecordTime - annotationOffset / 1000;
-  }, [manualOverride, currentTime, annotationOffset]);
+    return currentTime - annotationOffset / 1000;
+  }, [currentTime, annotationOffset]);

   const containerRef = useRef<HTMLDivElement | null>(null);
   const { fullscreen, toggleFullscreen, supportsFullScreen } =

@@ -326,7 +309,7 @@ export function TrackingDetails({
   // On popover open: pause, pin first lifecycle item, and seek.
   useEffect(() => {
     if (isAnnotationSettingsOpen && !wasAnnotationOpenRef.current) {
-      if (videoRef.current && displaySource === "video") {
+      if (videoRef.current) {
         videoRef.current.pause();
       }
       if (eventSequence && eventSequence.length > 0) {

@@ -337,14 +320,14 @@ export function TrackingDetails({
       pinnedDetectTimestampRef.current = null;
     }
     wasAnnotationOpenRef.current = isAnnotationSettingsOpen;
-  }, [isAnnotationSettingsOpen, displaySource, eventSequence]);
+  }, [isAnnotationSettingsOpen, eventSequence]);

   // When the pinned timestamp or offset changes, re-seek the video and
   // explicitly update currentTime so the overlay shows the pinned event's box.
   useEffect(() => {
     const pinned = pinnedDetectTimestampRef.current;
     if (!isAnnotationSettingsOpen || pinned == null) return;
-    if (!videoRef.current || displaySource !== "video") return;
+    if (!videoRef.current) return;

     const targetTimeRecord = pinned + annotationOffset / 1000;
     const relativeTime = timestampToVideoTime(targetTimeRecord);

@@ -354,36 +337,21 @@ export function TrackingDetails({
     // resolves back to the pinned detect timestamp:
     // effectiveCurrentTime = targetTimeRecord - annotationOffset/1000 = pinned
     setCurrentTime(targetTimeRecord);
-  }, [
-    isAnnotationSettingsOpen,
-    annotationOffset,
-    displaySource,
-    timestampToVideoTime,
-  ]);
+  }, [isAnnotationSettingsOpen, annotationOffset, timestampToVideoTime]);

   const handleLifecycleClick = useCallback(
     (item: TrackingDetailsSequence) => {
-      if (!videoRef.current && !imgRef.current) return;
+      if (!videoRef.current) return;

       // Convert lifecycle timestamp (detect stream) to record stream time
       const targetTimeRecord = item.timestamp + annotationOffset / 1000;

-      if (displaySource === "image") {
-        // For image mode: set a manual override timestamp and update
-        // currentTime so overlays render correctly.
-        setManualOverride(targetTimeRecord);
-        setCurrentTime(targetTimeRecord);
-        return;
-      }
-
-      // For video mode: convert to video-relative time (accounting for motion-only gaps)
+      // Convert to video-relative time (accounting for motion-only gaps)
       const relativeTime = timestampToVideoTime(targetTimeRecord);
-      if (videoRef.current) {
-        videoRef.current.currentTime = relativeTime;
-      }
+      videoRef.current.currentTime = relativeTime;
     },
-    [annotationOffset, displaySource, timestampToVideoTime],
+    [annotationOffset, timestampToVideoTime],
   );

   const formattedStart = config

@@ -427,14 +395,6 @@ export function TrackingDetails({
   useEffect(() => {
     if (seekToTimestamp === null) return;

-    if (displaySource === "image") {
-      // For image mode, set the manual override so the snapshot updates to
-      // the exact record timestamp.
-      setManualOverride(seekToTimestamp);
-      setSeekToTimestamp(null);
-      return;
-    }
-
     // seekToTimestamp is a record stream timestamp
     // Convert to video position (accounting for motion-only recording gaps)
     if (!videoRef.current) return;

@@ -443,7 +403,7 @@ export function TrackingDetails({
       videoRef.current.currentTime = relativeTime;
     }
     setSeekToTimestamp(null);
-  }, [seekToTimestamp, displaySource, timestampToVideoTime]);
+  }, [seekToTimestamp, timestampToVideoTime]);

   const isWithinEventRange = useMemo(() => {
     if (effectiveTime === undefined || event.start_time === undefined) {

@@ -535,15 +495,22 @@ export function TrackingDetails({
     before: videoWindow.endTime,
     vodPath: videoWindow.vodPath,
   });

+  useEffect(() => {
+    if (playbackSource?.url) {
+      setIsVideoLoading(true);
+    }
+  }, [playbackSource?.url]);
+
   const videoSource = useMemo(() => {
-    const playlist = playbackSource ?? `${baseUrl}${videoWindow.vodPath}`;
+    if (!playbackSource) {
+      return undefined;
+    }
+
     return {
-      playlist,
+      playlist: playbackSource.url,
       startPosition: 0,
     };
     // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, [playbackSource, videoWindow]);
+  }, [playbackSource]);

   // Determine camera aspect ratio category
   const cameraAspect = useMemo(() => {

@@ -574,27 +541,6 @@ export function TrackingDetails({
     [videoTimeToTimestamp],
   );

-  const [src, setSrc] = useState(
-    `${apiHost}api/${event.camera}/recordings/${currentTime + REVIEW_PADDING}/snapshot.jpg?height=500`,
-  );
-  const [hasError, setHasError] = useState(false);
-
-  // Derive the record timestamp to display: manualOverride if present,
-  // otherwise use currentTime.
-  const displayedRecordTime = manualOverride ?? currentTime;
-
-  useEffect(() => {
-    if (displayedRecordTime) {
-      const newSrc = `${apiHost}api/${event.camera}/recordings/${displayedRecordTime}/snapshot.jpg?height=500`;
-      setSrc(newSrc);
-    }
-    setImgLoaded(false);
-    setHasError(false);
-    // we know that these deps are correct
-    // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, [displayedRecordTime]);
-
   const onUploadFrameToPlus = useCallback(() => {
     return axios.post(`/${event.camera}/plus/${currentTime}`);
   }, [event.camera, currentTime]);

@@ -632,8 +578,7 @@ export function TrackingDetails({
             cameraAspect === "tall" ? "h-full" : "w-full",
           )}
         >
-          {displaySource == "video" && (
-            <>
+          {videoSource ? (
             <HlsVideoPlayer
               videoRef={videoRef}
               containerRef={containerRef}

@@ -653,62 +598,20 @@ export function TrackingDetails({
               camera={event.camera}
               currentTimeOverride={currentTime}
             />
-            {isVideoLoading && (
-              <ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />
-            )}
-            </>
-          )}
+          ) : (
+            <ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />
+          )}
+          {playbackSource && (
+            <div className="absolute right-3 top-3 z-50">
{displaySource == "image" && ( <RecordingPlaybackPreferenceSelect
<> className="h-8 w-32 bg-background/90 text-xs backdrop-blur"
<ImageLoadingIndicator value={playbackSource.preference}
className="absolute inset-0" onValueChange={playbackSource.setPreference}
imgLoaded={imgLoaded}
/>
{hasError && (
<div className="relative aspect-video">
<div className="flex flex-col items-center justify-center p-20 text-center">
<LuFolderX className="size-16" />
{t("objectLifecycle.noImageFound")}
</div>
</div>
)}
<div
className={cn("relative", imgLoaded ? "visible" : "invisible")}
>
<div className="absolute z-50 size-full">
<ObjectTrackOverlay
key={`overlay-${displayedRecordTime}`}
camera={event.camera}
showBoundingBoxes={true}
currentTime={displayedRecordTime}
videoWidth={imgRef?.current?.naturalWidth ?? 0}
videoHeight={imgRef?.current?.naturalHeight ?? 0}
className="absolute inset-0 z-10"
onSeekToTime={handleSeekToTime}
/> />
</div> </div>
<img
key={event.id}
ref={imgRef}
className={cn(
"max-h-[50dvh] max-w-full select-none rounded-lg object-contain",
)} )}
loading={isSafari ? "eager" : "lazy"} {isVideoLoading && (
style={ <ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />
isIOS
? {
WebkitUserSelect: "none",
WebkitTouchCallout: "none",
}
: undefined
}
draggable={false}
src={src}
onLoad={() => setImgLoaded(true)}
onError={() => setHasError(true)}
/>
</div>
</>
)} )}
</div> </div>
</div> </div>
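
With the image branch gone, the component renders from a single source of truth: the playback hook's decision. A minimal sketch of that mapping outside JSX (the object shape mirrors the hook's return type shown later in this commit; the helper name is illustrative, not part of the change):

// Illustrative only: how TrackingDetails maps the hook result to a player
// source. `url` is the variant-aware playlist from useRecordingPlaybackSource.
type PlaybackSourceLike = { url: string; variant: string };

function toVideoSource(playbackSource: PlaybackSourceLike | undefined) {
  if (!playbackSource) {
    // No decision yet (e.g. the stored preference is still loading):
    // render the ActivityIndicator instead of mounting the player.
    return undefined;
  }
  return { playlist: playbackSource.url, startPosition: 0 };
}

console.log(
  toVideoSource({
    url: "/vod/variant/sub/front_door/start/10/end/20/master.m3u8",
    variant: "sub",
  }),
); // -> { playlist: "/vod/variant/sub/...", startPosition: 0 }
console.log(toVideoSource(undefined)); // -> undefined, spinner is rendered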

View File

@@ -133,7 +133,7 @@ export function GenericVideoPlayer({
   }}
   setFullResolution={setVideoResolution}
 />
-{!isLoading && children}
+{children}
 </div>
 </>
 )}

View File

@@ -0,0 +1,36 @@
import { RecordingPlaybackPreference } from "@/types/record";
import {
  Select,
  SelectContent,
  SelectItem,
  SelectTrigger,
  SelectValue,
} from "@/components/ui/select";
import { useTranslation } from "react-i18next";

type RecordingPlaybackPreferenceSelectProps = {
  className?: string;
  onValueChange: (value: RecordingPlaybackPreference) => void;
  value: RecordingPlaybackPreference;
};

export default function RecordingPlaybackPreferenceSelect({
  className,
  onValueChange,
  value,
}: RecordingPlaybackPreferenceSelectProps) {
  const { t } = useTranslation(["components/player"]);

  return (
    <Select value={value} onValueChange={onValueChange}>
      <SelectTrigger className={className}>
        <SelectValue />
      </SelectTrigger>
      <SelectContent>
        <SelectItem value="auto">{t("playbackPreference.auto")}</SelectItem>
        <SelectItem value="main">{t("playbackPreference.main")}</SelectItem>
        <SelectItem value="sub">{t("playbackPreference.sub")}</SelectItem>
      </SelectContent>
    </Select>
  );
}
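
The select is fully controlled, so callers own both the current value and its persistence. A usage sketch (the relative import path and the `useState` stand-in are assumptions; the real callers in this commit wire it to the persisted preference from useRecordingPlaybackSource or useUserPersistence):

import { useState } from "react";
import { RecordingPlaybackPreference } from "@/types/record";
// Hypothetical import path; the component lives alongside its callers.
import RecordingPlaybackPreferenceSelect from "./RecordingPlaybackPreferenceSelect";

export function PlaybackPreferenceDemo() {
  // Real callers persist this value; useState is a stand-in for the demo.
  const [preference, setPreference] =
    useState<RecordingPlaybackPreference>("sub");

  return (
    <RecordingPlaybackPreferenceSelect
      className="h-8 w-32"
      value={preference}
      onValueChange={setPreference}
    />
  );
}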

View File

@@ -8,6 +8,7 @@ import {
 } from "@/utils/videoUtil";

 type PlayerMode = "playback" | "scrubbing";
+const RECORDING_SEEK_CLAMP_GAP_SECONDS = 45;

 export class DynamicVideoController {
   // main state
@@ -79,8 +80,14 @@
       this.playerMode = "playback";
     }

+    const playableTime = this.getPlayableTimestamp(time);
+    if (playableTime === undefined) {
+      this.setNoRecording(true);
+      return;
+    }
+
     const seekSeconds = calculateSeekPosition(
-      time,
+      playableTime,
       this.recordings,
       this.inpointOffset,
     );
@@ -103,6 +110,29 @@
     }
   }

+  private getPlayableTimestamp(time: number): number | undefined {
+    if (!this.recordings.length) {
+      return undefined;
+    }
+
+    const directSeek = calculateSeekPosition(time, this.recordings, this.inpointOffset);
+    if (directSeek !== undefined) {
+      return time;
+    }
+
+    // Some review items start a few seconds before the first saved segment.
+    // Clamp short gaps to the next recording so playback still opens.
+    const nextRecording = this.recordings.find((segment) => segment.start_time > time);
+    if (
+      nextRecording &&
+      nextRecording.start_time - time <= RECORDING_SEEK_CLAMP_GAP_SECONDS
+    ) {
+      return nextRecording.start_time;
+    }
+
+    return undefined;
+  }
+
   waitAndPlay() {
     return new Promise((resolve) => {
       const onSeekedHandler = () => {
@@ -166,5 +196,3 @@
     );
   }
 }
-
-export default typeof DynamicVideoController;
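
The 45-second clamp is easiest to see with concrete numbers. A reduced sketch of the gap-clamp branch only (the real method first tries calculateSeekPosition for an exact hit; the timeline values here are hypothetical):

type SegmentLike = { start_time: number; end_time: number };

const RECORDING_SEEK_CLAMP_GAP_SECONDS = 45;

// Hypothetical timeline: the first stored segment begins at t=1000.
const recordings: SegmentLike[] = [{ start_time: 1000, end_time: 1010 }];

function clampShortGap(time: number): number | undefined {
  const next = recordings.find((segment) => segment.start_time > time);
  if (next && next.start_time - time <= RECORDING_SEEK_CLAMP_GAP_SECONDS) {
    return next.start_time;
  }
  return undefined;
}

console.log(clampShortGap(990)); // 1000: a 10s gap is snapped forward
console.log(clampShortGap(900)); // undefined: a 100s gap shows "no recording"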

View File

@@ -26,19 +26,16 @@ import { cn } from "@/lib/utils";
 import { useTranslation } from "react-i18next";
 import { useUserPersistence } from "@/hooks/use-user-persistence";
 import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";
-import { chooseRecordingPlayback } from "@/utils/recordingPlayback";
+import {
+  chooseRecordingPlayback,
+  getRecordingsForPlaybackVariant,
+} from "@/utils/recordingPlayback";
 import {
   calculateInpointOffset,
   calculateSeekPosition,
 } from "@/utils/videoUtil";
 import { isFirefox } from "react-device-detect";
-import {
-  Select,
-  SelectContent,
-  SelectItem,
-  SelectTrigger,
-  SelectValue,
-} from "@/components/ui/select";
+import RecordingPlaybackPreferenceSelect from "../RecordingPlaybackPreferenceSelect";

 /**
  * Dynamically switches between video playback and scrubbing preview player.
@@ -212,17 +209,6 @@ export default function DynamicVideoPlayer({
   [`${camera}/recordings`, { ...recordingParams, variant: "all" }],
   { revalidateOnFocus: false },
 );

-const recordings = useMemo(() => {
-  if (!allRecordings?.length) {
-    return allRecordings;
-  }
-
-  const mainRecordings = allRecordings.filter(
-    (recording) => (recording.variant || "main") === "main",
-  );
-  return mainRecordings.length > 0 ? mainRecordings : allRecordings;
-}, [allRecordings]);
-
 const codecNames = useMemo(
   () =>
     Array.from(
@@ -231,13 +217,60 @@
   [allRecordings],
 );

 const playbackCapabilities = usePlaybackCapabilities(codecNames);
+
+const playbackDecision = useMemo(() => {
+  if (!allRecordings?.length) {
+    return undefined;
+  }
+
+  const vodPath = `/vod/${camera}/start/${recordingParams.after}/end/${recordingParams.before}/master.m3u8`;
+  return chooseRecordingPlayback({
+    apiHost,
+    recordings: allRecordings,
+    preference: playbackPreference ?? "sub",
+    vodPath,
+    capabilities: playbackCapabilities,
+  });
+}, [
+  allRecordings,
+  apiHost,
+  camera,
+  playbackPreference,
+  playbackCapabilities,
+  recordingParams.after,
+  recordingParams.before,
+]);
+
+const recordings = useMemo(() => {
+  if (!allRecordings?.length) {
+    return allRecordings;
+  }
+  if (!playbackDecision || playbackDecision.variant === "main") {
+    return getRecordingsForPlaybackVariant(allRecordings, "main");
+  }
+  const selectedRecordings = getRecordingsForPlaybackVariant(allRecordings, "sub");
+  return selectedRecordings.length > 0 ? selectedRecordings : allRecordings;
+}, [allRecordings, playbackDecision]);

 useEffect(() => {
-  if (!recordings?.length) {
-    if (recordings?.length == 0) {
-      setNoRecording(true);
-    }
+  if (!allRecordings?.length) {
+    if (allRecordings?.length == 0) {
+      if (loadingTimeout) {
+        clearTimeout(loadingTimeout);
+      }
+      setIsLoading(false);
+      setIsBuffering(false);
+      setNoRecording(true);
+      setSource(undefined);
+    }
+    return;
+  }
+
+  if (!recordings?.length || !playbackDecision) {
     return;
   }
@@ -246,7 +279,7 @@
   if (startTimestamp) {
     const inpointOffset = calculateInpointOffset(
       recordingParams.after,
-      (recordings || [])[0],
+      recordings[0],
     );

     startPosition = calculateSeekPosition(
@@ -256,33 +289,19 @@
     );
   }

-  const vodPath = `/vod/${camera}/start/${recordingParams.after}/end/${recordingParams.before}/master.m3u8`;
-  const decision = chooseRecordingPlayback({
-    apiHost,
-    config,
-    recordings: allRecordings ?? recordings,
-    preference: playbackPreference ?? "sub",
-    vodPath,
-    capabilities: playbackCapabilities,
-  });
-
   setSource({
-    playlist: decision.url,
+    playlist: playbackDecision.url,
     startPosition,
   });
+  setNoRecording(false);

   // eslint-disable-next-line react-hooks/exhaustive-deps
 }, [
-  apiHost,
-  camera,
-  recordingParams.after,
-  recordingParams.before,
   allRecordings,
   recordings,
   startTimestamp,
-  playbackPreference,
-  playbackCapabilities,
-  config?.transcode_proxy?.enabled,
-  config?.transcode_proxy?.vod_proxy_url,
+  playbackDecision,
+  recordingParams.after,
 ]);

 useEffect(() => {
@@ -384,22 +403,13 @@
   )}
   {!isScrubbing && source && (
     <div className="absolute right-3 top-3 z-50">
-      <Select
+      <RecordingPlaybackPreferenceSelect
+        className="h-8 w-32 bg-background/90 text-xs backdrop-blur"
         value={playbackPreference ?? "sub"}
         onValueChange={(value) =>
           setPlaybackPreference(value as RecordingPlaybackPreference)
         }
-      >
-        <SelectTrigger className="h-8 w-32 bg-background/90 text-xs backdrop-blur">
-          <SelectValue />
-        </SelectTrigger>
-        <SelectContent>
-          <SelectItem value="auto">Auto</SelectItem>
-          <SelectItem value="main">Main</SelectItem>
-          <SelectItem value="sub">Sub</SelectItem>
-          <SelectItem value="transcoded">Transcoded</SelectItem>
-        </SelectContent>
-      </Select>
+      />
     </div>
   )}
   <PreviewPlayer
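
Because the decision memo above now runs before the timeline memo, the seek math always operates on the same variant the player is streaming. The selection rule it implements, reduced to a standalone sketch using the real exports from this commit (the wrapper function name is illustrative):

import { getRecordingsForPlaybackVariant } from "@/utils/recordingPlayback";
import { Recording } from "@/types/record";

function timelineFor(
  all: Recording[],
  decisionVariant: "main" | "sub",
): Recording[] {
  if (decisionVariant === "main") {
    return getRecordingsForPlaybackVariant(all, "main");
  }
  // Sub playback: if no native sub rows survive filtering, fall back to the
  // full set so calculateSeekPosition still has a timeline to work with.
  const sub = getRecordingsForPlaybackVariant(all, "sub");
  return sub.length > 0 ? sub : all;
}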

View File

@@ -1,14 +1,18 @@
 import { useApiHost } from "@/api";
 import useSWR from "swr";
-import { FrigateConfig } from "@/types/frigateConfig";
 import {
   Recording,
   RecordingPlaybackPreference,
 } from "@/types/record";
-import { useMemo } from "react";
+import { useCallback, useMemo } from "react";
 import { useUserPersistence } from "@/hooks/use-user-persistence";
 import usePlaybackCapabilities from "@/hooks/use-playback-capabilities";
-import { chooseRecordingPlayback } from "@/utils/recordingPlayback";
+import {
+  buildDirectUrl,
+  chooseRecordingPlayback,
+  getFallbackVariantForPreference,
+  RecordingPlaybackDecision,
+} from "@/utils/recordingPlayback";

 type RecordingPlaybackSourceOptions = {
   camera: string;
@@ -19,6 +23,14 @@ type RecordingPlaybackSourceOptions = {
   enabled?: boolean;
 };

+export type RecordingPlaybackSource = {
+  decision?: RecordingPlaybackDecision;
+  preference: RecordingPlaybackPreference;
+  setPreference: (value: RecordingPlaybackPreference) => void;
+  url: string;
+  variant: string;
+};
+
 export default function useRecordingPlaybackSource({
   camera,
   after,
@@ -28,8 +40,8 @@ export default function useRecordingPlaybackSource({
   enabled = true,
 }: RecordingPlaybackSourceOptions) {
   const apiHost = useApiHost();
-  const { data: config } = useSWR<FrigateConfig>("config");
-  const [storedPreference] = useUserPersistence<RecordingPlaybackPreference>(
+  const [storedPreference, setStoredPreference, preferenceLoaded] =
+    useUserPersistence<RecordingPlaybackPreference>(
     `${camera}-recording-playback-v2`,
     "sub",
   );
@@ -46,27 +58,56 @@
     [recordings],
   );
   const capabilities = usePlaybackCapabilities(codecNames);

+  const activePreference = preference ?? storedPreference ?? "sub";
+
+  const setPreferenceValue = useCallback(
+    (value: RecordingPlaybackPreference) => {
+      if (preference !== undefined) {
+        return;
+      }
+      setStoredPreference(value);
+    },
+    [preference, setStoredPreference],
+  );
+
   return useMemo(() => {
-    if (!recordings?.length) {
+    if (!preferenceLoaded) {
       return undefined;
     }
-    return chooseRecordingPlayback({
+
+    if (!recordings?.length) {
+      const fallbackVariant = getFallbackVariantForPreference(activePreference);
+      return {
+        preference: activePreference,
+        setPreference: setPreferenceValue,
+        url: buildDirectUrl(apiHost, vodPath, fallbackVariant),
+        variant: fallbackVariant,
+      };
+    }
+
+    const decision = chooseRecordingPlayback({
       apiHost,
-      config,
       recordings,
-      preference: preference ?? storedPreference ?? "sub",
+      preference: activePreference,
       vodPath,
       capabilities,
-    }).url;
+    });
+
+    return {
+      decision,
+      preference: activePreference,
+      setPreference: setPreferenceValue,
+      url: decision.url,
+      variant: decision.variant,
+    };
   }, [
+    activePreference,
     apiHost,
     capabilities,
-    config,
-    preference,
+    preferenceLoaded,
     recordings,
-    storedPreference,
+    setPreferenceValue,
     vodPath,
   ]);
 }
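
A consumption sketch for the reshaped hook (the component, its props, and both import paths are hypothetical). The contract has three states: undefined while the persisted preference loads, a bare variant playlist URL when no recording rows exist, and a full decision otherwise:

import useRecordingPlaybackSource from "@/hooks/use-recording-playback-source"; // assumed path

type ClipPlayerProps = { camera: string; after: number; before: number };

export function ClipPlayer({ camera, after, before }: ClipPlayerProps) {
  const playbackSource = useRecordingPlaybackSource({
    camera,
    after,
    before,
    vodPath: `/vod/${camera}/start/${after}/end/${before}/master.m3u8`,
  });

  // 1) undefined until the persisted preference has loaded.
  if (!playbackSource) {
    return null;
  }

  // 2) with no rows, url is the preference's fallback variant playlist;
  // 3) otherwise it carries the full decision, including the chosen variant.
  return (
    // Stand-in for HlsVideoPlayer; url is already variant-aware.
    <video src={playbackSource.url} controls />
  );
}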

View File

@@ -8,6 +8,7 @@ export type Recording = {
   end_time: number;
   path: string;
   variant?: string;
+  transcoded_from_main?: boolean;
   segment_size: number;
   duration: number;
   motion: number;
@@ -52,8 +53,7 @@ export type RecordingPlayerError = "stalled" | "startup";
 export type RecordingPlaybackPreference =
   | "auto"
   | "main"
-  | "sub"
-  | "transcoded";
+  | "sub";

 export const ASPECT_VERTICAL_LAYOUT = 1.5;
 export const ASPECT_PORTRAIT_LAYOUT = 1.333;
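
Narrowing the preference union to "auto" | "main" | "sub" means the compiler now flags any leftover "transcoded" branch. A sketch of the standard exhaustiveness pattern that benefits from this (the function is illustrative, not part of the change):

import { RecordingPlaybackPreference } from "@/types/record";

function preferenceLabel(preference: RecordingPlaybackPreference): string {
  switch (preference) {
    case "auto":
      return "Auto";
    case "main":
      return "Main";
    case "sub":
      return "Sub";
    default: {
      // A stale `case "transcoded":` above, or a future variant added to the
      // union without a case here, turns this assignment into a type error.
      const unreachable: never = preference;
      return unreachable;
    }
  }
}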

View File

@@ -0,0 +1,123 @@
import { describe, expect, it } from "vitest";
import { Recording } from "@/types/record";
import {
  buildVariantVodPath,
  chooseRecordingPlayback,
  getRecordingsForPlaybackVariant,
  getFallbackVariantForPreference,
} from "./recordingPlayback";

const apiHost = "http://frigate.test/api";
const vodPath = "/vod/front_door/start/10/end/20/index.m3u8";

const playbackCapabilities = {
  estimatedBandwidthBps: 8_000_000,
  saveData: false,
  supports: {
    h264: true,
    hevc: true,
  },
};

function makeRecording(
  variant: "main" | "sub",
  overrides: Partial<Recording> = {},
): Recording {
  return {
    id: `${variant}-recording`,
    camera: "front_door",
    start_time: 10,
    end_time: 20,
    path: `/media/frigate/recordings/front_door/${variant}.mp4`,
    variant,
    segment_size: 4,
    duration: 10,
    motion: 100,
    objects: 5,
    dBFS: 0,
    codec_name: "h264",
    ...overrides,
  };
}

describe("recordingPlayback", () => {
  it("builds variant vod paths for sub recordings", () => {
    expect(buildVariantVodPath(vodPath, "main")).toBe(vodPath);
    expect(buildVariantVodPath(vodPath, "sub")).toBe(
      "/vod/variant/sub/front_door/start/10/end/20/index.m3u8",
    );
  });

  it("uses the sub variant URL when sub is selected manually", () => {
    const decision = chooseRecordingPlayback({
      apiHost,
      recordings: [makeRecording("main"), makeRecording("sub")],
      preference: "sub",
      vodPath,
      capabilities: playbackCapabilities,
    });

    expect(decision.variant).toBe("sub");
    expect(decision.reason).toBe("manual-sub");
    expect(decision.url).toBe(
      "http://frigate.test/api/vod/variant/sub/front_door/start/10/end/20/index.m3u8",
    );
  });

  it("ignores legacy sub_h264 recordings for sub playback", () => {
    const decision = chooseRecordingPlayback({
      apiHost,
      recordings: [
        makeRecording("main"),
        makeRecording("sub", {
          id: "sub-h264-recording",
          variant: "sub_h264",
        }),
      ],
      preference: "sub",
      vodPath,
      capabilities: playbackCapabilities,
    });

    expect(decision.variant).toBe("main");
    expect(decision.reason).toBe("raw-main");
  });

  it("ignores legacy sub_h264 rows for sub seek timelines", () => {
    const subRecordings = getRecordingsForPlaybackVariant(
      [
        makeRecording("sub", { id: "native-sub", path: "/native-sub.mp4" }),
        makeRecording("sub", {
          id: "legacy-generated-sub",
          path: "/legacy-generated-sub.mp4",
          variant: "sub_h264",
        }),
      ],
      "sub",
    );

    expect(subRecordings).toHaveLength(1);
    expect(subRecordings[0].id).toBe("native-sub");
  });

  it("still prefers playable main in auto mode", () => {
    const decision = chooseRecordingPlayback({
      apiHost,
      recordings: [makeRecording("main"), makeRecording("sub")],
      preference: "auto",
      vodPath,
      capabilities: playbackCapabilities,
    });

    expect(decision.variant).toBe("main");
    expect(decision.reason).toBe("raw-main");
  });

  it("maps fallback variants from playback preferences", () => {
    expect(getFallbackVariantForPreference("main")).toBe("main");
    expect(getFallbackVariantForPreference("auto")).toBe("main");
    expect(getFallbackVariantForPreference("sub")).toBe("sub");
  });
});

View File

@@ -1,4 +1,3 @@
-import { FrigateConfig } from "@/types/frigateConfig";
 import {
   Recording,
   RecordingPlaybackPreference,
@@ -11,15 +10,16 @@ export type PlaybackCapabilities = {
 };

 export type RecordingPlaybackDecision = {
-  mode: "direct" | "transcoded";
+  mode: "direct";
   variant: string;
   url: string;
   reason: string;
 };

+export type PlaybackVariant = "main" | "sub";
+
 type DecisionOptions = {
   apiHost: string;
-  config?: FrigateConfig;
   recordings: Recording[];
   preference: RecordingPlaybackPreference;
   vodPath: string;
@@ -63,16 +63,6 @@ function trimTrailingSlash(value: string): string {
   return value.replace(/\/$/, "");
 }

-function appendQuery(url: string, params: Record<string, string | undefined>): string {
-  const entries = Object.entries(params).filter(([, value]) => value);
-  if (entries.length === 0) {
-    return url;
-  }
-  const search = new URLSearchParams(entries as [string, string][]);
-  return `${url}${url.includes("?") ? "&" : "?"}${search.toString()}`;
-}
-
 function average(values: number[]): number | undefined {
   if (!values.length) {
     return undefined;
@@ -119,14 +109,70 @@ export function estimateRecordingBitrate(recordings: Recording[]): number | undefined {
 export function groupRecordingsByVariant(
   recordings: Recording[],
 ): Record<string, Recording[]> {
-  return recordings.reduce<Record<string, Recording[]>>((acc, recording) => {
-    const variant = recording.variant || "main";
-    if (!acc[variant]) {
-      acc[variant] = [];
-    }
-    acc[variant].push(recording);
-    return acc;
-  }, {});
+  return {
+    main: getRecordingsForPlaybackVariant(recordings, "main"),
+    sub: getRecordingsForPlaybackVariant(recordings, "sub"),
+  };
+}
+
+export function normalizePlaybackVariantFamily(
+  variant?: string | null,
+): PlaybackVariant | undefined {
+  const normalized = variant?.toLowerCase().trim() || "main";
+  if (normalized === "main") {
+    return "main";
+  }
+  if (normalized === "sub") {
+    return "sub";
+  }
+  return undefined;
+}
+
+function getVariantPriority(recording: Recording): number {
+  const normalized = recording.variant?.toLowerCase().trim();
+  if (normalized === "sub") {
+    return 1;
+  }
+  if (normalized === "main") {
+    return 0;
+  }
+  return -1;
+}
+
+export function getRecordingsForPlaybackVariant(
+  recordings: Recording[],
+  variant: PlaybackVariant,
+): Recording[] {
+  const selected = recordings
+    .filter((recording) => normalizePlaybackVariantFamily(recording.variant) === variant)
+    .sort((left, right) => {
+      if (left.start_time !== right.start_time) {
+        return left.start_time - right.start_time;
+      }
+      return getVariantPriority(right) - getVariantPriority(left);
+    });
+
+  const deduped = new Map<string, Recording>();
+  for (const recording of selected) {
+    const key = `${recording.start_time}:${recording.end_time}`;
+    const existing = deduped.get(key);
+    if (!existing || getVariantPriority(recording) > getVariantPriority(existing)) {
+      deduped.set(key, recording);
+    }
+  }
+
+  return Array.from(deduped.values()).sort(
+    (left, right) => left.start_time - right.start_time,
+  );
 }

 function canDirectPlayVariant(
@@ -145,65 +191,34 @@ function getDirectBaseUrl(apiHost: string): string {
   return trimTrailingSlash(apiHost);
 }

-function getTranscodeBaseUrl(apiHost: string, config?: FrigateConfig): string | undefined {
-  if (!config?.transcode_proxy?.enabled) {
-    return undefined;
-  }
-  if (config.transcode_proxy.vod_proxy_url?.trim()) {
-    return trimTrailingSlash(config.transcode_proxy.vod_proxy_url);
-  }
-  return `${trimTrailingSlash(apiHost)}/vod-transcoded`;
-}
-
-function getTranscodeProfile(estimatedBandwidthBps?: number, saveData = false) {
-  if (saveData || (estimatedBandwidthBps && estimatedBandwidthBps <= 1_500_000)) {
-    return { bitrate: "512k", maxWidth: "640", maxHeight: "360" };
-  }
-  if (estimatedBandwidthBps && estimatedBandwidthBps <= 3_000_000) {
-    return { bitrate: "1200k", maxWidth: "960", maxHeight: "540" };
-  }
-  return { bitrate: "2500k", maxWidth: "1280", maxHeight: "720" };
-}
-
-function buildDirectUrl(apiHost: string, vodPath: string, variant: string): string {
-  const baseUrl = `${getDirectBaseUrl(apiHost)}${vodPath}`;
-  return appendQuery(baseUrl, {
-    variant: variant !== "main" ? variant : undefined,
-  });
-}
-
-function buildTranscodeUrl(
-  apiHost: string,
-  config: FrigateConfig | undefined,
-  vodPath: string,
-  variant: string,
-  capabilities: PlaybackCapabilities,
-): string {
-  const transcodeBase = getTranscodeBaseUrl(apiHost, config);
-  if (!transcodeBase) {
-    return buildDirectUrl(apiHost, vodPath, variant);
-  }
-  const profile = getTranscodeProfile(
-    capabilities.estimatedBandwidthBps,
-    capabilities.saveData,
-  );
-  return appendQuery(`${transcodeBase}${vodPath}`, {
-    variant,
-    bitrate: profile.bitrate,
-    max_width: profile.maxWidth,
-    max_height: profile.maxHeight,
-  });
-}
+export function buildVariantVodPath(vodPath: string, variant: string): string {
+  if (variant === "main") {
+    return vodPath;
+  }
+  return vodPath.replace(/^\/vod\//, `/vod/variant/${variant}/`);
+}
+
+export function buildDirectUrl(
+  apiHost: string,
+  vodPath: string,
+  variant: string,
+): string {
+  return `${getDirectBaseUrl(apiHost)}${buildVariantVodPath(vodPath, variant)}`;
+}
+
+export function getFallbackVariantForPreference(
+  preference: RecordingPlaybackPreference,
+): "main" | "sub" {
+  if (preference === "sub") {
+    return "sub";
+  }
+  return "main";
+}

 export function chooseRecordingPlayback({
   apiHost,
-  config,
   recordings,
   preference,
   vodPath,
@@ -212,7 +227,6 @@ export function chooseRecordingPlayback({
   const recordingsByVariant = groupRecordingsByVariant(recordings);
   const mainRecordings = recordingsByVariant.main ?? [];
   const subRecordings = recordingsByVariant.sub ?? [];
-  const transcodeAvailable = !!getTranscodeBaseUrl(apiHost, config);
   const estimatedBandwidthBps =
     capabilities.estimatedBandwidthBps ?? (capabilities.saveData ? 1_000_000 : 6_000_000);
@@ -251,7 +265,6 @@ export function chooseRecordingPlayback({
   }

   if (preference === "sub" && candidates.sub.recordings.length > 0) {
-    if (candidates.sub.playable) {
     return {
       mode: "direct",
       variant: "sub",
@@ -260,33 +273,6 @@ export function chooseRecordingPlayback({
     };
   }

-    return {
-      mode: "transcoded",
-      variant: "sub",
-      url: buildTranscodeUrl(apiHost, config, vodPath, "sub", capabilities),
-      reason: "manual-sub-transcoded",
-    };
-  }
-
-  if (preference === "transcoded") {
-    const targetVariant = candidates.sub.recordings.length > 0 ? "sub" : "main";
-    if (!transcodeAvailable) {
-      return {
-        mode: "direct",
-        variant: targetVariant,
-        url: buildDirectUrl(apiHost, vodPath, targetVariant),
-        reason: "manual-transcoded-unavailable",
-      };
-    }
-    return {
-      mode: "transcoded",
-      variant: targetVariant,
-      url: buildTranscodeUrl(apiHost, config, vodPath, targetVariant, capabilities),
-      reason: "manual-transcoded",
-    };
-  }
-
   if (preferDirect("main")) {
     return {
       mode: "direct",
@@ -305,20 +291,10 @@ export function chooseRecordingPlayback({
     };
   }

-  const transcodeVariant = candidates.sub.recordings.length > 0 ? "sub" : "main";
-  if (!transcodeAvailable) {
-    return {
-      mode: "direct",
-      variant: transcodeVariant,
-      url: buildDirectUrl(apiHost, vodPath, transcodeVariant),
-      reason: "direct-fallback",
-    };
-  }
-  return {
-    mode: "transcoded",
-    variant: transcodeVariant,
-    url: buildTranscodeUrl(apiHost, config, vodPath, transcodeVariant, capabilities),
-    reason: "transcode-fallback",
-  };
+  return {
+    mode: "direct",
+    variant: "main",
+    url: buildDirectUrl(apiHost, vodPath, "main"),
+    reason: "direct-fallback",
+  };
 }
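
Taken together, the helpers now compose into a purely path-based flow with no query parameters and no transcode mode. A worked sketch of the end-to-end decision, reusing the exports above and mirroring the test fixtures (the recording rows are hypothetical data):

import {
  buildVariantVodPath,
  chooseRecordingPlayback,
} from "@/utils/recordingPlayback";
import { Recording } from "@/types/record";

// The sub playlist is a pure path rewrite of the main VOD path:
buildVariantVodPath("/vod/front_door/start/10/end/20/master.m3u8", "sub");
// -> "/vod/variant/sub/front_door/start/10/end/20/master.m3u8"

// Hypothetical rows: one main segment and one matching native sub segment.
const base = {
  camera: "front_door",
  start_time: 10,
  end_time: 20,
  segment_size: 4,
  duration: 10,
  motion: 100,
  objects: 5,
  dBFS: 0,
  codec_name: "h264",
};
const recordings: Recording[] = [
  { ...base, id: "main-seg", path: "/main.mp4", variant: "main" },
  { ...base, id: "sub-seg", path: "/sub.mp4", variant: "sub" },
];

const decision = chooseRecordingPlayback({
  apiHost: "http://frigate.test/api",
  recordings,
  preference: "sub",
  vodPath: "/vod/front_door/start/10/end/20/master.m3u8",
  capabilities: { saveData: false, supports: { h264: true, hevc: true } },
});
// With sub rows present and an explicit "sub" preference:
// decision.mode === "direct", decision.reason === "manual-sub", and
// decision.url points at the /vod/variant/sub/... playlist.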