Compare commits

..

1 Commits

Author SHA1 Message Date
dependabot[bot]
ed12058ffe
Merge f63938a90a into 814c497bef 2026-05-04 08:48:48 -03:00
11 changed files with 266 additions and 896 deletions

View File

@ -136,32 +136,90 @@ ffmpeg:
</TabItem>
</ConfigTabs>
### Configuring Intel GPU Stats
### Configuring Intel GPU Stats in Docker
Frigate reads Intel GPU utilization directly from the kernel's per-client DRM usage counters exposed at `/proc/<pid>/fdinfo/<fd>`. This requires:
Additional configuration is needed for the Docker container to be able to access the `intel_gpu_top` command for GPU stats. There are two options:
- Linux kernel **5.19 or newer** for the `i915` driver, or any release of the `xe` driver.
- Frigate running with permission to read other processes' fdinfo. Running as root inside the container (the default) satisfies this; non-root setups may need `CAP_SYS_PTRACE`.
1. Run the container as privileged.
2. Add the `CAP_PERFMON` capability (note: you might need to set the `perf_event_paranoid` low enough to allow access to the performance event system.)
No `intel_gpu_top` binary, `CAP_PERFMON`, privileged mode, or `perf_event_paranoid` tuning is required.
#### Run as privileged
#### Stats for SR-IOV or specific devices
This method works, but it gives more permissions to the container than are actually needed.
If the host has more than one Intel GPU (e.g. an iGPU plus a discrete GPU, or SR-IOV virtual functions), pin stats collection to a specific device by setting `intel_gpu_device` to either its PCI bus address or a DRM card/render-node path:
##### Docker Compose - Privileged
```yaml
services:
frigate:
...
image: ghcr.io/blakeblackshear/frigate:stable
# highlight-next-line
privileged: true
```
##### Docker Run CLI - Privileged
```bash {4}
docker run -d \
--name frigate \
...
--privileged \
ghcr.io/blakeblackshear/frigate:stable
```
#### CAP_PERFMON
Only recent versions of Docker support the `CAP_PERFMON` capability. You can test to see if yours supports it by running: `docker run --cap-add=CAP_PERFMON hello-world`
##### Docker Compose - CAP_PERFMON
```yaml {5,6}
services:
frigate:
...
image: ghcr.io/blakeblackshear/frigate:stable
cap_add:
- CAP_PERFMON
```
##### Docker Run CLI - CAP_PERFMON
```bash {4}
docker run -d \
--name frigate \
...
--cap-add=CAP_PERFMON \
ghcr.io/blakeblackshear/frigate:stable
```
#### perf_event_paranoid
_Note: This setting must be changed for the entire system._
For more information on the various values across different distributions, see https://askubuntu.com/questions/1400874/what-does-perf-paranoia-level-four-do.
Depending on your OS and kernel configuration, you may need to change the `/proc/sys/kernel/perf_event_paranoid` kernel tunable. You can test the change by running `sudo sh -c 'echo 2 >/proc/sys/kernel/perf_event_paranoid'` which will persist until a reboot. Make it permanent by running `sudo sh -c 'echo kernel.perf_event_paranoid=2 >> /etc/sysctl.d/local.conf'`
#### Stats for SR-IOV or other devices
When using virtualized GPUs via SR-IOV, you need to specify the device path that `intel_gpu_top` should use to gather stats. This example may work for some systems using SR-IOV:
```yaml
telemetry:
stats:
intel_gpu_device: "0000:00:02.0"
intel_gpu_device: "sriov"
```
For other virtualized GPUs, try specifying the direct path to the device instead:
```yaml
telemetry:
stats:
intel_gpu_device: "/dev/dri/card1"
intel_gpu_device: "drm:/dev/dri/card0"
```
When passing a device path, make sure the device is also passed through to the container.
If you are passing in a device path, make sure you've passed the device through to the container.
## AMD-based CPUs

View File

@ -25,8 +25,8 @@ class StatsConfig(FrigateBaseModel):
)
intel_gpu_device: Optional[str] = Field(
default=None,
title="Intel GPU device",
description="PCI bus address or DRM device path (e.g. /dev/dri/card1) used to pin Intel GPU stats to a specific device when multiple are present.",
title="SR-IOV device",
description="Device identifier used when treating Intel GPUs as SR-IOV to fix GPU stats.",
)

View File

@ -7,6 +7,8 @@ from frigate.util.services import get_amd_gpu_stats, get_intel_gpu_stats
class TestGpuStats(unittest.TestCase):
def setUp(self):
self.amd_results = "Unknown Radeon card. <= R500 won't work, new cards might.\nDumping to -, line limit 1.\n1664070990.607556: bus 10, gpu 4.17%, ee 0.00%, vgt 0.00%, ta 0.00%, tc 0.00%, sx 0.00%, sh 0.00%, spi 0.83%, smx 0.00%, cr 0.00%, sc 0.00%, pa 0.00%, db 0.00%, cb 0.00%, vram 60.37% 294.04mb, gtt 0.33% 52.21mb, mclk 100.00% 1.800ghz, sclk 26.65% 0.533ghz\n"
self.intel_results = """{"period":{"duration":1.194033,"unit":"ms"},"frequency":{"requested":0.000000,"actual":0.000000,"unit":"MHz"},"interrupts":{"count":3349.991164,"unit":"irq/s"},"rc6":{"value":47.844741,"unit":"%"},"engines":{"Render/3D/0":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"},"Blitter/0":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"},"Video/0":{"busy":4.533124,"sema":0.000000,"wait":0.000000,"unit":"%"},"Video/1":{"busy":6.194385,"sema":0.000000,"wait":0.000000,"unit":"%"},"VideoEnhance/0":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"}}},{"period":{"duration":1.189291,"unit":"ms"},"frequency":{"requested":0.000000,"actual":0.000000,"unit":"MHz"},"interrupts":{"count":0.000000,"unit":"irq/s"},"rc6":{"value":100.000000,"unit":"%"},"engines":{"Render/3D/0":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"},"Blitter/0":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"},"Video/0":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"},"Video/1":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"},"VideoEnhance/0":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"}}}"""
self.nvidia_results = "name, utilization.gpu [%], memory.used [MiB], memory.total [MiB]\nNVIDIA GeForce RTX 3050, 42 %, 5036 MiB, 8192 MiB\n"
@patch("subprocess.run")
def test_amd_gpu_stats(self, sp):
@ -17,76 +19,32 @@ class TestGpuStats(unittest.TestCase):
amd_stats = get_amd_gpu_stats()
assert amd_stats == {"gpu": "4.17%", "mem": "60.37%"}
@patch("frigate.util.services.time.sleep")
@patch("frigate.util.services.time.monotonic")
@patch("frigate.util.services._read_intel_drm_fdinfo")
def test_intel_gpu_stats_fdinfo(self, read_fdinfo, monotonic, sleep):
# 1 second of wall clock between snapshots
monotonic.side_effect = [0.0, 1.0]
# @patch("subprocess.run")
# def test_nvidia_gpu_stats(self, sp):
# process = MagicMock()
# process.returncode = 0
# process.stdout = self.nvidia_results
# sp.return_value = process
# nvidia_stats = get_nvidia_gpu_stats()
# assert nvidia_stats == {
# "name": "NVIDIA GeForce RTX 3050",
# "gpu": "42 %",
# "mem": "61.5 %",
# }
# Two i915 clients on the same iGPU. Engine values are cumulative ns.
# Deltas over the 1s window:
# client A (pid 100): render +200_000_000 (20%), video +500_000_000 (50%),
# video-enhance +100_000_000 (10%)
# client B (pid 200): compute +100_000_000 (10%)
# Engine totals → render 20, video 50, video-enhance 10, compute 10
# → compute = render + compute = 30
# → dec = video + video-enhance = 60
# → gpu = compute + dec = 90
snapshot_a = {
("0000:00:02.0", "1", "100"): {
"driver": "i915",
"pid": "100",
"engines": {
"render": (1_000_000_000, 0),
"video": (5_000_000_000, 0),
"video-enhance": (200_000_000, 0),
"compute": (0, 0),
},
},
("0000:00:02.0", "2", "200"): {
"driver": "i915",
"pid": "200",
"engines": {
"render": (0, 0),
"compute": (2_000_000_000, 0),
},
},
}
snapshot_b = {
("0000:00:02.0", "1", "100"): {
"driver": "i915",
"pid": "100",
"engines": {
"render": (1_200_000_000, 0),
"video": (5_500_000_000, 0),
"video-enhance": (300_000_000, 0),
"compute": (0, 0),
},
},
("0000:00:02.0", "2", "200"): {
"driver": "i915",
"pid": "200",
"engines": {
"render": (0, 0),
"compute": (2_100_000_000, 0),
},
},
}
read_fdinfo.side_effect = [snapshot_a, snapshot_b]
intel_stats = get_intel_gpu_stats(None)
sleep.assert_called_once()
@patch("subprocess.run")
def test_intel_gpu_stats(self, sp):
process = MagicMock()
process.returncode = 124
process.stdout = self.intel_results
sp.return_value = process
intel_stats = get_intel_gpu_stats(False)
# rc6 values: 47.844741 and 100.0 → avg 73.92 → gpu = 100 - 73.92 = 26.08%
# Render/3D/0: 0.0 and 0.0 → enc = 0.0%
# Video/0: 4.533124 and 0.0 → dec = 2.27%
assert intel_stats == {
"gpu": "90.0%",
"gpu": "26.08%",
"mem": "-%",
"compute": "30.0%",
"dec": "60.0%",
"clients": {"100": "80.0%", "200": "10.0%"},
"compute": "0.0%",
"dec": "2.27%",
}
@patch("frigate.util.services._read_intel_drm_fdinfo")
def test_intel_gpu_stats_no_clients(self, read_fdinfo):
    """An empty fdinfo snapshot (no Intel DRM clients visible) yields None."""
    read_fdinfo.return_value = {}
    assert get_intel_gpu_stats(None) is None

View File

@ -264,214 +264,156 @@ def get_amd_gpu_stats() -> Optional[dict[str, str]]:
return results
_INTEL_FDINFO_SAMPLE_SECONDS = 1.0
def get_intel_gpu_stats(intel_gpu_device: Optional[str]) -> Optional[dict[str, str]]:
"""Get stats using intel_gpu_top.
# Engines we track. Render/3D and Compute are pooled into "compute"; Video and
# VideoEnhance into "dec" (VideoEnhance is the post-process engine that handles
# VAAPI scaling/deinterlace/CSC, e.g. ffmpeg `-vf scale_vaapi=...`). The Copy
# (DMA blitter) engine is intentionally ignored — it represents transparent
# memory transfers, not user-visible GPU work.
# i915 fdinfo keys (cumulative ns) → logical engine name.
_I915_ENGINE_KEYS = {
"drm-engine-render": "render",
"drm-engine-video": "video",
"drm-engine-video-enhance": "video-enhance",
"drm-engine-compute": "compute",
}
# Xe fdinfo suffixes (cumulative cycles, paired with drm-total-cycles-*).
_XE_ENGINE_KEYS = {
"rcs": "render",
"vcs": "video",
"vecs": "video-enhance",
"ccs": "compute",
}
def _resolve_intel_gpu_pdev(device: Optional[str]) -> Optional[str]:
"""Map a configured GPU hint (/dev/dri/card1, renderD128, or a PCI bus
address) to its drm-pdev string so we can filter fdinfo entries to that
device. Returns None when no hint is supplied or it cannot be resolved."""
if not device:
return None
if re.match(r"^[0-9a-fA-F]{4}:[0-9a-fA-F]{2}:[0-9a-fA-F]{2}\.[0-9a-fA-F]$", device):
return device
name = os.path.basename(device.rstrip("/"))
try:
return os.path.basename(os.path.realpath(f"/sys/class/drm/{name}/device"))
except OSError:
return None
def _read_intel_drm_fdinfo(target_pdev: Optional[str]) -> dict:
"""Snapshot DRM fdinfo for every Intel client visible in /proc.
Returns a dict keyed by (pdev, drm-client-id, pid) so the same context
seen via multiple file descriptors on a single process collapses to one
entry.
Returns overall GPU usage derived from rc6 residency (idle time),
plus individual engine breakdowns:
- enc: Render/3D engine (compute/shader encoder, used by QSV)
- dec: Video engines (fixed-function codec, used by VAAPI)
"""
snapshot: dict = {}
def get_stats_manually(output: str) -> dict[str, str]:
"""Find global stats via regex when json fails to parse."""
reading = "".join(output)
results: dict[str, str] = {}
# rc6 residency for overall GPU usage
rc6_match = re.search(r'"rc6":\{"value":([\d.]+)', reading)
if rc6_match:
rc6_value = float(rc6_match.group(1))
results["gpu"] = f"{round(100.0 - rc6_value, 2)}%"
else:
results["gpu"] = "-%"
results["mem"] = "-%"
# Render/3D is the compute/encode engine
render = []
for result in re.findall(r'"Render/3D/0":{[a-z":\d.,%]+}', reading):
packet = json.loads(result[14:])
single = packet.get("busy", 0.0)
render.append(float(single))
if render:
results["compute"] = f"{round(sum(render) / len(render), 2)}%"
# Video engines are the fixed-function decode engines
video = []
for result in re.findall(r'"Video/\d":{[a-z":\d.,%]+}', reading):
packet = json.loads(result[10:])
single = packet.get("busy", 0.0)
video.append(float(single))
if video:
results["dec"] = f"{round(sum(video) / len(video), 2)}%"
return results
intel_gpu_top_command = [
"timeout",
"0.5s",
"intel_gpu_top",
"-J",
"-o",
"-",
"-s",
"1000", # Intel changed this from seconds to milliseconds in 2024+ versions
]
if intel_gpu_device:
intel_gpu_top_command += ["-d", intel_gpu_device]
try:
proc_entries = os.listdir("/proc")
except OSError:
return snapshot
p = sp.run(
intel_gpu_top_command,
encoding="ascii",
capture_output=True,
)
except UnicodeDecodeError:
return None
for entry in proc_entries:
if not entry.isdigit():
continue
# timeout has a non-zero returncode when timeout is reached
if p.returncode != 124:
logger.error(f"Unable to poll intel GPU stats: {p.stderr}")
return None
else:
output = "".join(p.stdout.split())
fdinfo_dir = f"/proc/{entry}/fdinfo"
try:
fds = os.listdir(fdinfo_dir)
except (FileNotFoundError, PermissionError, NotADirectoryError, OSError):
continue
data = json.loads(f"[{output}]")
except json.JSONDecodeError:
return get_stats_manually(output)
for fd in fds:
try:
with open(f"{fdinfo_dir}/{fd}") as f:
content = f.read()
except (FileNotFoundError, PermissionError, OSError):
continue
results: dict[str, str] = {}
rc6_values = []
render_global = []
video_global = []
# per-client: {pid: [total_busy_per_sample, ...]}
client_usages: dict[str, list[float]] = {}
if "drm-driver" not in content:
continue
for block in data:
# rc6 residency: percentage of time GPU is idle
rc6 = block.get("rc6", {}).get("value")
if rc6 is not None:
rc6_values.append(float(rc6))
fields: dict[str, str] = {}
for line in content.splitlines():
key, sep, value = line.partition(":")
if sep:
fields[key.strip()] = value.strip()
global_engine = block.get("engines")
driver = fields.get("drm-driver")
if driver not in ("i915", "xe"):
continue
if global_engine:
render_frame = global_engine.get("Render/3D/0", {}).get("busy")
video_frame = global_engine.get("Video/0", {}).get("busy")
pdev = fields.get("drm-pdev", "")
if target_pdev and pdev != target_pdev:
continue
if render_frame is not None:
render_global.append(float(render_frame))
client_id = fields.get("drm-client-id")
if not client_id:
continue
if video_frame is not None:
video_global.append(float(video_frame))
key = (pdev, client_id, entry)
if key in snapshot:
continue
clients = block.get("clients", {})
engines: dict[str, tuple[int, int]] = {}
if clients:
for client_block in clients.values():
pid = client_block["pid"]
if driver == "i915":
for fkey, engine in _I915_ENGINE_KEYS.items():
raw = fields.get(fkey)
if not raw:
continue
try:
engines[engine] = (int(raw.split()[0]), 0)
except (ValueError, IndexError):
continue
else:
for suffix, engine in _XE_ENGINE_KEYS.items():
busy_raw = fields.get(f"drm-cycles-{suffix}")
total_raw = fields.get(f"drm-total-cycles-{suffix}")
if not (busy_raw and total_raw):
continue
try:
engines[engine] = (
int(busy_raw.split()[0]),
int(total_raw.split()[0]),
)
except (ValueError, IndexError):
continue
if pid not in client_usages:
client_usages[pid] = []
if not engines:
continue
# Sum all engine-class busy values for this client
total_busy = 0.0
for engine in client_block.get("engine-classes", {}).values():
busy = engine.get("busy")
if busy is not None:
total_busy += float(busy)
snapshot[key] = {"driver": driver, "pid": entry, "engines": engines}
client_usages[pid].append(total_busy)
return snapshot
# Overall GPU usage from rc6 (idle) residency
if rc6_values:
rc6_avg = sum(rc6_values) / len(rc6_values)
results["gpu"] = f"{round(100.0 - rc6_avg, 2)}%"
results["mem"] = "-%"
def get_intel_gpu_stats(intel_gpu_device: Optional[str]) -> Optional[dict[str, Any]]:
"""Get stats by reading DRM fdinfo files.
# Compute: Render/3D engine (compute/shader workloads and QSV encode)
if render_global:
results["compute"] = f"{round(sum(render_global) / len(render_global), 2)}%"
Each DRM client FD exposes monotonic per-engine busy counters via
/proc/<pid>/fdinfo/<fd> (i915 since kernel 5.19, Xe since first release).
We sample twice and divide busy-time deltas by wall-clock to derive
utilization. Render/3D and Compute are pooled into "compute"; Video and
VideoEnhance into "dec". Overall "gpu" is the sum of those pools (clamped
to 100%).
"""
target_pdev = _resolve_intel_gpu_pdev(intel_gpu_device)
# Decoder: Video engine (fixed-function codec)
if video_global:
results["dec"] = f"{round(sum(video_global) / len(video_global), 2)}%"
snapshot_a = _read_intel_drm_fdinfo(target_pdev)
if not snapshot_a:
return None
# Per-client GPU usage (sum of all engines per process)
if client_usages:
results["clients"] = {}
start = time.monotonic()
time.sleep(_INTEL_FDINFO_SAMPLE_SECONDS)
elapsed_ns = (time.monotonic() - start) * 1e9
for pid, samples in client_usages.items():
if samples:
results["clients"][pid] = (
f"{round(sum(samples) / len(samples), 2)}%"
)
snapshot_b = _read_intel_drm_fdinfo(target_pdev)
if not snapshot_b or elapsed_ns <= 0:
return None
engine_pct: dict[str, float] = {
"render": 0.0,
"video": 0.0,
"video-enhance": 0.0,
"compute": 0.0,
}
pid_pct: dict[str, float] = {}
for key, data_b in snapshot_b.items():
data_a = snapshot_a.get(key)
if not data_a or data_a["driver"] != data_b["driver"]:
continue
client_total = 0.0
for engine, (busy_b, total_b) in data_b["engines"].items():
if engine not in engine_pct:
continue
busy_a, total_a = data_a["engines"].get(engine, (busy_b, total_b))
if data_b["driver"] == "i915":
delta = max(0, busy_b - busy_a)
pct = min(100.0, delta / elapsed_ns * 100.0)
else:
delta_busy = max(0, busy_b - busy_a)
delta_total = total_b - total_a
if delta_total <= 0:
continue
pct = min(100.0, delta_busy / delta_total * 100.0)
engine_pct[engine] += pct
client_total += pct
pid_pct[data_b["pid"]] = pid_pct.get(data_b["pid"], 0.0) + client_total
for engine in engine_pct:
engine_pct[engine] = min(100.0, engine_pct[engine])
compute_pct = min(100.0, engine_pct["render"] + engine_pct["compute"])
dec_pct = min(100.0, engine_pct["video"] + engine_pct["video-enhance"])
overall_pct = min(100.0, compute_pct + dec_pct)
results: dict[str, Any] = {
"gpu": f"{round(overall_pct, 2)}%",
"mem": "-%",
"compute": f"{round(compute_pct, 2)}%",
"dec": f"{round(dec_pct, 2)}%",
}
if pid_pct:
results["clients"] = {
pid: f"{round(min(100.0, pct), 2)}%" for pid, pct in pid_pct.items()
}
return results
return results
def get_openvino_npu_stats() -> Optional[dict[str, str]]:

View File

@ -485,10 +485,6 @@
"hwaccel_args": {
"label": "Export hwaccel args",
"description": "Hardware acceleration args to use for export/transcode operations."
},
"max_concurrent": {
"label": "Maximum concurrent exports",
"description": "Maximum number of export jobs to process at the same time."
}
},
"preview": {

View File

@ -242,8 +242,8 @@
"description": "Enable per-process network bandwidth monitoring for camera ffmpeg processes and detectors (requires capabilities)."
},
"intel_gpu_device": {
"label": "Intel GPU device",
"description": "PCI bus address or DRM device path (e.g. /dev/dri/card1) used to pin Intel GPU stats to a specific device when multiple are present."
"label": "SR-IOV device",
"description": "Device identifier used when treating Intel GPUs as SR-IOV to fix GPU stats."
}
},
"version_check": {
@ -1000,10 +1000,6 @@
"hwaccel_args": {
"label": "Export hwaccel args",
"description": "Hardware acceleration args to use for export/transcode operations."
},
"max_concurrent": {
"label": "Maximum concurrent exports",
"description": "Maximum number of export jobs to process at the same time."
}
},
"preview": {

View File

@ -20,18 +20,7 @@
"overriddenGlobal": "Overridden (Global)",
"overriddenGlobalTooltip": "This camera overrides global configuration settings in this section",
"overriddenBaseConfig": "Overridden (Base Config)",
"overriddenBaseConfigTooltip": "The {{profile}} profile overrides configuration settings in this section",
"overriddenInCameras": {
"label_one": "Overridden in {{count}} camera",
"label_other": "Overridden in {{count}} cameras",
"tooltip_one": "{{count}} camera overrides values in this section. Click to see details.",
"tooltip_other": "{{count}} cameras override values in this section. Click to see details.",
"heading_one": "This global section has fields that are overridden in {{count}} camera.",
"heading_other": "This global section has fields that are overridden in {{count}} cameras.",
"othersField_one": "{{count}} other",
"othersField_other": "{{count}} others",
"profilePrefix": "{{profile}} profile: {{fields}}"
}
"overriddenBaseConfigTooltip": "The {{profile}} profile overrides configuration settings in this section"
},
"menu": {
"general": "General",

View File

@ -25,7 +25,6 @@ import {
} from "./section-special-cases";
import { getSectionValidation } from "../section-validations";
import { useConfigOverride } from "@/hooks/use-config-override";
import { CameraOverridesBadge } from "./CameraOverridesBadge";
import { useSectionSchema } from "@/hooks/use-config-schema";
import type { FrigateConfig } from "@/types/frigateConfig";
import { Badge } from "@/components/ui/badge";
@ -1264,9 +1263,6 @@ export function ConfigSection({
</TooltipContent>
</Tooltip>
)}
{showOverrideIndicator && effectiveLevel === "global" && (
<CameraOverridesBadge sectionPath={sectionPath} />
)}
{hasChanges && (
<Badge variant="outline" className="text-xs">
{t("button.modified", {
@ -1338,9 +1334,6 @@ export function ConfigSection({
</TooltipContent>
</Tooltip>
)}
{showOverrideIndicator && effectiveLevel === "global" && (
<CameraOverridesBadge sectionPath={sectionPath} />
)}
{hasChanges && (
<Badge
variant="secondary"

View File

@ -1,303 +0,0 @@
import useSWR from "swr";
import { useMemo } from "react";
import { useTranslation } from "react-i18next";
import { Link } from "react-router-dom";
import { LuChevronDown } from "react-icons/lu";
import { Badge } from "@/components/ui/badge";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/ui/popover";
import {
CameraOverrideEntry,
FieldDelta,
useCamerasOverridingSection,
} from "@/hooks/use-config-override";
import type { FrigateConfig } from "@/types/frigateConfig";
import type { ProfilesApiResponse } from "@/types/profile";
import { humanizeKey } from "@/components/config-form/theme/utils/i18n";
import { useCameraFriendlyName } from "@/hooks/use-camera-friendly-name";
import { formatList } from "@/utils/stringUtil";
import { getSectionConfig } from "@/utils/configUtil";
const CAMERA_PAGE_BY_SECTION: Record<string, string> = {
detect: "cameraDetect",
ffmpeg: "cameraFfmpeg",
record: "cameraRecording",
snapshots: "cameraSnapshots",
motion: "cameraMotion",
objects: "cameraObjects",
review: "cameraReview",
audio: "cameraAudioEvents",
audio_transcription: "cameraAudioTranscription",
notifications: "cameraNotifications",
live: "cameraLivePlayback",
birdseye: "cameraBirdseye",
face_recognition: "cameraFaceRecognition",
lpr: "cameraLpr",
timestamp_style: "cameraTimestampStyle",
};
const MAX_FIELDS_PER_CAMERA = 5;
/**
* Enrichment sections where the cross-camera override badge should be
* suppressed because they're effectively global-only (or per-camera
* configuration there isn't a useful affordance to surface here).
* Face recognition and LPR are intentionally omitted so the badge does show
* on those enrichment pages.
*/
const SECTIONS_WITHOUT_OVERRIDE_BADGE = new Set([
"semantic_search",
"genai",
"classification",
"audio_transcription",
]);
/**
 * Test whether a field path is covered by a hidden-field pattern.
 *
 * A pattern without wildcards hides the exact path plus everything nested
 * under it (so "streams" also hides "streams.foo.bar"). A `*` segment matches
 * exactly one path segment (e.g. "filters.*.mask").
 */
function pathMatchesHiddenPattern(path: string, pattern: string): boolean {
  if (!pattern) {
    return false;
  }
  if (!pattern.includes("*")) {
    // Literal pattern: exact match, or prefix of a deeper path.
    return path === pattern || path.startsWith(`${pattern}.`);
  }
  const want = pattern.split(".");
  const have = path.split(".");
  if (have.length < want.length) {
    return false;
  }
  return want.every((segment, i) => segment === "*" || segment === have[i]);
}
type CameraEntryProps = {
sectionPath: string;
entry: CameraOverrideEntry;
cameraPage?: string;
};
type SourceGroup = {
/** undefined → camera-level; string → profile name */
profileName: string | undefined;
deltas: FieldDelta[];
};
/**
 * Partition field deltas by their source: camera-level overrides first,
 * followed by one group per overriding profile in first-seen order.
 */
function groupDeltasBySource(deltas: FieldDelta[]): SourceGroup[] {
  const cameraLevel: FieldDelta[] = [];
  const profileBuckets = new Map<string, FieldDelta[]>();
  deltas.forEach((delta) => {
    if (!delta.profileName) {
      cameraLevel.push(delta);
      return;
    }
    const bucket = profileBuckets.get(delta.profileName);
    if (bucket) {
      bucket.push(delta);
    } else {
      profileBuckets.set(delta.profileName, [delta]);
    }
  });
  const result: SourceGroup[] =
    cameraLevel.length > 0
      ? [{ profileName: undefined, deltas: cameraLevel }]
      : [];
  profileBuckets.forEach((bucket, profileName) => {
    result.push({ profileName, deltas: bucket });
  });
  return result;
}
/**
 * One row in the overrides popover: the camera's friendly name (linked to the
 * camera's settings page when this section maps to one) followed by the
 * overridden fields, grouped by source (camera-level vs. each profile).
 */
function CameraEntry({ sectionPath, entry, cameraPage }: CameraEntryProps) {
  const { t, i18n } = useTranslation([
    "config/global",
    "views/settings",
    "objects",
  ]);
  const friendlyName = useCameraFriendlyName(entry.camera);
  const { data: profilesData } = useSWR<ProfilesApiResponse>("profiles");
  // name → friendly_name lookup, rebuilt only when the profiles list changes
  const profileFriendlyNames = useMemo(() => {
    const map = new Map<string, string>();
    profilesData?.profiles?.forEach((p) => map.set(p.name, p.friendly_name));
    return map;
  }, [profilesData]);
  // Resolve a human-readable label for a dot-path within this section,
  // preferring i18n label entries over humanized raw keys.
  const fieldLabel = (fieldPath: string) => {
    if (!fieldPath) {
      // Empty path means the whole section differs; label it by section.
      const sectionKey = `${sectionPath}.label`;
      return i18n.exists(sectionKey, { ns: "config/global" })
        ? t(sectionKey, { ns: "config/global" })
        : humanizeKey(sectionPath);
    }
    const segments = fieldPath.split(".");
    // Most specific: try the full nested path
    const fullKey = `${sectionPath}.${fieldPath}.label`;
    if (i18n.exists(fullKey, { ns: "config/global" })) {
      return t(fullKey, { ns: "config/global" });
    }
    // Try dropping each intermediate segment in turn — those are typically
    // user-defined dict keys (object class names, zone names, etc.) that
    // don't have their own label entries. Prepend the dropped segment as
    // context to disambiguate (e.g. "Person · Minimum object area").
    for (let i = 0; i < segments.length; i++) {
      const reduced = [...segments.slice(0, i), ...segments.slice(i + 1)].join(
        ".",
      );
      if (!reduced) continue;
      const reducedKey = `${sectionPath}.${reduced}.label`;
      if (i18n.exists(reducedKey, { ns: "config/global" })) {
        const resolvedLabel = t(reducedKey, { ns: "config/global" });
        const dropped = segments[i];
        // Object class names ("person", "car", "fox") have translations in
        // the `objects` namespace; fall back to humanizing the raw key for
        // anything that isn't a known label.
        const droppedLabel = i18n.exists(dropped, { ns: "objects" })
          ? t(dropped, { ns: "objects" })
          : humanizeKey(dropped);
        return `${droppedLabel} · ${resolvedLabel}`;
      }
    }
    // Last resort: humanize the leaf segment
    return humanizeKey(segments[segments.length - 1]);
  };
  // Show at most MAX_FIELDS_PER_CAMERA labels; summarize the rest as a
  // localized "N others" entry appended to the list.
  const formatDeltas = (deltas: FieldDelta[]) => {
    const visibleLabels = deltas
      .slice(0, MAX_FIELDS_PER_CAMERA)
      .map((delta) => fieldLabel(delta.fieldPath));
    const hiddenCount = deltas.length - visibleLabels.length;
    const labelsForList =
      hiddenCount > 0
        ? [
            ...visibleLabels,
            t("button.overriddenInCameras.othersField", {
              ns: "views/settings",
              count: hiddenCount,
            }),
          ]
        : visibleLabels;
    return formatList(labelsForList);
  };
  const groups = groupDeltasBySource(entry.fieldDeltas);
  return (
    <div className="flex flex-col gap-0.5 text-xs">
      {cameraPage ? (
        <Link
          to={`/settings?page=${cameraPage}&camera=${encodeURIComponent(entry.camera)}`}
          className="font-medium hover:underline"
        >
          {friendlyName}
        </Link>
      ) : (
        <span className="font-medium">{friendlyName}</span>
      )}
      {groups.map((group) => (
        <span
          key={group.profileName ?? "__camera__"}
          className="ml-2 text-muted-foreground"
        >
          {group.profileName
            ? t("button.overriddenInCameras.profilePrefix", {
                ns: "views/settings",
                profile:
                  profileFriendlyNames.get(group.profileName) ??
                  group.profileName,
                fields: formatDeltas(group.deltas),
              })
            : formatDeltas(group.deltas)}
        </span>
      ))}
    </div>
  );
}
type Props = {
sectionPath: string;
className?: string;
};
/**
 * Badge shown on a global config section when one or more cameras override
 * values in that section. Clicking it opens a popover listing each camera and
 * its overridden fields.
 */
export function CameraOverridesBadge({ sectionPath, className }: Props) {
  const { data: config } = useSWR<FrigateConfig>("config");
  const { t } = useTranslation(["views/settings"]);
  const rawEntries = useCamerasOverridingSection(config, sectionPath);
  // Filter out deltas for fields this section's UI hides, then drop cameras
  // whose overrides were all hidden.
  const entries = useMemo(() => {
    const hiddenFields =
      getSectionConfig(sectionPath, "global").hiddenFields ?? [];
    if (hiddenFields.length === 0) return rawEntries;
    return rawEntries
      .map((entry) => ({
        ...entry,
        fieldDeltas: entry.fieldDeltas.filter(
          (delta) =>
            !hiddenFields.some((pattern) =>
              pathMatchesHiddenPattern(delta.fieldPath, pattern),
            ),
        ),
      }))
      .filter((entry) => entry.fieldDeltas.length > 0);
  }, [rawEntries, sectionPath]);
  // NOTE: hooks above must run unconditionally; early returns come after.
  if (SECTIONS_WITHOUT_OVERRIDE_BADGE.has(sectionPath)) {
    return null;
  }
  if (entries.length === 0) {
    return null;
  }
  const cameraPage = CAMERA_PAGE_BY_SECTION[sectionPath];
  const count = entries.length;
  return (
    <Popover>
      <PopoverTrigger asChild>
        <Badge
          variant="secondary"
          className={`cursor-pointer border-2 border-selected text-xs text-primary-variant ${className ?? ""}`}
          aria-label={t("button.overriddenInCameras.tooltip", {
            ns: "views/settings",
            count: count,
          })}
        >
          <span>
            {t("button.overriddenInCameras.label", {
              ns: "views/settings",
              count: count,
            })}
          </span>
          <LuChevronDown className="ml-1 size-3" />
        </Badge>
      </PopoverTrigger>
      <PopoverContent align="start" className="w-80 max-w-[90vw] pr-0">
        <div className="flex flex-col gap-3">
          <div className="pr-4 text-xs text-primary-variant">
            {t("button.overriddenInCameras.heading", {
              ns: "views/settings",
              count: count,
            })}
          </div>
          <div className="scrollbar-container flex max-h-[40dvh] flex-col gap-2 overflow-y-auto pr-4">
            {entries.map((entry) => (
              <CameraEntry
                key={entry.camera}
                sectionPath={sectionPath}
                entry={entry}
                cameraPage={cameraPage}
              />
            ))}
          </div>
        </div>
      </PopoverContent>
    </Popover>
  );
}

View File

@ -202,49 +202,6 @@ export function useConfigOverride({
}, [config, cameraName, sectionPath, compareFields]);
}
/**
 * Sections that can be overridden per-camera, with optional compareFields
 * filters that scope the override comparison to a subset of fields.
 *
 * Entries without compareFields are compared across the whole section
 * object; entries with compareFields only diff the listed dot-paths.
 */
export const OVERRIDABLE_SECTIONS: ReadonlyArray<{
  key: string;
  compareFields?: string[];
}> = [
  { key: "detect" },
  { key: "record" },
  { key: "snapshots" },
  { key: "motion" },
  { key: "objects" },
  { key: "review" },
  { key: "audio" },
  { key: "notifications" },
  { key: "live" },
  { key: "timestamp_style" },
  {
    key: "audio_transcription",
    compareFields: ["enabled", "live_enabled"],
  },
  { key: "birdseye", compareFields: ["enabled", "mode"] },
  { key: "face_recognition", compareFields: ["enabled", "min_area"] },
  {
    key: "ffmpeg",
    compareFields: [
      "path",
      "global_args",
      "hwaccel_args",
      "input_args",
      "output_args",
      "retry_interval",
      "apple_compatibility",
      "gpu",
    ],
  },
  {
    key: "lpr",
    compareFields: ["enabled", "min_area", "enhancement"],
  },
];
/**
* Hook to get all overridden fields for a camera
*/
@ -264,7 +221,47 @@ export function useAllCameraOverrides(
const overriddenSections: string[] = [];
for (const { key, compareFields } of OVERRIDABLE_SECTIONS) {
// Check each section that can be overridden
const sectionsToCheck: Array<{
key: string;
compareFields?: string[];
}> = [
{ key: "detect" },
{ key: "record" },
{ key: "snapshots" },
{ key: "motion" },
{ key: "objects" },
{ key: "review" },
{ key: "audio" },
{ key: "notifications" },
{ key: "live" },
{ key: "timestamp_style" },
{
key: "audio_transcription",
compareFields: ["enabled", "live_enabled"],
},
{ key: "birdseye", compareFields: ["enabled", "mode"] },
{ key: "face_recognition", compareFields: ["enabled", "min_area"] },
{
key: "ffmpeg",
compareFields: [
"path",
"global_args",
"hwaccel_args",
"input_args",
"output_args",
"retry_interval",
"apple_compatibility",
"gpu",
],
},
{
key: "lpr",
compareFields: ["enabled", "min_area", "enhancement"],
},
];
for (const { key, compareFields } of sectionsToCheck) {
const globalValue = normalizeConfigValue(get(config, key));
const cameraValue = normalizeConfigValue(
getBaseCameraSectionValue(config, cameraName, key),
@ -289,252 +286,3 @@ export function useAllCameraOverrides(
return overriddenSections;
}, [config, cameraName]);
}
/** A single global-vs-camera difference at one leaf field of a config section. */
export interface FieldDelta {
  /** Path relative to the section (e.g. "genai.enabled") */
  fieldPath: string;
  /** Value configured at the global (config-root) level for this field. */
  globalValue: unknown;
  /** Value effectively in force for the camera (or one of its profiles). */
  cameraValue: unknown;
  /** Profile name when the override originates from a profile; undefined for camera-level overrides */
  profileName?: string;
}
/** All field-level overrides that one camera applies to a given section. */
export interface CameraOverrideEntry {
  /** Camera name — a key of `config.cameras`. */
  camera: string;
  /** The specific leaf-level differences this camera introduces. */
  fieldDeltas: FieldDelta[];
}
/**
 * Compute leaf-level differences between a global section value and a camera
 * section value. With an explicit compareFields allow-list only those paths
 * are diffed; without one the two values are walked recursively and every
 * mismatching leaf is reported.
 */
function collectFieldDeltas(
  globalValue: JsonValue,
  cameraValue: JsonValue,
  compareFields?: string[],
  pathPrefix = "",
): FieldDelta[] {
  // Allow-list mode: compare exactly the requested paths, nothing else.
  if (compareFields) {
    const deltas: FieldDelta[] = [];
    for (const fieldPath of compareFields) {
      const globalLeaf = get(globalValue, fieldPath);
      const cameraLeaf = get(cameraValue, fieldPath);
      if (isEqual(globalLeaf, cameraLeaf)) {
        continue;
      }
      deltas.push({
        fieldPath,
        globalValue: globalLeaf,
        cameraValue: cameraLeaf,
      });
    }
    return deltas;
  }
  // Recursive mode: descend while both sides are plain objects, emitting a
  // delta at the first level where they stop matching structurally.
  if (isJsonObject(globalValue) && isJsonObject(cameraValue)) {
    const allKeys = new Set([
      ...Object.keys(globalValue),
      ...Object.keys(cameraValue),
    ]);
    const deltas: FieldDelta[] = [];
    for (const key of allKeys) {
      const globalChild = (globalValue as JsonObject)[key];
      const cameraChild = (cameraValue as JsonObject)[key];
      if (isEqual(globalChild, cameraChild)) {
        continue;
      }
      const childPath = pathPrefix ? `${pathPrefix}.${key}` : key;
      if (isJsonObject(globalChild) && isJsonObject(cameraChild)) {
        deltas.push(
          ...collectFieldDeltas(globalChild, cameraChild, undefined, childPath),
        );
      } else {
        deltas.push({
          fieldPath: childPath,
          globalValue: globalChild,
          cameraValue: cameraChild,
        });
      }
    }
    return deltas;
  }
  // Leaf vs. leaf (or mismatched shapes): a single delta when unequal.
  return isEqual(globalValue, cameraValue)
    ? []
    : [{ fieldPath: pathPrefix, globalValue, cameraValue }];
}
/**
 * Enumerate the dot-separated paths of every leaf value (primitive or array)
 * that a partial config object actually defines. Used to limit
 * profile-vs-global diffs to keys the profile actually sets, avoiding false
 * "undefined" deltas for fields the profile leaves unspecified.
 */
function collectDefinedLeafPaths(value: JsonValue, prefix = ""): string[] {
  if (!isJsonObject(value)) {
    // A non-object is itself a leaf; the bare root (no prefix) contributes nothing.
    return prefix ? [prefix] : [];
  }
  return Object.entries(value as JsonObject).flatMap(([key, child]) => {
    const childPath = prefix ? `${prefix}.${key}` : key;
    return isJsonObject(child)
      ? collectDefinedLeafPaths(child as JsonValue, childPath)
      : [childPath];
  });
}
/** True when `path` matches an allowed field exactly or lives underneath one. */
function isPathAllowed(path: string, compareFields?: string[]): boolean {
  if (compareFields == null) {
    return true;
  }
  for (const allowed of compareFields) {
    if (path === allowed || path.startsWith(`${allowed}.`)) {
      return true;
    }
  }
  return false;
}
/**
 * Some Frigate sections (notably `motion`) are dumped by the backend with
 * `exclude_unset=True`, so when the user hasn't explicitly written the section
 * in their global YAML the API returns null even though every camera still
 * gets defaults applied at runtime. To still detect cross-camera differences
 * in those sections we synthesize a baseline by taking the modal (most common)
 * value at each leaf path across cameras; cameras whose value diverges from
 * the modal are treated as overriding.
 */
function deriveSyntheticGlobalValue(
  cameraSectionValues: JsonValue[],
  compareFields?: string[],
): JsonObject {
  const cameraObjects = cameraSectionValues.filter(isJsonObject) as JsonObject[];
  if (cameraObjects.length === 0) {
    return {};
  }
  // Union of every allowed leaf path defined by any camera.
  const candidatePaths = new Set<string>();
  for (const cam of cameraObjects) {
    for (const path of collectDefinedLeafPaths(cam as JsonValue)) {
      if (isPathAllowed(path, compareFields)) {
        candidatePaths.add(path);
      }
    }
  }
  const baseline: JsonObject = {};
  for (const path of candidatePaths) {
    // Tally each distinct value, using its serialized form as the bucket key
    // (undefined is folded into null so both count as "unset").
    const tally = new Map<string, { value: unknown; count: number }>();
    for (const cam of cameraObjects) {
      const value = get(cam, path);
      const bucket = JSON.stringify(value ?? null);
      const entry = tally.get(bucket);
      if (entry) {
        entry.count += 1;
      } else {
        tally.set(bucket, { value, count: 1 });
      }
    }
    // Pick the most common value; on a tie, the first one seen wins.
    let winner: { value: unknown; count: number } | undefined;
    for (const entry of tally.values()) {
      if (!winner || entry.count > winner.count) {
        winner = entry;
      }
    }
    if (winner) {
      set(baseline, path, winner.value);
    }
  }
  return baseline;
}
/**
 * Paths that are intentionally hidden from the cross-camera override summary
 * because they're inherently per-camera (mask polygons, zone definitions) and
 * would otherwise dominate the popover with noise. Any path containing `mask`
 * as a whole dot-separated segment is filtered, so nested keys under a mask
 * dict (e.g. `mask.global_object_mask_1.coordinates`) are excluded too.
 */
function isCrossCameraIgnoredPath(path: string): boolean {
  if (!path) {
    return false;
  }
  return path.split(".").some((segment) => segment === "mask");
}
/**
 * Hook to find every camera that overrides a given global section. Returns
 * one entry per overriding camera with the specific field-level deltas.
 * Considers both the camera's own (pre-profile) section value and any of its
 * defined profiles, so a field overridden only inside a profile still surfaces.
 *
 * When the global section is absent from the API payload (null/undefined), a
 * synthetic baseline is derived from the modal per-field value across cameras,
 * so cross-camera divergence is still detected.
 *
 * @example
 * ```tsx
 * const entries = useCamerasOverridingSection(config, "review");
 * // [{ camera: "front_door", fieldDeltas: [{ fieldPath: "genai.enabled", ... }] }]
 * ```
 */
export function useCamerasOverridingSection(
  config: FrigateConfig | undefined,
  sectionPath: string,
): CameraOverrideEntry[] {
  return useMemo(() => {
    if (!config?.cameras || !sectionPath) {
      return [];
    }
    // Respect any compareFields allow-list registered for this section.
    const sectionMeta = OVERRIDABLE_SECTIONS.find((s) => s.key === sectionPath);
    const compareFields = sectionMeta?.compareFields;
    const cameraNames = Object.keys(config.cameras);
    const cameraSectionValues = cameraNames.map((name) =>
      normalizeConfigValue(
        getBaseCameraSectionValue(config, name, sectionPath),
      ),
    );
    const rawGlobalValue = get(config, sectionPath);
    // Fall back to the synthetic modal baseline when the section is unset globally.
    const globalValue: JsonValue =
      rawGlobalValue == null
        ? deriveSyntheticGlobalValue(cameraSectionValues, compareFields)
        : normalizeConfigValue(rawGlobalValue);
    const entries: CameraOverrideEntry[] = [];
    for (let idx = 0; idx < cameraNames.length; idx += 1) {
      const cameraName = cameraNames[idx];
      const cameraConfig = config.cameras[cameraName];
      // Keyed by field path so camera-level deltas take precedence over
      // profile-level ones for the same field (profiles only fill gaps below).
      const deltasByPath = new Map<string, FieldDelta>();
      // 1. Camera-level overrides (uses base_config when a profile is active)
      const cameraValue = cameraSectionValues[idx];
      for (const delta of collectFieldDeltas(
        globalValue,
        cameraValue,
        compareFields,
      )) {
        if (isCrossCameraIgnoredPath(delta.fieldPath)) continue;
        deltasByPath.set(delta.fieldPath, delta);
      }
      // 2. Profile-level overrides — diff only the paths each profile actually
      // defines, so unspecified-in-profile fields don't register as deltas.
      const profiles = cameraConfig?.profiles ?? {};
      for (const profileName of Object.keys(profiles)) {
        const profileSection = (
          profiles[profileName] as Record<string, unknown> | undefined
        )?.[sectionPath];
        if (profileSection === undefined) continue;
        const normalizedProfile = normalizeConfigValue(
          profileSection as JsonValue,
        );
        for (const path of collectDefinedLeafPaths(normalizedProfile)) {
          // Camera-level delta already recorded for this field — keep it.
          if (deltasByPath.has(path)) continue;
          if (isCrossCameraIgnoredPath(path)) continue;
          if (!isPathAllowed(path, compareFields)) continue;
          const g = get(globalValue, path);
          const p = get(normalizedProfile, path);
          if (!isEqual(g, p)) {
            deltasByPath.set(path, {
              fieldPath: path,
              globalValue: g,
              cameraValue: p,
              profileName,
            });
          }
        }
      }
      // Only cameras with at least one surviving delta are reported.
      if (deltasByPath.size > 0) {
        entries.push({
          camera: cameraName,
          fieldDeltas: Array.from(deltasByPath.values()),
        });
      }
    }
    return entries;
  }, [config, sectionPath]);
}

View File

@ -2,7 +2,6 @@ import { useCallback, useMemo, useState } from "react";
import { useTranslation } from "react-i18next";
import type { SectionConfig } from "@/components/config-form/sections";
import { ConfigSectionTemplate } from "@/components/config-form/sections";
import { CameraOverridesBadge } from "@/components/config-form/sections/CameraOverridesBadge";
import type { PolygonType } from "@/types/canvas";
import { Badge } from "@/components/ui/badge";
import {
@ -168,9 +167,6 @@ export function SingleSectionPage({
</div>
{/* Desktop: badge inline next to title */}
<div className="hidden shrink-0 sm:flex sm:flex-wrap sm:items-center sm:gap-2">
{level === "global" && showOverrideIndicator && (
<CameraOverridesBadge sectionPath={sectionKey} />
)}
{level === "camera" &&
showOverrideIndicator &&
sectionStatus.isOverridden && (
@ -228,9 +224,6 @@ export function SingleSectionPage({
</div>
{/* Mobile: badge below title/description */}
<div className="flex flex-wrap items-center gap-2 sm:hidden">
{level === "global" && showOverrideIndicator && (
<CameraOverridesBadge sectionPath={sectionKey} />
)}
{level === "camera" &&
showOverrideIndicator &&
sectionStatus.isOverridden && (