Compare commits

...

12 Commits

Author SHA1 Message Date
mathieu-d
8023c5c385
Merge 0f3dd097ec into f448b259a2 2026-05-04 20:07:48 +02:00
Josh Hawkins
f448b259a2
Settings UI improvements (#23109)
Some checks are pending
CI / AMD64 Build (push) Waiting to run
CI / ARM Build (push) Waiting to run
CI / Jetson Jetpack 6 (push) Waiting to run
CI / AMD64 Extra Build (push) Blocked by required conditions
CI / ARM Extra Build (push) Blocked by required conditions
CI / Synaptics Build (push) Blocked by required conditions
CI / Assemble and push default build (push) Blocked by required conditions
* use badge with popover to show which cameras override each global config section

* don't use shorthand

* use label i18n
2026-05-04 09:50:00 -06:00
Nicolas Mowen
ef9d7e07b7
Rewrite intel stats (#23108)
* Rewrite intel GPU stats to use file descriptors instead of intel_gpu_top, leading to significantly better API for interaction and more accurate results

* Update tests

* Update docs

* Adjust approach

* Update strings
2026-05-04 10:36:32 -05:00
Josh Hawkins
814c497bef
Use Job infrastructure for Debug Replay (#23099)
Some checks are pending
CI / AMD64 Build (push) Waiting to run
CI / ARM Build (push) Waiting to run
CI / Jetson Jetpack 6 (push) Waiting to run
CI / AMD64 Extra Build (push) Blocked by required conditions
CI / ARM Extra Build (push) Blocked by required conditions
CI / Synaptics Build (push) Blocked by required conditions
CI / Assemble and push default build (push) Blocked by required conditions
* use ReplayState enum

* extract shared ffmpeg progress helper

* make start call non-blocking with worker thread

* expose replay state on status endpoint and return 202 from start

* cancel in-flight ffmpeg when stop is called during preparation

* add replay i18n strings for preparing and error states

* show status in replay UI

* navigate immediately on 202 from debug replay menus and dialog

* remove unused

* simplify to use Job infrastructure

* tests

* cleanup and tweaks

* fetch schema

* update api spec

* formatting

* fix e2e test

* mypy

* clean up

* formatting

* fix

* fix test

* don't try to show camera image until status reports ready

* simplify loading logic

* fix race in latest_frame on debug replay shutdown

* remove toast when successfully stopping

it gets hidden almost immediately
2026-05-03 14:54:20 -06:00
Josh Hawkins
5bc15d4aa9
chapter and thumbnail fixes (#23100)
- Skip null end_time when building export chapter metadata
- Use plain seconds for export thumbnail ffmpeg seek
2026-05-03 13:25:53 -06:00
Josh Hawkins
7ad233ef15
fix malformed svg from breaking docs build (#23102) 2026-05-03 13:21:22 -06:00
GuoQing Liu
882b3a8ffd
docs: add docker compose generator (#22956)
* docs: add docker compose generator

* docs: add more icon support

* Update docs/src/components/DockerComposeGenerator/config/config.yaml

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>

* Update docs/src/components/DockerComposeGenerator/config/config.yaml

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>

* Update docs/src/components/DockerComposeGenerator/config/config.yaml

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>

* Rename heading from 'Generic Hardware Acceleration' to 'Generic Hardware Devices'

* Remove port 5000 configuration for security reasons

Removed unauthenticated Web UI port 5000 from configuration due to security risks.

* docs: remove 5000 port tips

* docs: improve NVIDIA GPU count input

* docs: add docker compose tabs

* Update docs/src/components/DockerComposeGenerator/config/config.yaml

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>

* Update docs/src/components/DockerComposeGenerator/components/OtherOptions.tsx

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>

* Update docs/src/components/DockerComposeGenerator/config/config.yaml

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>

* Update docs/src/components/DockerComposeGenerator/config/config.yaml

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>

* Update docs/src/components/DockerComposeGenerator/components/StoragePaths.tsx

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>

* Update docs/src/components/DockerComposeGenerator/components/StoragePaths.tsx

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>

* Update docs/src/components/DockerComposeGenerator/config/config.yaml

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>

* docs: Adjust the position of the RTSP password variable option

* docs: timezone change to select

* docs: add hailo and memryX mx3 driver tips

* docs: RTSP password is optional

* docs: fix select style

---------

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
2026-05-03 13:56:51 -05:00
matieu-d
0f3dd097ec Prepare for pull request. Remove specific configurations 2026-04-17 22:25:46 +02:00
matieu-d
2a4d7e4766 Prepare for pull request. Remove specific configurations 2026-04-14 23:14:31 +02:00
matieu-d
46415ffeb5 Add Hailo-10H detector configuration to global.json 2026-04-14 22:54:58 +02:00
matieu-d
e35ab0b8a1 Add support of temperature reading for hailo 10H 2026-04-14 22:54:58 +02:00
matieu-d
837373547d H10 support patch 2026-04-14 22:54:58 +02:00
57 changed files with 5275 additions and 720 deletions

5
.gitignore vendored
View File

@ -22,3 +22,8 @@ core
!/web/**/*.ts
.idea/*
.ipynb_checkpoints
# Auto-generated Docker Compose Generator config files
docs/src/components/DockerComposeGenerator/config/devices.ts
docs/src/components/DockerComposeGenerator/config/hardware.ts
docs/src/components/DockerComposeGenerator/config/ports.ts

View File

@ -21,6 +21,13 @@ local: version
--tag frigate:latest \
--load
localh10: version
docker buildx build --target=frigate --file docker/main/Dockerfile . \
--build-arg HAILORT_VERSION=5.1.1 \
--build-arg HAILORT_GIT_REPO=mathieu-d/hailort \
--tag frigate:latest \
--load
debug: version
docker buildx build --target=frigate --file docker/main/Dockerfile . \
--build-arg DEBUG=true \

View File

@ -12,6 +12,11 @@ services:
build:
context: .
dockerfile: docker/main/Dockerfile
# Use args to specify hailort version and location
# args:
# HAILORT_VERSION: "5.1.1"
# HAILORT_GIT_REPO: "mathieu-d/hailort"
# Use target devcontainer-trt for TensorRT dev
target: devcontainer
cache_from:
@ -29,6 +34,7 @@ services:
# devices:
# - /dev/bus/usb:/dev/bus/usb # Uncomment for Google Coral USB
# - /dev/dri:/dev/dri # for intel hwaccel, needs to be updated for your hardware
volumes:
- .:/workspace/frigate:cached
- ./web/dist:/opt/frigate/web:cached

View File

@ -0,0 +1,7 @@
#!/bin/bash
# Host-side setup: install the Hailo-10H kernel driver package on an
# apt-based (Debian/Ubuntu) host. Requires sudo.
# Version 5.1.1 matches the HAILORT_VERSION build-arg used elsewhere in
# this change — keep the two in sync.
# NOTE(review): no `set -e`, so a failed `apt update` does not abort the
# install step — confirm this is intentional.
# Update package list and install hailo driver version 5.1.1 for Hailo-10H
sudo apt update
sudo apt install -y hailo-h10-all=5.1.1

View File

@ -157,6 +157,8 @@ FROM base AS wheels
ARG DEBIAN_FRONTEND
ARG TARGETARCH
ARG DEBUG=false
ARG HAILORT_VERSION=4.21.0
ARG HAILORT_GIT_REPO=frigate-nvr/hailort
# Use a separate container to build wheels to prevent build dependencies in final image
RUN apt-get -qq update \

View File

@ -2,13 +2,11 @@
set -euxo pipefail
hailo_version="4.21.0"
if [[ "${TARGETARCH}" == "amd64" ]]; then
arch="x86_64"
elif [[ "${TARGETARCH}" == "arm64" ]]; then
arch="aarch64"
fi
wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-debian12-${TARGETARCH}.tar.gz" | tar -C / -xzf -
wget -P /wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp311-cp311-linux_${arch}.whl"
wget -qO- "https://github.com/${HAILORT_GIT_REPO}/releases/download/v${HAILORT_VERSION}/hailort-debian12-${TARGETARCH}.tar.gz" | tar -C / -xzf -
wget -P /wheels/ "https://github.com/${HAILORT_GIT_REPO}/releases/download/v${HAILORT_VERSION}/hailort-${HAILORT_VERSION}-cp311-cp311-linux_${arch}.whl"

View File

@ -136,90 +136,32 @@ ffmpeg:
</TabItem>
</ConfigTabs>
### Configuring Intel GPU Stats in Docker
### Configuring Intel GPU Stats
Additional configuration is needed for the Docker container to be able to access the `intel_gpu_top` command for GPU stats. There are two options:
Frigate reads Intel GPU utilization directly from the kernel's per-client DRM usage counters exposed at `/proc/<pid>/fdinfo/<fd>`. This requires:
1. Run the container as privileged.
2. Add the `CAP_PERFMON` capability (note: you might need to set the `perf_event_paranoid` low enough to allow access to the performance event system.)
- Linux kernel **5.19 or newer** for the `i915` driver, or any release of the `xe` driver.
- Frigate running with permission to read other processes' fdinfo. Running as root inside the container (the default) satisfies this; non-root setups may need `CAP_SYS_PTRACE`.
#### Run as privileged
No `intel_gpu_top` binary, `CAP_PERFMON`, privileged mode, or `perf_event_paranoid` tuning is required.
This method works, but it gives more permissions to the container than are actually needed.
#### Stats for SR-IOV or specific devices
##### Docker Compose - Privileged
```yaml
services:
frigate:
...
image: ghcr.io/blakeblackshear/frigate:stable
# highlight-next-line
privileged: true
```
##### Docker Run CLI - Privileged
```bash {4}
docker run -d \
--name frigate \
...
--privileged \
ghcr.io/blakeblackshear/frigate:stable
```
#### CAP_PERFMON
Only recent versions of Docker support the `CAP_PERFMON` capability. You can test to see if yours supports it by running: `docker run --cap-add=CAP_PERFMON hello-world`
##### Docker Compose - CAP_PERFMON
```yaml {5,6}
services:
frigate:
...
image: ghcr.io/blakeblackshear/frigate:stable
cap_add:
- CAP_PERFMON
```
##### Docker Run CLI - CAP_PERFMON
```bash {4}
docker run -d \
--name frigate \
...
--cap-add=CAP_PERFMON \
ghcr.io/blakeblackshear/frigate:stable
```
#### perf_event_paranoid
_Note: This setting must be changed for the entire system._
For more information on the various values across different distributions, see https://askubuntu.com/questions/1400874/what-does-perf-paranoia-level-four-do.
Depending on your OS and kernel configuration, you may need to change the `/proc/sys/kernel/perf_event_paranoid` kernel tunable. You can test the change by running `sudo sh -c 'echo 2 >/proc/sys/kernel/perf_event_paranoid'` which will persist until a reboot. Make it permanent by running `sudo sh -c 'echo kernel.perf_event_paranoid=2 >> /etc/sysctl.d/local.conf'`
#### Stats for SR-IOV or other devices
When using virtualized GPUs via SR-IOV, you need to specify the device path to use to gather stats from `intel_gpu_top`. This example may work for some systems using SR-IOV:
If the host has more than one Intel GPU (e.g. an iGPU plus a discrete GPU, or SR-IOV virtual functions), pin stats collection to a specific device by setting `intel_gpu_device` to either its PCI bus address or a DRM card/render-node path:
```yaml
telemetry:
stats:
intel_gpu_device: "sriov"
intel_gpu_device: "0000:00:02.0"
```
For other virtualized GPUs, try specifying the direct path to the device instead:
```yaml
telemetry:
stats:
intel_gpu_device: "drm:/dev/dri/card0"
intel_gpu_device: "/dev/dri/card1"
```
If you are passing in a device path, make sure you've passed the device through to the container.
When passing a device path, make sure the device is also passed through to the container.
## AMD-based CPUs

View File

@ -4,6 +4,9 @@ title: Installation
---
import ShmCalculator from '@site/src/components/ShmCalculator'
import DockerComposeGenerator from '@site/src/components/DockerComposeGenerator'
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
Frigate is a Docker container that can be run on any Docker host including as a [Home Assistant App](https://www.home-assistant.io/apps/). Note that the Home Assistant App is **not** the same thing as the integration. The [integration](/integrations/home-assistant) is required to integrate Frigate into Home Assistant, whether you are running Frigate as a standalone Docker container or as a Home Assistant App.
@ -474,6 +477,16 @@ Finally, configure [hardware object detection](/configuration/object_detectors#a
Running through Docker with Docker Compose is the recommended install method.
<Tabs>
<TabItem value="domestic" label="Docker Compose Generator" default>
Generate a Frigate Docker Compose configuration based on your hardware and requirements.
<DockerComposeGenerator/>
</TabItem>
<TabItem value="original" label="Example Docker Compose File">
```yaml
services:
frigate:
@ -507,6 +520,10 @@ services:
environment:
FRIGATE_RTSP_PASSWORD: "password"
```
</TabItem>
</Tabs>
**Docker CLI**
If you can't use Docker Compose, you can run the container with something similar to this:

View File

@ -14,9 +14,11 @@
"@docusaurus/theme-mermaid": "^3.7.0",
"@inkeep/docusaurus": "^2.0.16",
"@mdx-js/react": "^3.1.0",
"@types/js-yaml": "^4.0.9",
"clsx": "^2.1.1",
"docusaurus-plugin-openapi-docs": "^4.5.1",
"docusaurus-theme-openapi-docs": "^4.5.1",
"js-yaml": "^4.1.1",
"prism-react-renderer": "^2.4.1",
"raw-loader": "^4.0.2",
"react": "^18.3.1",
@ -5747,6 +5749,11 @@
"@types/istanbul-lib-report": "*"
}
},
"node_modules/@types/js-yaml": {
"version": "4.0.9",
"resolved": "https://mirrors.tencent.com/npm/@types/js-yaml/-/js-yaml-4.0.9.tgz",
"integrity": "sha512-k4MGaQl5TGo/iipqb2UDG2UwjXziSWkh0uysQelTlJpX1qGlpUZYm8PnO4DxG1qBomtJUdYJ6qR6xdIah10JLg=="
},
"node_modules/@types/json-schema": {
"version": "7.0.15",
"resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz",
@ -12883,7 +12890,7 @@
},
"node_modules/js-yaml": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz",
"resolved": "https://mirrors.tencent.com/npm/js-yaml/-/js-yaml-4.1.1.tgz",
"integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==",
"license": "MIT",
"dependencies": {

View File

@ -3,9 +3,10 @@
"version": "0.0.0",
"private": true,
"scripts": {
"build:config": "node scripts/build-config.mjs",
"docusaurus": "docusaurus",
"start": "npm run regen-docs && docusaurus start --host 0.0.0.0",
"build": "npm run regen-docs && docusaurus build",
"start": "npm run build:config && npm run regen-docs && docusaurus start --host 0.0.0.0",
"build": "npm run build:config && npm run regen-docs && docusaurus build",
"swizzle": "docusaurus swizzle",
"deploy": "docusaurus deploy",
"clear": "docusaurus clear",
@ -23,9 +24,11 @@
"@docusaurus/theme-mermaid": "^3.7.0",
"@inkeep/docusaurus": "^2.0.16",
"@mdx-js/react": "^3.1.0",
"@types/js-yaml": "^4.0.9",
"clsx": "^2.1.1",
"docusaurus-plugin-openapi-docs": "^4.5.1",
"docusaurus-theme-openapi-docs": "^4.5.1",
"js-yaml": "^4.1.1",
"prism-react-renderer": "^2.4.1",
"raw-loader": "^4.0.2",
"react": "^18.3.1",

View File

@ -0,0 +1,64 @@
#!/usr/bin/env node
/**
 * Build script: reads config.yaml and generates TypeScript files
 * for the Docker Compose Generator.
 *
 * Usage: node scripts/build-config.mjs
 */
import fs from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
import yaml from "js-yaml";

// Resolve paths relative to this script so it works from any CWD.
const __dirname = path.dirname(fileURLToPath(import.meta.url));
const CONFIG_DIR = path.resolve(__dirname, "../src/components/DockerComposeGenerator/config");
const YAML_PATH = path.join(CONFIG_DIR, "config.yaml");

// Read & parse YAML
const raw = fs.readFileSync(YAML_PATH, "utf8");
const config = yaml.load(raw);

// Fail fast with a non-zero exit so `npm run build:config` aborts the build
// when a required section is missing.
if (!config.devices || !config.hardware || !config.ports) {
  console.error("config.yaml must contain 'devices', 'hardware', and 'ports' sections.");
  process.exit(1);
}

/**
 * Generate a .ts file from a section of the YAML config.
 *
 * @param sectionName  Output file stem; written as `${sectionName}.ts` into CONFIG_DIR.
 * @param items        Array parsed from the YAML section; serialized verbatim as JSON.
 * @param typeName     Type name imported from "./types" in the generated file.
 * @param varName      Name of the exported typed array constant.
 * @param mapVarName   Name of the exported id -> item lookup Map.
 * @param yamlFilename Source filename recorded in the generated header comment.
 */
function generateTsFile(sectionName, items, typeName, varName, mapVarName, yamlFilename) {
  const jsonItems = JSON.stringify(items, null, 2);
  // Indent JSON to fit inside the array literal
  const indented = jsonItems
    .split("\n")
    .map((line, i) => (i === 0 ? line : " " + line))
    .join("\n");
  const content = `/**
 * AUTO-GENERATED FILE do not edit directly.
 * Source: ${yamlFilename}
 * To update, edit the YAML file and run: npm run build:config
 */
import type { ${typeName} } from "./types";
export const ${varName}: ${typeName}[] = ${indented};
/** Lookup map for quick access by ID */
export const ${mapVarName}: Map<string, ${typeName}> = new Map(${varName}.map((item) => [item.id, item]));
`;
  const outPath = path.join(CONFIG_DIR, `${sectionName}.ts`);
  fs.writeFileSync(outPath, content, "utf8");
  console.log(` ✓ Generated ${sectionName}.ts (${items.length} items)`);
}

console.log("Building config from config.yaml...");
// One generated module per section; these outputs are gitignored (see .gitignore hunk).
generateTsFile("devices", config.devices, "DeviceConfig", "devices", "deviceMap", "config.yaml");
generateTsFile("hardware", config.hardware, "HardwareOption", "hardwareOptions", "hardwareMap", "config.yaml");
generateTsFile("ports", config.ports, "PortConfig", "ports", "portMap", "config.yaml");
console.log("Done!");

View File

@ -0,0 +1,108 @@
import React from "react";
import Admonition from "@theme/Admonition";
import DeviceSelector from "./components/DeviceSelector";
import HardwareOptions from "./components/HardwareOptions";
import PortConfigSection from "./components/PortConfig";
import StoragePaths from "./components/StoragePaths";
import NvidiaGpuConfig from "./components/NvidiaGpuConfig";
import OtherOptions from "./components/OtherOptions";
import GeneratedOutput from "./components/GeneratedOutput";
import { useConfigGenerator } from "./hooks/useConfigGenerator";
import styles from "./styles.module.css";
/**
 * Simple markdown-link-to-React renderer for help text.
 * Only supports [text](url) syntax — no nested brackets.
 *
 * Splitting on a capturing group keeps the link tokens in the resulting
 * array, so each part is either plain text or a complete [text](url) token.
 */
function renderHelpText(text: string): React.ReactNode {
  const parts = text.split(/(\[[^\]]+\]\([^)]+\))/g);
  return parts.map((part, i) => {
    // Re-match each token to pull out the label (match[1]) and URL (match[2]).
    const match = part.match(/^\[([^\]]+)\]\(([^)]+)\)$/);
    if (match) {
      return (
        <a key={i} href={match[2]}>
          {match[1]}
        </a>
      );
    }
    // Plain-text segment; Fragment only exists to carry a stable key.
    return <React.Fragment key={i}>{part}</React.Fragment>;
  });
}
/**
 * Top-level Docker Compose Generator widget embedded in the installation docs.
 * All state and derived values live in useConfigGenerator; this component is
 * purely presentational wiring between the hook and the form sections.
 */
export default function DockerComposeGenerator() {
  const {
    deviceId, device, hardwareEnabled,
    portEnabled,
    nvidiaGpuCount, nvidiaGpuDeviceId,
    configPath, mediaPath, rtspPassword, timezone, shmSize,
    shmSizeError, gpuDeviceIdError, configPathError, mediaPathError,
    hasAnyHardware, generatedYaml,
    selectDevice, toggleHardware, togglePort,
    handleShmSizeChange, handleConfigPathChange, handleMediaPathChange,
    handleNvidiaGpuCountChange, handleNvidiaGpuDeviceIdChange,
    setRtspPassword, setTimezone, isHardwareDisabled,
  } = useConfigGenerator();
  return (
    <div className={styles.generator}>
      <div className={styles.card}>
        <DeviceSelector selectedId={deviceId} onSelect={selectDevice} />
        {/* Optional per-device help blurb from config.yaml, with markdown links. */}
        {device.helpText && (
          <Admonition type={device.helpType || "info"}>
            {renderHelpText(device.helpText)}
          </Admonition>
        )}
        {/* Extra GPU-count/IDs inputs only for NVIDIA-style devices. */}
        {device.needsNvidiaConfig && (
          <NvidiaGpuConfig
            gpuCount={nvidiaGpuCount}
            gpuDeviceId={nvidiaGpuDeviceId}
            gpuDeviceIdError={gpuDeviceIdError}
            onGpuCountChange={handleNvidiaGpuCountChange}
            onGpuDeviceIdChange={handleNvidiaGpuDeviceIdChange}
          />
        )}
        <HardwareOptions
          deviceId={deviceId}
          hardwareEnabled={hardwareEnabled}
          onToggle={toggleHardware}
          isDisabled={isHardwareDisabled}
        />
        <StoragePaths
          configPath={configPath}
          mediaPath={mediaPath}
          configPathError={configPathError}
          mediaPathError={mediaPathError}
          onConfigPathChange={handleConfigPathChange}
          onMediaPathChange={handleMediaPathChange}
        />
        <PortConfigSection
          portEnabled={portEnabled}
          onTogglePort={togglePort}
        />
        <OtherOptions
          rtspPassword={rtspPassword}
          timezone={timezone}
          shmSize={shmSize}
          shmSizeError={shmSizeError}
          onRtspPasswordChange={setRtspPassword}
          onTimezoneChange={setTimezone}
          onShmSizeChange={handleShmSizeChange}
        />
        <GeneratedOutput
          yaml={generatedYaml}
          configPath={configPath}
          mediaPath={mediaPath}
          hasAnyHardware={hasAnyHardware}
          deviceId={deviceId}
        />
      </div>
    </div>
  );
}

View File

@ -0,0 +1,147 @@
import React from "react";
import { useColorMode } from "@docusaurus/theme-common";
import { devices } from "../config";
import type { DeviceConfig } from "../config";
import styles from "../styles.module.css";
interface Props {
selectedId: string;
onSelect: (id: string) => void;
}
/**
 * Classify an icon string by its prefix:
 * - "<svg"                      → inline SVG markup
 * - "/", "http://", "https://"  → image URL/path
 * - anything else               → emoji / plain text
 */
function getIconType(icon: string): "svg" | "image" | "emoji" {
  const value = icon.trim();
  if (value.startsWith("<svg")) {
    return "svg";
  }
  const imagePrefixes = ["/", "http://", "https://"];
  return imagePrefixes.some((prefix) => value.startsWith(prefix))
    ? "image"
    : "emoji";
}
/**
 * True when the style object carries any background-* property, which signals
 * that the image should be rendered via CSS background-image on a container
 * div rather than an <img> tag.
 */
function hasBackgroundProps(style: React.CSSProperties | undefined): boolean {
  if (!style) {
    return false;
  }
  const backgroundKeys = new Set([
    "backgroundsize",
    "backgroundposition",
    "backgroundrepeat",
    "backgroundimage",
  ]);
  // Normalize both camelCase and kebab-case keys before comparing.
  return Object.keys(style).some((key) =>
    backgroundKeys.has(key.toLowerCase().replace(/-/g, ""))
  );
}
/**
 * Convert a style object to CSS custom properties
 * (e.g. { width: "24px" } → { "--svg-width": "24px" })
 * so they can be consumed by CSS rules targeting child elements like <svg>.
 */
function toCssVars(style: React.CSSProperties | undefined, prefix: string): React.CSSProperties {
  if (!style) {
    return {};
  }
  const entries = Object.entries(style).map(([key, value]) => {
    // camelCase -> kebab-case, e.g. "backgroundSize" -> "background-size".
    const cssKey = key.replace(/([A-Z])/g, "-$1").toLowerCase();
    return [`--${prefix}-${cssKey}`, value];
  });
  return Object.fromEntries(entries) as React.CSSProperties;
}
/**
 * Render a device's icon, honoring dark-mode overrides from the config.
 * The icon string may be inline SVG, an image URL/path, or emoji text
 * (see getIconType).
 *
 * NOTE(review): iconStr is injected via dangerouslySetInnerHTML — this is
 * safe only because icons come from the repo's own build-time config.yaml,
 * never user input; confirm that assumption holds.
 */
function DeviceIcon({ device }: { device: DeviceConfig }) {
  const { isDarkTheme } = useColorMode();
  // Prefer the *Dark variants when dark mode is active and they exist.
  const iconStr = isDarkTheme && device.iconDark ? device.iconDark : device.icon;
  const iconStyle = (isDarkTheme && device.iconDarkStyle
    ? device.iconDarkStyle
    : device.iconStyle) as React.CSSProperties | undefined;
  const svgStyle = (isDarkTheme && device.svgDarkStyle
    ? device.svgDarkStyle
    : device.svgStyle) as React.CSSProperties | undefined;
  const iconType = getIconType(iconStr);
  if (iconType === "svg") {
    // svgStyle is passed down to the inner <svg> as CSS custom properties.
    return (
      <div
        className={styles.deviceIconSvg}
        style={{ ...iconStyle, ...toCssVars(svgStyle, "svg") }}
        dangerouslySetInnerHTML={{ __html: iconStr }}
      />
    );
  }
  if (iconType === "image") {
    // When iconStyle contains background-* properties, render as background-image
    // on the container div instead of an <img> tag, enabling background-size/position control.
    if (hasBackgroundProps(iconStyle)) {
      return (
        <div
          className={styles.deviceIconImage}
          style={{
            backgroundImage: `url(${iconStr})`,
            backgroundRepeat: "no-repeat",
            backgroundPosition: "center",
            backgroundSize: "contain",
            ...iconStyle,
          }}
        />
      );
    }
    return (
      <div className={styles.deviceIconImage}>
        <img src={iconStr} alt={device.name} style={iconStyle} />
      </div>
    );
  }
  // Emoji / plain-text fallback.
  return (
    <div className={styles.deviceIcon} style={iconStyle}>
      {iconStr}
    </div>
  );
}
/**
 * One selectable device tile in the device grid. Implemented as a div with
 * role="button" + tabIndex so it is keyboard-activatable (Enter / Space).
 */
function DeviceCard({
  device,
  active,
  onClick,
}: {
  device: DeviceConfig;
  active: boolean;
  onClick: () => void;
}) {
  return (
    <div
      className={`${styles.deviceCard} ${active ? styles.deviceCardActive : ""}`}
      onClick={onClick}
      role="button"
      tabIndex={0}
      onKeyDown={(e) => {
        // Mirror native button behavior for keyboard users.
        if (e.key === "Enter" || e.key === " ") onClick();
      }}
    >
      <DeviceIcon device={device} />
      <div className={styles.deviceName}>{device.name}</div>
      <div className={styles.deviceDesc}>{device.description}</div>
    </div>
  );
}
/**
 * Device-type picker: renders one DeviceCard per entry in the generated
 * `devices` config, highlighting the currently selected id.
 */
export default function DeviceSelector({ selectedId, onSelect }: Props) {
  return (
    <div className={styles.formSection}>
      <h4>Device Type</h4>
      <div className={styles.deviceGrid}>
        {devices.map((d) => (
          <DeviceCard
            key={d.id}
            device={d}
            active={selectedId === d.id}
            onClick={() => onSelect(d.id)}
          />
        ))}
      </div>
    </div>
  );
}

View File

@ -0,0 +1,60 @@
import React, { useState, useCallback } from "react";
import CodeBlock from "@theme/CodeBlock";
import Admonition from "@theme/Admonition";
import styles from "../styles.module.css";
interface Props {
  yaml: string;            // fully rendered docker-compose.yml content
  configPath: string;      // host config dir; empty string means "not set"
  mediaPath: string;       // host media dir; empty string means "not set"
  hasAnyHardware: boolean; // at least one hardware option enabled
  deviceId: string;        // currently selected device id
}
/**
 * Renders the generated docker-compose.yml with a copy-to-clipboard button,
 * plus contextual tips when defaults were left in place and a warning when
 * no hardware acceleration was selected on the generic "stable" device.
 */
export default function GeneratedOutput({
  yaml,
  configPath,
  mediaPath,
  hasAnyHardware,
  deviceId,
}: Props) {
  const [copied, setCopied] = useState(false);
  const handleCopy = useCallback(() => {
    // "Copied!" feedback reverts after 2s.
    // NOTE(review): clipboard.writeText rejection is unhandled — presumably
    // acceptable for a docs widget, but confirm.
    navigator.clipboard.writeText(yaml).then(() => {
      setCopied(true);
      setTimeout(() => setCopied(false), 2000);
    });
  }, [yaml]);
  return (
    <div className={styles.resultSection}>
      <div className={styles.resultHeader}>
        <h4>Generated Configuration</h4>
        <button className="button button--primary button--sm" onClick={handleCopy}>
          {copied ? "Copied!" : "Copy"}
        </button>
      </div>
      {!configPath && (
        <Admonition type="tip">
          <p>You haven&apos;t specified a config file directory. You may want to modify the default path.</p>
        </Admonition>
      )}
      {!mediaPath && (
        <Admonition type="tip">
          <p>You haven&apos;t specified a recording storage directory. You may want to modify the default path.</p>
        </Admonition>
      )}
      {deviceId === "stable" && !hasAnyHardware && (
        <Admonition type="warning">
          <p>You haven&apos;t selected any hardware acceleration. Please check if you have supported hardware available.</p>
        </Admonition>
      )}
      <CodeBlock language="yaml" title="docker-compose.yml">
        {yaml}
      </CodeBlock>
    </div>
  );
}

View File

@ -0,0 +1,62 @@
import React from "react";
import { hardwareOptions } from "../config";
import type { HardwareOption } from "../config";
import styles from "../styles.module.css";
interface Props {
  deviceId: string;
  hardwareEnabled: Record<string, boolean>;   // hw id -> user-checked state
  onToggle: (hwId: string) => void;
  isDisabled: (hwId: string) => boolean;      // device-driven lockout per hw id
}
/**
 * Minimal [text](url) markdown-link renderer for hardware descriptions.
 * Same approach as renderHelpText in index.tsx — split on a capturing group
 * so link tokens survive, then re-match to extract label and URL.
 */
function renderDescription(text: string): React.ReactNode {
  const parts = text.split(/(\[[^\]]+\]\([^)]+\))/g);
  return parts.map((part, i) => {
    const match = part.match(/^\[([^\]]+)\]\(([^)]+)\)$/);
    if (match) {
      return <a key={i} href={match[2]}>{match[1]}</a>;
    }
    return <React.Fragment key={i}>{part}</React.Fragment>;
  });
}
/** One hardware checkbox row; shows its description only while checked. */
function HardwareCheckbox({
  hw, disabled, checked, onToggle,
}: {
  hw: HardwareOption; disabled: boolean; checked: boolean; onToggle: () => void;
}) {
  return (
    <div className={styles.hardwareItem}>
      <label className={`${styles.checkboxLabel} ${disabled ? styles.checkboxDisabled : ""}`}>
        <input type="checkbox" checked={checked} onChange={onToggle} disabled={disabled} />
        <span>{hw.label}</span>
      </label>
      {checked && hw.description && (
        <div className={styles.hardwareDescription}>{renderDescription(hw.description)}</div>
      )}
    </div>
  );
}
/**
 * "Generic Hardware Devices" section: one checkbox per hardware option from
 * the generated config. Disabled options always render unchecked regardless
 * of prior user state.
 */
export default function HardwareOptions({ deviceId, hardwareEnabled, onToggle, isDisabled }: Props) {
  return (
    <div className={styles.formSection}>
      <h4>Generic Hardware Devices</h4>
      {deviceId !== "stable" && (
        <p className={styles.helpText}>
          Some options have been auto-configured based on your device type.
        </p>
      )}
      <div className={styles.checkboxGrid}>
        {hardwareOptions.map((hw) => {
          const disabled = isDisabled(hw.id);
          // A disabled option is forced unchecked even if previously enabled.
          const checked = disabled ? false : !!hardwareEnabled[hw.id];
          return (
            <HardwareCheckbox key={hw.id} hw={hw} disabled={disabled} checked={checked} onToggle={() => onToggle(hw.id)} />
          );
        })}
      </div>
    </div>
  );
}

View File

@ -0,0 +1,64 @@
import React from "react";
import styles from "../styles.module.css";
interface Props {
  gpuCount: string;          // digits only; "" means Docker's "all"
  gpuDeviceId: string;       // comma-separated device IDs
  gpuDeviceIdError: boolean;
  onGpuCountChange: (value: string) => void;
  onGpuDeviceIdChange: (value: string) => void;
}
/**
 * NVIDIA-specific inputs: GPU count and, once a numeric count is entered,
 * the required comma-separated GPU device IDs.
 */
export default function NvidiaGpuConfig({
  gpuCount,
  gpuDeviceId,
  gpuDeviceIdError,
  onGpuCountChange,
  onGpuDeviceIdChange,
}: Props) {
  // Device IDs only become relevant (and required) once a count is given.
  const showDeviceId = gpuCount !== "";
  return (
    <div className={styles.nvidiaConfig}>
      <div className={styles.formGroup}>
        <label htmlFor="dcg-gpu-count" className={styles.label}>
          GPU count:
        </label>
        <input
          id="dcg-gpu-count"
          type="text"
          inputMode="numeric"
          pattern="[0-9]*"
          className={styles.input}
          value={gpuCount}
          placeholder="all"
          onChange={(e) => onGpuCountChange(e.target.value.replace(/\D/g, ""))}
        />
      </div>
      {showDeviceId && (
        <div className={styles.formGroup}>
          <label htmlFor="dcg-gpu-device-id" className={styles.label}>
            GPU device IDs (required, comma-separated):
          </label>
          <input
            id="dcg-gpu-device-id"
            type="text"
            className={`${styles.input} ${gpuDeviceIdError ? styles.inputError : ""}`}
            value={gpuDeviceId}
            placeholder="0"
            onChange={(e) => onGpuDeviceIdChange(e.target.value)}
          />
          {gpuDeviceIdError ? (
            <p className={styles.helpText}>
              GPU device IDs are required when GPU count is a number
            </p>
          ) : (
            <p className={styles.helpText}>
              Single GPU: 0 &nbsp;|&nbsp; Multiple GPUs: 0,1,2
            </p>
          )}
        </div>
      )}
    </div>
  );
}

View File

@ -0,0 +1,122 @@
import React, { useMemo } from "react";
import CodeInline from "@theme/CodeInline";
import styles from "../styles.module.css";
const AUTO_TIMEZONE_VALUE = "__auto__";
/**
 * Build the list of selectable IANA timezone names.
 *
 * Prefers Intl.supportedValuesOf("timeZone") (returned as a sorted copy)
 * when available; otherwise falls back to the environment's own resolved
 * timezone, and finally to ["UTC"].
 *
 * Fix: the original guarded `typeof Intl !== "undefined"` around
 * supportedValuesOf but then dereferenced `Intl.DateTimeFormat()`
 * unconditionally in the fallback — throwing a ReferenceError in exactly
 * the Intl-less environment the guard was written for. We now return
 * ["UTC"] early when Intl is missing.
 */
function getTimezoneList(): string[] {
  if (typeof Intl === "undefined") {
    // No Intl at all (very old or stripped-down runtimes): offer UTC only.
    return ["UTC"];
  }
  // supportedValuesOf is not in all TS lib targets yet, hence the widening.
  const intl = Intl as typeof Intl & {
    supportedValuesOf?: (key: string) => string[];
  };
  const supported = intl.supportedValuesOf?.("timeZone");
  if (supported && supported.length > 0) {
    return [...supported].sort();
  }
  const fallback = Intl.DateTimeFormat().resolvedOptions().timeZone;
  return fallback ? [fallback] : ["UTC"];
}
interface Props {
  rtspPassword: string;
  timezone: string;        // "" means "use browser timezone" (sentinel below)
  shmSize: string;
  shmSizeError: boolean;
  onRtspPasswordChange: (value: string) => void;
  onTimezoneChange: (value: string) => void;
  onShmSizeChange: (value: string) => void;
}
/**
 * "Other Options" section: timezone select, SHM size, and optional RTSP
 * password. An empty timezone string maps to the AUTO_TIMEZONE_VALUE
 * sentinel option ("use browser timezone") and back.
 */
export default function OtherOptions({
  rtspPassword,
  timezone,
  shmSize,
  shmSizeError,
  onRtspPasswordChange,
  onTimezoneChange,
  onShmSizeChange,
}: Props) {
  // Computed once; the timezone list is static for the page lifetime.
  const timezones = useMemo(() => getTimezoneList(), []);
  const systemTimezone =
    Intl.DateTimeFormat().resolvedOptions().timeZone || "Etc/UTC";
  const selectedValue = timezone || AUTO_TIMEZONE_VALUE;
  return (
    <div className={styles.formSection}>
      <h4>Other Options</h4>
      <div className={styles.formGrid}>
        <div className={styles.formGroup}>
          <label htmlFor="dcg-timezone" className={styles.label}>
            Timezone:
          </label>
          <select
            id="dcg-timezone"
            className={`${styles.input} ${styles.select}`}
            value={selectedValue}
            onChange={(e) =>
              // Map the sentinel back to "" so state keeps meaning "auto".
              onTimezoneChange(
                e.target.value === AUTO_TIMEZONE_VALUE ? "" : e.target.value
              )
            }
          >
            <option value={AUTO_TIMEZONE_VALUE}>
              Use browser timezone ({systemTimezone})
            </option>
            {timezones.map((tz) => (
              <option key={tz} value={tz}>
                {tz}
              </option>
            ))}
          </select>
        </div>
        <div className={styles.formGroup}>
          <label htmlFor="dcg-shm-size" className={styles.label}>
            Shared memory (SHM):
          </label>
          <input
            id="dcg-shm-size"
            type="text"
            className={`${styles.input} ${shmSizeError ? styles.inputError : ""}`}
            value={shmSize}
            placeholder="512mb"
            onChange={(e) => onShmSizeChange(e.target.value)}
          />
          {shmSizeError ? (
            <p className={styles.helpText}>
              Invalid format. Use a number followed by a unit (e.g. 512mb, 1gb)
            </p>
          ) : (
            <p className={styles.helpText}>
              See{" "}
              <a href="/frigate/installation#calculating-required-shm-size">
                calculating required SHM size
              </a>{" "}
              for the correct value.
            </p>
          )}
        </div>
        <div className={styles.formGroup}>
          <label htmlFor="dcg-rtsp-password" className={styles.label}>
            RTSP password:
          </label>
          <input
            id="dcg-rtsp-password"
            type="text"
            className={styles.input}
            value={rtspPassword}
            placeholder="password"
            onChange={(e) => onRtspPasswordChange(e.target.value)}
          />
          <p className={styles.helpText}>
            Optional. You can specify{" "}
            <CodeInline>{"{FRIGATE_RTSP_PASSWORD}"}</CodeInline>{" "}
            in the config file to reference camera stream passwords. This is NOT
            the Frigate login password.
          </p>
        </div>
      </div>
    </div>
  );
}

View File

@ -0,0 +1,71 @@
import React from "react";
import Admonition from "@theme/Admonition";
import { ports } from "../config";
import styles from "../styles.module.css";
interface Props {
portEnabled: Record<string, boolean>;
onTogglePort: (portId: string) => void;
}
/**
 * One port entry: a checkbox (disabled when the port is locked), an
 * optional description line, and an optional warning admonition whose
 * visibility depends on the checkbox state.
 */
function PortItem({
  port,
  enabled,
  onToggle,
}: {
  port: typeof ports[number];
  enabled: boolean;
  onToggle: () => void;
}) {
  // A warning is only relevant when content exists; `warningWhen` picks
  // which state triggers it. Absent/unknown values behave like "checked".
  let showWarning = false;
  if (port.warningContent) {
    showWarning = port.warningWhen === "unchecked" ? !enabled : enabled;
  }

  const labelClassName = `${styles.checkboxLabel} ${port.locked ? styles.checkboxDisabled : ""}`;

  return (
    <div className={styles.hardwareItem}>
      <label className={labelClassName}>
        <input
          type="checkbox"
          checked={enabled}
          onChange={onToggle}
          disabled={port.locked}
        />
        <span>
          {port.locked && "🔒 "}
          Port {port.host}
          {port.protocol !== "tcp" && `/${port.protocol}`}
        </span>
      </label>
      {port.description && (
        <div className={styles.hardwareDescription}>{port.description}</div>
      )}
      {showWarning && (
        <Admonition type={port.warningType || "warning"}>
          {port.warningContent}
        </Admonition>
      )}
    </div>
  );
}
/**
 * "Port Configuration" form section: renders one PortItem per entry in
 * the static `ports` config table, laid out in the checkbox grid.
 */
export default function PortConfigSection({
  portEnabled,
  onTogglePort,
}: Props) {
  // Build the item list up front; keys are the stable port ids.
  const items = ports.map((port) => (
    <PortItem
      key={port.id}
      port={port}
      enabled={Boolean(portEnabled[port.id])}
      onToggle={() => onTogglePort(port.id)}
    />
  ));

  return (
    <div className={styles.formSection}>
      <h4>Port Configuration</h4>
      <div className={styles.checkboxGrid}>{items}</div>
    </div>
  );
}

View File

@ -0,0 +1,66 @@
import React from "react";
import styles from "../styles.module.css";
/** Props for the StoragePaths component. */
interface Props {
  /** Host path mounted at /config (config, DB, model cache). */
  configPath: string;
  /** Host path mounted at /media/frigate (recordings). */
  mediaPath: string;
  /** True when `configPath` contains disallowed characters. */
  configPathError: boolean;
  /** True when `mediaPath` contains disallowed characters. */
  mediaPathError: boolean;
  onConfigPathChange: (value: string) => void;
  onMediaPathChange: (value: string) => void;
}
/**
 * "Storage Paths" form section: two validated text inputs for the host
 * config directory and the host recording-storage directory. Both inputs
 * share identical markup, so they are described as data and mapped.
 */
export default function StoragePaths({
  configPath,
  mediaPath,
  configPathError,
  mediaPathError,
  onConfigPathChange,
  onMediaPathChange,
}: Props) {
  const fields = [
    {
      id: "dcg-config-path",
      label: "Config / DB / model cache directory (on your host):",
      value: configPath,
      placeholder: "/path/to/your/config",
      error: configPathError,
      onChange: onConfigPathChange,
    },
    {
      id: "dcg-media-path",
      label: "Recording storage directory (on your host):",
      value: mediaPath,
      placeholder: "/path/to/your/storage",
      error: mediaPathError,
      onChange: onMediaPathChange,
    },
  ];

  return (
    <div className={styles.formSection}>
      <h4>Storage Paths</h4>
      <div className={styles.formGrid}>
        {fields.map((field) => (
          <div key={field.id} className={styles.formGroup}>
            <label htmlFor={field.id} className={styles.label}>
              {field.label}
            </label>
            <input
              id={field.id}
              type="text"
              className={`${styles.input} ${field.error ? styles.inputError : ""}`}
              value={field.value}
              placeholder={field.placeholder}
              onChange={(e) => field.onChange(e.target.value)}
            />
            {field.error && (
              <p className={styles.helpText}>
                Path contains invalid characters. Only letters, numbers,
                underscores, hyphens, slashes, and dots are allowed.
              </p>
            )}
          </div>
        ))}
      </div>
    </div>
  );
}

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,12 @@
/**
 * Barrel module for the Docker Compose Generator configuration.
 * Re-exports the declarative device/hardware/port tables with their
 * id-keyed lookup maps, plus the shared type definitions.
 */
export { devices, deviceMap } from "./devices";
export { hardwareOptions, hardwareMap } from "./hardware";
export { ports, portMap } from "./ports";
export type {
  DeviceConfig,
  DeviceMapping,
  VolumeMapping,
  HardwareOption,
  PortConfig,
  NvidiaDeployConfig,
} from "./types";

View File

@ -0,0 +1,154 @@
/**
 * Type definitions for the Docker Compose Generator configuration.
 * All device, hardware, and port options are declaratively defined
 * so that adding a new device only requires editing config files.
 */

/** A single device mapping entry (e.g. /dev/dri:/dev/dri) */
export interface DeviceMapping {
  /** Host device path */
  host: string;
  /** Container device path (defaults to host if omitted) */
  container?: string;
  /** Inline comment for this device line */
  comment?: string;
}

/** A single volume mapping entry */
export interface VolumeMapping {
  /** Host path */
  host: string;
  /** Container path */
  container: string;
  /** Whether the mount is read-only (adds the `:ro` suffix) */
  readOnly?: boolean;
  /** Inline comment */
  comment?: string;
}

/** NVIDIA deploy configuration for docker-compose */
export interface NvidiaDeployConfig {
  /** "all" or a specific number (kept as a string for YAML emission) */
  count: string;
  /** Specific GPU device IDs (when count is a number) */
  deviceIds?: string[];
}

/** Full device type definition */
export interface DeviceConfig {
  /** Unique identifier, e.g. "intel" */
  id: string;
  /** Display name, e.g. "Intel GPU" */
  name: string;
  /** Short description */
  description: string;
  /**
   * Icon for the device card. Supports:
   * - Emoji string (e.g. "🖥️")
   * - Image URL or static path (e.g. "/img/intel.svg", "https://example.com/icon.png")
   * - Inline SVG markup (e.g. "<svg>...</svg>")
   */
  icon: string;
  /**
   * Additional CSS properties applied to the icon element.
   * - For image-type icons: if any `background-*` property (e.g. `background-size`,
   *   `background-position`) is present, the image is rendered as a CSS `background-image`
   *   on the container div, enabling full background positioning control.
   *   Otherwise the image is rendered as an `<img>` tag and styles apply to it.
   * - For emoji/SVG icons: styles apply to the container div.
   */
  iconStyle?: Record<string, string>;
  /**
   * Additional CSS properties applied directly to the inner `<svg>` element
   * when the icon is an inline SVG. Use this to override the default
   * `width: 100%; height: 100%` or set `fill`, `transform`, etc.
   * Ignored for emoji and image-type icons.
   */
  svgStyle?: Record<string, string>;
  /**
   * Icon for dark mode. Same format as `icon`. When provided, this icon
   * replaces `icon` when the user is in dark mode.
   */
  iconDark?: string;
  /** Additional CSS properties for the dark mode icon container */
  iconDarkStyle?: Record<string, string>;
  /**
   * SVG-specific styles for dark mode. Same as `svgStyle` but applied
   * when dark mode is active. Merged over `svgStyle` in dark mode.
   */
  svgDarkStyle?: Record<string, string>;
  /** Docker image tag, e.g. "stable" */
  imageTag: string;
  /**
   * Image tag suffix appended to the base tag.
   * e.g. "-standard-arm64" produces "stable-standard-arm64"
   */
  imageTagSuffix?: string;
  /** Hardware option IDs to auto-enable when this device is selected */
  autoHardware: string[];
  /** Help text shown as an admonition when this device is selected */
  helpText?: string;
  /** Admonition type for help text */
  helpType?: "info" | "warning" | "danger";
  /** Device mappings always added for this device type */
  devices?: DeviceMapping[];
  /** Volume mappings always added for this device type */
  volumes?: VolumeMapping[];
  /** Extra environment variables for this device type */
  env?: Record<string, string>;
  /** NVIDIA deploy config (only for tensorrt) */
  nvidiaDeploy?: NvidiaDeployConfig;
  /** Runtime setting, e.g. "nvidia" for Jetson */
  runtime?: string;
  /** Extra hosts entries, e.g. "host.docker.internal:host-gateway" */
  extraHosts?: string[];
  /** Security options, e.g. ["apparmor=unconfined"] */
  securityOpt?: string[];
  /** Whether this device type needs the NVIDIA GPU config UI */
  needsNvidiaConfig?: boolean;
}

/** Generic hardware acceleration option definition */
export interface HardwareOption {
  /** Unique identifier, e.g. "usbCoral" */
  id: string;
  /** Display label */
  label: string;
  /**
   * Description shown below the checkbox when this option is enabled.
   * Supports markdown link syntax: [text](url)
   */
  description?: string;
  /** Device IDs that disable this option */
  disabledWhen?: string[];
  /** Device mappings added when this option is enabled */
  devices?: DeviceMapping[];
  /** Volume mappings added when this option is enabled */
  volumes?: VolumeMapping[];
  /** Extra environment variables */
  env?: Record<string, string>;
}

/** Port definition */
export interface PortConfig {
  /** Unique identifier (also the default host port as string) */
  id: string;
  /** Host port number */
  host: number;
  /** Container port number */
  container: number;
  /** Protocol (omitted means tcp) */
  protocol?: "tcp" | "udp";
  /** Description of the port's purpose */
  description: string;
  /** Whether enabled by default */
  defaultEnabled: boolean;
  /** Whether this port is locked (always enabled, cannot be toggled off) */
  locked?: boolean;
  /** Admonition type for the warning */
  warningType?: "warning" | "danger";
  /** Warning content (markdown) */
  warningContent?: string;
  /**
   * When to show the warning: when the port is checked or unchecked.
   * When omitted, the warning behaves as if "checked".
   */
  warningWhen?: "checked" | "unchecked";
}

View File

@ -0,0 +1,250 @@
import type {
DeviceConfig,
DeviceMapping,
VolumeMapping,
} from "../config/types";
import { hardwareMap } from "../config";
// ---------------------------------------------------------------------------
// Input type
// ---------------------------------------------------------------------------
/** All inputs needed to render a docker-compose file. */
export interface GeneratorInput {
  /** Selected device definition (drives image tag, devices, deploy, …). */
  device: DeviceConfig;
  /** Ids of the enabled hardware options (already filtered for the device). */
  selectedHardware: string[];
  /** Pre-rendered YAML list lines for the enabled ports section. */
  enabledPorts: string[];
  /** Host path mounted at /config. */
  configPath: string;
  /** Host path mounted at /media/frigate. */
  mediaPath: string;
  /** Optional FRIGATE_RTSP_PASSWORD value; omitted from env when empty. */
  rtspPassword?: string;
  /** Value emitted as the TZ environment variable. */
  timezone: string;
  /** shm_size value, e.g. "512mb". */
  shmSize: string;
  /** NVIDIA GPU count; empty string is treated as "all". */
  nvidiaGpuCount?: string;
  /** Comma-separated NVIDIA GPU device ids. */
  nvidiaGpuDeviceId?: string;
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/**
 * Render one `devices:` list entry. Collapses `host:container` to just
 * the host path when both sides are identical, and appends the optional
 * inline comment.
 */
function deviceLine(dm: DeviceMapping): string {
  const host = dm.host;
  // Container path defaults to the host path when omitted.
  const container = dm.container ?? dm.host;
  const mapping = host === container ? host : `${host}:${container}`;
  const comment = dm.comment ? ` # ${dm.comment}` : "";
  return `      - ${mapping}${comment}`;
}
/** Render one `volumes:` list entry, with optional `:ro` flag and comment. */
function volumeLine(vm: VolumeMapping): string {
  const ro = vm.readOnly ? ":ro" : "";
  const comment = vm.comment ? ` # ${vm.comment}` : "";
  return `      - ${vm.host}:${vm.container}${ro}${comment}`;
}
// ---------------------------------------------------------------------------
// YAML builder — each section returns an array of lines
// ---------------------------------------------------------------------------
/** Build the `image:` line, appending the device's tag suffix when present. */
function buildImage(device: DeviceConfig): string[] {
  const tag = device.imageTagSuffix
    ? `${device.imageTag}${device.imageTagSuffix}`
    : device.imageTag;
  return [`    image: ghcr.io/blakeblackshear/frigate:${tag}`];
}
/**
 * Build the `devices:` section from the device's own mappings plus the
 * hardware-option mappings. Emits no lines when there are none.
 */
function buildDevices(
  device: DeviceConfig,
  hwDevices: DeviceMapping[]
): string[] {
  const all: DeviceMapping[] = [
    ...(device.devices ?? []),
    ...hwDevices,
  ];
  if (all.length === 0) return [];
  return [
    "    devices:",
    ...all.map(deviceLine),
  ];
}
/**
 * Build the `volumes:` section. Always includes the host clock sync,
 * config and media mounts, and a 1GB tmpfs segment cache, followed by
 * any device- or hardware-specific volume mappings.
 */
function buildVolumes(
  device: DeviceConfig,
  hwVolumes: VolumeMapping[],
  configPath: string,
  mediaPath: string
): string[] {
  const all: VolumeMapping[] = [
    ...(device.volumes ?? []),
    ...hwVolumes,
  ];
  return [
    "    volumes:",
    "      - /etc/localtime:/etc/localtime:ro # Sync host time",
    `      - ${configPath}:/config # Config file directory`,
    `      - ${mediaPath}:/media/frigate # Recording storage directory`,
    "      - type: tmpfs # 1GB in-memory filesystem for recording segment storage",
    "        target: /tmp/cache",
    "        tmpfs:",
    "          size: 1000000000",
    ...all.map(volumeLine),
  ];
}
/** Build the `ports:` section from pre-rendered port list lines. */
function buildPorts(enabledPorts: string[]): string[] {
  return [
    "    ports:",
    ...enabledPorts,
  ];
}
/**
 * Build the `environment:` section: optional RTSP password first, then
 * TZ, then the merged hardware/device environment variables.
 */
function buildEnvironment(
  device: DeviceConfig,
  hwEnv: Record<string, string>,
  rtspPassword: string | undefined,
  timezone: string
): string[] {
  // Device-level env wins over hardware-option env on key collisions.
  const allEnv: Record<string, string> = {
    ...hwEnv,
    ...(device.env ?? {}),
  };
  const lines: string[] = ["    environment:"];
  if (rtspPassword) {
    lines.push(
      `      FRIGATE_RTSP_PASSWORD: "${rtspPassword}" # RTSP password — change to your own`
    );
  }
  lines.push(`      TZ: "${timezone}" # Timezone`);
  for (const [key, value] of Object.entries(allEnv)) {
    lines.push(`      ${key}: "${value}"`);
  }
  return lines;
}
/**
 * Build the `deploy:` GPU reservation block for the NVIDIA TensorRT
 * image. Emits one of three shapes: all GPUs, explicit device ids, or a
 * plain count. Returns no lines for any other device.
 */
function buildDeploy(device: DeviceConfig, input: GeneratorInput): string[] {
  // NOTE(review): this compares device.id while generateDockerCompose compares
  // device.imageTag against "stable-tensorrt" — confirm both checks target the
  // same device definition.
  if (device.id === "stable-tensorrt") {
    // An empty count means "all GPUs".
    const count = input.nvidiaGpuCount || "all";
    const isAll = count === "all";
    const deviceId = input.nvidiaGpuDeviceId?.trim();
    if (isAll) {
      return [
        "    deploy:",
        "      resources:",
        "        reservations:",
        "          devices:",
        "            - driver: nvidia",
        "              count: all # Use all GPUs",
        "              capabilities: [gpu]",
      ];
    }
    if (deviceId) {
      // Quote each id so numeric ids stay strings in YAML.
      const ids = deviceId
        .split(",")
        .map((s) => s.trim())
        .filter(Boolean)
        .map((s) => `'${s}'`)
        .join(", ");
      // NOTE(review): the Compose spec treats `count` and `device_ids` as
      // mutually exclusive — confirm emitting both here is intended.
      return [
        "    deploy:",
        "      resources:",
        "        reservations:",
        "          devices:",
        "            - driver: nvidia",
        `              device_ids: [${ids}] # GPU device IDs`,
        `              count: ${count} # GPU count`,
        "              capabilities: [gpu]",
      ];
    }
    return [
      "    deploy:",
      "      resources:",
      "        reservations:",
      "          devices:",
      "            - driver: nvidia",
      `              count: ${count} # GPU count`,
      "              capabilities: [gpu]",
    ];
  }
  return [];
}
/** Emit a `runtime:` line when the device requires one (e.g. "nvidia" for Jetson). */
function buildRuntime(device: DeviceConfig): string[] {
  if (device.runtime) {
    return [`    runtime: ${device.runtime}`];
  }
  return [];
}
/**
 * Build the `extra_hosts:` section; only the first entry carries the
 * NPU-detector explanatory comment.
 */
function buildExtraHosts(device: DeviceConfig): string[] {
  if (!device.extraHosts?.length) return [];
  return [
    "    extra_hosts:",
    ...device.extraHosts.map(
      (h, i) =>
        `      - "${h}"${i === 0 ? " # Required to talk to the NPU detector" : ""}`
    ),
  ];
}
/** Build the `security_opt:` section (e.g. apparmor=unconfined). */
function buildSecurityOpt(device: DeviceConfig): string[] {
  if (!device.securityOpt?.length) return [];
  return [
    "    security_opt:",
    ...device.securityOpt.map((s) => `      - ${s}`),
  ];
}
// ---------------------------------------------------------------------------
// Public API
// ---------------------------------------------------------------------------
/**
 * Generate a docker-compose YAML string from the given input.
 * The output is pure YAML with inline comments (no Shiki annotations).
 */
export function generateDockerCompose(input: GeneratorInput): string {
  const { device } = input;
  // Collect hardware-level devices, volumes, and env
  const hwDevices: DeviceMapping[] = [];
  const hwVolumes: VolumeMapping[] = [];
  const hwEnv: Record<string, string> = {};
  for (const hwId of input.selectedHardware) {
    const hw = hardwareMap.get(hwId);
    if (!hw) continue;
    // Skip GPU device mapping for tensorrt images (it uses deploy instead)
    // NOTE(review): this compares device.imageTag while buildDeploy compares
    // device.id against "stable-tensorrt" — confirm both checks target the
    // same device definition.
    if (hw.id === "gpu" && device.imageTag === "stable-tensorrt") continue;
    hwDevices.push(...(hw.devices ?? []));
    hwVolumes.push(...(hw.volumes ?? []));
    Object.assign(hwEnv, hw.env ?? {});
  }
  // Assemble the file section by section; empty sections emit no lines.
  const lines: string[] = [
    "services:",
    "  frigate:",
    "    container_name: frigate",
    "    privileged: true # This may not be necessary for all setups",
    "    restart: unless-stopped",
    "    stop_grace_period: 30s # Allow enough time to shut down the various services",
    ...buildImage(device),
    `    shm_size: "${input.shmSize || "512mb"}" # Update for your cameras based on SHM calculation`,
    ...buildRuntime(device),
    ...buildDeploy(device, input),
    ...buildExtraHosts(device),
    ...buildSecurityOpt(device),
    ...buildDevices(device, hwDevices),
    ...buildVolumes(device, hwVolumes, input.configPath, input.mediaPath),
    ...buildPorts(input.enabledPorts),
    ...buildEnvironment(device, hwEnv, input.rtspPassword, input.timezone),
  ];
  return lines.join("\n");
}

View File

@ -0,0 +1,195 @@
import { useState, useCallback, useMemo } from "react";
import { deviceMap, hardwareMap, portMap } from "../config";
import { generateDockerCompose } from "../generator";
import type { GeneratorInput } from "../generator";
/**
 * Main hook that holds all form state and generates the Docker Compose output.
 * Configuration is loaded synchronously from build-time generated .ts files.
 */
export function useConfigGenerator() {
  // --- Core selections -----------------------------------------------------
  const [deviceId, setDeviceId] = useState("stable");

  // Hardware checkboxes, seeded from the default device's auto-enabled list.
  const [hardwareEnabled, setHardwareEnabled] = useState<Record<string, boolean>>(() => {
    const defaultDevice = deviceMap.get("stable");
    const initial: Record<string, boolean> = {};
    if (defaultDevice) {
      for (const hwId of defaultDevice.autoHardware) {
        initial[hwId] = true;
      }
    }
    return initial;
  });

  // Port checkboxes, seeded from each port's defaultEnabled flag.
  const [portEnabled, setPortEnabled] = useState<Record<string, boolean>>(() => {
    const initial: Record<string, boolean> = {};
    for (const p of portMap.values()) {
      initial[p.id] = p.defaultEnabled;
    }
    return initial;
  });

  // --- Free-form fields ----------------------------------------------------
  const [nvidiaGpuCount, setNvidiaGpuCount] = useState("");
  const [nvidiaGpuDeviceId, setNvidiaGpuDeviceId] = useState("");
  const [configPath, setConfigPath] = useState("");
  const [mediaPath, setMediaPath] = useState("");
  const [rtspPassword, setRtspPassword] = useState("");
  const [timezone, setTimezone] = useState("");
  const [shmSize, setShmSize] = useState("512mb");

  // --- Validation flags ----------------------------------------------------
  const [shmSizeError, setShmSizeError] = useState(false);
  const [gpuDeviceIdError, setGpuDeviceIdError] = useState(false);
  const [configPathError, setConfigPathError] = useState(false);
  const [mediaPathError, setMediaPathError] = useState(false);

  // NOTE(review): the non-null assertion assumes deviceId is always a key of
  // deviceMap — holds as long as selectDevice is the only writer; confirm.
  const device = useMemo(() => deviceMap.get(deviceId)!, [deviceId]);

  // Switch devices; resets hardware and NVIDIA state to the new device's defaults.
  const selectDevice = useCallback((id: string) => {
    const newDevice = deviceMap.get(id);
    if (!newDevice) return;
    setDeviceId(id);
    setHardwareEnabled(() => {
      const next: Record<string, boolean> = {};
      for (const hwId of newDevice.autoHardware) {
        next[hwId] = true;
      }
      return next;
    });
    setNvidiaGpuCount("");
    setNvidiaGpuDeviceId("");
    setGpuDeviceIdError(false);
  }, []);

  const toggleHardware = useCallback((hwId: string) => {
    setHardwareEnabled((prev) => ({ ...prev, [hwId]: !prev[hwId] }));
  }, []);

  // Locked ports can never be toggled off.
  const togglePort = useCallback((portId: string) => {
    const port = portMap.get(portId);
    if (port?.locked) return;
    setPortEnabled((prev) => ({ ...prev, [portId]: !prev[portId] }));
  }, []);

  // A hardware option is disabled when the current device lists it in disabledWhen.
  const isHardwareDisabled = useCallback(
    (hwId: string): boolean => {
      const hw = hardwareMap.get(hwId);
      if (!hw) return false;
      return hw.disabledWhen?.includes(deviceId) ?? false;
    },
    [deviceId]
  );

  // Empty is valid (caller falls back to a default); otherwise number + unit.
  const validateShmSize = useCallback((value: string): boolean => {
    if (!value) return true;
    return /^\d+(\.\d+)?[bkmgBKMG]{1,2}$/.test(value);
  }, []);

  // Letters, digits, underscore, hyphen, slash, and dot only.
  // NOTE(review): the character class contains a duplicate "/" — harmless,
  // but likely meant [a-zA-Z0-9_\-/.].
  const validatePath = useCallback((value: string): boolean => {
    if (!value) return true;
    return /^[a-zA-Z0-9_\-/./]+$/.test(value);
  }, []);

  const handleShmSizeChange = useCallback(
    (value: string) => {
      // Strip disallowed characters first, then validate what remains.
      const filtered = value.replace(/[^0-9.bkmgBKMG]/g, "");
      const valid = validateShmSize(filtered);
      setShmSize(filtered);
      setShmSizeError(!valid && filtered !== "");
    },
    [validateShmSize]
  );

  const handleConfigPathChange = useCallback(
    (value: string) => {
      const filtered = value.replace(/[^a-zA-Z0-9_\-/./]/g, "");
      const valid = validatePath(filtered);
      setConfigPath(filtered);
      setConfigPathError(!valid && filtered !== "");
    },
    [validatePath]
  );

  const handleMediaPathChange = useCallback(
    (value: string) => {
      const filtered = value.replace(/[^a-zA-Z0-9_\-/./]/g, "");
      const valid = validatePath(filtered);
      setMediaPath(filtered);
      setMediaPathError(!valid && filtered !== "");
    },
    [validatePath]
  );

  const handleNvidiaGpuCountChange = useCallback((value: string) => {
    // Only allow digits
    // NOTE(review): no digit filtering is actually applied here — confirm
    // whether non-numeric values are expected (the generator treats "" as "all").
    setNvidiaGpuCount(value);
    if (value === "") {
      // Clearing the count also clears the device-id field and its error.
      setNvidiaGpuDeviceId("");
      setGpuDeviceIdError(false);
    } else {
      setGpuDeviceIdError(false);
    }
  }, []);

  const handleNvidiaGpuDeviceIdChange = useCallback((value: string) => {
    setNvidiaGpuDeviceId(value.trim());
    setGpuDeviceIdError(false);
  }, []);

  // Pre-render the enabled ports as YAML list lines for the generator.
  const enabledPortLines = useMemo(() => {
    const lines: string[] = [];
    for (const [id, enabled] of Object.entries(portEnabled)) {
      if (!enabled) continue;
      const p = portMap.get(id);
      if (!p) continue;
      const proto = p.protocol && p.protocol !== "tcp" ? `/${p.protocol}` : "";
      const comment = p.description ? ` # ${p.description}` : "";
      lines.push(`      - "${p.host}:${p.container}${proto}"${comment}`);
    }
    return lines;
  }, [portEnabled]);

  // Hardware ids that are both checked and not disabled for the current device.
  const selectedHardwareIds = useMemo(() => {
    return Object.entries(hardwareEnabled)
      .filter(([id, enabled]) => {
        if (!enabled) return false;
        const hw = hardwareMap.get(id);
        if (!hw) return false;
        if (hw.disabledWhen?.includes(deviceId)) return false;
        return true;
      })
      .map(([id]) => id);
  }, [hardwareEnabled, deviceId]);

  // Regenerate the YAML whenever any input changes; empty fields fall back
  // to sensible defaults (placeholder paths, browser timezone, 512mb shm).
  const generatedYaml = useMemo(() => {
    const input: GeneratorInput = {
      device,
      selectedHardware: selectedHardwareIds,
      enabledPorts: enabledPortLines,
      configPath: configPath || "/path/to/your/config",
      mediaPath: mediaPath || "/path/to/your/storage",
      rtspPassword,
      timezone: timezone || Intl.DateTimeFormat().resolvedOptions().timeZone || "Etc/UTC",
      shmSize: shmSize || "512mb",
      nvidiaGpuCount,
      nvidiaGpuDeviceId,
    };
    return generateDockerCompose(input);
  }, [
    device, selectedHardwareIds, enabledPortLines,
    configPath, mediaPath, rtspPassword, timezone, shmSize,
    nvidiaGpuCount, nvidiaGpuDeviceId,
  ]);

  // True when the device itself maps devices or any hardware option is on.
  const hasAnyHardware = selectedHardwareIds.length > 0 || !!device?.devices?.length;

  return {
    deviceId, device, hardwareEnabled, portEnabled,
    nvidiaGpuCount, nvidiaGpuDeviceId,
    configPath, mediaPath, rtspPassword, timezone, shmSize,
    shmSizeError, gpuDeviceIdError, configPathError, mediaPathError,
    hasAnyHardware, generatedYaml,
    selectDevice, toggleHardware, togglePort,
    handleShmSizeChange, handleConfigPathChange, handleMediaPathChange,
    handleNvidiaGpuCountChange, handleNvidiaGpuDeviceIdChange,
    setRtspPassword, setTimezone, isHardwareDisabled,
  };
}

View File

@ -0,0 +1 @@
// Re-export the generator component as this directory's default export.
export { default } from "./DockerComposeGenerator";

View File

@ -0,0 +1,381 @@
/* ===================================================================
Docker Compose Generator styles
Uses Docusaurus / Infima CSS variables for theme compatibility.
=================================================================== */
.generator {
margin: 2rem 0;
}
.card {
background: var(--ifm-background-surface-color);
border: 1px solid var(--ifm-color-emphasis-400);
border-radius: 12px;
padding: 2rem;
box-shadow: var(--ifm-global-shadow-lw);
}
[data-theme="light"] .card {
background: var(--ifm-color-emphasis-100);
border: 1px solid var(--ifm-color-emphasis-300);
}
/* --- Form sections --- */
.formSection {
margin-bottom: 1.5rem;
padding-bottom: 1.5rem;
border-bottom: 1px solid var(--ifm-color-emphasis-400);
}
.formSection:last-child {
border-bottom: none;
margin-bottom: 0;
padding-bottom: 0;
}
.formSection h4 {
margin: 0 0 1rem 0;
color: var(--ifm-font-color-base);
font-size: 1.1rem;
font-weight: var(--ifm-font-weight-semibold);
}
/* --- Form controls --- */
.formGroup {
margin-bottom: 1rem;
}
.formGroup:last-child {
margin-bottom: 0;
}
.label {
display: block;
margin-bottom: 0.25rem;
color: var(--ifm-font-color-base);
font-weight: var(--ifm-font-weight-semibold);
font-size: 0.9rem;
}
.input {
width: 100%;
padding: 0.5rem 0.75rem;
border: 1px solid var(--ifm-color-emphasis-400);
border-radius: 6px;
background: var(--ifm-background-color);
color: var(--ifm-font-color-base);
font-size: 0.95rem;
transition: border-color 0.2s, box-shadow 0.2s;
}
[data-theme="light"] .input {
background: #fff;
border: 1px solid #d0d7de;
}
.input:focus {
outline: none;
border-color: var(--ifm-color-primary);
box-shadow: 0 0 0 3px var(--ifm-color-primary-lightest);
}
[data-theme="dark"] .input {
border-color: var(--ifm-color-emphasis-300);
}
.inputError {
border-color: #e74c3c;
animation: shake 0.3s ease-in-out;
}
@keyframes shake {
0%,
100% {
transform: translateX(0);
}
25% {
transform: translateX(-5px);
}
75% {
transform: translateX(5px);
}
}
/* --- Select dropdown --- */
.select {
cursor: pointer;
appearance: none;
-moz-appearance: none;
-webkit-appearance: none;
background: var(--ifm-background-color)
url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' viewBox='0 0 12 12'%3E%3Cpath fill='%23666' d='M6 8L1 3h10z'/%3E%3C/svg%3E")
no-repeat right 0.75rem center / 12px 12px;
padding-right: 2rem;
}
[data-theme="light"] .select {
background: #fff
url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' viewBox='0 0 12 12'%3E%3Cpath fill='%23555' d='M6 8L1 3h10z'/%3E%3C/svg%3E")
no-repeat right 0.75rem center / 12px 12px;
}
.helpText {
margin: 0.5rem 0 0 0;
font-size: 0.85rem;
color: var(--ifm-font-color-secondary);
line-height: 1.5;
}
.helpText a {
color: var(--ifm-color-primary);
}
/* --- Device grid --- */
.deviceGrid {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(130px, 1fr));
gap: 0.75rem;
margin-top: 0.5rem;
}
.deviceCard {
padding: 0.75rem;
border: 2px solid var(--ifm-color-emphasis-400);
border-radius: 12px;
cursor: pointer;
transition: all 0.2s;
text-align: center;
background: var(--ifm-background-color);
display: flex;
flex-direction: column;
align-items: center;
}
[data-theme="light"] .deviceCard {
border: 2px solid #d0d7de;
background: #fff;
}
.deviceCard:hover {
border-color: var(--ifm-color-primary);
background: var(--ifm-color-emphasis-100);
transform: translateY(-2px);
}
.deviceCardActive {
border-color: var(--ifm-color-primary);
background: var(--ifm-color-primary-lightest);
box-shadow: 0 0 0 1px var(--ifm-color-primary);
}
[data-theme="light"] .deviceCardActive {
background: color-mix(in srgb, var(--ifm-color-primary) 12%, #fff);
}
[data-theme="dark"] .deviceCardActive {
background: color-mix(in srgb, var(--ifm-color-primary) 25%, #1b1b1b);
}
[data-theme="dark"] .deviceCardActive .deviceName {
color: var(--ifm-color-primary-light);
}
[data-theme="dark"] .deviceCardActive .deviceDesc {
color: var(--ifm-color-primary-light);
opacity: 0.85;
}
.deviceIcon {
font-size: 2rem;
margin-bottom: 0.25rem;
height: 40px;
width: 50px;
display: flex;
align-items: center;
justify-content: center;
}
.deviceIconSvg {
margin-bottom: 0.25rem;
height: 40px;
width: 50px;
display: flex;
align-items: center;
justify-content: center;
overflow: visible;
/* Allow iconStyle width/height to override */
flex-shrink: 0;
}
.deviceIconSvg svg {
width: var(--svg-width, 100%);
height: var(--svg-height, 100%);
fill: var(--svg-fill, currentColor);
transform: var(--svg-transform, none);
}
.deviceIconImage {
margin-bottom: 0.25rem;
height: 40px;
width: 50px;
display: flex;
align-items: center;
justify-content: center;
}
.deviceIconImage img {
max-width: 100%;
max-height: 100%;
object-fit: contain;
}
.deviceName {
font-weight: var(--ifm-font-weight-semibold);
color: var(--ifm-font-color-base);
margin-bottom: 0.15rem;
font-size: 0.9rem;
}
.deviceDesc {
font-size: 0.75rem;
color: var(--ifm-font-color-secondary);
line-height: 1.3;
}
/* --- Checkbox grid --- */
.checkboxGrid {
display: grid;
grid-template-columns: repeat(2, 1fr);
gap: 0.5rem;
}
@media (max-width: 576px) {
.checkboxGrid {
grid-template-columns: 1fr;
}
}
.hardwareItem {
margin-bottom: 0;
}
.hardwareDescription {
margin: 0.15rem 0 0.4rem 1.6rem;
font-size: 0.8rem;
color: var(--ifm-font-color-secondary);
line-height: 1.5;
}
.hardwareDescription a {
color: var(--ifm-color-primary);
text-decoration: underline;
text-underline-offset: 2px;
}
.checkboxLabel {
display: flex;
align-items: center;
gap: 0.5rem;
cursor: pointer;
padding: 0.4rem 0.5rem;
border-radius: 6px;
transition: background-color 0.2s;
font-size: 0.9rem;
}
.checkboxLabel:hover {
background: var(--ifm-color-emphasis-100);
}
.checkboxLabel input[type="checkbox"] {
width: 1.1rem;
height: 1.1rem;
cursor: pointer;
flex-shrink: 0;
}
.checkboxLabel span {
color: var(--ifm-font-color-base);
}
.checkboxDisabled {
cursor: not-allowed;
}
.checkboxDisabled:hover {
background: transparent;
}
.checkboxDisabled input[type="checkbox"] {
cursor: not-allowed;
opacity: 0.5;
}
/* --- Form grid (side-by-side) --- */
.formGrid {
display: grid;
grid-template-columns: repeat(2, 1fr);
gap: 1rem;
}
@media (max-width: 576px) {
.formGrid {
grid-template-columns: 1fr;
}
}
.formGrid .formGroup {
margin-bottom: 0;
}
/* --- Port section --- */
.portSection {
margin-bottom: 0.75rem;
}
.warningBadge {
margin-left: auto;
color: #e67e22;
font-size: 0.85rem;
}
/* --- NVIDIA config --- */
.nvidiaConfig {
margin-top: 1rem;
margin-bottom: 1.5rem;
padding: 1rem;
background: var(--ifm-background-color);
border-radius: 8px;
border-left: 3px solid var(--ifm-color-primary);
}
[data-theme="light"] .nvidiaConfig {
background: #f6f8fa;
border-left: 3px solid var(--ifm-color-primary);
}
/* --- Result section --- */
.resultSection {
margin-top: 2rem;
}
.resultHeader {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 1rem;
}
.resultHeader h4 {
margin: 0;
color: var(--ifm-font-color-base);
}

View File

@ -5997,7 +5997,10 @@ paths:
tags:
- App
summary: Start debug replay
description: Start a debug replay session from camera recordings.
description:
Start a debug replay session from camera recordings. Returns
immediately while clip generation runs as a background job; subscribe
to the 'debug_replay' job_state WS topic to track progress.
operationId: start_debug_replay_debug_replay_start_post
requestBody:
required: true
@ -6006,12 +6009,16 @@ paths:
schema:
$ref: "#/components/schemas/DebugReplayStartBody"
responses:
"200":
"202":
description: Successful Response
content:
application/json:
schema:
$ref: "#/components/schemas/DebugReplayStartResponse"
"400":
description: Invalid camera, time range, or no recordings
"409":
description: A replay session is already active
"422":
description: Validation Error
content:
@ -6272,10 +6279,14 @@ components:
replay_camera:
type: string
title: Replay Camera
job_id:
type: string
title: Job Id
type: object
required:
- success
- replay_camera
- job_id
title: DebugReplayStartResponse
description: Response for starting a debug replay session.
DebugReplayStatusResponse:

View File

@ -10,6 +10,7 @@ from pydantic import BaseModel, Field
from frigate.api.auth import require_role
from frigate.api.defs.tags import Tags
from frigate.jobs.debug_replay import start_debug_replay_job
logger = logging.getLogger(__name__)
@ -29,10 +30,17 @@ class DebugReplayStartResponse(BaseModel):
success: bool
replay_camera: str
job_id: str
class DebugReplayStatusResponse(BaseModel):
"""Response for debug replay status."""
"""Response for debug replay status.
Returns only session-presence fields. Startup progress and error
details flow through the job_state WebSocket topic via the
debug_replay job (see frigate.jobs.debug_replay); the
Replay page subscribes there with useJobStatus("debug_replay").
"""
active: bool
replay_camera: str | None = None
@ -51,15 +59,32 @@ class DebugReplayStopResponse(BaseModel):
@router.post(
"/debug_replay/start",
response_model=DebugReplayStartResponse,
status_code=202,
responses={
400: {"description": "Invalid camera, time range, or no recordings"},
409: {"description": "A replay session is already active"},
},
dependencies=[Depends(require_role(["admin"]))],
summary="Start debug replay",
description="Start a debug replay session from camera recordings.",
description="Start a debug replay session from camera recordings. Returns "
"immediately while clip generation runs as a background job; subscribe "
"to the 'debug_replay' job_state WS topic to track progress.",
)
async def start_debug_replay(request: Request, body: DebugReplayStartBody):
"""Start a debug replay session."""
"""Start a debug replay session asynchronously."""
replay_manager = request.app.replay_manager
if replay_manager.active:
try:
job_id = await asyncio.to_thread(
start_debug_replay_job,
source_camera=body.camera,
start_ts=body.start_time,
end_ts=body.end_time,
frigate_config=request.app.frigate_config,
config_publisher=request.app.config_publisher,
replay_manager=replay_manager,
)
except RuntimeError:
return JSONResponse(
content={
"success": False,
@ -67,38 +92,23 @@ async def start_debug_replay(request: Request, body: DebugReplayStartBody):
},
status_code=409,
)
try:
replay_camera = await asyncio.to_thread(
replay_manager.start,
source_camera=body.camera,
start_ts=body.start_time,
end_ts=body.end_time,
frigate_config=request.app.frigate_config,
config_publisher=request.app.config_publisher,
)
except ValueError:
logger.exception("Invalid parameters for debug replay start request")
logger.exception("Rejected debug replay start request")
return JSONResponse(
content={
"success": False,
"message": "Invalid debug replay request parameters",
"message": "Invalid debug replay parameters",
},
status_code=400,
)
except RuntimeError:
logger.exception("Error while starting debug replay session")
return JSONResponse(
content={
"success": False,
"message": "An internal error occurred while starting debug replay",
},
status_code=500,
)
return DebugReplayStartResponse(
success=True,
replay_camera=replay_camera,
return JSONResponse(
content={
"success": True,
"replay_camera": replay_manager.replay_camera_name,
"job_id": job_id,
},
status_code=202,
)
@ -118,12 +128,16 @@ def get_debug_replay_status(request: Request):
if replay_manager.active and replay_camera:
frame_processor = request.app.detected_frames_processor
frame = frame_processor.get_current_frame(replay_camera)
frame = (
frame_processor.get_current_frame(replay_camera)
if frame_processor is not None
else None
)
if frame is not None:
frame_time = frame_processor.get_current_frame_time(replay_camera)
camera_config = request.app.frigate_config.cameras.get(replay_camera)
retry_interval = 10
retry_interval = 10.0
if camera_config is not None:
retry_interval = float(camera_config.ffmpeg.retry_interval or 10)

View File

@ -174,12 +174,10 @@ async def latest_frame(
}
quality_params = get_image_quality_params(extension.value, params.quality)
if camera_name in request.app.frigate_config.cameras:
camera_config = request.app.frigate_config.cameras.get(camera_name)
if camera_config is not None:
frame = frame_processor.get_current_frame(camera_name, draw_options)
retry_interval = float(
request.app.frigate_config.cameras.get(camera_name).ffmpeg.retry_interval
or 10
)
retry_interval = float(camera_config.ffmpeg.retry_interval or 10)
is_offline = False
if frame is None or datetime.now().timestamp() > (

View File

@ -25,8 +25,8 @@ class StatsConfig(FrigateBaseModel):
)
intel_gpu_device: Optional[str] = Field(
default=None,
title="SR-IOV device",
description="Device identifier used when treating Intel GPUs as SR-IOV to fix GPU stats.",
title="Intel GPU device",
description="PCI bus address or DRM device path (e.g. /dev/dri/card1) used to pin Intel GPU stats to a specific device when multiple are present.",
)

View File

@ -1,9 +1,13 @@
"""Debug replay camera management for replaying recordings with detection overlays."""
"""Debug replay camera management for replaying recordings with detection overlays.
The startup work (ffmpeg concat + camera config publish) lives in
frigate.jobs.debug_replay. This module owns only session presence
(active), session metadata, and post-session cleanup.
"""
import logging
import os
import shutil
import subprocess as sp
import threading
from ruamel.yaml import YAML
@ -21,7 +25,7 @@ from frigate.const import (
REPLAY_DIR,
THUMB_DIR,
)
from frigate.models import Recordings
from frigate.jobs.debug_replay import cancel_debug_replay_job, wait_for_runner
from frigate.util.camera_cleanup import cleanup_camera_db, cleanup_camera_files
from frigate.util.config import find_config_file
@ -29,7 +33,14 @@ logger = logging.getLogger(__name__)
class DebugReplayManager:
"""Manages a single debug replay session."""
"""Owns the lifecycle pointers for a single debug replay session.
A session exists from the moment mark_starting is called (synchronously,
inside the API handler) until clear_session runs (on success cleanup,
failure, or stop). The active property is the source of truth that the
status bar consumes; its lifetime is broader than the startup job's, which
only covers the preparing_clip / starting_camera window.
"""
def __init__(self) -> None:
self._lock = threading.Lock()
@ -41,144 +52,66 @@ class DebugReplayManager:
@property
def active(self) -> bool:
"""Whether a replay session is currently active."""
"""True from mark_starting until clear_session."""
return self.replay_camera_name is not None
def start(
def mark_starting(
self,
source_camera: str,
replay_camera_name: str,
start_ts: float,
end_ts: float,
frigate_config: FrigateConfig,
config_publisher: CameraConfigUpdatePublisher,
) -> str:
"""Start a debug replay session.
) -> None:
"""Synchronously claim the session before the job runner starts.
Args:
source_camera: Name of the source camera to replay
start_ts: Start timestamp
end_ts: End timestamp
frigate_config: Current Frigate configuration
config_publisher: Publisher for camera config updates
Returns:
The replay camera name
Raises:
ValueError: If a session is already active or parameters are invalid
RuntimeError: If clip generation fails
Called inside the API handler so the status bar sees active=True
immediately, before the worker thread does any ffmpeg work.
"""
with self._lock:
return self._start_locked(
source_camera, start_ts, end_ts, frigate_config, config_publisher
)
self.replay_camera_name = replay_camera_name
self.source_camera = source_camera
self.start_ts = start_ts
self.end_ts = end_ts
self.clip_path = None
def _start_locked(
def mark_session_ready(self, clip_path: str) -> None:
"""Record the on-disk clip path after the camera has been published."""
with self._lock:
self.clip_path = clip_path
def clear_session(self) -> None:
"""Reset session pointers without publishing camera removal.
Used by the job runner on failure paths. stop() does the camera
teardown plus this clear in one step.
"""
with self._lock:
self._clear_locked()
def _clear_locked(self) -> None:
self.replay_camera_name = None
self.source_camera = None
self.clip_path = None
self.start_ts = None
self.end_ts = None
def publish_camera(
self,
source_camera: str,
start_ts: float,
end_ts: float,
replay_name: str,
clip_path: str,
frigate_config: FrigateConfig,
config_publisher: CameraConfigUpdatePublisher,
) -> str:
if self.active:
raise ValueError("A replay session is already active")
) -> None:
"""Build the in-memory replay camera config and publish the add event.
if source_camera not in frigate_config.cameras:
raise ValueError(f"Camera '{source_camera}' not found")
if end_ts <= start_ts:
raise ValueError("End time must be after start time")
# Query recordings for the source camera in the time range
recordings = (
Recordings.select(
Recordings.path,
Recordings.start_time,
Recordings.end_time,
)
.where(
Recordings.start_time.between(start_ts, end_ts)
| Recordings.end_time.between(start_ts, end_ts)
| ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time))
)
.where(Recordings.camera == source_camera)
.order_by(Recordings.start_time.asc())
)
if not recordings.count():
raise ValueError(
f"No recordings found for camera '{source_camera}' in the specified time range"
)
# Create replay directory
os.makedirs(REPLAY_DIR, exist_ok=True)
# Generate replay camera name
replay_name = f"{REPLAY_CAMERA_PREFIX}{source_camera}"
# Build concat file for ffmpeg
concat_file = os.path.join(REPLAY_DIR, f"{replay_name}_concat.txt")
clip_path = os.path.join(REPLAY_DIR, f"{replay_name}.mp4")
with open(concat_file, "w") as f:
for recording in recordings:
f.write(f"file '{recording.path}'\n")
# Concatenate recordings into a single clip with -c copy (fast)
ffmpeg_cmd = [
frigate_config.ffmpeg.ffmpeg_path,
"-hide_banner",
"-y",
"-f",
"concat",
"-safe",
"0",
"-i",
concat_file,
"-c",
"copy",
"-movflags",
"+faststart",
clip_path,
]
logger.info(
"Generating replay clip for %s (%.1f - %.1f)",
source_camera,
start_ts,
end_ts,
)
try:
result = sp.run(
ffmpeg_cmd,
capture_output=True,
text=True,
timeout=120,
)
if result.returncode != 0:
logger.error("FFmpeg error: %s", result.stderr)
raise RuntimeError(
f"Failed to generate replay clip: {result.stderr[-500:]}"
)
except sp.TimeoutExpired:
raise RuntimeError("Clip generation timed out")
finally:
# Clean up concat file
if os.path.exists(concat_file):
os.remove(concat_file)
if not os.path.exists(clip_path):
raise RuntimeError("Clip file was not created")
# Build camera config dict for the replay camera
Called by the job runner during the starting_camera phase.
"""
source_config = frigate_config.cameras[source_camera]
camera_dict = self._build_camera_config_dict(
source_config, replay_name, clip_path
)
# Build an in-memory config with the replay camera added
config_file = find_config_file()
yaml_parser = YAML()
with open(config_file, "r") as f:
@ -191,75 +124,48 @@ class DebugReplayManager:
try:
new_config = FrigateConfig.parse_object(config_data)
except Exception as e:
raise RuntimeError(f"Failed to validate replay camera config: {e}")
# Update the running config
raise RuntimeError(f"Failed to validate replay camera config: {e}") from e
frigate_config.cameras[replay_name] = new_config.cameras[replay_name]
# Publish the add event
config_publisher.publish_update(
CameraConfigUpdateTopic(CameraConfigUpdateEnum.add, replay_name),
new_config.cameras[replay_name],
)
# Store session state
self.replay_camera_name = replay_name
self.source_camera = source_camera
self.clip_path = clip_path
self.start_ts = start_ts
self.end_ts = end_ts
logger.info("Debug replay started: %s -> %s", source_camera, replay_name)
return replay_name
def stop(
self,
frigate_config: FrigateConfig,
config_publisher: CameraConfigUpdatePublisher,
) -> None:
"""Stop the active replay session and clean up all artifacts.
"""Cancel any in-flight startup job and tear down the active session.
Args:
frigate_config: Current Frigate configuration
config_publisher: Publisher for camera config updates
Safe to call when no session is active (no-op with a warning).
"""
cancel_debug_replay_job()
wait_for_runner(timeout=2.0)
with self._lock:
self._stop_locked(frigate_config, config_publisher)
if not self.active:
logger.warning("No active replay session to stop")
return
def _stop_locked(
self,
frigate_config: FrigateConfig,
config_publisher: CameraConfigUpdatePublisher,
) -> None:
if not self.active:
logger.warning("No active replay session to stop")
return
replay_name = self.replay_camera_name
replay_name = self.replay_camera_name
# Only publish remove if the camera was actually added to the live
# config (i.e. the runner reached the starting_camera phase).
if replay_name is not None and replay_name in frigate_config.cameras:
config_publisher.publish_update(
CameraConfigUpdateTopic(CameraConfigUpdateEnum.remove, replay_name),
frigate_config.cameras[replay_name],
)
# Publish remove event so subscribers stop and remove from their config
if replay_name in frigate_config.cameras:
config_publisher.publish_update(
CameraConfigUpdateTopic(CameraConfigUpdateEnum.remove, replay_name),
frigate_config.cameras[replay_name],
)
# Do NOT pop here — let subscribers handle removal from the shared
# config dict when they process the ZMQ message to avoid race conditions
if replay_name is not None:
self._cleanup_db(replay_name)
self._cleanup_files(replay_name)
# Defensive DB cleanup
self._cleanup_db(replay_name)
self._clear_locked()
# Remove filesystem artifacts
self._cleanup_files(replay_name)
# Reset state
self.replay_camera_name = None
self.source_camera = None
self.clip_path = None
self.start_ts = None
self.end_ts = None
logger.info("Debug replay stopped and cleaned up: %s", replay_name)
logger.info("Debug replay stopped and cleaned up: %s", replay_name)
def _build_camera_config_dict(
self,
@ -267,16 +173,7 @@ class DebugReplayManager:
replay_name: str,
clip_path: str,
) -> dict:
"""Build a camera config dictionary for the replay camera.
Args:
source_config: Source camera's CameraConfig
replay_name: Name for the replay camera
clip_path: Path to the replay clip file
Returns:
Camera config as a dictionary
"""
"""Build a camera config dictionary for the replay camera."""
# Extract detect config (exclude computed fields)
detect_dict = source_config.detect.model_dump(
exclude={"min_initialized", "max_disappeared", "enabled_in_config"}
@ -311,7 +208,6 @@ class DebugReplayManager:
zone_dump = zone_config.model_dump(
exclude={"contour", "color"}, exclude_defaults=True
)
# Always include required fields
zone_dump.setdefault("coordinates", zone_config.coordinates)
zones_dict[zone_name] = zone_dump

View File

@ -0,0 +1,415 @@
import logging
import os
import subprocess
import threading
import urllib.request
from functools import partial
from typing import Dict, List, Optional, Tuple
import cv2
import numpy as np
from pydantic import ConfigDict, Field
from typing_extensions import Literal
from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import (
BaseDetectorConfig,
)
from frigate.object_detection.util import RequestStore, ResponseStore
logger = logging.getLogger(__name__)
# ----------------- Utility Functions ----------------- #
def preprocess_tensor(image: np.ndarray, model_w: int, model_h: int) -> np.ndarray:
    """
    Letterbox-resize an image to (model_h, model_w) keeping aspect ratio.

    The frame is scaled to fit inside the model dimensions and centered on a
    canvas filled with 114 (the conventional YOLO padding value). A leading
    singleton batch dimension, if present, is dropped first.
    """
    # Accept a (1, H, W, 3) batch wrapper and reduce it to (H, W, 3).
    if image.ndim == 4 and image.shape[0] == 1:
        image = image[0]

    src_h, src_w = image.shape[:2]
    ratio = min(model_w / src_w, model_h / src_h)
    dst_w = int(src_w * ratio)
    dst_h = int(src_h * ratio)

    scaled = cv2.resize(image, (dst_w, dst_h), interpolation=cv2.INTER_CUBIC)

    canvas = np.full((model_h, model_w, 3), 114, dtype=image.dtype)
    left = (model_w - dst_w) // 2
    top = (model_h - dst_h) // 2
    canvas[top : top + dst_h, left : left + dst_w] = scaled
    return canvas
# ----------------- Global Constants ----------------- #
DETECTOR_KEY = "hailo10h"
ARCH = None
H10H_DEFAULT_MODEL = "yolov6n.hef"
H10H_DEFAULT_URL = "https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/ModelZoo/Compiled/v5.2.0/hailo10h/yolov6n.hef"
def detect_hailo_arch():
    """Query `hailortcli` for the attached device architecture.

    Returns "hailo10h" when a Hailo-10H device is identified, otherwise None
    (CLI missing, non-zero exit, or an unrecognized architecture).
    """
    try:
        identify = subprocess.run(
            ["hailortcli", "fw-control", "identify"],
            capture_output=True,
            text=True,
        )
    except Exception as e:
        logger.error(f"Inference error: {e}")
        return None

    if identify.returncode != 0:
        logger.error(f"Inference error: {identify.stderr}")
        return None

    for line in identify.stdout.split("\n"):
        if "Device Architecture" in line and "HAILO10H" in line:
            return "hailo10h"

    logger.error("Inference error: Could not determine Hailo architecture.")
    return None
# ----------------- HailoAsyncInference Class ----------------- #
class HailoAsyncInference:
    """Background inference loop around a HailoRT VDevice.

    Frames arrive via ``input_store`` (RequestStore) and results are pushed
    to ``output_store`` (ResponseStore) keyed by request id. ``run`` is
    intended to execute on a dedicated thread.
    """

    def __init__(
        self,
        hef_path: str,
        input_store: RequestStore,
        output_store: ResponseStore,
        batch_size: int = 1,
        input_type: Optional[str] = None,
        output_type: Optional[Dict[str, str]] = None,
        send_original_frame: bool = False,
    ) -> None:
        # when importing hailo it activates the driver
        # which leaves processes running even though it may not be used.
        try:
            from hailo_platform import (
                HEF,
                FormatType,
                HailoSchedulingAlgorithm,
                VDevice,
            )
        except ModuleNotFoundError:
            # Import is deferred on purpose; if the SDK is absent the
            # VDevice reference below will fail loudly anyway.
            pass

        self.input_store = input_store
        self.output_store = output_store

        params = VDevice.create_params()
        params.scheduling_algorithm = HailoSchedulingAlgorithm.ROUND_ROBIN
        self.hef = HEF(hef_path)
        self.target = VDevice(params)
        self.infer_model = self.target.create_infer_model(hef_path)
        self.infer_model.set_batch_size(batch_size)
        if input_type is not None:
            self.infer_model.input().set_format_type(getattr(FormatType, input_type))
        if output_type is not None:
            # BUGFIX: the loop variables must not shadow the output_type
            # parameter; the previous code rebound it to the last format
            # string, corrupting self.output_type for _create_bindings.
            for output_name, format_name in output_type.items():
                self.infer_model.output(output_name).set_format_type(
                    getattr(FormatType, format_name)
                )
        # Keep the full name -> format mapping (or None) for buffer creation.
        self.output_type = output_type
        self.send_original_frame = send_original_frame

    def callback(
        self,
        completion_info,
        bindings_list: List,
        input_batch: List,
        request_ids: List[int],
    ):
        """HailoRT completion callback: push each result to the response store."""
        if completion_info.exception:
            logger.error(f"Inference error: {completion_info.exception}")
        else:
            for i, bindings in enumerate(bindings_list):
                # Single-output models return the buffer directly; multi-output
                # models return a dict of name -> batched buffer.
                if len(bindings._output_names) == 1:
                    result = bindings.output().get_buffer()
                else:
                    result = {
                        name: np.expand_dims(bindings.output(name).get_buffer(), axis=0)
                        for name in bindings._output_names
                    }
                self.output_store.put(request_ids[i], (input_batch[i], result))

    def _create_bindings(self, configured_infer_model) -> object:
        """Allocate output buffers (dtype from HEF metadata or output_type)."""
        if self.output_type is None:
            output_buffers = {
                output_info.name: np.empty(
                    self.infer_model.output(output_info.name).shape,
                    dtype=getattr(
                        np, str(output_info.format.type).split(".")[1].lower()
                    ),
                )
                for output_info in self.hef.get_output_vstream_infos()
            }
        else:
            output_buffers = {
                name: np.empty(
                    self.infer_model.output(name).shape,
                    dtype=getattr(np, self.output_type[name].lower()),
                )
                for name in self.output_type
            }
        return configured_infer_model.create_bindings(output_buffers=output_buffers)

    def get_input_shape(self) -> Tuple[int, ...]:
        """Shape of the model's first input vstream."""
        return self.hef.get_input_vstream_infos()[0].shape

    def run(self) -> None:
        """Consume requests until the store yields None, then drain."""
        job = None
        with self.infer_model.configure() as configured_infer_model:
            while True:
                batch_data = self.input_store.get()
                if batch_data is None:
                    # Sentinel: shut down the loop.
                    break
                request_id, frame_data = batch_data
                preprocessed_batch = [frame_data]
                request_ids = [request_id]
                input_batch = preprocessed_batch  # non-send_original_frame mode
                bindings_list = []
                for frame in preprocessed_batch:
                    bindings = self._create_bindings(configured_infer_model)
                    bindings.input().set_buffer(np.array(frame))
                    bindings_list.append(bindings)
                configured_infer_model.wait_for_async_ready(timeout_ms=10000)
                job = configured_infer_model.run_async(
                    bindings_list,
                    partial(
                        self.callback,
                        input_batch=input_batch,
                        request_ids=request_ids,
                        bindings_list=bindings_list,
                    ),
                )
            # Wait briefly for the last in-flight job before tearing down.
            if job is not None:
                job.wait(100)
# ----------------- HailoDetector Class ----------------- #
class HailoDetector(DetectionApi):
    """Object detection on Hailo-10H hardware via HailoRT.

    Resolves the HEF model (explicit path, URL download, or cached default),
    starts a background HailoAsyncInference thread, and exchanges frames and
    results with it through RequestStore/ResponseStore.
    """

    type_key = DETECTOR_KEY

    def __init__(self, detector_config: "HailoDetectorConfig"):
        global ARCH
        # Probe the device once so the module-level ARCH reflects reality.
        ARCH = detect_hailo_arch()
        self.cache_dir = MODEL_CACHE_DIR
        self.device_type = detector_config.device
        # Model metadata fields are optional on the config object.
        model = detector_config.model
        self.model_height = getattr(model, "height", None)
        self.model_width = getattr(model, "width", None)
        self.model_type = getattr(model, "model_type", None)
        self.tensor_format = getattr(model, "input_tensor", None)
        self.pixel_format = getattr(model, "input_pixel_format", None)
        self.input_dtype = getattr(model, "input_dtype", None)
        self.output_type = "FLOAT32"

        self.set_path_and_url(detector_config.model.path)
        self.working_model_path = self.check_and_prepare()

        self.batch_size = 1
        self.input_store = RequestStore()
        self.response_store = ResponseStore()
        try:
            logger.debug(f"[INIT] Loading HEF model from {self.working_model_path}")
            self.inference_engine = HailoAsyncInference(
                self.working_model_path,
                self.input_store,
                self.response_store,
                self.batch_size,
            )
            self.input_shape = self.inference_engine.get_input_shape()
            logger.debug(f"[INIT] Model input shape: {self.input_shape}")
            self.inference_thread = threading.Thread(
                target=self.inference_engine.run, daemon=True
            )
            self.inference_thread.start()
        except Exception as e:
            logger.error(f"[INIT] Failed to initialize HailoAsyncInference: {e}")
            raise

    def set_path_and_url(self, path: Optional[str] = None):
        """Split the configured model reference into local path vs URL."""
        if not path:
            self.model_path = None
            self.url = None
            return
        if self.is_url(path):
            self.url = path
            self.model_path = None
        else:
            self.model_path = path
            self.url = None

    def is_url(self, url: str) -> bool:
        """Heuristic URL check; also accepts scheme-less 'www.' prefixes."""
        return (
            url.startswith("http://")
            or url.startswith("https://")
            or url.startswith("www.")
        )

    @staticmethod
    def extract_model_name(path: Optional[str] = None, url: Optional[str] = None) -> str:
        """Return the .hef basename from path or URL, else the default model name."""
        if path and path.endswith(".hef"):
            return os.path.basename(path)
        elif url and url.endswith(".hef"):
            return os.path.basename(url)
        else:
            return H10H_DEFAULT_MODEL

    @staticmethod
    def download_model(url: str, destination: str):
        """Download a .hef model to destination.

        Raises:
            ValueError: if the URL does not point at a .hef file.
            RuntimeError: if the download itself fails.
        """
        if not url.endswith(".hef"):
            raise ValueError("Invalid model URL. Only .hef files are supported.")
        try:
            urllib.request.urlretrieve(url, destination)
            logger.debug(f"Downloaded model to {destination}")
        except Exception as e:
            # Chain the original error so download failures stay debuggable.
            raise RuntimeError(f"Failed to download model from {url}: {str(e)}") from e

    def check_and_prepare(self) -> str:
        """Resolve the model to a usable local file, downloading if needed.

        Returns:
            The path of the model file to load.

        Raises:
            FileNotFoundError: if an explicit local path does not exist.
        """
        if not os.path.exists(self.cache_dir):
            os.makedirs(self.cache_dir)
        model_name = self.extract_model_name(self.model_path, self.url)
        cached_model_path = os.path.join(self.cache_dir, model_name)
        if not self.model_path and not self.url:
            # Nothing configured: use the cached default or fetch it.
            if os.path.exists(cached_model_path):
                logger.debug(f"Model found in cache: {cached_model_path}")
                return cached_model_path
            else:
                logger.debug(f"Downloading default model: {model_name}")
                self.download_model(H10H_DEFAULT_URL, cached_model_path)
        elif self.url:
            logger.debug(f"Downloading model from URL: {self.url}")
            self.download_model(self.url, cached_model_path)
        elif self.model_path:
            if os.path.exists(self.model_path):
                logger.debug(f"Using existing model at: {self.model_path}")
                return self.model_path
            else:
                raise FileNotFoundError(f"Model file not found at: {self.model_path}")
        return cached_model_path

    def detect_raw(self, tensor_input):
        """Run one inference and return a fixed (20, 6) float32 array.

        Rows are [class_id, score, x1, y1, x2, y2], truncated or zero-padded
        to exactly 20 entries. Returns zeros on inference timeout; raises
        RuntimeError if the worker thread has died.
        """
        tensor_input = self.preprocess(tensor_input)
        if isinstance(tensor_input, np.ndarray) and len(tensor_input.shape) == 3:
            tensor_input = np.expand_dims(tensor_input, axis=0)

        request_id = self.input_store.put(tensor_input)
        try:
            _, infer_results = self.response_store.get(request_id, timeout=1.0)
        except TimeoutError:
            logger.error(
                f"Timeout waiting for inference results for request {request_id}"
            )
            if not self.inference_thread.is_alive():
                raise RuntimeError(
                    "HailoRT inference thread has stopped, restart required."
                )
            return np.zeros((20, 6), dtype=np.float32)

        if isinstance(infer_results, list) and len(infer_results) == 1:
            infer_results = infer_results[0]

        threshold = 0.4
        all_detections = []
        # NMS-style output: one array of [x1, y1, x2, y2, score, ...] rows per class.
        for class_id, detection_set in enumerate(infer_results):
            if not isinstance(detection_set, np.ndarray) or detection_set.size == 0:
                continue
            for det in detection_set:
                if det.shape[0] < 5:
                    continue
                score = float(det[4])
                if score < threshold:
                    continue
                all_detections.append([class_id, score, det[0], det[1], det[2], det[3]])

        if len(all_detections) == 0:
            detections_array = np.zeros((20, 6), dtype=np.float32)
        else:
            detections_array = np.array(all_detections, dtype=np.float32)
            if detections_array.shape[0] > 20:
                detections_array = detections_array[:20, :]
            elif detections_array.shape[0] < 20:
                pad = np.zeros((20 - detections_array.shape[0], 6), dtype=np.float32)
                detections_array = np.vstack((detections_array, pad))
        return detections_array

    def preprocess(self, image):
        """Letterbox an ndarray frame to the model input size and add a batch dim."""
        if isinstance(image, np.ndarray):
            processed = preprocess_tensor(
                image, self.input_shape[1], self.input_shape[0]
            )
            return np.expand_dims(processed, axis=0)
        else:
            raise ValueError("Unsupported image format for preprocessing")

    def close(self):
        """Properly shuts down the inference engine and releases the VDevice."""
        logger.debug("[CLOSE] Closing HailoDetector")
        try:
            if hasattr(self, "inference_engine"):
                if hasattr(self.inference_engine, "target"):
                    self.inference_engine.target.release()
                    logger.debug("Hailo VDevice released successfully")
        except Exception as e:
            logger.error(f"Failed to close Hailo device: {e}")
            raise

    def __del__(self):
        """Destructor: best-effort cleanup; must never raise during GC."""
        try:
            self.close()
        except Exception:
            # Raising from __del__ only produces an unraisable-exception
            # warning; close() has already logged the failure.
            pass
# ----------------- HailoDetectorConfig Class ----------------- #
class HailoDetectorConfig(BaseDetectorConfig):
    """Hailo10H detector using HEF models and the HailoRT SDK for inference on Hailo hardware."""

    # Pydantic model metadata; the title surfaces in generated config docs/UI.
    model_config = ConfigDict(
        title="Hailo-10H",
    )
    # Discriminator value selecting this detector implementation ("hailo10h").
    type: Literal[DETECTOR_KEY]
    device: str = Field(
        default="PCIe",
        title="Device Type",
        description="The device to use for Hailo inference (e.g. 'PCIe', 'M.2').",
    )

View File

@ -0,0 +1,386 @@
"""Debug replay startup job: ffmpeg concat + camera config publish.
The runner orchestrates the async portion of starting a debug replay
session. The DebugReplayManager (in frigate.debug_replay) owns session
presence so the status bar can keep reading a single `active` flag from
/debug_replay/status for the entire session window, which is broader
than this job's lifetime.
"""
import logging
import os
import subprocess as sp
import threading
import time
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Optional, cast
from peewee import ModelSelect
from frigate.config import FrigateConfig
from frigate.config.camera.updater import CameraConfigUpdatePublisher
from frigate.const import REPLAY_CAMERA_PREFIX, REPLAY_DIR
from frigate.jobs.export import JobStatePublisher
from frigate.jobs.job import Job
from frigate.jobs.manager import job_is_running, set_current_job
from frigate.models import Recordings
from frigate.types import JobStatusTypesEnum
from frigate.util.ffmpeg import run_ffmpeg_with_progress
if TYPE_CHECKING:
from frigate.debug_replay import DebugReplayManager
logger = logging.getLogger(__name__)
# Coalesce frequent ffmpeg progress callbacks so the WS isn't flooded.
PROGRESS_BROADCAST_MIN_INTERVAL = 1.0

# Job type identifier used with job_is_running/set_current_job bookkeeping.
JOB_TYPE = "debug_replay"
# Step names surfaced to the frontend via DebugReplayJob.to_dict().
STEP_PREPARING_CLIP = "preparing_clip"
STEP_STARTING_CAMERA = "starting_camera"

# Module-level handle to the in-flight runner, guarded by _runner_lock so the
# cancel/wait helpers can reach it from other threads.
_active_runner: Optional["DebugReplayJobRunner"] = None
_runner_lock = threading.Lock()
def _set_active_runner(runner: Optional["DebugReplayJobRunner"]) -> None:
    # Swap the module-level runner pointer under the lock; None clears it.
    global _active_runner
    with _runner_lock:
        _active_runner = runner
def get_active_runner() -> Optional["DebugReplayJobRunner"]:
    # Snapshot of the in-flight runner, or None when no startup job is running.
    with _runner_lock:
        return _active_runner
@dataclass
class DebugReplayJob(Job):
    """Job state for a debug replay startup."""

    job_type: str = JOB_TYPE
    source_camera: str = ""
    replay_camera_name: str = ""
    start_ts: float = 0.0
    end_ts: float = 0.0
    current_step: Optional[str] = None
    progress_percent: float = 0.0

    def to_dict(self) -> dict[str, Any]:
        """Whitelisted payload for the job_state WS topic.

        Replay-specific fields land in results so the frontend's
        generic Job<TResults> type can be parameterised cleanly.
        """
        results = {
            "current_step": self.current_step,
            "progress_percent": self.progress_percent,
            "source_camera": self.source_camera,
            "replay_camera_name": self.replay_camera_name,
            "start_ts": self.start_ts,
            "end_ts": self.end_ts,
        }
        return {
            "id": self.id,
            "job_type": self.job_type,
            "status": self.status,
            "start_time": self.start_time,
            "end_time": self.end_time,
            "error_message": self.error_message,
            "results": results,
        }
def query_recordings(source_camera: str, start_ts: float, end_ts: float) -> ModelSelect:
    """Return the Recordings query for the time range.

    Module-level so tests can patch it without instantiating a runner.
    """
    # A segment overlaps the window if it starts inside it, ends inside it,
    # or fully spans it.
    overlaps_window = (
        Recordings.start_time.between(start_ts, end_ts)
        | Recordings.end_time.between(start_ts, end_ts)
        | ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time))
    )
    matching = (
        Recordings.select(
            Recordings.path,
            Recordings.start_time,
            Recordings.end_time,
        )
        .where(overlaps_window)
        .where(Recordings.camera == source_camera)
        .order_by(Recordings.start_time.asc())
    )
    return cast(ModelSelect, matching)
class DebugReplayJobRunner(threading.Thread):
    """Worker thread that drives the startup job to completion.

    Owns the live ffmpeg Popen reference for cancellation. Cancellation
    is two-step (threading.Event + proc.terminate()) so the runner
    both knows it should stop and is unblocked from its blocking subprocess
    wait.
    """

    def __init__(
        self,
        job: DebugReplayJob,
        frigate_config: FrigateConfig,
        config_publisher: CameraConfigUpdatePublisher,
        replay_manager: "DebugReplayManager",
        publisher: Optional[JobStatePublisher] = None,
    ) -> None:
        super().__init__(daemon=True, name=f"debug_replay_{job.id}")
        self.job = job
        self.frigate_config = frigate_config
        self.config_publisher = config_publisher
        self.replay_manager = replay_manager
        # Injectable for tests; defaults to a real WS publisher.
        self.publisher = publisher if publisher is not None else JobStatePublisher()
        self._cancel_event = threading.Event()
        # Live ffmpeg process handle, guarded by _proc_lock.
        self._active_process: sp.Popen | None = None
        self._proc_lock = threading.Lock()
        # Monotonic timestamp of the last WS broadcast (throttling).
        self._last_broadcast_monotonic: float = 0.0

    def cancel(self) -> None:
        """Request cancellation. Idempotent."""
        self._cancel_event.set()
        # Grab the handle under the lock, terminate outside it.
        with self._proc_lock:
            proc = self._active_process
        if proc is not None:
            try:
                proc.terminate()
            except Exception as exc:
                logger.warning("Failed to terminate ffmpeg subprocess: %s", exc)

    def is_cancelled(self) -> bool:
        # True once cancel() has been requested.
        return self._cancel_event.is_set()

    def _record_proc(self, proc: sp.Popen) -> None:
        # Called by run_ffmpeg_with_progress as soon as the process spawns.
        with self._proc_lock:
            self._active_process = proc
            # Race: cancel arrived between Popen and _record_proc.
            if self._cancel_event.is_set():
                try:
                    proc.terminate()
                except Exception:
                    pass

    def _broadcast(self, force: bool = False) -> None:
        # Throttled job-state publish; force bypasses the rate limit for
        # lifecycle transitions (start, step change, terminal states).
        now = time.monotonic()
        if (
            not force
            and now - self._last_broadcast_monotonic < PROGRESS_BROADCAST_MIN_INTERVAL
        ):
            return
        self._last_broadcast_monotonic = now
        try:
            self.publisher.publish(self.job.to_dict())
        except Exception as err:
            logger.warning("Publisher raised during job state broadcast: %s", err)

    def run(self) -> None:
        """Thread body: concat clip via ffmpeg, then publish the replay camera."""
        replay_name = self.job.replay_camera_name
        os.makedirs(REPLAY_DIR, exist_ok=True)
        concat_file = os.path.join(REPLAY_DIR, f"{replay_name}_concat.txt")
        clip_path = os.path.join(REPLAY_DIR, f"{replay_name}.mp4")

        self.job.status = JobStatusTypesEnum.running
        self.job.start_time = time.time()
        self.job.current_step = STEP_PREPARING_CLIP
        self._broadcast(force=True)

        try:
            recordings = query_recordings(
                self.job.source_camera, self.job.start_ts, self.job.end_ts
            )
            # Write the ffmpeg concat manifest, one recording per line.
            with open(concat_file, "w") as f:
                for recording in recordings:
                    f.write(f"file '{recording.path}'\n")

            # Stream-copy concat: no re-encode, so this is fast.
            ffmpeg_cmd = [
                self.frigate_config.ffmpeg.ffmpeg_path,
                "-hide_banner",
                "-y",
                "-f",
                "concat",
                "-safe",
                "0",
                "-i",
                concat_file,
                "-c",
                "copy",
                "-movflags",
                "+faststart",
                clip_path,
            ]
            logger.info(
                "Generating replay clip for %s (%.1f - %.1f)",
                self.job.source_camera,
                self.job.start_ts,
                self.job.end_ts,
            )

            def _on_progress(percent: float) -> None:
                # Progress callback from the shared ffmpeg helper.
                self.job.progress_percent = percent
                self._broadcast()

            try:
                returncode, stderr = run_ffmpeg_with_progress(
                    ffmpeg_cmd,
                    expected_duration_seconds=max(
                        0.0, self.job.end_ts - self.job.start_ts
                    ),
                    on_progress=_on_progress,
                    process_started=self._record_proc,
                    use_low_priority=True,
                )
            finally:
                # The process has exited (or never started); drop the handle
                # so cancel() can't terminate a reused PID.
                with self._proc_lock:
                    self._active_process = None

            if self._cancel_event.is_set():
                self._finalize_cancelled(clip_path)
                return

            if returncode != 0:
                raise RuntimeError(f"FFmpeg failed: {stderr[-500:]}")
            if not os.path.exists(clip_path):
                raise RuntimeError("Clip file was not created")

            self.job.current_step = STEP_STARTING_CAMERA
            self.job.progress_percent = 100.0
            self._broadcast(force=True)

            # Re-check cancellation before mutating the live camera config.
            if self._cancel_event.is_set():
                self._finalize_cancelled(clip_path)
                return

            self.replay_manager.publish_camera(
                source_camera=self.job.source_camera,
                replay_name=replay_name,
                clip_path=clip_path,
                frigate_config=self.frigate_config,
                config_publisher=self.config_publisher,
            )
            self.replay_manager.mark_session_ready(clip_path)

            self.job.status = JobStatusTypesEnum.success
            self.job.end_time = time.time()
            self._broadcast(force=True)
            logger.info(
                "Debug replay started: %s -> %s",
                self.job.source_camera,
                replay_name,
            )
        except Exception as exc:
            logger.exception("Debug replay startup failed")
            self.job.status = JobStatusTypesEnum.failed
            self.job.error_message = str(exc)
            self.job.end_time = time.time()
            self._broadcast(force=True)
            # Release the session claim and remove the partial clip.
            self.replay_manager.clear_session()
            _remove_silent(clip_path)
        finally:
            # The concat manifest is always transient; the runner slot is
            # freed regardless of outcome.
            _remove_silent(concat_file)
            _set_active_runner(None)

    def _finalize_cancelled(self, clip_path: str) -> None:
        # Mark the job cancelled and broadcast the terminal state.
        logger.info("Debug replay startup cancelled")
        self.job.status = JobStatusTypesEnum.cancelled
        self.job.end_time = time.time()
        self._broadcast(force=True)
        # The caller of cancel_debug_replay_job (DebugReplayManager.stop) owns
        # session cleanup — db rows, filesystem artifacts, clear_session. We
        # only clean up the partial concat output we created.
        _remove_silent(clip_path)
def _remove_silent(path: str) -> None:
try:
if os.path.exists(path):
os.remove(path)
except OSError:
pass
def start_debug_replay_job(
    *,
    source_camera: str,
    start_ts: float,
    end_ts: float,
    frigate_config: FrigateConfig,
    config_publisher: CameraConfigUpdatePublisher,
    replay_manager: "DebugReplayManager",
) -> str:
    """Validate, create job, start runner. Returns the job id.

    Raises ValueError for bad params (camera missing, time range
    invalid, no recordings) and RuntimeError if a session is already
    active.
    """
    if job_is_running(JOB_TYPE) or replay_manager.active:
        raise RuntimeError("A replay session is already active")

    if source_camera not in frigate_config.cameras:
        raise ValueError(f"Camera '{source_camera}' not found")
    if end_ts <= start_ts:
        raise ValueError("End time must be after start time")

    recordings = query_recordings(source_camera, start_ts, end_ts)
    if not recordings.count():
        raise ValueError(
            f"No recordings found for camera '{source_camera}' in the specified time range"
        )

    replay_name = f"{REPLAY_CAMERA_PREFIX}{source_camera}"

    # Claim the session synchronously so the status endpoint reports
    # active=True before the worker thread does any ffmpeg work.
    replay_manager.mark_starting(
        source_camera=source_camera,
        replay_camera_name=replay_name,
        start_ts=start_ts,
        end_ts=end_ts,
    )

    try:
        job = DebugReplayJob(
            source_camera=source_camera,
            replay_camera_name=replay_name,
            start_ts=start_ts,
            end_ts=end_ts,
        )
        set_current_job(job)
        runner = DebugReplayJobRunner(
            job=job,
            frigate_config=frigate_config,
            config_publisher=config_publisher,
            replay_manager=replay_manager,
        )
        _set_active_runner(runner)
        runner.start()
    except Exception:
        # Roll back the claimed session: without this, a failed runner
        # construction or thread start would leave the manager permanently
        # reporting an active replay with no runner to finish or clean it.
        _set_active_runner(None)
        replay_manager.clear_session()
        raise

    return job.id
def cancel_debug_replay_job() -> bool:
    """Signal the active runner to cancel.

    Returns True if a runner was signalled, False if no job was active.
    """
    active = get_active_runner()
    if active is not None:
        active.cancel()
        return True
    return False
def wait_for_runner(timeout: float = 2.0) -> bool:
    """Block until the active runner thread exits, up to ``timeout`` seconds.

    Returns ``True`` when no runner is active or it finished within the
    timeout; ``False`` when it is still alive afterwards.
    """
    active = get_active_runner()
    if active is not None:
        active.join(timeout=timeout)
        return not active.is_alive()
    return True

View File

@ -23,13 +23,13 @@ from frigate.const import (
EXPORT_DIR,
MAX_PLAYLIST_SECONDS,
PREVIEW_FRAME_TYPE,
PROCESS_PRIORITY_LOW,
)
from frigate.ffmpeg_presets import (
EncodeTypeEnum,
parse_preset_hardware_acceleration_encode,
)
from frigate.models import Export, Previews, Recordings, ReviewSegment
from frigate.util.ffmpeg import run_ffmpeg_with_progress
from frigate.util.time import is_current_hour
logger = logging.getLogger(__name__)
@ -243,107 +243,29 @@ class RecordingExporter(threading.Thread):
return total
def _inject_progress_flags(self, ffmpeg_cmd: list[str]) -> list[str]:
"""Insert FFmpeg progress reporting flags before the output path.
``-progress pipe:2`` writes structured key=value lines to stderr,
``-nostats`` suppresses the noisy default stats output.
"""
if not ffmpeg_cmd:
return ffmpeg_cmd
return ffmpeg_cmd[:-1] + ["-progress", "pipe:2", "-nostats", ffmpeg_cmd[-1]]
def _run_ffmpeg_with_progress(
self,
ffmpeg_cmd: list[str],
playlist_lines: str | list[str],
step: str = "encoding",
) -> tuple[int, str]:
"""Run an FFmpeg export command, parsing progress events from stderr.
"""Delegate to the shared helper, mapping percent → (step, percent).
Returns ``(returncode, captured_stderr)``. Stdout is left attached to
the parent process so we don't have to drain it (and risk a deadlock
if the buffer fills). Progress percent is computed against the
expected output duration; values are clamped to [0, 100] inside
:py:meth:`_emit_progress`.
Returns ``(returncode, captured_stderr)``.
"""
cmd = ["nice", "-n", str(PROCESS_PRIORITY_LOW)] + self._inject_progress_flags(
ffmpeg_cmd
)
if isinstance(playlist_lines, list):
stdin_payload = "\n".join(playlist_lines)
else:
stdin_payload = playlist_lines
expected_duration = self._expected_output_duration_seconds()
self._emit_progress(step, 0.0)
proc = sp.Popen(
cmd,
stdin=sp.PIPE,
stderr=sp.PIPE,
text=True,
encoding="ascii",
errors="replace",
return run_ffmpeg_with_progress(
ffmpeg_cmd,
expected_duration_seconds=self._expected_output_duration_seconds(),
on_progress=lambda percent: self._emit_progress(step, percent),
stdin_payload=stdin_payload,
use_low_priority=True,
)
assert proc.stdin is not None
assert proc.stderr is not None
try:
proc.stdin.write(stdin_payload)
except (BrokenPipeError, OSError):
# FFmpeg may have rejected the input early; still wait for it
# to terminate so the returncode is meaningful.
pass
finally:
try:
proc.stdin.close()
except (BrokenPipeError, OSError):
pass
captured: list[str] = []
try:
for raw_line in proc.stderr:
captured.append(raw_line)
line = raw_line.strip()
if not line:
continue
if line.startswith("out_time_us="):
if expected_duration <= 0:
continue
try:
out_time_us = int(line.split("=", 1)[1])
except (ValueError, IndexError):
continue
if out_time_us < 0:
continue
out_seconds = out_time_us / 1_000_000.0
percent = (out_seconds / expected_duration) * 100.0
self._emit_progress(step, percent)
elif line == "progress=end":
self._emit_progress(step, 100.0)
break
except Exception:
logger.exception("Failed reading FFmpeg progress for %s", self.export_id)
proc.wait()
# Drain any remaining stderr so callers can log it on failure.
try:
remaining = proc.stderr.read()
if remaining:
captured.append(remaining)
except Exception:
pass
return proc.returncode, "".join(captured)
def get_datetime_from_timestamp(self, timestamp: int) -> str:
# return in iso format using the configured ui.timezone when set,
# so the auto-generated export name reflects local time rather
@ -420,6 +342,7 @@ class RecordingExporter(threading.Thread):
return None
total_output = windows[-1][2] + (windows[-1][1] - windows[-1][0])
last_recorded_end = windows[-1][1]
def wall_to_output(t: float) -> float:
t = max(float(self.start_time), min(float(self.end_time), t))
@ -432,8 +355,18 @@ class RecordingExporter(threading.Thread):
chapter_blocks: list[str] = []
for review in review_rows:
if review.start_time is None:
continue
# In-progress segments have a NULL end_time until the activity
# closes; clamp to the last recorded second so the chapter never
# extends past the actual video.
review_end = (
float(review.end_time)
if review.end_time is not None
else last_recorded_end
)
start_out = wall_to_output(float(review.start_time))
end_out = wall_to_output(float(review.end_time))
end_out = wall_to_output(review_end)
# Drop chapters that fall entirely in a recording gap, or are
# too short to be navigable in a player.
@ -516,16 +449,14 @@ class RecordingExporter(threading.Thread):
except DoesNotExist:
return ""
diff = self.start_time - preview.start_time
minutes = int(diff / 60)
seconds = int(diff % 60)
diff = max(0.0, float(self.start_time) - float(preview.start_time))
ffmpeg_cmd = [
"/usr/lib/ffmpeg/7.0/bin/ffmpeg", # hardcode path for exports thumbnail due to missing libwebp support
"-hide_banner",
"-loglevel",
"warning",
"-ss",
f"00:{minutes}:{seconds}",
f"{diff:.3f}",
"-i",
preview.path,
"-frames",

View File

@ -123,6 +123,15 @@ def get_detector_temperature(
if index < len(hailo_device_names):
device_name = hailo_device_names[index]
return hailo_temps[device_name]
elif detector_type == "hailo10h":
# Get temperatures for Hailo devices
hailo_temps = get_hailo_temps()
if hailo_temps:
hailo_device_names = sorted(hailo_temps.keys())
index = detector_index_by_type.get("hailo10h", 0)
if index < len(hailo_device_names):
device_name = hailo_device_names[index]
return hailo_temps[device_name]
elif detector_type == "rknn":
# Rockchip temperatures are handled by the GPU / NPU stats
# as there are not detector specific temperatures

View File

@ -0,0 +1,123 @@
"""Tests for /debug_replay API endpoints."""
from unittest.mock import patch
from frigate.models import Event, Recordings, ReviewSegment
from frigate.test.http_api.base_http_test import AuthTestClient, BaseTestHttp
class TestDebugReplayAPI(BaseTestHttp):
    """HTTP-level tests for the /debug_replay endpoints.

    The job factory is patched out, so these exercise only the API layer:
    status codes, response bodies, and the manager session wiring.
    """

    def setUp(self):
        super().setUp([Event, Recordings, ReviewSegment])
        self.app = self.create_app()

    def test_start_returns_202_with_job_id(self):
        # Stub the factory to skip validation/threading and just record the
        # name on the manager the way the real factory's mark_starting would.
        def fake_start(**kwargs):
            kwargs["replay_manager"].mark_starting(
                source_camera=kwargs["source_camera"],
                replay_camera_name="_replay_front",
                start_ts=kwargs["start_ts"],
                end_ts=kwargs["end_ts"],
            )
            return "job-1234"

        with patch(
            "frigate.api.debug_replay.start_debug_replay_job",
            side_effect=fake_start,
        ):
            with AuthTestClient(self.app) as client:
                resp = client.post(
                    "/debug_replay/start",
                    json={
                        "camera": "front",
                        "start_time": 100,
                        "end_time": 200,
                    },
                )
                # 202 Accepted: the job continues asynchronously after
                # the request returns.
                self.assertEqual(resp.status_code, 202)
                body = resp.json()
                self.assertTrue(body["success"])
                self.assertEqual(body["job_id"], "job-1234")
                self.assertEqual(body["replay_camera"], "_replay_front")

    def test_start_returns_400_on_validation_error(self):
        with patch(
            "frigate.api.debug_replay.start_debug_replay_job",
            side_effect=ValueError("Camera 'missing' not found"),
        ):
            with AuthTestClient(self.app) as client:
                resp = client.post(
                    "/debug_replay/start",
                    json={
                        "camera": "missing",
                        "start_time": 100,
                        "end_time": 200,
                    },
                )
                self.assertEqual(resp.status_code, 400)
                body = resp.json()
                self.assertFalse(body["success"])
                # Message is hard-coded so we don't echo exception text back to clients
                # (CodeQL: information exposure through an exception).
                self.assertEqual(body["message"], "Invalid debug replay parameters")

    def test_start_returns_409_when_session_already_active(self):
        with patch(
            "frigate.api.debug_replay.start_debug_replay_job",
            side_effect=RuntimeError("A replay session is already active"),
        ):
            with AuthTestClient(self.app) as client:
                resp = client.post(
                    "/debug_replay/start",
                    json={
                        "camera": "front",
                        "start_time": 100,
                        "end_time": 200,
                    },
                )
                # 409 Conflict maps the factory's RuntimeError.
                self.assertEqual(resp.status_code, 409)
                body = resp.json()
                self.assertFalse(body["success"])

    def test_status_inactive_when_no_session(self):
        with AuthTestClient(self.app) as client:
            resp = client.get("/debug_replay/status")
            self.assertEqual(resp.status_code, 200)
            body = resp.json()
            self.assertFalse(body["active"])
            self.assertIsNone(body["replay_camera"])
            self.assertIsNone(body["source_camera"])
            self.assertIsNone(body["start_time"])
            self.assertIsNone(body["end_time"])
            self.assertFalse(body["live_ready"])
            # Make sure deprecated fields are gone
            self.assertNotIn("state", body)
            self.assertNotIn("progress_percent", body)
            self.assertNotIn("error_message", body)

    def test_status_active_after_mark_starting(self):
        # Seed a session directly on the app's manager; the status
        # endpoint should reflect it without a job existing.
        manager = self.app.replay_manager
        manager.mark_starting(
            source_camera="front",
            replay_camera_name="_replay_front",
            start_ts=100.0,
            end_ts=200.0,
        )
        with AuthTestClient(self.app) as client:
            resp = client.get("/debug_replay/status")
            self.assertEqual(resp.status_code, 200)
            body = resp.json()
            self.assertTrue(body["active"])
            self.assertEqual(body["replay_camera"], "_replay_front")
            self.assertEqual(body["source_camera"], "front")
            self.assertEqual(body["start_time"], 100.0)
            self.assertEqual(body["end_time"], 200.0)
            # live_ready stays False until the replay camera is published.
            self.assertFalse(body["live_ready"])

View File

@ -0,0 +1,242 @@
"""Tests for the simplified DebugReplayManager.
Startup orchestration lives in ``frigate.jobs.debug_replay`` (covered by
``test_debug_replay_job``). The manager owns only session presence and
cleanup.
"""
import unittest
import unittest.mock
from unittest.mock import MagicMock, patch
class TestDebugReplayManagerSession(unittest.TestCase):
    """Session pointers move through starting → ready → cleared."""

    def test_inactive_by_default(self) -> None:
        from frigate.debug_replay import DebugReplayManager

        mgr = DebugReplayManager()
        self.assertFalse(mgr.active)
        self.assertIsNone(mgr.replay_camera_name)
        self.assertIsNone(mgr.source_camera)
        self.assertIsNone(mgr.clip_path)
        self.assertIsNone(mgr.start_ts)
        self.assertIsNone(mgr.end_ts)

    def test_mark_starting_sets_session_pointers_and_active(self) -> None:
        from frigate.debug_replay import DebugReplayManager

        mgr = DebugReplayManager()
        mgr.mark_starting(
            source_camera="front",
            replay_camera_name="_replay_front",
            start_ts=100.0,
            end_ts=200.0,
        )
        self.assertTrue(mgr.active)
        self.assertEqual(mgr.replay_camera_name, "_replay_front")
        self.assertEqual(mgr.source_camera, "front")
        self.assertEqual(mgr.start_ts, 100.0)
        self.assertEqual(mgr.end_ts, 200.0)
        # The clip is produced later; it must not be set yet.
        self.assertIsNone(mgr.clip_path)

    def test_mark_session_ready_sets_clip_path(self) -> None:
        from frigate.debug_replay import DebugReplayManager

        mgr = DebugReplayManager()
        mgr.mark_starting("front", "_replay_front", 100.0, 200.0)
        mgr.mark_session_ready(clip_path="/tmp/replay/_replay_front.mp4")
        self.assertEqual(mgr.clip_path, "/tmp/replay/_replay_front.mp4")
        self.assertTrue(mgr.active)

    def test_clear_session_resets_all_pointers(self) -> None:
        from frigate.debug_replay import DebugReplayManager

        mgr = DebugReplayManager()
        mgr.mark_starting("front", "_replay_front", 100.0, 200.0)
        mgr.mark_session_ready("/tmp/replay/clip.mp4")
        mgr.clear_session()
        self.assertFalse(mgr.active)
        self.assertIsNone(mgr.replay_camera_name)
        self.assertIsNone(mgr.source_camera)
        self.assertIsNone(mgr.clip_path)
        self.assertIsNone(mgr.start_ts)
        self.assertIsNone(mgr.end_ts)
class TestDebugReplayManagerStop(unittest.TestCase):
    """Behavior of DebugReplayManager.stop() across session states."""

    def test_stop_when_inactive_is_a_noop(self) -> None:
        from frigate.debug_replay import DebugReplayManager

        manager = DebugReplayManager()
        frigate_config = MagicMock()
        frigate_config.cameras = {}
        publisher = MagicMock()
        # Should not raise; should not publish any events.
        manager.stop(frigate_config=frigate_config, config_publisher=publisher)
        publisher.publish_update.assert_not_called()

    def test_stop_publishes_remove_when_camera_was_published(self) -> None:
        from frigate.config.camera.updater import CameraConfigUpdateEnum
        from frigate.debug_replay import DebugReplayManager

        manager = DebugReplayManager()
        manager.mark_starting("front", "_replay_front", 100.0, 200.0)
        manager.mark_session_ready("/tmp/replay/_replay_front.mp4")
        # The replay camera is present in the live config, i.e. it was
        # published, so stop() must announce its removal.
        camera_config = MagicMock()
        frigate_config = MagicMock()
        frigate_config.cameras = {"_replay_front": camera_config}
        publisher = MagicMock()
        with (
            patch.object(manager, "_cleanup_db"),
            patch.object(manager, "_cleanup_files"),
            patch("frigate.debug_replay.cancel_debug_replay_job", return_value=False),
        ):
            manager.stop(frigate_config=frigate_config, config_publisher=publisher)
        # One publish_update call with a remove topic.
        self.assertEqual(publisher.publish_update.call_count, 1)
        topic_arg = publisher.publish_update.call_args.args[0]
        self.assertEqual(topic_arg.update_type, CameraConfigUpdateEnum.remove)
        self.assertFalse(manager.active)

    def test_stop_skips_remove_publish_when_camera_not_in_config(self) -> None:
        """Cancellation during preparing_clip: no camera was published yet."""
        from frigate.debug_replay import DebugReplayManager

        manager = DebugReplayManager()
        manager.mark_starting("front", "_replay_front", 100.0, 200.0)
        # clip_path stays None because we cancelled before camera publish.
        frigate_config = MagicMock()
        frigate_config.cameras = {}  # _replay_front not present
        publisher = MagicMock()
        with (
            patch.object(manager, "_cleanup_db"),
            patch.object(manager, "_cleanup_files"),
            patch("frigate.debug_replay.cancel_debug_replay_job", return_value=True),
        ):
            manager.stop(frigate_config=frigate_config, config_publisher=publisher)
        # Nothing was published, so nothing to remove.
        publisher.publish_update.assert_not_called()
        self.assertFalse(manager.active)

    def test_stop_calls_cancel_debug_replay_job(self) -> None:
        from frigate.debug_replay import DebugReplayManager

        manager = DebugReplayManager()
        manager.mark_starting("front", "_replay_front", 100.0, 200.0)
        frigate_config = MagicMock()
        frigate_config.cameras = {}
        publisher = MagicMock()
        with (
            patch.object(manager, "_cleanup_db"),
            patch.object(manager, "_cleanup_files"),
            patch(
                "frigate.debug_replay.cancel_debug_replay_job",
                return_value=True,
            ) as mock_cancel,
        ):
            manager.stop(frigate_config=frigate_config, config_publisher=publisher)
        # stop() is responsible for cancelling any in-flight job.
        mock_cancel.assert_called_once()
class TestDebugReplayManagerPublishCamera(unittest.TestCase):
    """publish_camera builds a replay-camera config and publishes an add event."""

    def test_publish_camera_invokes_publisher_with_add_topic(self) -> None:
        from frigate.config.camera.updater import CameraConfigUpdateEnum
        from frigate.debug_replay import DebugReplayManager

        manager = DebugReplayManager()
        source_config = MagicMock()
        new_camera_config = MagicMock()
        frigate_config = MagicMock()
        frigate_config.cameras = {"front": source_config}
        publisher = MagicMock()
        # Patch out config-file IO and parsing; only the registration and
        # publish side effects matter here.
        with (
            patch.object(
                manager,
                "_build_camera_config_dict",
                return_value={"enabled": True},
            ),
            patch("frigate.debug_replay.find_config_file", return_value="/cfg.yml"),
            patch("frigate.debug_replay.YAML") as yaml_cls,
            patch("frigate.debug_replay.FrigateConfig.parse_object") as parse_object,
            patch("builtins.open", unittest.mock.mock_open(read_data="cameras:\n")),
        ):
            yaml_instance = yaml_cls.return_value
            yaml_instance.load.return_value = {"cameras": {}}
            # parse_object yields a config whose cameras dict holds the
            # freshly-built replay camera.
            parsed = MagicMock()
            parsed.cameras = {"_replay_front": new_camera_config}
            parse_object.return_value = parsed
            manager.publish_camera(
                source_camera="front",
                replay_name="_replay_front",
                clip_path="/tmp/clip.mp4",
                frigate_config=frigate_config,
                config_publisher=publisher,
            )
        # Camera registered into the live config dict
        self.assertIn("_replay_front", frigate_config.cameras)
        # Publisher invoked with an add topic
        self.assertEqual(publisher.publish_update.call_count, 1)
        topic_arg = publisher.publish_update.call_args.args[0]
        self.assertEqual(topic_arg.update_type, CameraConfigUpdateEnum.add)

    def test_publish_camera_wraps_parse_failure_in_runtime_error(self) -> None:
        from frigate.debug_replay import DebugReplayManager

        manager = DebugReplayManager()
        frigate_config = MagicMock()
        frigate_config.cameras = {"front": MagicMock()}
        publisher = MagicMock()
        with (
            patch.object(
                manager,
                "_build_camera_config_dict",
                return_value={"enabled": True},
            ),
            patch("frigate.debug_replay.find_config_file", return_value="/cfg.yml"),
            patch("frigate.debug_replay.YAML") as yaml_cls,
            # Simulate a config-validation failure when parsing the built
            # replay camera; it must surface as RuntimeError with context.
            patch(
                "frigate.debug_replay.FrigateConfig.parse_object",
                side_effect=ValueError("zone foo has invalid coordinates"),
            ),
            patch("builtins.open", unittest.mock.mock_open(read_data="cameras:\n")),
        ):
            yaml_cls.return_value.load.return_value = {"cameras": {}}
            with self.assertRaises(RuntimeError) as ctx:
                manager.publish_camera(
                    source_camera="front",
                    replay_name="_replay_front",
                    clip_path="/tmp/clip.mp4",
                    frigate_config=frigate_config,
                    config_publisher=publisher,
                )
            # Wrapped message keeps both the context and the root cause.
            self.assertIn("replay camera config", str(ctx.exception))
            self.assertIn("invalid coordinates", str(ctx.exception))
        # Failure path must not publish anything.
        publisher.publish_update.assert_not_called()
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()

View File

@ -0,0 +1,460 @@
"""Tests for the debug replay job runner and factory."""
import threading
import time
import unittest
import unittest.mock
from unittest.mock import MagicMock, patch
from frigate.debug_replay import DebugReplayManager
from frigate.jobs.debug_replay import (
DebugReplayJob,
cancel_debug_replay_job,
get_active_runner,
start_debug_replay_job,
)
from frigate.jobs.export import JobStatePublisher
from frigate.jobs.manager import _completed_jobs, _current_jobs
from frigate.types import JobStatusTypesEnum
def _reset_job_manager() -> None:
    """Wipe the job manager's module-level registries between tests."""
    _completed_jobs.clear()
    _current_jobs.clear()
def _patch_publisher(test_case: unittest.TestCase) -> None:
    """Stub out JobStatePublisher.publish so tests never block on IPC."""
    patcher = patch.object(JobStatePublisher, "publish", lambda self, payload: None)
    patcher.start()
    # Undo the patch automatically when the test finishes.
    test_case.addCleanup(patcher.stop)
class TestDebugReplayJob(unittest.TestCase):
    """Defaults and serialized shape of DebugReplayJob."""

    def test_default_fields(self) -> None:
        job = DebugReplayJob()
        self.assertEqual(job.job_type, "debug_replay")
        self.assertEqual(job.status, JobStatusTypesEnum.queued)
        self.assertIsNone(job.current_step)
        self.assertEqual(job.progress_percent, 0.0)

    def test_to_dict_whitelist(self) -> None:
        job = DebugReplayJob(
            source_camera="front",
            replay_camera_name="_replay_front",
            start_ts=100.0,
            end_ts=200.0,
        )
        job.current_step = "preparing_clip"
        job.progress_percent = 42.5
        serialized = job.to_dict()
        # Top-level matches the standard Job<TResults> shape.
        expected_top_level = (
            "id",
            "job_type",
            "status",
            "start_time",
            "end_time",
            "error_message",
            "results",
        )
        for field in expected_top_level:
            self.assertIn(field, serialized, f"missing top-level field: {field}")
        details = serialized["results"]
        self.assertEqual(details["source_camera"], "front")
        self.assertEqual(details["replay_camera_name"], "_replay_front")
        self.assertEqual(details["current_step"], "preparing_clip")
        self.assertEqual(details["progress_percent"], 42.5)
        self.assertEqual(details["start_ts"], 100.0)
        self.assertEqual(details["end_ts"], 200.0)
class TestStartDebugReplayJob(unittest.TestCase):
    """Validation and session bootstrap behavior of start_debug_replay_job."""

    def setUp(self) -> None:
        _reset_job_manager()
        _patch_publisher(self)
        self.manager = DebugReplayManager()
        self.frigate_config = MagicMock()
        self.frigate_config.cameras = {"front": MagicMock()}
        self.frigate_config.ffmpeg.ffmpeg_path = "/bin/true"
        self.publisher = MagicMock()
        # A query result with exactly one recording row.
        self.recordings_qs = MagicMock()
        self.recordings_qs.count.return_value = 1
        self.recordings_qs.__iter__.return_value = iter([MagicMock(path="/tmp/r1.mp4")])

    def tearDown(self) -> None:
        # Always stop any runner a test left behind so its thread cannot
        # leak into other tests.
        runner = get_active_runner()
        if runner is not None:
            runner.cancel()
            runner.join(timeout=2.0)
        _reset_job_manager()

    def test_rejects_unknown_camera(self) -> None:
        with self.assertRaises(ValueError):
            start_debug_replay_job(
                source_camera="missing",
                start_ts=100.0,
                end_ts=200.0,
                frigate_config=self.frigate_config,
                config_publisher=self.publisher,
                replay_manager=self.manager,
            )

    def test_rejects_invalid_time_range(self) -> None:
        # end before start is invalid.
        with self.assertRaises(ValueError):
            start_debug_replay_job(
                source_camera="front",
                start_ts=200.0,
                end_ts=100.0,
                frigate_config=self.frigate_config,
                config_publisher=self.publisher,
                replay_manager=self.manager,
            )

    def test_rejects_when_no_recordings(self) -> None:
        empty_qs = MagicMock()
        empty_qs.count.return_value = 0
        with patch("frigate.jobs.debug_replay.query_recordings", return_value=empty_qs):
            with self.assertRaises(ValueError):
                start_debug_replay_job(
                    source_camera="front",
                    start_ts=100.0,
                    end_ts=200.0,
                    frigate_config=self.frigate_config,
                    config_publisher=self.publisher,
                    replay_manager=self.manager,
                )

    def test_returns_job_id_and_marks_session_starting(self) -> None:
        # Event keeps the fake ffmpeg helper (and so the runner thread)
        # blocked while we assert on the starting-state side effects.
        block = threading.Event()

        def slow_helper(cmd, **kwargs):
            block.wait(timeout=5)
            return 0, ""

        with (
            patch(
                "frigate.jobs.debug_replay.query_recordings",
                return_value=self.recordings_qs,
            ),
            patch(
                "frigate.jobs.debug_replay.run_ffmpeg_with_progress",
                side_effect=slow_helper,
            ),
            patch.object(self.manager, "publish_camera"),
            patch("os.path.exists", return_value=True),
            patch("os.makedirs"),
            patch("builtins.open", unittest.mock.mock_open()),
        ):
            job_id = start_debug_replay_job(
                source_camera="front",
                start_ts=100.0,
                end_ts=200.0,
                frigate_config=self.frigate_config,
                config_publisher=self.publisher,
                replay_manager=self.manager,
            )
            # The session must be observable immediately, before the
            # runner has produced anything.
            self.assertIsInstance(job_id, str)
            self.assertTrue(self.manager.active)
            self.assertEqual(self.manager.replay_camera_name, "_replay_front")
            self.assertEqual(self.manager.source_camera, "front")
            block.set()

    def test_rejects_concurrent_calls(self) -> None:
        block = threading.Event()

        def slow_helper(cmd, **kwargs):
            block.wait(timeout=5)
            return 0, ""

        with (
            patch(
                "frigate.jobs.debug_replay.query_recordings",
                return_value=self.recordings_qs,
            ),
            patch(
                "frigate.jobs.debug_replay.run_ffmpeg_with_progress",
                side_effect=slow_helper,
            ),
            patch.object(self.manager, "publish_camera"),
            patch("os.path.exists", return_value=True),
            patch("os.makedirs"),
            patch("builtins.open", unittest.mock.mock_open()),
        ):
            start_debug_replay_job(
                source_camera="front",
                start_ts=100.0,
                end_ts=200.0,
                frigate_config=self.frigate_config,
                config_publisher=self.publisher,
                replay_manager=self.manager,
            )
            # A second start while the first is in flight must be rejected.
            with self.assertRaises(RuntimeError):
                start_debug_replay_job(
                    source_camera="front",
                    start_ts=100.0,
                    end_ts=200.0,
                    frigate_config=self.frigate_config,
                    config_publisher=self.publisher,
                    replay_manager=self.manager,
                )
            block.set()
block.set()
class TestRunnerHappyPath(unittest.TestCase):
    """End-to-end runner success: progress callbacks, publish, job success."""

    def setUp(self) -> None:
        _reset_job_manager()
        _patch_publisher(self)
        self.manager = DebugReplayManager()
        self.frigate_config = MagicMock()
        self.frigate_config.cameras = {"front": MagicMock()}
        self.frigate_config.ffmpeg.ffmpeg_path = "/bin/true"
        self.publisher = MagicMock()
        self.recordings_qs = MagicMock()
        self.recordings_qs.count.return_value = 1
        self.recordings_qs.__iter__.return_value = iter([MagicMock(path="/tmp/r1.mp4")])

    def tearDown(self) -> None:
        # Stop any leftover runner thread so it cannot leak into other tests.
        runner = get_active_runner()
        if runner is not None:
            runner.cancel()
            runner.join(timeout=2.0)
        _reset_job_manager()

    def _wait_for(self, predicate, timeout: float = 5.0) -> bool:
        # Poll until the predicate holds or the timeout elapses.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if predicate():
                return True
            time.sleep(0.02)
        return False

    def test_progress_callback_updates_job_percent(self) -> None:
        # Fix: annotate as list[str] — only the sentinel "published" is
        # ever appended (by the publish_camera side effect below); the
        # previous list[float] annotation was wrong.
        captured: list[str] = []

        def fake_helper(cmd, *, on_progress=None, **kwargs):
            # Drive the progress callback like real ffmpeg parsing would.
            on_progress(0.0)
            on_progress(50.0)
            on_progress(100.0)
            return 0, ""

        with (
            patch(
                "frigate.jobs.debug_replay.query_recordings",
                return_value=self.recordings_qs,
            ),
            patch(
                "frigate.jobs.debug_replay.run_ffmpeg_with_progress",
                side_effect=fake_helper,
            ),
            patch.object(
                self.manager,
                "publish_camera",
                side_effect=lambda *a, **kw: captured.append("published"),
            ),
            patch("os.path.exists", return_value=True),
            patch("os.makedirs"),
            patch("builtins.open", unittest.mock.mock_open()),
        ):
            start_debug_replay_job(
                source_camera="front",
                start_ts=100.0,
                end_ts=200.0,
                frigate_config=self.frigate_config,
                config_publisher=self.publisher,
                replay_manager=self.manager,
            )
            self.assertTrue(
                self._wait_for(lambda: get_active_runner() is None),
                "runner did not finish",
            )
        from frigate.jobs.manager import get_current_job

        job = get_current_job("debug_replay")
        self.assertIsNotNone(job)
        self.assertEqual(job.status, JobStatusTypesEnum.success)
        self.assertEqual(job.progress_percent, 100.0)
        # publish_camera ran exactly once.
        self.assertEqual(captured, ["published"])
        # Manager should have been told the session is ready with the clip path.
        self.assertIsNotNone(self.manager.clip_path)
class TestRunnerFailurePath(unittest.TestCase):
    """Runner behavior when the ffmpeg concat step fails."""

    def setUp(self) -> None:
        _reset_job_manager()
        _patch_publisher(self)
        self.manager = DebugReplayManager()
        self.frigate_config = MagicMock()
        self.frigate_config.cameras = {"front": MagicMock()}
        self.frigate_config.ffmpeg.ffmpeg_path = "/bin/true"
        self.publisher = MagicMock()
        self.recordings_qs = MagicMock()
        self.recordings_qs.count.return_value = 1
        self.recordings_qs.__iter__.return_value = iter([MagicMock(path="/tmp/r1.mp4")])

    def tearDown(self) -> None:
        # Stop any leftover runner thread so it cannot leak into other tests.
        runner = get_active_runner()
        if runner is not None:
            runner.cancel()
            runner.join(timeout=2.0)
        _reset_job_manager()

    def _wait_for(self, predicate, timeout: float = 5.0) -> bool:
        # Poll until the predicate holds or the timeout elapses.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if predicate():
                return True
            time.sleep(0.02)
        return False

    def test_ffmpeg_failure_marks_job_failed_and_clears_session(self) -> None:
        # Nonzero returncode with stderr content simulates an ffmpeg crash.
        def failing_helper(cmd, **kwargs):
            return 1, "ffmpeg exploded"

        with (
            patch(
                "frigate.jobs.debug_replay.query_recordings",
                return_value=self.recordings_qs,
            ),
            patch(
                "frigate.jobs.debug_replay.run_ffmpeg_with_progress",
                side_effect=failing_helper,
            ),
            patch("os.path.exists", return_value=True),
            patch("os.makedirs"),
            patch("os.remove"),
            patch("builtins.open", unittest.mock.mock_open()),
        ):
            start_debug_replay_job(
                source_camera="front",
                start_ts=100.0,
                end_ts=200.0,
                frigate_config=self.frigate_config,
                config_publisher=self.publisher,
                replay_manager=self.manager,
            )
            self.assertTrue(
                self._wait_for(lambda: get_active_runner() is None),
                "runner did not finish",
            )
        from frigate.jobs.manager import get_current_job

        job = get_current_job("debug_replay")
        self.assertIsNotNone(job)
        self.assertEqual(job.status, JobStatusTypesEnum.failed)
        self.assertIsNotNone(job.error_message)
        self.assertIn("ffmpeg", job.error_message.lower())
        # Session cleared so a new /start is allowed
        self.assertFalse(self.manager.active)
class TestRunnerCancellation(unittest.TestCase):
    """Cancellation terminates the in-flight ffmpeg and marks the job cancelled."""

    def setUp(self) -> None:
        _reset_job_manager()
        _patch_publisher(self)
        self.manager = DebugReplayManager()
        self.frigate_config = MagicMock()
        self.frigate_config.cameras = {"front": MagicMock()}
        self.frigate_config.ffmpeg.ffmpeg_path = "/bin/true"
        self.publisher = MagicMock()
        self.recordings_qs = MagicMock()
        self.recordings_qs.count.return_value = 1
        self.recordings_qs.__iter__.return_value = iter([MagicMock(path="/tmp/r1.mp4")])

    def tearDown(self) -> None:
        # Stop any leftover runner thread so it cannot leak into other tests.
        runner = get_active_runner()
        if runner is not None:
            runner.cancel()
            runner.join(timeout=2.0)
        _reset_job_manager()

    def _wait_for(self, predicate, timeout: float = 5.0) -> bool:
        # Poll until the predicate holds or the timeout elapses.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if predicate():
                return True
            time.sleep(0.02)
        return False

    def test_cancel_terminates_ffmpeg_and_marks_cancelled(self) -> None:
        # The fake process blocks inside the helper until terminate() is
        # called, emulating ffmpeg mid-encode.
        terminated = threading.Event()
        fake_proc = MagicMock()
        fake_proc.terminate = MagicMock(side_effect=lambda: terminated.set())

        def fake_helper(cmd, *, process_started=None, **kwargs):
            if process_started is not None:
                process_started(fake_proc)
            terminated.wait(timeout=5)
            # -15 mirrors the SIGTERM returncode of a terminated process.
            return -15, "killed"

        with (
            patch(
                "frigate.jobs.debug_replay.query_recordings",
                return_value=self.recordings_qs,
            ),
            patch(
                "frigate.jobs.debug_replay.run_ffmpeg_with_progress",
                side_effect=fake_helper,
            ),
            patch("os.path.exists", return_value=True),
            patch("os.makedirs"),
            patch("os.remove"),
            patch("builtins.open", unittest.mock.mock_open()),
        ):
            start_debug_replay_job(
                source_camera="front",
                start_ts=100.0,
                end_ts=200.0,
                frigate_config=self.frigate_config,
                config_publisher=self.publisher,
                replay_manager=self.manager,
            )
            # Wait for the runner to register the active process.
            self.assertTrue(
                self._wait_for(
                    lambda: (
                        get_active_runner() is not None
                        and get_active_runner()._active_process is fake_proc
                    )
                )
            )
            cancelled = cancel_debug_replay_job()
            self.assertTrue(cancelled)
            self.assertTrue(fake_proc.terminate.called)
            self.assertTrue(
                self._wait_for(lambda: get_active_runner() is None),
                "runner did not finish",
            )
        from frigate.jobs.manager import get_current_job

        job = get_current_job("debug_replay")
        self.assertEqual(job.status, JobStatusTypesEnum.cancelled)
        # Runner must not clear the manager session on cancellation —
        # that belongs to the caller of cancel_debug_replay_job (stop()).
        # If the runner cleared it, stop() would log "no active session"
        # and skip its cleanup_db / cleanup_files calls.
        self.assertTrue(self.manager.active)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()

View File

@ -14,6 +14,7 @@ from frigate.jobs.export import (
)
from frigate.record.export import PlaybackSourceEnum, RecordingExporter
from frigate.types import JobStatusTypesEnum
from frigate.util.ffmpeg import inject_progress_flags
def _make_exporter(
@ -118,10 +119,9 @@ class TestExpectedOutputDuration(unittest.TestCase):
class TestProgressFlagInjection(unittest.TestCase):
def test_inserts_before_output_path(self) -> None:
exporter = _make_exporter()
cmd = ["ffmpeg", "-i", "input.m3u8", "-c", "copy", "/tmp/output.mp4"]
result = exporter._inject_progress_flags(cmd)
result = inject_progress_flags(cmd)
assert result == [
"ffmpeg",
@ -136,8 +136,7 @@ class TestProgressFlagInjection(unittest.TestCase):
]
def test_handles_empty_cmd(self) -> None:
exporter = _make_exporter()
assert exporter._inject_progress_flags([]) == []
assert inject_progress_flags([]) == []
class TestFfmpegProgressParsing(unittest.TestCase):
@ -167,7 +166,7 @@ class TestFfmpegProgressParsing(unittest.TestCase):
fake_proc.returncode = 0
fake_proc.wait = MagicMock(return_value=0)
with patch("frigate.record.export.sp.Popen", return_value=fake_proc):
with patch("frigate.util.ffmpeg.sp.Popen", return_value=fake_proc):
returncode, _stderr = exporter._run_ffmpeg_with_progress(
["ffmpeg", "-i", "x.m3u8", "/tmp/out.mp4"], "playlist", step="encoding"
)
@ -499,5 +498,56 @@ class TestSchedulesCleanup(unittest.TestCase):
assert job.id not in manager.jobs
class TestChapterMetadataInProgressReview(unittest.TestCase):
"""Regression: in-progress review segments have end_time=NULL until the
activity closes. The chapter builder must clamp the chapter end to the
last recorded second instead of crashing on float(None)."""
def _fake_select_returning(self, rows: list) -> MagicMock:
mock_query = MagicMock()
mock_query.where.return_value = mock_query
mock_query.order_by.return_value = mock_query
mock_query.iterator.return_value = iter(rows)
return mock_query
def test_in_progress_review_does_not_crash_and_clamps_to_last_recording(
self,
) -> None:
exporter = _make_exporter(end_minus_start=200)
# Recordings cover [1000, 1150]; export window is [1000, 1200] so
# the last recorded second is 1150 (a 50s gap at the tail).
recordings = [
MagicMock(start_time=1000.0, end_time=1150.0),
]
in_progress = MagicMock(
start_time=1100.0,
end_time=None,
severity="alert",
data={"objects": ["person"]},
)
with tempfile.TemporaryDirectory() as tmpdir:
chapter_path = os.path.join(tmpdir, "chapters.txt")
exporter._chapter_metadata_path = lambda: chapter_path # type: ignore[method-assign]
with patch(
"frigate.record.export.ReviewSegment.select",
return_value=self._fake_select_returning([in_progress]),
):
result = exporter._build_chapter_metadata_file(recordings)
assert result == chapter_path
with open(chapter_path) as f:
content = f.read()
# Output time is windows[-1][1] - windows[-1][0] = 150s.
# Review starts at wall=1100, output offset = 100s -> 100000ms.
# Clamped end = last_recorded_end (1150) -> output offset = 150s -> 150000ms.
assert "[CHAPTER]" in content
assert "START=100000" in content
assert "END=150000" in content
assert "title=Alert: person" in content
if __name__ == "__main__":
unittest.main()

View File

@ -0,0 +1,111 @@
"""Tests for the shared ffmpeg progress helper."""
import unittest
from unittest.mock import MagicMock, patch
from frigate.util.ffmpeg import inject_progress_flags, run_ffmpeg_with_progress
class TestInjectProgressFlags(unittest.TestCase):
    """Unit tests for the argv rewriting done by inject_progress_flags."""

    def test_inserts_flags_before_output_path(self):
        # The progress/stats flags must land directly ahead of the final
        # token, which FFmpeg argv convention treats as the output path.
        argv = ["ffmpeg", "-i", "in.mp4", "-c", "copy", "out.mp4"]
        expected = argv[:-1] + ["-progress", "pipe:2", "-nostats", "out.mp4"]
        self.assertEqual(inject_progress_flags(argv), expected)

    def test_empty_cmd_returns_empty(self):
        # An empty argv has no output path; it must pass through untouched.
        self.assertEqual(inject_progress_flags([]), [])
class TestRunFfmpegWithProgress(unittest.TestCase):
    """Tests driving run_ffmpeg_with_progress against a fully mocked Popen."""

    def _make_fake_proc(self, stderr_lines, returncode=0):
        # Minimal stand-in for subprocess.Popen: iterable stderr, writable
        # stdin, a canned returncode, and a no-op wait().
        proc = MagicMock()
        proc.stderr = iter(stderr_lines)
        proc.stdin = MagicMock()
        proc.returncode = returncode
        proc.wait = MagicMock()
        return proc

    def test_emits_percent_from_out_time_us_lines(self):
        captured: list[float] = []

        def on_progress(percent: float) -> None:
            captured.append(percent)

        stderr_lines = [
            "out_time_us=1000000\n",
            "out_time_us=5000000\n",
            "progress=end\n",
        ]
        proc = self._make_fake_proc(stderr_lines)
        # The helper both iterates stderr line-by-line and calls .read() for
        # the remainder afterwards, so a bare iterator is not enough — give
        # it a mock that supports both protocols.
        proc.stderr = MagicMock()
        proc.stderr.__iter__ = lambda self: iter(stderr_lines)
        proc.stderr.read = MagicMock(return_value="")
        # Patching subprocess.Popen also intercepts the helper's `sp.Popen`.
        with patch("subprocess.Popen", return_value=proc):
            returncode, _stderr = run_ffmpeg_with_progress(
                ["ffmpeg", "-i", "in", "out"],
                expected_duration_seconds=10.0,
                on_progress=on_progress,
                use_low_priority=False,
            )
        self.assertEqual(returncode, 0)
        self.assertEqual(len(captured), 4)  # initial 0.0 + two parsed + final 100.0
        self.assertAlmostEqual(captured[0], 0.0)
        self.assertAlmostEqual(captured[1], 10.0)
        self.assertAlmostEqual(captured[2], 50.0)
        self.assertAlmostEqual(captured[3], 100.0)

    def test_passes_started_process_to_callback(self):
        proc = self._make_fake_proc([])
        proc.stderr = MagicMock()
        proc.stderr.__iter__ = lambda self: iter([])
        proc.stderr.read = MagicMock(return_value="")
        seen: list = []
        with patch("subprocess.Popen", return_value=proc):
            run_ffmpeg_with_progress(
                ["ffmpeg", "out"],
                expected_duration_seconds=1.0,
                process_started=lambda p: seen.append(p),
                use_low_priority=False,
            )
        # The exact Popen object must be handed to the callback so callers
        # can keep a reference for later cancellation.
        self.assertEqual(seen, [proc])

    def test_clamps_percent_to_0_100(self):
        captured: list[float] = []

        def on_progress(percent: float) -> None:
            captured.append(percent)

        # out_time_us far beyond the expected duration would compute >100%.
        stderr_lines = ["out_time_us=999999999999\n"]
        proc = self._make_fake_proc(stderr_lines)
        proc.stderr = MagicMock()
        proc.stderr.__iter__ = lambda self: iter(stderr_lines)
        proc.stderr.read = MagicMock(return_value="")
        with patch("subprocess.Popen", return_value=proc):
            run_ffmpeg_with_progress(
                ["ffmpeg", "out"],
                expected_duration_seconds=10.0,
                on_progress=on_progress,
                use_low_priority=False,
            )
        # initial 0.0 then a clamped reading
        self.assertEqual(captured[-1], 100.0)

View File

@ -7,8 +7,6 @@ from frigate.util.services import get_amd_gpu_stats, get_intel_gpu_stats
class TestGpuStats(unittest.TestCase):
def setUp(self):
self.amd_results = "Unknown Radeon card. <= R500 won't work, new cards might.\nDumping to -, line limit 1.\n1664070990.607556: bus 10, gpu 4.17%, ee 0.00%, vgt 0.00%, ta 0.00%, tc 0.00%, sx 0.00%, sh 0.00%, spi 0.83%, smx 0.00%, cr 0.00%, sc 0.00%, pa 0.00%, db 0.00%, cb 0.00%, vram 60.37% 294.04mb, gtt 0.33% 52.21mb, mclk 100.00% 1.800ghz, sclk 26.65% 0.533ghz\n"
self.intel_results = """{"period":{"duration":1.194033,"unit":"ms"},"frequency":{"requested":0.000000,"actual":0.000000,"unit":"MHz"},"interrupts":{"count":3349.991164,"unit":"irq/s"},"rc6":{"value":47.844741,"unit":"%"},"engines":{"Render/3D/0":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"},"Blitter/0":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"},"Video/0":{"busy":4.533124,"sema":0.000000,"wait":0.000000,"unit":"%"},"Video/1":{"busy":6.194385,"sema":0.000000,"wait":0.000000,"unit":"%"},"VideoEnhance/0":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"}}},{"period":{"duration":1.189291,"unit":"ms"},"frequency":{"requested":0.000000,"actual":0.000000,"unit":"MHz"},"interrupts":{"count":0.000000,"unit":"irq/s"},"rc6":{"value":100.000000,"unit":"%"},"engines":{"Render/3D/0":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"},"Blitter/0":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"},"Video/0":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"},"Video/1":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"},"VideoEnhance/0":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"}}}"""
self.nvidia_results = "name, utilization.gpu [%], memory.used [MiB], memory.total [MiB]\nNVIDIA GeForce RTX 3050, 42 %, 5036 MiB, 8192 MiB\n"
@patch("subprocess.run")
def test_amd_gpu_stats(self, sp):
@ -19,32 +17,76 @@ class TestGpuStats(unittest.TestCase):
amd_stats = get_amd_gpu_stats()
assert amd_stats == {"gpu": "4.17%", "mem": "60.37%"}
# @patch("subprocess.run")
# def test_nvidia_gpu_stats(self, sp):
# process = MagicMock()
# process.returncode = 0
# process.stdout = self.nvidia_results
# sp.return_value = process
# nvidia_stats = get_nvidia_gpu_stats()
# assert nvidia_stats == {
# "name": "NVIDIA GeForce RTX 3050",
# "gpu": "42 %",
# "mem": "61.5 %",
# }
@patch("frigate.util.services.time.sleep")
@patch("frigate.util.services.time.monotonic")
@patch("frigate.util.services._read_intel_drm_fdinfo")
def test_intel_gpu_stats_fdinfo(self, read_fdinfo, monotonic, sleep):
# 1 second of wall clock between snapshots
monotonic.side_effect = [0.0, 1.0]
@patch("subprocess.run")
def test_intel_gpu_stats(self, sp):
process = MagicMock()
process.returncode = 124
process.stdout = self.intel_results
sp.return_value = process
intel_stats = get_intel_gpu_stats(False)
# rc6 values: 47.844741 and 100.0 → avg 73.92 → gpu = 100 - 73.92 = 26.08%
# Render/3D/0: 0.0 and 0.0 → enc = 0.0%
# Video/0: 4.533124 and 0.0 → dec = 2.27%
assert intel_stats == {
"gpu": "26.08%",
"mem": "-%",
"compute": "0.0%",
"dec": "2.27%",
# Two i915 clients on the same iGPU. Engine values are cumulative ns.
# Deltas over the 1s window:
# client A (pid 100): render +200_000_000 (20%), video +500_000_000 (50%),
# video-enhance +100_000_000 (10%)
# client B (pid 200): compute +100_000_000 (10%)
# Engine totals → render 20, video 50, video-enhance 10, compute 10
# → compute = render + compute = 30
# → dec = video + video-enhance = 60
# → gpu = compute + dec = 90
snapshot_a = {
("0000:00:02.0", "1", "100"): {
"driver": "i915",
"pid": "100",
"engines": {
"render": (1_000_000_000, 0),
"video": (5_000_000_000, 0),
"video-enhance": (200_000_000, 0),
"compute": (0, 0),
},
},
("0000:00:02.0", "2", "200"): {
"driver": "i915",
"pid": "200",
"engines": {
"render": (0, 0),
"compute": (2_000_000_000, 0),
},
},
}
snapshot_b = {
("0000:00:02.0", "1", "100"): {
"driver": "i915",
"pid": "100",
"engines": {
"render": (1_200_000_000, 0),
"video": (5_500_000_000, 0),
"video-enhance": (300_000_000, 0),
"compute": (0, 0),
},
},
("0000:00:02.0", "2", "200"): {
"driver": "i915",
"pid": "200",
"engines": {
"render": (0, 0),
"compute": (2_100_000_000, 0),
},
},
}
read_fdinfo.side_effect = [snapshot_a, snapshot_b]
intel_stats = get_intel_gpu_stats(None)
sleep.assert_called_once()
assert intel_stats == {
"gpu": "90.0%",
"mem": "-%",
"compute": "30.0%",
"dec": "60.0%",
"clients": {"100": "80.0%", "200": "10.0%"},
}
@patch("frigate.util.services._read_intel_drm_fdinfo")
def test_intel_gpu_stats_no_clients(self, read_fdinfo):
read_fdinfo.return_value = {}
assert get_intel_gpu_stats(None) is None

View File

@ -2,8 +2,9 @@
import logging
import subprocess as sp
from typing import Any
from typing import Any, Callable, Optional
from frigate.const import PROCESS_PRIORITY_LOW
from frigate.log import LogPipe
@ -46,3 +47,124 @@ def start_or_restart_ffmpeg(
start_new_session=True,
)
return process
logger = logging.getLogger(__name__)


def inject_progress_flags(cmd: list[str]) -> list[str]:
    """Insert `-progress pipe:2 -nostats` immediately before the output path.

    `-progress pipe:2` writes structured key=value lines to stderr;
    `-nostats` suppresses the noisy default stats output. The output path
    is conventionally the last token in an FFmpeg argv.
    """
    if not cmd:
        return cmd
    *options, output = cmd
    return [*options, "-progress", "pipe:2", "-nostats", output]
def run_ffmpeg_with_progress(
    cmd: list[str],
    *,
    expected_duration_seconds: float,
    on_progress: Optional[Callable[[float], None]] = None,
    stdin_payload: Optional[str] = None,
    process_started: Optional[Callable[[sp.Popen], None]] = None,
    use_low_priority: bool = True,
) -> tuple[int, str]:
    """Run an ffmpeg command, streaming progress via `-progress pipe:2`.

    Args:
        cmd: ffmpeg argv. Output path must be the last token.
        expected_duration_seconds: Duration of the expected output clip in
            seconds. Used to convert ffmpeg's `out_time_us` into a percent.
        on_progress: Optional callback invoked with a percent in [0, 100].
            Called once with 0.0 at start, again on each `out_time_us=`
            stderr line, and once with 100.0 on `progress=end`.
        stdin_payload: Optional string written to ffmpeg stdin (used by
            export for concat playlists).
        process_started: Optional callback invoked with the live `Popen`
            once spawned; lets callers store the ref for cancellation.
        use_low_priority: When True, prepend `nice -n PROCESS_PRIORITY_LOW`
            so concat doesn't starve detection.

    Returns:
        Tuple of `(returncode, captured_stderr)`. Stdout is left attached
        to the parent process to avoid buffer-full deadlocks.
    """
    full_cmd = inject_progress_flags(cmd)
    if use_low_priority:
        full_cmd = ["nice", "-n", str(PROCESS_PRIORITY_LOW)] + full_cmd

    def emit(percent: float) -> None:
        # Clamp to [0, 100] and shield the caller's callback: a progress-UI
        # failure must never abort the transcode itself.
        if on_progress is None:
            return
        try:
            on_progress(max(0.0, min(100.0, percent)))
        except Exception:
            logger.exception("FFmpeg progress callback failed")

    emit(0.0)
    proc = sp.Popen(
        full_cmd,
        stdin=sp.PIPE if stdin_payload is not None else None,
        stderr=sp.PIPE,
        text=True,
        encoding="ascii",
        errors="replace",
    )
    if process_started is not None:
        # Hand out the Popen before any blocking work so the caller can
        # cancel an in-flight run.
        try:
            process_started(proc)
        except Exception:
            logger.exception("FFmpeg process_started callback failed")
    if stdin_payload is not None and proc.stdin is not None:
        # Best-effort write; ffmpeg may already have exited (broken pipe).
        # Always close stdin so ffmpeg sees EOF on its input list.
        try:
            proc.stdin.write(stdin_payload)
        except (BrokenPipeError, OSError):
            pass
        finally:
            try:
                proc.stdin.close()
            except (BrokenPipeError, OSError):
                pass
    captured: list[str] = []
    if proc.stderr is not None:
        try:
            for raw_line in proc.stderr:
                # Keep raw stderr for error reporting; parse a stripped copy.
                captured.append(raw_line)
                line = raw_line.strip()
                if not line:
                    continue
                if line.startswith("out_time_us="):
                    # Guard against a zero/negative duration (division) and
                    # malformed counter values before converting to percent.
                    if expected_duration_seconds <= 0:
                        continue
                    try:
                        out_time_us = int(line.split("=", 1)[1])
                    except (ValueError, IndexError):
                        continue
                    if out_time_us < 0:
                        continue
                    out_seconds = out_time_us / 1_000_000.0
                    emit((out_seconds / expected_duration_seconds) * 100.0)
                elif line == "progress=end":
                    emit(100.0)
                    break
        except Exception:
            logger.exception("Failed reading FFmpeg progress stream")
    proc.wait()
    if proc.stderr is not None:
        # Drain anything written after `progress=end` so errors aren't lost.
        try:
            remaining = proc.stderr.read()
            if remaining:
                captured.append(remaining)
        except Exception:
            pass
    return proc.returncode or 0, "".join(captured)

View File

@ -264,156 +264,214 @@ def get_amd_gpu_stats() -> Optional[dict[str, str]]:
return results
def get_intel_gpu_stats(intel_gpu_device: Optional[str]) -> Optional[dict[str, str]]:
"""Get stats using intel_gpu_top.
_INTEL_FDINFO_SAMPLE_SECONDS = 1.0
Returns overall GPU usage derived from rc6 residency (idle time),
plus individual engine breakdowns:
- enc: Render/3D engine (compute/shader encoder, used by QSV)
- dec: Video engines (fixed-function codec, used by VAAPI)
# Engines we track. Render/3D and Compute are pooled into "compute"; Video and
# VideoEnhance into "dec" (VideoEnhance is the post-process engine that handles
# VAAPI scaling/deinterlace/CSC, e.g. ffmpeg `-vf scale_vaapi=...`). The Copy
# (DMA blitter) engine is intentionally ignored — it represents transparent
# memory transfers, not user-visible GPU work.
# i915 fdinfo keys (cumulative ns) → logical engine name.
_I915_ENGINE_KEYS = {
"drm-engine-render": "render",
"drm-engine-video": "video",
"drm-engine-video-enhance": "video-enhance",
"drm-engine-compute": "compute",
}
# Xe fdinfo suffixes (cumulative cycles, paired with drm-total-cycles-*).
_XE_ENGINE_KEYS = {
"rcs": "render",
"vcs": "video",
"vecs": "video-enhance",
"ccs": "compute",
}
def _resolve_intel_gpu_pdev(device: Optional[str]) -> Optional[str]:
"""Map a configured GPU hint (/dev/dri/card1, renderD128, or a PCI bus
address) to its drm-pdev string so we can filter fdinfo entries to that
device. Returns None when no hint is supplied or it cannot be resolved."""
if not device:
return None
if re.match(r"^[0-9a-fA-F]{4}:[0-9a-fA-F]{2}:[0-9a-fA-F]{2}\.[0-9a-fA-F]$", device):
return device
name = os.path.basename(device.rstrip("/"))
try:
return os.path.basename(os.path.realpath(f"/sys/class/drm/{name}/device"))
except OSError:
return None
def _read_intel_drm_fdinfo(target_pdev: Optional[str]) -> dict:
"""Snapshot DRM fdinfo for every Intel client visible in /proc.
Returns a dict keyed by (pdev, drm-client-id, pid) so the same context
seen via multiple file descriptors on a single process collapses to one
entry.
"""
def get_stats_manually(output: str) -> dict[str, str]:
"""Find global stats via regex when json fails to parse."""
reading = "".join(output)
results: dict[str, str] = {}
# rc6 residency for overall GPU usage
rc6_match = re.search(r'"rc6":\{"value":([\d.]+)', reading)
if rc6_match:
rc6_value = float(rc6_match.group(1))
results["gpu"] = f"{round(100.0 - rc6_value, 2)}%"
else:
results["gpu"] = "-%"
results["mem"] = "-%"
# Render/3D is the compute/encode engine
render = []
for result in re.findall(r'"Render/3D/0":{[a-z":\d.,%]+}', reading):
packet = json.loads(result[14:])
single = packet.get("busy", 0.0)
render.append(float(single))
if render:
results["compute"] = f"{round(sum(render) / len(render), 2)}%"
# Video engines are the fixed-function decode engines
video = []
for result in re.findall(r'"Video/\d":{[a-z":\d.,%]+}', reading):
packet = json.loads(result[10:])
single = packet.get("busy", 0.0)
video.append(float(single))
if video:
results["dec"] = f"{round(sum(video) / len(video), 2)}%"
return results
intel_gpu_top_command = [
"timeout",
"0.5s",
"intel_gpu_top",
"-J",
"-o",
"-",
"-s",
"1000", # Intel changed this from seconds to milliseconds in 2024+ versions
]
if intel_gpu_device:
intel_gpu_top_command += ["-d", intel_gpu_device]
snapshot: dict = {}
try:
p = sp.run(
intel_gpu_top_command,
encoding="ascii",
capture_output=True,
)
except UnicodeDecodeError:
return None
proc_entries = os.listdir("/proc")
except OSError:
return snapshot
# timeout has a non-zero returncode when timeout is reached
if p.returncode != 124:
logger.error(f"Unable to poll intel GPU stats: {p.stderr}")
return None
else:
output = "".join(p.stdout.split())
for entry in proc_entries:
if not entry.isdigit():
continue
fdinfo_dir = f"/proc/{entry}/fdinfo"
try:
data = json.loads(f"[{output}]")
except json.JSONDecodeError:
return get_stats_manually(output)
fds = os.listdir(fdinfo_dir)
except (FileNotFoundError, PermissionError, NotADirectoryError, OSError):
continue
results: dict[str, str] = {}
rc6_values = []
render_global = []
video_global = []
# per-client: {pid: [total_busy_per_sample, ...]}
client_usages: dict[str, list[float]] = {}
for fd in fds:
try:
with open(f"{fdinfo_dir}/{fd}") as f:
content = f.read()
except (FileNotFoundError, PermissionError, OSError):
continue
for block in data:
# rc6 residency: percentage of time GPU is idle
rc6 = block.get("rc6", {}).get("value")
if rc6 is not None:
rc6_values.append(float(rc6))
if "drm-driver" not in content:
continue
global_engine = block.get("engines")
fields: dict[str, str] = {}
for line in content.splitlines():
key, sep, value = line.partition(":")
if sep:
fields[key.strip()] = value.strip()
if global_engine:
render_frame = global_engine.get("Render/3D/0", {}).get("busy")
video_frame = global_engine.get("Video/0", {}).get("busy")
driver = fields.get("drm-driver")
if driver not in ("i915", "xe"):
continue
if render_frame is not None:
render_global.append(float(render_frame))
pdev = fields.get("drm-pdev", "")
if target_pdev and pdev != target_pdev:
continue
if video_frame is not None:
video_global.append(float(video_frame))
client_id = fields.get("drm-client-id")
if not client_id:
continue
clients = block.get("clients", {})
key = (pdev, client_id, entry)
if key in snapshot:
continue
if clients:
for client_block in clients.values():
pid = client_block["pid"]
engines: dict[str, tuple[int, int]] = {}
if pid not in client_usages:
client_usages[pid] = []
if driver == "i915":
for fkey, engine in _I915_ENGINE_KEYS.items():
raw = fields.get(fkey)
if not raw:
continue
try:
engines[engine] = (int(raw.split()[0]), 0)
except (ValueError, IndexError):
continue
else:
for suffix, engine in _XE_ENGINE_KEYS.items():
busy_raw = fields.get(f"drm-cycles-{suffix}")
total_raw = fields.get(f"drm-total-cycles-{suffix}")
if not (busy_raw and total_raw):
continue
try:
engines[engine] = (
int(busy_raw.split()[0]),
int(total_raw.split()[0]),
)
except (ValueError, IndexError):
continue
# Sum all engine-class busy values for this client
total_busy = 0.0
for engine in client_block.get("engine-classes", {}).values():
busy = engine.get("busy")
if busy is not None:
total_busy += float(busy)
if not engines:
continue
client_usages[pid].append(total_busy)
snapshot[key] = {"driver": driver, "pid": entry, "engines": engines}
# Overall GPU usage from rc6 (idle) residency
if rc6_values:
rc6_avg = sum(rc6_values) / len(rc6_values)
results["gpu"] = f"{round(100.0 - rc6_avg, 2)}%"
return snapshot
results["mem"] = "-%"
# Compute: Render/3D engine (compute/shader workloads and QSV encode)
if render_global:
results["compute"] = f"{round(sum(render_global) / len(render_global), 2)}%"
def get_intel_gpu_stats(intel_gpu_device: Optional[str]) -> Optional[dict[str, Any]]:
"""Get stats by reading DRM fdinfo files.
# Decoder: Video engine (fixed-function codec)
if video_global:
results["dec"] = f"{round(sum(video_global) / len(video_global), 2)}%"
Each DRM client FD exposes monotonic per-engine busy counters via
/proc/<pid>/fdinfo/<fd> (i915 since kernel 5.19, Xe since first release).
We sample twice and divide busy-time deltas by wall-clock to derive
utilization. Render/3D and Compute are pooled into "compute"; Video and
VideoEnhance into "dec". Overall "gpu" is the sum of those pools (clamped
to 100%).
"""
target_pdev = _resolve_intel_gpu_pdev(intel_gpu_device)
# Per-client GPU usage (sum of all engines per process)
if client_usages:
results["clients"] = {}
snapshot_a = _read_intel_drm_fdinfo(target_pdev)
if not snapshot_a:
return None
for pid, samples in client_usages.items():
if samples:
results["clients"][pid] = (
f"{round(sum(samples) / len(samples), 2)}%"
)
start = time.monotonic()
time.sleep(_INTEL_FDINFO_SAMPLE_SECONDS)
elapsed_ns = (time.monotonic() - start) * 1e9
return results
snapshot_b = _read_intel_drm_fdinfo(target_pdev)
if not snapshot_b or elapsed_ns <= 0:
return None
engine_pct: dict[str, float] = {
"render": 0.0,
"video": 0.0,
"video-enhance": 0.0,
"compute": 0.0,
}
pid_pct: dict[str, float] = {}
for key, data_b in snapshot_b.items():
data_a = snapshot_a.get(key)
if not data_a or data_a["driver"] != data_b["driver"]:
continue
client_total = 0.0
for engine, (busy_b, total_b) in data_b["engines"].items():
if engine not in engine_pct:
continue
busy_a, total_a = data_a["engines"].get(engine, (busy_b, total_b))
if data_b["driver"] == "i915":
delta = max(0, busy_b - busy_a)
pct = min(100.0, delta / elapsed_ns * 100.0)
else:
delta_busy = max(0, busy_b - busy_a)
delta_total = total_b - total_a
if delta_total <= 0:
continue
pct = min(100.0, delta_busy / delta_total * 100.0)
engine_pct[engine] += pct
client_total += pct
pid_pct[data_b["pid"]] = pid_pct.get(data_b["pid"], 0.0) + client_total
for engine in engine_pct:
engine_pct[engine] = min(100.0, engine_pct[engine])
compute_pct = min(100.0, engine_pct["render"] + engine_pct["compute"])
dec_pct = min(100.0, engine_pct["video"] + engine_pct["video-enhance"])
overall_pct = min(100.0, compute_pct + dec_pct)
results: dict[str, Any] = {
"gpu": f"{round(overall_pct, 2)}%",
"mem": "-%",
"compute": f"{round(compute_pct, 2)}%",
"dec": f"{round(dec_pct, 2)}%",
}
if pid_pct:
results["clients"] = {
pid: f"{round(min(100.0, pct), 2)}%" for pid, pct in pid_pct.items()
}
return results
def get_openvino_npu_stats() -> Optional[dict[str, str]]:

View File

@ -31,7 +31,7 @@ test.describe("Replay — no active session @medium", () => {
await expect(
frigateApp.page.getByRole("heading", {
level: 2,
name: /No Active Replay Session/i,
name: /No Active Debug Replay Session/i,
}),
).toBeVisible({ timeout: 10_000 });
const goButton = frigateApp.page.getByRole("button", {
@ -48,7 +48,7 @@ test.describe("Replay — no active session @medium", () => {
await expect(
frigateApp.page.getByRole("heading", {
level: 2,
name: /No Active Replay Session/i,
name: /No Active Debug Replay Session/i,
}),
).toBeVisible({ timeout: 10_000 });
await frigateApp.page
@ -297,7 +297,7 @@ test.describe("Replay — mobile @medium @mobile", () => {
await expect(
frigateApp.page.getByRole("heading", {
level: 2,
name: /No Active Replay Session/i,
name: /No Active Debug Replay Session/i,
}),
).toBeVisible({ timeout: 10_000 });
});

View File

@ -485,6 +485,10 @@
"hwaccel_args": {
"label": "Export hwaccel args",
"description": "Hardware acceleration args to use for export/transcode operations."
},
"max_concurrent": {
"label": "Maximum concurrent exports",
"description": "Maximum number of export jobs to process at the same time."
}
},
"preview": {

View File

@ -242,8 +242,8 @@
"description": "Enable per-process network bandwidth monitoring for camera ffmpeg processes and detectors (requires capabilities)."
},
"intel_gpu_device": {
"label": "SR-IOV device",
"description": "Device identifier used when treating Intel GPUs as SR-IOV to fix GPU stats."
"label": "Intel GPU device",
"description": "PCI bus address or DRM device path (e.g. /dev/dri/card1) used to pin Intel GPU stats to a specific device when multiple are present."
}
},
"version_check": {
@ -397,6 +397,14 @@
"description": "The device to use for Hailo inference (e.g. 'PCIe', 'M.2')."
}
},
"hailo10h": {
"label": "Hailo-10H",
"description": "Hailo-10H detector using HEF models and the HailoRT SDK for inference on Hailo hardware.",
"device": {
"label": "Device Type",
"description": "The device to use for Hailo inference (e.g. 'PCIe', 'M.2')."
}
},
"memryx": {
"label": "MemryX",
"description": "MemryX MX3 detector that runs compiled DFP models on MemryX accelerators.",
@ -1000,6 +1008,10 @@
"hwaccel_args": {
"label": "Export hwaccel args",
"description": "Hardware acceleration args to use for export/transcode operations."
},
"max_concurrent": {
"label": "Maximum concurrent exports",
"description": "Maximum number of export jobs to process at the same time."
}
},
"preview": {

View File

@ -19,26 +19,31 @@
"startLabel": "Start",
"endLabel": "End",
"toast": {
"success": "Debug replay started successfully",
"error": "Failed to start debug replay: {{error}}",
"alreadyActive": "A replay session is already active",
"stopped": "Debug replay stopped",
"stopError": "Failed to stop debug replay: {{error}}",
"goToReplay": "Go to Replay"
}
},
"page": {
"noSession": "No Active Replay Session",
"noSessionDesc": "Start a debug replay from the History view by clicking the Debug Replay button in the toolbar.",
"noSession": "No Active Debug Replay Session",
"noSessionDesc": "Start a Debug Replay from History view by clicking the Actions button in the toolbar and choosing Debug Replay.",
"goToRecordings": "Go to History",
"preparingClip": "Preparing clip…",
"preparingClipDesc": "Frigate is stitching together recordings for the selected time range. This can take a minute for longer ranges.",
"startingCamera": "Starting Debug Replay…",
"startError": {
"title": "Failed to start Debug Replay",
"back": "Back to History"
},
"sourceCamera": "Source Camera",
"replayCamera": "Replay Camera",
"initializingReplay": "Initializing replay...",
"stoppingReplay": "Stopping replay...",
"initializingReplay": "Initializing Debug Replay...",
"stoppingReplay": "Stopping Debug Replay...",
"stopReplay": "Stop Replay",
"confirmStop": {
"title": "Stop Debug Replay?",
"description": "This will stop the replay session and clean up all temporary data. Are you sure?",
"description": "This will stop the session and clean up all temporary data. Are you sure?",
"confirm": "Stop Replay",
"cancel": "Cancel"
},
@ -49,6 +54,6 @@
"activeTracking": "Active tracking",
"noActiveTracking": "No active tracking",
"configuration": "Configuration",
"configurationDesc": "Fine tune motion detection and object tracking settings for the debug replay camera. No changes are saved to your Frigate configuration file."
"configurationDesc": "Fine tune motion detection and object tracking settings for the Debug Replay camera. No changes are saved to your Frigate configuration file."
}
}

View File

@ -20,7 +20,18 @@
"overriddenGlobal": "Overridden (Global)",
"overriddenGlobalTooltip": "This camera overrides global configuration settings in this section",
"overriddenBaseConfig": "Overridden (Base Config)",
"overriddenBaseConfigTooltip": "The {{profile}} profile overrides configuration settings in this section"
"overriddenBaseConfigTooltip": "The {{profile}} profile overrides configuration settings in this section",
"overriddenInCameras": {
"label_one": "Overridden in {{count}} camera",
"label_other": "Overridden in {{count}} cameras",
"tooltip_one": "{{count}} camera overrides values in this section. Click to see details.",
"tooltip_other": "{{count}} cameras override values in this section. Click to see details.",
"heading_one": "This global section has fields that are overridden in {{count}} camera.",
"heading_other": "This global section has fields that are overridden in {{count}} cameras.",
"othersField_one": "{{count}} other",
"othersField_other": "{{count}} others",
"profilePrefix": "{{profile}} profile: {{fields}}"
}
},
"menu": {
"general": "General",

View File

@ -25,6 +25,7 @@ import {
} from "./section-special-cases";
import { getSectionValidation } from "../section-validations";
import { useConfigOverride } from "@/hooks/use-config-override";
import { CameraOverridesBadge } from "./CameraOverridesBadge";
import { useSectionSchema } from "@/hooks/use-config-schema";
import type { FrigateConfig } from "@/types/frigateConfig";
import { Badge } from "@/components/ui/badge";
@ -1263,6 +1264,9 @@ export function ConfigSection({
</TooltipContent>
</Tooltip>
)}
{showOverrideIndicator && effectiveLevel === "global" && (
<CameraOverridesBadge sectionPath={sectionPath} />
)}
{hasChanges && (
<Badge variant="outline" className="text-xs">
{t("button.modified", {
@ -1334,6 +1338,9 @@ export function ConfigSection({
</TooltipContent>
</Tooltip>
)}
{showOverrideIndicator && effectiveLevel === "global" && (
<CameraOverridesBadge sectionPath={sectionPath} />
)}
{hasChanges && (
<Badge
variant="secondary"

View File

@ -0,0 +1,303 @@
import useSWR from "swr";
import { useMemo } from "react";
import { useTranslation } from "react-i18next";
import { Link } from "react-router-dom";
import { LuChevronDown } from "react-icons/lu";
import { Badge } from "@/components/ui/badge";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/ui/popover";
import {
CameraOverrideEntry,
FieldDelta,
useCamerasOverridingSection,
} from "@/hooks/use-config-override";
import type { FrigateConfig } from "@/types/frigateConfig";
import type { ProfilesApiResponse } from "@/types/profile";
import { humanizeKey } from "@/components/config-form/theme/utils/i18n";
import { useCameraFriendlyName } from "@/hooks/use-camera-friendly-name";
import { formatList } from "@/utils/stringUtil";
import { getSectionConfig } from "@/utils/configUtil";
/**
 * Per-camera settings page id for each overridable global section; used to
 * build the deep link (`/settings?page=...&camera=...`) from the badge
 * popover to the overriding camera's own settings page.
 */
const CAMERA_PAGE_BY_SECTION: Record<string, string> = {
  detect: "cameraDetect",
  ffmpeg: "cameraFfmpeg",
  record: "cameraRecording",
  snapshots: "cameraSnapshots",
  motion: "cameraMotion",
  objects: "cameraObjects",
  review: "cameraReview",
  audio: "cameraAudioEvents",
  audio_transcription: "cameraAudioTranscription",
  notifications: "cameraNotifications",
  live: "cameraLivePlayback",
  birdseye: "cameraBirdseye",
  face_recognition: "cameraFaceRecognition",
  lpr: "cameraLpr",
  timestamp_style: "cameraTimestampStyle",
};

/** Max field labels listed per camera before collapsing into "N others". */
const MAX_FIELDS_PER_CAMERA = 5;

/**
 * Enrichment sections where the cross-camera override badge should be
 * suppressed because they're effectively global-only (or per-camera
 * configuration there isn't a useful affordance to surface here).
 * Face recognition and LPR are intentionally omitted so the badge does show
 * on those enrichment pages.
 */
const SECTIONS_WITHOUT_OVERRIDE_BADGE = new Set([
  "semantic_search",
  "genai",
  "classification",
  "audio_transcription",
]);
/**
* Match a delta path against a hidden-field pattern. Supports literal prefixes
* (so a hidden field "streams" also hides "streams.foo.bar") and `*` wildcards
* matching exactly one path segment (e.g. "filters.*.mask").
*/
/**
 * Match a delta path against a hidden-field pattern. Supports literal prefixes
 * (so a hidden field "streams" also hides "streams.foo.bar") and `*` wildcards
 * matching exactly one path segment (e.g. "filters.*.mask").
 */
function pathMatchesHiddenPattern(path: string, pattern: string): boolean {
  if (!pattern) {
    return false;
  }
  // Fast path: no wildcard means exact match or dotted-prefix match.
  if (!pattern.includes("*")) {
    return path === pattern || path.startsWith(`${pattern}.`);
  }
  const want = pattern.split(".");
  const have = path.split(".");
  // Prefix semantics: the path may extend beyond the pattern, never be shorter.
  if (have.length < want.length) {
    return false;
  }
  return want.every((segment, idx) => segment === "*" || segment === have[idx]);
}
type CameraEntryProps = {
  sectionPath: string;
  entry: CameraOverrideEntry;
  cameraPage?: string;
};

type SourceGroup = {
  /** undefined → camera-level; string → profile name */
  profileName: string | undefined;
  deltas: FieldDelta[];
};

/**
 * Partition field deltas by where the override comes from: camera-level
 * deltas first (single group), then one group per profile in first-seen
 * order.
 */
function groupDeltasBySource(deltas: FieldDelta[]): SourceGroup[] {
  const fromCamera: FieldDelta[] = [];
  const fromProfiles = new Map<string, FieldDelta[]>();
  deltas.forEach((delta) => {
    if (delta.profileName) {
      const bucket = fromProfiles.get(delta.profileName);
      if (bucket) {
        bucket.push(delta);
      } else {
        fromProfiles.set(delta.profileName, [delta]);
      }
    } else {
      fromCamera.push(delta);
    }
  });
  const result: SourceGroup[] = [];
  if (fromCamera.length > 0) {
    result.push({ profileName: undefined, deltas: fromCamera });
  }
  fromProfiles.forEach((group, profileName) => {
    result.push({ profileName, deltas: group });
  });
  return result;
}
/**
 * One camera's row inside the override popover: the camera name (linked to
 * its settings page when one exists for this section) followed by the list
 * of overridden fields, grouped by source (camera-level vs. each profile).
 */
function CameraEntry({ sectionPath, entry, cameraPage }: CameraEntryProps) {
  const { t, i18n } = useTranslation([
    "config/global",
    "views/settings",
    "objects",
  ]);
  const friendlyName = useCameraFriendlyName(entry.camera);
  const { data: profilesData } = useSWR<ProfilesApiResponse>("profiles");

  // Map profile name → friendly name for display in the profile prefix.
  const profileFriendlyNames = useMemo(() => {
    const map = new Map<string, string>();
    profilesData?.profiles?.forEach((p) => map.set(p.name, p.friendly_name));
    return map;
  }, [profilesData]);

  // Resolve a human-readable label for a dotted field path, trying
  // progressively less-specific i18n keys before humanizing the raw key.
  const fieldLabel = (fieldPath: string) => {
    if (!fieldPath) {
      // Empty path means the whole section differs; use the section label.
      const sectionKey = `${sectionPath}.label`;
      return i18n.exists(sectionKey, { ns: "config/global" })
        ? t(sectionKey, { ns: "config/global" })
        : humanizeKey(sectionPath);
    }
    const segments = fieldPath.split(".");
    // Most specific: try the full nested path
    const fullKey = `${sectionPath}.${fieldPath}.label`;
    if (i18n.exists(fullKey, { ns: "config/global" })) {
      return t(fullKey, { ns: "config/global" });
    }
    // Try dropping each intermediate segment in turn — those are typically
    // user-defined dict keys (object class names, zone names, etc.) that
    // don't have their own label entries. Prepend the dropped segment as
    // context to disambiguate (e.g. "Person · Minimum object area").
    for (let i = 0; i < segments.length; i++) {
      const reduced = [...segments.slice(0, i), ...segments.slice(i + 1)].join(
        ".",
      );
      if (!reduced) continue;
      const reducedKey = `${sectionPath}.${reduced}.label`;
      if (i18n.exists(reducedKey, { ns: "config/global" })) {
        const resolvedLabel = t(reducedKey, { ns: "config/global" });
        const dropped = segments[i];
        // Object class names ("person", "car", "fox") have translations in
        // the `objects` namespace; fall back to humanizing the raw key for
        // anything that isn't a known label.
        const droppedLabel = i18n.exists(dropped, { ns: "objects" })
          ? t(dropped, { ns: "objects" })
          : humanizeKey(dropped);
        return `${droppedLabel} · ${resolvedLabel}`;
      }
    }
    // Last resort: humanize the leaf segment
    return humanizeKey(segments[segments.length - 1]);
  };

  // Render at most MAX_FIELDS_PER_CAMERA labels, collapsing the rest into a
  // localized "N others" tail, then join into a locale-aware list.
  const formatDeltas = (deltas: FieldDelta[]) => {
    const visibleLabels = deltas
      .slice(0, MAX_FIELDS_PER_CAMERA)
      .map((delta) => fieldLabel(delta.fieldPath));
    const hiddenCount = deltas.length - visibleLabels.length;
    const labelsForList =
      hiddenCount > 0
        ? [
            ...visibleLabels,
            t("button.overriddenInCameras.othersField", {
              ns: "views/settings",
              count: hiddenCount,
            }),
          ]
        : visibleLabels;
    return formatList(labelsForList);
  };

  const groups = groupDeltasBySource(entry.fieldDeltas);

  return (
    <div className="flex flex-col gap-0.5 text-xs">
      {cameraPage ? (
        <Link
          to={`/settings?page=${cameraPage}&camera=${encodeURIComponent(entry.camera)}`}
          className="font-medium hover:underline"
        >
          {friendlyName}
        </Link>
      ) : (
        <span className="font-medium">{friendlyName}</span>
      )}
      {groups.map((group) => (
        <span
          key={group.profileName ?? "__camera__"}
          className="ml-2 text-muted-foreground"
        >
          {group.profileName
            ? t("button.overriddenInCameras.profilePrefix", {
                ns: "views/settings",
                profile:
                  profileFriendlyNames.get(group.profileName) ??
                  group.profileName,
                fields: formatDeltas(group.deltas),
              })
            : formatDeltas(group.deltas)}
        </span>
      ))}
    </div>
  );
}
type Props = {
  /** Dot-path of the global config section the badge reports on (e.g. "review"). */
  sectionPath: string;
  /** Optional extra classes appended to the badge element. */
  className?: string;
};
/**
 * Badge rendered next to a global settings section title that reports how
 * many cameras override that section. Clicking it opens a popover listing
 * each overriding camera with its field-level differences. Renders nothing
 * for sections without a badge or when no camera overrides the section.
 */
export function CameraOverridesBadge({ sectionPath, className }: Props) {
  const { data: config } = useSWR<FrigateConfig>("config");
  const { t } = useTranslation(["views/settings"]);
  const allEntries = useCamerasOverridingSection(config, sectionPath);
  // Drop deltas matching the section's hiddenFields patterns, then drop
  // cameras whose delta list became empty.
  const visibleEntries = useMemo(() => {
    const hiddenFields =
      getSectionConfig(sectionPath, "global").hiddenFields ?? [];
    if (hiddenFields.length === 0) {
      return allEntries;
    }
    const filtered: typeof allEntries = [];
    for (const entry of allEntries) {
      const fieldDeltas = entry.fieldDeltas.filter((delta) => {
        for (const pattern of hiddenFields) {
          if (pathMatchesHiddenPattern(delta.fieldPath, pattern)) {
            return false;
          }
        }
        return true;
      });
      if (fieldDeltas.length > 0) {
        filtered.push({ ...entry, fieldDeltas });
      }
    }
    return filtered;
  }, [allEntries, sectionPath]);
  if (SECTIONS_WITHOUT_OVERRIDE_BADGE.has(sectionPath)) {
    return null;
  }
  if (visibleEntries.length === 0) {
    return null;
  }
  const cameraPage = CAMERA_PAGE_BY_SECTION[sectionPath];
  const count = visibleEntries.length;
  const badgeClassName = `cursor-pointer border-2 border-selected text-xs text-primary-variant ${className ?? ""}`;
  return (
    <Popover>
      <PopoverTrigger asChild>
        <Badge
          variant="secondary"
          className={badgeClassName}
          aria-label={t("button.overriddenInCameras.tooltip", {
            ns: "views/settings",
            count: count,
          })}
        >
          <span>
            {t("button.overriddenInCameras.label", {
              ns: "views/settings",
              count: count,
            })}
          </span>
          <LuChevronDown className="ml-1 size-3" />
        </Badge>
      </PopoverTrigger>
      <PopoverContent align="start" className="w-80 max-w-[90vw] pr-0">
        <div className="flex flex-col gap-3">
          <div className="pr-4 text-xs text-primary-variant">
            {t("button.overriddenInCameras.heading", {
              ns: "views/settings",
              count: count,
            })}
          </div>
          <div className="scrollbar-container flex max-h-[40dvh] flex-col gap-2 overflow-y-auto pr-4">
            {visibleEntries.map((entry) => (
              <CameraEntry
                key={entry.camera}
                sectionPath={sectionPath}
                entry={entry}
                cameraPage={cameraPage}
              />
            ))}
          </div>
        </div>
      </PopoverContent>
    </Popover>
  );
}

View File

@ -90,10 +90,6 @@ export default function SearchResultActions({
const handleDebugReplay = useCallback(
(event: SearchResult) => {
setIsStarting(true);
const toastId = toast.loading(
t("dialog.starting", { ns: "views/replay" }),
{ position: "top-center" },
);
axios
.post("debug_replay/start", {
@ -102,11 +98,7 @@ export default function SearchResultActions({
end_time: event.end_time,
})
.then((response) => {
if (response.status === 200) {
toast.success(t("dialog.toast.success", { ns: "views/replay" }), {
id: toastId,
position: "top-center",
});
if (response.status === 202 || response.status === 200) {
navigate("/replay");
}
})
@ -120,7 +112,6 @@ export default function SearchResultActions({
toast.error(
t("dialog.toast.alreadyActive", { ns: "views/replay" }),
{
id: toastId,
position: "top-center",
closeButton: true,
dismissible: false,
@ -135,7 +126,6 @@ export default function SearchResultActions({
);
} else {
toast.error(t("dialog.toast.error", { error: errorMessage }), {
id: toastId,
position: "top-center",
});
}

View File

@ -209,10 +209,7 @@ export default function DebugReplayDialog({
end_time: range.before,
})
.then((response) => {
if (response.status === 200) {
toast.success(t("dialog.toast.success"), {
position: "top-center",
});
if (response.status === 202 || response.status === 200) {
setMode("none");
setRange(undefined);
navigate("/replay");

View File

@ -262,10 +262,7 @@ export default function MobileReviewSettingsDrawer({
end_time: debugReplayRange.before,
});
if (response.status === 200) {
toast.success(t("dialog.toast.success", { ns: "views/replay" }), {
position: "top-center",
});
if (response.status === 202 || response.status === 200) {
setDebugReplayMode("none");
setDebugReplayRange(undefined);
setDrawerMode("none");

View File

@ -53,10 +53,6 @@ export default function EventMenu({
const handleDebugReplay = useCallback(
(event: Event) => {
setIsStarting(true);
const toastId = toast.loading(
t("dialog.starting", { ns: "views/replay" }),
{ position: "top-center" },
);
axios
.post("debug_replay/start", {
@ -65,11 +61,7 @@ export default function EventMenu({
end_time: event.end_time,
})
.then((response) => {
if (response.status === 200) {
toast.success(t("dialog.toast.success", { ns: "views/replay" }), {
id: toastId,
position: "top-center",
});
if (response.status === 202 || response.status === 200) {
navigate("/replay");
}
})
@ -83,7 +75,6 @@ export default function EventMenu({
toast.error(
t("dialog.toast.alreadyActive", { ns: "views/replay" }),
{
id: toastId,
position: "top-center",
closeButton: true,
dismissible: false,
@ -98,7 +89,6 @@ export default function EventMenu({
);
} else {
toast.error(t("dialog.toast.error", { error: errorMessage }), {
id: toastId,
position: "top-center",
});
}

View File

@ -202,6 +202,49 @@ export function useConfigOverride({
}, [config, cameraName, sectionPath, compareFields]);
}
/**
 * Sections that can be overridden per-camera, with optional compareFields
 * filters that scope the override comparison to a subset of fields.
 */
export const OVERRIDABLE_SECTIONS: ReadonlyArray<{
  key: string;
  compareFields?: string[];
}> = [
  // Compared in full — every field in the section participates in the diff.
  { key: "detect" },
  { key: "record" },
  { key: "snapshots" },
  { key: "motion" },
  { key: "objects" },
  { key: "review" },
  { key: "audio" },
  { key: "notifications" },
  { key: "live" },
  { key: "timestamp_style" },
  // Compared only on the listed fields; all other fields in the section are
  // ignored when deciding whether a camera overrides the global value.
  {
    key: "audio_transcription",
    compareFields: ["enabled", "live_enabled"],
  },
  { key: "birdseye", compareFields: ["enabled", "mode"] },
  { key: "face_recognition", compareFields: ["enabled", "min_area"] },
  {
    key: "ffmpeg",
    compareFields: [
      "path",
      "global_args",
      "hwaccel_args",
      "input_args",
      "output_args",
      "retry_interval",
      "apple_compatibility",
      "gpu",
    ],
  },
  {
    key: "lpr",
    compareFields: ["enabled", "min_area", "enhancement"],
  },
];
/**
* Hook to get all overridden fields for a camera
*/
@ -221,47 +264,7 @@ export function useAllCameraOverrides(
const overriddenSections: string[] = [];
// Check each section that can be overridden
const sectionsToCheck: Array<{
key: string;
compareFields?: string[];
}> = [
{ key: "detect" },
{ key: "record" },
{ key: "snapshots" },
{ key: "motion" },
{ key: "objects" },
{ key: "review" },
{ key: "audio" },
{ key: "notifications" },
{ key: "live" },
{ key: "timestamp_style" },
{
key: "audio_transcription",
compareFields: ["enabled", "live_enabled"],
},
{ key: "birdseye", compareFields: ["enabled", "mode"] },
{ key: "face_recognition", compareFields: ["enabled", "min_area"] },
{
key: "ffmpeg",
compareFields: [
"path",
"global_args",
"hwaccel_args",
"input_args",
"output_args",
"retry_interval",
"apple_compatibility",
"gpu",
],
},
{
key: "lpr",
compareFields: ["enabled", "min_area", "enhancement"],
},
];
for (const { key, compareFields } of sectionsToCheck) {
for (const { key, compareFields } of OVERRIDABLE_SECTIONS) {
const globalValue = normalizeConfigValue(get(config, key));
const cameraValue = normalizeConfigValue(
getBaseCameraSectionValue(config, cameraName, key),
@ -286,3 +289,252 @@ export function useAllCameraOverrides(
return overriddenSections;
}, [config, cameraName]);
}
export interface FieldDelta {
  /** Path relative to the section (e.g. "genai.enabled") */
  fieldPath: string;
  /** Value the global config holds at this path (may be undefined when unset). */
  globalValue: unknown;
  /** Value the camera (or its profile) holds at this path. */
  cameraValue: unknown;
  /** Profile name when the override originates from a profile; undefined for camera-level overrides */
  profileName?: string;
}
/** One overriding camera together with the field-level differences found for it. */
export interface CameraOverrideEntry {
  /** Camera config key (not the friendly display name). */
  camera: string;
  /** Camera-level and profile-level deltas vs. the global section, merged. */
  fieldDeltas: FieldDelta[];
}
/**
 * Compute the leaf-level differences between a global section value and a
 * camera section value.
 *
 * When compareFields is given, exactly those dot-paths are diffed and
 * nothing else. Otherwise, matching object shapes are walked recursively
 * and a delta is emitted for every differing non-object leaf.
 */
function collectFieldDeltas(
  globalValue: JsonValue,
  cameraValue: JsonValue,
  compareFields?: string[],
  pathPrefix = "",
): FieldDelta[] {
  if (compareFields) {
    // Allowlist mode: diff only the requested paths (an empty allowlist
    // therefore yields no deltas).
    const result: FieldDelta[] = [];
    for (const fieldPath of compareFields) {
      const globalLeaf = get(globalValue, fieldPath);
      const cameraLeaf = get(cameraValue, fieldPath);
      if (isEqual(globalLeaf, cameraLeaf)) {
        continue;
      }
      result.push({
        fieldPath,
        globalValue: globalLeaf,
        cameraValue: cameraLeaf,
      });
    }
    return result;
  }
  if (isJsonObject(globalValue) && isJsonObject(cameraValue)) {
    // Recursive mode: walk the union of keys on both sides, descending into
    // nested objects and reporting each differing leaf at its dot-path.
    const result: FieldDelta[] = [];
    const allKeys = new Set([
      ...Object.keys(globalValue),
      ...Object.keys(cameraValue),
    ]);
    for (const key of allKeys) {
      const globalChild = (globalValue as JsonObject)[key];
      const cameraChild = (cameraValue as JsonObject)[key];
      if (isEqual(globalChild, cameraChild)) {
        continue;
      }
      const childPath = pathPrefix ? `${pathPrefix}.${key}` : key;
      if (isJsonObject(globalChild) && isJsonObject(cameraChild)) {
        result.push(
          ...collectFieldDeltas(globalChild, cameraChild, undefined, childPath),
        );
      } else {
        result.push({
          fieldPath: childPath,
          globalValue: globalChild,
          cameraValue: cameraChild,
        });
      }
    }
    return result;
  }
  // Mixed or scalar values: report a single delta at the current path
  // (which may be "" at the top level — callers handle the empty path).
  return isEqual(globalValue, cameraValue)
    ? []
    : [{ fieldPath: pathPrefix, globalValue, cameraValue }];
}
/**
 * Enumerate the dot-paths of every leaf value (primitive or array) that a
 * partial config object actually defines. This keeps profile-vs-global
 * diffs scoped to keys the profile explicitly sets, so unspecified fields
 * never register as false "undefined" deltas.
 */
function collectDefinedLeafPaths(value: JsonValue, prefix = ""): string[] {
  if (!isJsonObject(value)) {
    // Non-objects are themselves leaves; a bare root (empty prefix) has none.
    return prefix ? [prefix] : [];
  }
  const leaves: string[] = [];
  for (const key of Object.keys(value as JsonObject)) {
    const child = (value as JsonObject)[key];
    const childPath = prefix ? `${prefix}.${key}` : key;
    if (isJsonObject(child)) {
      leaves.push(...collectDefinedLeafPaths(child as JsonValue, childPath));
    } else {
      leaves.push(childPath);
    }
  }
  return leaves;
}
/**
 * True when a dot-path falls inside the compareFields allowlist — either an
 * exact match or nested beneath an allowed path. With no allowlist every
 * path is allowed.
 */
function isPathAllowed(path: string, compareFields?: string[]): boolean {
  if (!compareFields) {
    return true;
  }
  for (const allowed of compareFields) {
    if (path === allowed || path.startsWith(`${allowed}.`)) {
      return true;
    }
  }
  return false;
}
/**
 * Some Frigate sections (notably `motion`) are dumped by the backend with
 * `exclude_unset=True`, so when the user hasn't explicitly written the section
 * in their global YAML the API returns null even though every camera still
 * gets defaults applied at runtime. To still detect cross-camera differences
 * in those sections we synthesize a baseline by taking the modal (most common)
 * value at each leaf path across cameras — cameras whose value diverges from
 * the modal are treated as overriding.
 */
function deriveSyntheticGlobalValue(
  cameraSectionValues: JsonValue[],
  compareFields?: string[],
): JsonObject {
  // Only object-shaped camera values can contribute leaf paths.
  const cameras = cameraSectionValues.filter(isJsonObject) as JsonObject[];
  if (cameras.length === 0) return {};
  // Gather every leaf path defined by any camera, honoring the allowlist.
  const allPaths = new Set<string>();
  for (const cam of cameras) {
    for (const path of collectDefinedLeafPaths(cam as JsonValue)) {
      if (!isPathAllowed(path, compareFields)) continue;
      allPaths.add(path);
    }
  }
  const baseline: JsonObject = {};
  for (const path of allPaths) {
    // Bucket identical values by their JSON encoding; undefined folds into
    // null so "not set" and "explicitly null" land in the same bucket.
    const counts = new Map<string, { value: unknown; count: number }>();
    for (const cam of cameras) {
      const v = get(cam, path);
      const key = JSON.stringify(v ?? null);
      const existing = counts.get(key);
      if (existing) {
        existing.count += 1;
      } else {
        counts.set(key, { value: v, count: 1 });
      }
    }
    // Pick the most common value; ties resolve to the first bucket created
    // (Map iteration follows insertion order).
    let modal: { value: unknown; count: number } | undefined;
    for (const entry of counts.values()) {
      if (!modal || entry.count > modal.count) modal = entry;
    }
    if (modal) {
      set(baseline, path, modal.value);
    }
  }
  return baseline;
}
/**
 * Paths that are intentionally hidden from the cross-camera override summary
 * because they're inherently per-camera (mask polygons, zone definitions) and
 * would otherwise dominate the popover with noise. Any path containing `mask`
 * as a segment is filtered, so nested keys under a mask dict (e.g.
 * `mask.global_object_mask_1.coordinates`) are excluded as well.
 */
function isCrossCameraIgnoredPath(path: string): boolean {
  if (!path) {
    return false;
  }
  for (const segment of path.split(".")) {
    if (segment === "mask") {
      return true;
    }
  }
  return false;
}
/**
 * Hook to find every camera that overrides a given global section. Returns
 * one entry per overriding camera with the specific field-level deltas.
 * Considers both the camera's own (pre-profile) section value and any of its
 * defined profiles, so a field overridden only inside a profile still surfaces.
 *
 * @example
 * ```tsx
 * const entries = useCamerasOverridingSection(config, "review");
 * // [{ camera: "front_door", fieldDeltas: [{ fieldPath: "genai.enabled", ... }] }]
 * ```
 */
export function useCamerasOverridingSection(
  config: FrigateConfig | undefined,
  sectionPath: string,
): CameraOverrideEntry[] {
  return useMemo(() => {
    if (!config?.cameras || !sectionPath) {
      return [];
    }
    // compareFields (when declared for the section) restricts which paths count.
    const sectionMeta = OVERRIDABLE_SECTIONS.find((s) => s.key === sectionPath);
    const compareFields = sectionMeta?.compareFields;
    const cameraNames = Object.keys(config.cameras);
    // Pre-compute every camera's normalized section value; indexed in
    // parallel with cameraNames.
    const cameraSectionValues = cameraNames.map((name) =>
      normalizeConfigValue(
        getBaseCameraSectionValue(config, name, sectionPath),
      ),
    );
    const rawGlobalValue = get(config, sectionPath);
    // When the global section is absent (exclude_unset sections), fall back
    // to a synthetic baseline derived from the modal per-leaf camera values.
    const globalValue: JsonValue =
      rawGlobalValue == null
        ? deriveSyntheticGlobalValue(cameraSectionValues, compareFields)
        : normalizeConfigValue(rawGlobalValue);
    const entries: CameraOverrideEntry[] = [];
    for (let idx = 0; idx < cameraNames.length; idx += 1) {
      const cameraName = cameraNames[idx];
      const cameraConfig = config.cameras[cameraName];
      // Keyed by field path so camera-level deltas take precedence over
      // profile-level deltas at the same path.
      const deltasByPath = new Map<string, FieldDelta>();
      // 1. Camera-level overrides (uses base_config when a profile is active)
      const cameraValue = cameraSectionValues[idx];
      for (const delta of collectFieldDeltas(
        globalValue,
        cameraValue,
        compareFields,
      )) {
        if (isCrossCameraIgnoredPath(delta.fieldPath)) continue;
        deltasByPath.set(delta.fieldPath, delta);
      }
      // 2. Profile-level overrides — diff only the paths each profile actually
      // defines, so unspecified-in-profile fields don't register as deltas.
      const profiles = cameraConfig?.profiles ?? {};
      for (const profileName of Object.keys(profiles)) {
        const profileSection = (
          profiles[profileName] as Record<string, unknown> | undefined
        )?.[sectionPath];
        if (profileSection === undefined) continue;
        const normalizedProfile = normalizeConfigValue(
          profileSection as JsonValue,
        );
        for (const path of collectDefinedLeafPaths(normalizedProfile)) {
          // A camera-level delta at the same path already covers this field.
          if (deltasByPath.has(path)) continue;
          if (isCrossCameraIgnoredPath(path)) continue;
          if (!isPathAllowed(path, compareFields)) continue;
          const g = get(globalValue, path);
          const p = get(normalizedProfile, path);
          if (!isEqual(g, p)) {
            deltasByPath.set(path, {
              fieldPath: path,
              globalValue: g,
              cameraValue: p,
              profileName,
            });
          }
        }
      }
      // Only cameras with at least one surviving delta are reported.
      if (deltasByPath.size > 0) {
        entries.push({
          camera: cameraName,
          fieldDeltas: Array.from(deltasByPath.values()),
        });
      }
    }
    return entries;
  }, [config, sectionPath]);
}

View File

@ -42,7 +42,9 @@ import { CameraConfig, FrigateConfig } from "@/types/frigateConfig";
import { getIconForLabel } from "@/utils/iconUtil";
import { getTranslatedLabel } from "@/utils/i18n";
import { Card } from "@/components/ui/card";
import { Progress } from "@/components/ui/progress";
import { ObjectType } from "@/types/ws";
import { useJobStatus } from "@/api/ws";
import WsMessageFeed from "@/components/ws/WsMessageFeed";
import { ConfigSectionTemplate } from "@/components/config-form/sections/ConfigSectionTemplate";
@ -53,6 +55,7 @@ import { isDesktop, isMobile } from "react-device-detect";
import Logo from "@/components/Logo";
import { Separator } from "@/components/ui/separator";
import { useDocDomain } from "@/hooks/use-doc-domain";
import { useConfigSchema } from "@/hooks/use-config-schema";
import DebugDrawingLayer from "@/components/overlay/DebugDrawingLayer";
import { IoMdArrowRoundBack } from "react-icons/io";
@ -65,6 +68,15 @@ type DebugReplayStatus = {
live_ready: boolean;
};
type DebugReplayJobResults = {
current_step: "preparing_clip" | "starting_camera" | null;
progress_percent: number | null;
source_camera: string | null;
replay_camera_name: string | null;
start_ts: number | null;
end_ts: number | null;
};
type DebugOptions = {
bbox: boolean;
timestamp: boolean;
@ -105,8 +117,6 @@ const DEBUG_OPTION_I18N_KEY: Record<keyof DebugOptions, string> = {
paths: "paths",
};
const REPLAY_INIT_SKELETON_TIMEOUT_MS = 8000;
export default function Replay() {
const { t } = useTranslation(["views/replay", "views/settings", "common"]);
const navigate = useNavigate();
@ -119,6 +129,9 @@ export default function Replay() {
} = useSWR<DebugReplayStatus>("debug_replay/status", {
refreshInterval: 1000,
});
const { payload: replayJob } =
useJobStatus<DebugReplayJobResults>("debug_replay");
const configSchema = useConfigSchema();
const [isInitializing, setIsInitializing] = useState(true);
// Refresh status immediately on mount to avoid showing "no session" briefly
@ -130,12 +143,6 @@ export default function Replay() {
initializeStatus();
}, [refreshStatus]);
useEffect(() => {
if (status?.live_ready) {
setShowReplayInitSkeleton(false);
}
}, [status?.live_ready]);
const [options, setOptions] = useState<DebugOptions>(DEFAULT_OPTIONS);
const [isStopping, setIsStopping] = useState(false);
const [configDialogOpen, setConfigDialogOpen] = useState(false);
@ -160,11 +167,7 @@ export default function Replay() {
axios
.post("debug_replay/stop")
.then(() => {
toast.success(t("dialog.toast.stopped"), {
position: "top-center",
});
refreshStatus();
navigate("/review");
})
.catch((error) => {
const errorMessage =
@ -178,7 +181,7 @@ export default function Replay() {
.finally(() => {
setIsStopping(false);
});
}, [navigate, refreshStatus, t]);
}, [refreshStatus, t]);
// Camera activity for the replay camera
const { data: config } = useSWR<FrigateConfig>("config", {
@ -191,35 +194,10 @@ export default function Replay() {
const { objects } = useCameraActivity(replayCameraConfig);
const [showReplayInitSkeleton, setShowReplayInitSkeleton] = useState(false);
// debug draw
const containerRef = useRef<HTMLDivElement>(null);
const [debugDraw, setDebugDraw] = useState(false);
useEffect(() => {
if (!status?.active || !status.replay_camera) {
setShowReplayInitSkeleton(false);
return;
}
setShowReplayInitSkeleton(true);
const timeout = window.setTimeout(() => {
setShowReplayInitSkeleton(false);
}, REPLAY_INIT_SKELETON_TIMEOUT_MS);
return () => {
window.clearTimeout(timeout);
};
}, [status?.active, status?.replay_camera]);
useEffect(() => {
if (status?.live_ready) {
setShowReplayInitSkeleton(false);
}
}, [status?.live_ready]);
// Format time range for display
const timeRangeDisplay = useMemo(() => {
if (!status?.start_time || !status?.end_time) return "";
@ -237,8 +215,39 @@ export default function Replay() {
);
}
// No active session
if (!status?.active) {
// Startup error (job failed). Only show when status.active is also true so
// we don't surface stale failed jobs after a session ended cleanly.
if (replayJob?.status === "failed" && status?.active) {
return (
<div className="flex size-full flex-col items-center justify-center gap-4 p-8">
<Heading as="h2" className="text-center">
{t("page.startError.title")}
</Heading>
{replayJob.error_message && (
<p className="max-w-xl text-center text-sm text-muted-foreground">
{replayJob.error_message}
</p>
)}
<Button
variant="default"
onClick={() => {
axios
.post("debug_replay/stop")
.catch(() => {})
.finally(() => navigate("/review"));
}}
>
{t("page.startError.back")}
</Button>
</div>
);
}
// No active session. Also covers the brief window between the runner
// pushing job.status = "cancelled" via WS and the next SWR refresh
// flipping status.active to false — without this, render falls through
// to the full replay UI and you see a flash of it before stop completes.
if (!status?.active || replayJob?.status === "cancelled") {
return (
<div className="flex size-full flex-col items-center justify-center gap-4 p-8">
<MdReplay className="size-12" />
@ -255,6 +264,52 @@ export default function Replay() {
);
}
// Startup in progress (job is running). The session is active but the
// replay camera isn't ready yet; show progress / phase from the job.
const startupStep =
replayJob?.status === "running"
? (replayJob.results?.current_step ?? null)
: null;
if (startupStep === "preparing_clip" || startupStep === "starting_camera") {
const phaseTitle =
startupStep === "preparing_clip"
? t("page.preparingClip")
: t("page.startingCamera");
const progressPercent = replayJob?.results?.progress_percent ?? null;
const showProgressBar =
startupStep === "preparing_clip" && progressPercent != null;
return (
<div className="flex size-full flex-col items-center justify-center gap-4 p-8">
{showProgressBar ? (
<div className="flex w-64 flex-col items-center gap-2">
<Progress value={progressPercent ?? 0} />
<div className="text-xs text-muted-foreground">
{Math.round(progressPercent ?? 0)}%
</div>
</div>
) : (
<ActivityIndicator className="size-8" />
)}
<Heading as="h3" className="text-center">
{phaseTitle}
</Heading>
{startupStep === "preparing_clip" && (
<p className="max-w-md text-center text-sm text-muted-foreground">
{t("page.preparingClipDesc")}
</p>
)}
<Button
variant="outline"
size="sm"
disabled={isStopping}
onClick={handleStop}
>
{t("button.cancel", { ns: "common" })}
</Button>
</div>
);
}
return (
<div className="flex size-full flex-col overflow-hidden">
<Toaster position="top-center" closeButton={true} />
@ -345,27 +400,30 @@ export default function Replay() {
) : (
status.replay_camera && (
<div className="relative size-full min-h-10" ref={containerRef}>
<AutoUpdatingCameraImage
className="size-full"
cameraClasses="relative w-full h-full flex flex-col justify-start"
searchParams={searchParams}
camera={status.replay_camera}
showFps={false}
/>
{debugDraw && (
<DebugDrawingLayer
containerRef={containerRef}
cameraWidth={
config?.cameras?.[status.source_camera ?? ""]?.detect
.width ?? 1280
}
cameraHeight={
config?.cameras?.[status.source_camera ?? ""]?.detect
.height ?? 720
}
/>
)}
{showReplayInitSkeleton && (
{status.live_ready ? (
<>
<AutoUpdatingCameraImage
className="size-full"
cameraClasses="relative w-full h-full flex flex-col justify-start"
searchParams={searchParams}
camera={status.replay_camera}
showFps={false}
/>
{debugDraw && (
<DebugDrawingLayer
containerRef={containerRef}
cameraWidth={
config?.cameras?.[status.source_camera ?? ""]?.detect
.width ?? 1280
}
cameraHeight={
config?.cameras?.[status.source_camera ?? ""]?.detect
.height ?? 720
}
/>
)}
</>
) : (
<div className="pointer-events-none absolute inset-0 z-10 size-full rounded-lg bg-background">
<Skeleton className="size-full rounded-lg" />
<div className="absolute left-1/2 top-1/2 flex -translate-x-1/2 -translate-y-1/2 flex-col items-center justify-center gap-2">
@ -595,32 +653,38 @@ export default function Replay() {
{t("page.configurationDesc")}
</DialogDescription>
</DialogHeader>
<div className="space-y-6">
<ConfigSectionTemplate
sectionKey="motion"
level="replay"
cameraName={status.replay_camera ?? undefined}
skipSave
noStickyButtons
requiresRestart={false}
collapsible
defaultCollapsed={false}
showTitle
showOverrideIndicator={false}
/>
<ConfigSectionTemplate
sectionKey="objects"
level="replay"
cameraName={status.replay_camera ?? undefined}
skipSave
noStickyButtons
requiresRestart={false}
collapsible
defaultCollapsed={false}
showTitle
showOverrideIndicator={false}
/>
</div>
{configSchema == null ? (
<div className="flex h-40 items-center justify-center">
<ActivityIndicator />
</div>
) : (
<div className="space-y-6">
<ConfigSectionTemplate
sectionKey="motion"
level="replay"
cameraName={status.replay_camera ?? undefined}
skipSave
noStickyButtons
requiresRestart={false}
collapsible
defaultCollapsed={false}
showTitle
showOverrideIndicator={false}
/>
<ConfigSectionTemplate
sectionKey="objects"
level="replay"
cameraName={status.replay_camera ?? undefined}
skipSave
noStickyButtons
requiresRestart={false}
collapsible
defaultCollapsed={false}
showTitle
showOverrideIndicator={false}
/>
</div>
)}
</DialogContent>
</Dialog>
</div>

View File

@ -2,6 +2,7 @@ import { useCallback, useMemo, useState } from "react";
import { useTranslation } from "react-i18next";
import type { SectionConfig } from "@/components/config-form/sections";
import { ConfigSectionTemplate } from "@/components/config-form/sections";
import { CameraOverridesBadge } from "@/components/config-form/sections/CameraOverridesBadge";
import type { PolygonType } from "@/types/canvas";
import { Badge } from "@/components/ui/badge";
import {
@ -167,6 +168,9 @@ export function SingleSectionPage({
</div>
{/* Desktop: badge inline next to title */}
<div className="hidden shrink-0 sm:flex sm:flex-wrap sm:items-center sm:gap-2">
{level === "global" && showOverrideIndicator && (
<CameraOverridesBadge sectionPath={sectionKey} />
)}
{level === "camera" &&
showOverrideIndicator &&
sectionStatus.isOverridden && (
@ -224,6 +228,9 @@ export function SingleSectionPage({
</div>
{/* Mobile: badge below title/description */}
<div className="flex flex-wrap items-center gap-2 sm:hidden">
{level === "global" && showOverrideIndicator && (
<CameraOverridesBadge sectionPath={sectionKey} />
)}
{level === "camera" &&
showOverrideIndicator &&
sectionStatus.isOverridden && (