Compare commits

...

7 Commits

Author SHA1 Message Date
mathieu-d
92fcdbf17f
Merge 0f3dd097ec into 4171efcd79 2026-04-27 11:12:23 +08:00
Nicolas Mowen
4171efcd79
Miscellaneous fixes (#23009)
* Reduce max frames per second to 1

* Use pydantic but don't fail if some constraints are not met.

* Adjust limits

* Adjust limits

* Cleanup

* add unsaved changes icon/popover to individual settings section

* allow changing camera friendly_name from camera management pane

---------

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
2026-04-26 17:09:35 -05:00
matieu-d
0f3dd097ec Prepare for pull request. Remove specific configurations 2026-04-17 22:25:46 +02:00
matieu-d
2a4d7e4766 Prepare for pull request. Remove specific configurations 2026-04-14 23:14:31 +02:00
matieu-d
46415ffeb5 Add Hailo-10H detector configuration to global.json 2026-04-14 22:54:58 +02:00
matieu-d
e35ab0b8a1 Add support for temperature reading for Hailo-10H 2026-04-14 22:54:58 +02:00
matieu-d
837373547d H10 support patch 2026-04-14 22:54:58 +02:00
17 changed files with 723 additions and 91 deletions

View File

@@ -21,6 +21,13 @@ local: version
 		--tag frigate:latest \
 		--load
 
+localh10: version
+	docker buildx build --target=frigate --file docker/main/Dockerfile . \
+		--build-arg HAILORT_VERSION=5.1.1 \
+		--build-arg HAILORT_GIT_REPO=mathieu-d/hailort \
+		--tag frigate:latest \
+		--load
+
 debug: version
 	docker buildx build --target=frigate --file docker/main/Dockerfile . \
 		--build-arg DEBUG=true \

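With the new target, "make localh10" builds the image against HailoRT 5.1.1 from the mathieu-d/hailort fork, while the existing local target keeps the pinned defaults.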
View File

@@ -12,6 +12,11 @@ services:
     build:
       context: .
       dockerfile: docker/main/Dockerfile
+      # Use args to specify hailort version and location
+      # args:
+      #   HAILORT_VERSION: "5.1.1"
+      #   HAILORT_GIT_REPO: "mathieu-d/hailort"
       # Use target devcontainer-trt for TensorRT dev
       target: devcontainer
       cache_from:
@@ -29,6 +34,7 @@ services:
     # devices:
     #   - /dev/bus/usb:/dev/bus/usb # Uncomment for Google Coral USB
     #   - /dev/dri:/dev/dri # for intel hwaccel, needs to be updated for your hardware
     volumes:
       - .:/workspace/frigate:cached
       - ./web/dist:/opt/frigate/web:cached

View File

@@ -0,0 +1,7 @@
#!/bin/bash
# Update package list and install hailo driver version 5.1.1 for Hailo-10H
sudo apt update
sudo apt install -y hailo-h10-all=5.1.1

View File

@@ -157,6 +157,8 @@ FROM base AS wheels
 ARG DEBIAN_FRONTEND
 ARG TARGETARCH
 ARG DEBUG=false
+ARG HAILORT_VERSION=4.21.0
+ARG HAILORT_GIT_REPO=frigate-nvr/hailort
 
 # Use a separate container to build wheels to prevent build dependencies in final image
 RUN apt-get -qq update \

View File

@@ -2,13 +2,11 @@
 set -euxo pipefail
 
-hailo_version="4.21.0"
-
 if [[ "${TARGETARCH}" == "amd64" ]]; then
     arch="x86_64"
 elif [[ "${TARGETARCH}" == "arm64" ]]; then
     arch="aarch64"
 fi
 
-wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-debian12-${TARGETARCH}.tar.gz" | tar -C / -xzf -
-wget -P /wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp311-cp311-linux_${arch}.whl"
+wget -qO- "https://github.com/${HAILORT_GIT_REPO}/releases/download/v${HAILORT_VERSION}/hailort-debian12-${TARGETARCH}.tar.gz" | tar -C / -xzf -
+wget -P /wheels/ "https://github.com/${HAILORT_GIT_REPO}/releases/download/v${HAILORT_VERSION}/hailort-${HAILORT_VERSION}-cp311-cp311-linux_${arch}.whl"

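The two build args fully determine where the runtime tarball and Python wheel are fetched. A quick sketch of the URLs the script composes for an amd64 build, using the Dockerfile defaults (the localh10 target substitutes 5.1.1 and mathieu-d/hailort); illustrative only:

    # Illustrative only: mirrors the URL construction in the install script above.
    repo = "frigate-nvr/hailort"  # HAILORT_GIT_REPO default; localh10 overrides it
    version = "4.21.0"            # HAILORT_VERSION default; localh10 uses 5.1.1
    targetarch, arch = "amd64", "x86_64"

    runtime_url = (
        f"https://github.com/{repo}/releases/download/"
        f"v{version}/hailort-debian12-{targetarch}.tar.gz"
    )
    wheel_url = (
        f"https://github.com/{repo}/releases/download/"
        f"v{version}/hailort-{version}-cp311-cp311-linux_{arch}.whl"
    )
    print(runtime_url)
    print(wheel_url)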
View File

@@ -40,7 +40,7 @@ logger = logging.getLogger(__name__)
 RECORDING_BUFFER_EXTENSION_PERCENT = 0.10
 MIN_RECORDING_DURATION = 10
 MAX_IMAGE_TOKENS = 24000
-MAX_FRAMES_PER_SECOND = 2
+MAX_FRAMES_PER_SECOND = 1
 
 
 class ReviewDescriptionProcessor(PostProcessorApi):

View File

@@ -1,25 +1,48 @@
-from pydantic import BaseModel, ConfigDict, Field
+from typing import Annotated
+
+from pydantic import BaseModel, ConfigDict, Field, StringConstraints
+
+ObservationItem = Annotated[str, StringConstraints(min_length=20, max_length=160)]
 
 
 class ReviewMetadata(BaseModel):
     model_config = ConfigDict(extra="ignore", protected_namespaces=())
 
-    observations: list[str] = Field(
-        default_factory=list,
-        description="Chronological list of significant observations from the frames, written before the scene narrative is composed.",
+    observations: list[ObservationItem] = Field(
+        ...,
+        min_length=3,
+        max_length=15,
+        description=(
+            "Enumerate the significant observations across all frames, in "
+            "chronological order, BEFORE composing the scene narrative. "
+            "Include the very start of the activity — for example, a vehicle "
+            "entering the frame or pulling into the driveway — even if it "
+            "lasts only a few frames and the rest of the clip is dominated "
+            "by a longer activity. Include each arrival, departure, motion "
+            "event, object handled, and notable change in position or state. "
+            "Each item is a single concrete fact written as a complete "
+            "sentence. Do not summarize, interpret, or assign meaning here — "
+            "that belongs in the scene field."
+        ),
     )
     title: str = Field(
-        description="A short title characterizing what took place and where, under 10 words."
+        max_length=80,
+        description="A short title characterizing what took place and where, under 10 words.",
     )
     scene: str = Field(
-        description="A chronological narrative of what happens from start to finish.",
+        min_length=150,
+        max_length=600,
+        description="A chronological narrative of what happens from start to finish, drawing directly from the items in observations.",
    )
     shortSummary: str = Field(
-        description="A brief 2-sentence summary of the scene, suitable for notifications."
+        min_length=70,
+        max_length=100,
+        description="A brief 2-sentence summary of the scene, suitable for notifications.",
     )
     confidence: float = Field(
         ge=0.0,
-        description="Confidence in the analysis, from 0 to 1.",
+        le=1.0,
+        description="Confidence in the analysis as a decimal between 0.0 and 1.0, where 0.0 means no confidence and 1.0 means complete confidence. Express ONLY as a decimal.",
     )
     potential_threat_level: int = Field(
         ge=0,

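Moving the limits onto the Pydantic model means a response can be checked strictly first and still salvaged when it misses a constraint. A minimal sketch of the two paths the processor (in a later file of this diff) takes, assuming ReviewMetadata as defined above is importable and using an invented off-spec payload:

    import json

    from pydantic import ValidationError

    # Invented payload: "confidence" is a percentage and the observations are
    # too few and too short, so strict validation fails the new constraints.
    payload = json.dumps(
        {
            "observations": ["Car arrives."],  # under 3 items, items under 20 chars
            "title": "Car in driveway",
            "scene": "A car arrives.",  # under min_length=150
            "shortSummary": "A car arrived. It parked.",  # under min_length=70
            "confidence": 85,  # percentage, outside le=1.0
            "potential_threat_level": 0,
        }
    )

    try:
        metadata = ReviewMetadata.model_validate_json(payload)
    except ValidationError as ve:
        # Soft path: log each violation, then build the object without validation.
        for err in ve.errors():
            print(".".join(str(p) for p in err["loc"]), err["msg"])
        metadata = ReviewMetadata.model_construct(**json.loads(payload))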
View File

@@ -0,0 +1,415 @@
import logging
import os
import subprocess
import threading
import urllib.request
from functools import partial
from typing import Dict, List, Optional, Tuple

import cv2
import numpy as np
from pydantic import ConfigDict, Field
from typing_extensions import Literal

from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import (
    BaseDetectorConfig,
)
from frigate.object_detection.util import RequestStore, ResponseStore

logger = logging.getLogger(__name__)


# ----------------- Utility Functions ----------------- #
def preprocess_tensor(image: np.ndarray, model_w: int, model_h: int) -> np.ndarray:
    """
    Resize an image with unchanged aspect ratio using padding.
    Assumes input image shape is (H, W, 3).
    """
    if image.ndim == 4 and image.shape[0] == 1:
        image = image[0]

    h, w = image.shape[:2]
    scale = min(model_w / w, model_h / h)
    new_w, new_h = int(w * scale), int(h * scale)

    resized_image = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_CUBIC)
    padded_image = np.full((model_h, model_w, 3), 114, dtype=image.dtype)
    x_offset = (model_w - new_w) // 2
    y_offset = (model_h - new_h) // 2
    padded_image[y_offset : y_offset + new_h, x_offset : x_offset + new_w] = (
        resized_image
    )
    return padded_image


# ----------------- Global Constants ----------------- #
DETECTOR_KEY = "hailo10h"
ARCH = None
H10H_DEFAULT_MODEL = "yolov6n.hef"
H10H_DEFAULT_URL = "https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/ModelZoo/Compiled/v5.2.0/hailo10h/yolov6n.hef"


def detect_hailo_arch():
    try:
        result = subprocess.run(
            ["hailortcli", "fw-control", "identify"], capture_output=True, text=True
        )
        if result.returncode != 0:
            logger.error(f"Inference error: {result.stderr}")
            return None

        for line in result.stdout.split("\n"):
            if "Device Architecture" in line:
                if "HAILO10H" in line:
                    return "hailo10h"

        logger.error("Inference error: Could not determine Hailo architecture.")
        return None
    except Exception as e:
        logger.error(f"Inference error: {e}")
        return None


# ----------------- HailoAsyncInference Class ----------------- #
class HailoAsyncInference:
    def __init__(
        self,
        hef_path: str,
        input_store: RequestStore,
        output_store: ResponseStore,
        batch_size: int = 1,
        input_type: Optional[str] = None,
        output_type: Optional[Dict[str, str]] = None,
        send_original_frame: bool = False,
    ) -> None:
        # when importing hailo it activates the driver
        # which leaves processes running even though it may not be used.
        try:
            from hailo_platform import (
                HEF,
                FormatType,
                HailoSchedulingAlgorithm,
                VDevice,
            )
        except ModuleNotFoundError:
            pass

        self.input_store = input_store
        self.output_store = output_store

        params = VDevice.create_params()
        params.scheduling_algorithm = HailoSchedulingAlgorithm.ROUND_ROBIN

        self.hef = HEF(hef_path)
        self.target = VDevice(params)
        self.infer_model = self.target.create_infer_model(hef_path)
        self.infer_model.set_batch_size(batch_size)

        if input_type is not None:
            self.infer_model.input().set_format_type(getattr(FormatType, input_type))
        if output_type is not None:
            # use a distinct loop variable so the output_type dict is not
            # shadowed; _create_bindings expects it to remain a dict
            for output_name, format_name in output_type.items():
                self.infer_model.output(output_name).set_format_type(
                    getattr(FormatType, format_name)
                )
        self.output_type = output_type
        self.send_original_frame = send_original_frame

    def callback(
        self,
        completion_info,
        bindings_list: List,
        input_batch: List,
        request_ids: List[int],
    ):
        if completion_info.exception:
            logger.error(f"Inference error: {completion_info.exception}")
        else:
            for i, bindings in enumerate(bindings_list):
                if len(bindings._output_names) == 1:
                    result = bindings.output().get_buffer()
                else:
                    result = {
                        name: np.expand_dims(bindings.output(name).get_buffer(), axis=0)
                        for name in bindings._output_names
                    }
                self.output_store.put(request_ids[i], (input_batch[i], result))

    def _create_bindings(self, configured_infer_model) -> object:
        if self.output_type is None:
            output_buffers = {
                output_info.name: np.empty(
                    self.infer_model.output(output_info.name).shape,
                    dtype=getattr(
                        np, str(output_info.format.type).split(".")[1].lower()
                    ),
                )
                for output_info in self.hef.get_output_vstream_infos()
            }
        else:
            output_buffers = {
                name: np.empty(
                    self.infer_model.output(name).shape,
                    dtype=getattr(np, self.output_type[name].lower()),
                )
                for name in self.output_type
            }
        return configured_infer_model.create_bindings(output_buffers=output_buffers)

    def get_input_shape(self) -> Tuple[int, ...]:
        return self.hef.get_input_vstream_infos()[0].shape

    def run(self) -> None:
        job = None
        with self.infer_model.configure() as configured_infer_model:
            while True:
                batch_data = self.input_store.get()
                if batch_data is None:
                    break

                request_id, frame_data = batch_data
                preprocessed_batch = [frame_data]
                request_ids = [request_id]
                input_batch = preprocessed_batch  # non-send_original_frame mode

                bindings_list = []
                for frame in preprocessed_batch:
                    bindings = self._create_bindings(configured_infer_model)
                    bindings.input().set_buffer(np.array(frame))
                    bindings_list.append(bindings)

                configured_infer_model.wait_for_async_ready(timeout_ms=10000)
                job = configured_infer_model.run_async(
                    bindings_list,
                    partial(
                        self.callback,
                        input_batch=input_batch,
                        request_ids=request_ids,
                        bindings_list=bindings_list,
                    ),
                )
            if job is not None:
                job.wait(100)


# ----------------- HailoDetector Class ----------------- #
class HailoDetector(DetectionApi):
    type_key = DETECTOR_KEY

    def __init__(self, detector_config: "HailoDetectorConfig"):
        global ARCH
        ARCH = detect_hailo_arch()
        self.cache_dir = MODEL_CACHE_DIR
        self.device_type = detector_config.device
        self.model_height = (
            detector_config.model.height
            if hasattr(detector_config.model, "height")
            else None
        )
        self.model_width = (
            detector_config.model.width
            if hasattr(detector_config.model, "width")
            else None
        )
        self.model_type = (
            detector_config.model.model_type
            if hasattr(detector_config.model, "model_type")
            else None
        )
        self.tensor_format = (
            detector_config.model.input_tensor
            if hasattr(detector_config.model, "input_tensor")
            else None
        )
        self.pixel_format = (
            detector_config.model.input_pixel_format
            if hasattr(detector_config.model, "input_pixel_format")
            else None
        )
        self.input_dtype = (
            detector_config.model.input_dtype
            if hasattr(detector_config.model, "input_dtype")
            else None
        )
        self.output_type = "FLOAT32"
        self.set_path_and_url(detector_config.model.path)
        self.working_model_path = self.check_and_prepare()
        self.batch_size = 1
        self.input_store = RequestStore()
        self.response_store = ResponseStore()

        try:
            logger.debug(f"[INIT] Loading HEF model from {self.working_model_path}")
            self.inference_engine = HailoAsyncInference(
                self.working_model_path,
                self.input_store,
                self.response_store,
                self.batch_size,
            )
            self.input_shape = self.inference_engine.get_input_shape()
            logger.debug(f"[INIT] Model input shape: {self.input_shape}")
            self.inference_thread = threading.Thread(
                target=self.inference_engine.run, daemon=True
            )
            self.inference_thread.start()
        except Exception as e:
            logger.error(f"[INIT] Failed to initialize HailoAsyncInference: {e}")
            raise

    def set_path_and_url(self, path: str = None):
        if not path:
            self.model_path = None
            self.url = None
            return
        if self.is_url(path):
            self.url = path
            self.model_path = None
        else:
            self.model_path = path
            self.url = None

    def is_url(self, url: str) -> bool:
        return (
            url.startswith("http://")
            or url.startswith("https://")
            or url.startswith("www.")
        )

    @staticmethod
    def extract_model_name(path: str = None, url: str = None) -> str:
        if path and path.endswith(".hef"):
            return os.path.basename(path)
        elif url and url.endswith(".hef"):
            return os.path.basename(url)
        else:
            return H10H_DEFAULT_MODEL

    @staticmethod
    def download_model(url: str, destination: str):
        if not url.endswith(".hef"):
            raise ValueError("Invalid model URL. Only .hef files are supported.")
        try:
            urllib.request.urlretrieve(url, destination)
            logger.debug(f"Downloaded model to {destination}")
        except Exception as e:
            raise RuntimeError(f"Failed to download model from {url}: {str(e)}")

    def check_and_prepare(self) -> str:
        if not os.path.exists(self.cache_dir):
            os.makedirs(self.cache_dir)

        model_name = self.extract_model_name(self.model_path, self.url)
        cached_model_path = os.path.join(self.cache_dir, model_name)

        if not self.model_path and not self.url:
            if os.path.exists(cached_model_path):
                logger.debug(f"Model found in cache: {cached_model_path}")
                return cached_model_path
            else:
                logger.debug(f"Downloading default model: {model_name}")
                self.download_model(H10H_DEFAULT_URL, cached_model_path)
        elif self.url:
            logger.debug(f"Downloading model from URL: {self.url}")
            self.download_model(self.url, cached_model_path)
        elif self.model_path:
            if os.path.exists(self.model_path):
                logger.debug(f"Using existing model at: {self.model_path}")
                return self.model_path
            else:
                raise FileNotFoundError(f"Model file not found at: {self.model_path}")

        return cached_model_path

    def detect_raw(self, tensor_input):
        tensor_input = self.preprocess(tensor_input)

        if isinstance(tensor_input, np.ndarray) and len(tensor_input.shape) == 3:
            tensor_input = np.expand_dims(tensor_input, axis=0)

        request_id = self.input_store.put(tensor_input)

        try:
            _, infer_results = self.response_store.get(request_id, timeout=1.0)
        except TimeoutError:
            logger.error(
                f"Timeout waiting for inference results for request {request_id}"
            )
            if not self.inference_thread.is_alive():
                raise RuntimeError(
                    "HailoRT inference thread has stopped, restart required."
                )
            return np.zeros((20, 6), dtype=np.float32)

        if isinstance(infer_results, list) and len(infer_results) == 1:
            infer_results = infer_results[0]

        threshold = 0.4
        all_detections = []

        for class_id, detection_set in enumerate(infer_results):
            if not isinstance(detection_set, np.ndarray) or detection_set.size == 0:
                continue

            for det in detection_set:
                if det.shape[0] < 5:
                    continue

                score = float(det[4])
                if score < threshold:
                    continue

                all_detections.append([class_id, score, det[0], det[1], det[2], det[3]])

        if len(all_detections) == 0:
            detections_array = np.zeros((20, 6), dtype=np.float32)
        else:
            detections_array = np.array(all_detections, dtype=np.float32)
            if detections_array.shape[0] > 20:
                detections_array = detections_array[:20, :]
            elif detections_array.shape[0] < 20:
                pad = np.zeros((20 - detections_array.shape[0], 6), dtype=np.float32)
                detections_array = np.vstack((detections_array, pad))

        return detections_array

    def preprocess(self, image):
        if isinstance(image, np.ndarray):
            processed = preprocess_tensor(
                image, self.input_shape[1], self.input_shape[0]
            )
            return np.expand_dims(processed, axis=0)
        else:
            raise ValueError("Unsupported image format for preprocessing")

    def close(self):
        """Properly shuts down the inference engine and releases the VDevice."""
        logger.debug("[CLOSE] Closing HailoDetector")
        try:
            if hasattr(self, "inference_engine"):
                if hasattr(self.inference_engine, "target"):
                    self.inference_engine.target.release()
                    logger.debug("Hailo VDevice released successfully")
        except Exception as e:
            logger.error(f"Failed to close Hailo device: {e}")
            raise

    def __del__(self):
        """Destructor to ensure cleanup when the object is deleted."""
        self.close()


# ----------------- HailoDetectorConfig Class ----------------- #
class HailoDetectorConfig(BaseDetectorConfig):
    """Hailo10H detector using HEF models and the HailoRT SDK for inference on Hailo hardware."""

    model_config = ConfigDict(
        title="Hailo-10H",
    )

    type: Literal[DETECTOR_KEY]
    device: str = Field(
        default="PCIe",
        title="Device Type",
        description="The device to use for Hailo inference (e.g. 'PCIe', 'M.2').",
    )

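preprocess_tensor letterboxes frames the way YOLO-style HEF models expect: scale to fit the model input, then pad with the gray value 114. The arithmetic for a 1280x720 frame going into a 640x640 model, as a standalone sketch (no Hailo hardware required):

    # Worked example of preprocess_tensor's letterbox arithmetic.
    model_w, model_h = 640, 640
    w, h = 1280, 720

    scale = min(model_w / w, model_h / h)          # min(0.5, 0.888...) = 0.5
    new_w, new_h = int(w * scale), int(h * scale)  # 640 x 360
    x_offset = (model_w - new_w) // 2              # 0
    y_offset = (model_h - new_h) // 2              # 140

    # The resized 640x360 frame lands at rows 140..499 of the 640x640 canvas;
    # the remaining rows keep the pad value 114.
    print(scale, (new_w, new_h), (x_offset, y_offset))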
View File

@@ -2,6 +2,7 @@
 import datetime
 import importlib
 import json
+import logging
 import os
 import re
@@ -9,6 +10,7 @@ from typing import Any, Callable, Optional
 
 import numpy as np
 from playhouse.shortcuts import model_to_dict
+from pydantic import ValidationError
 
 from frigate.config import CameraConfig, GenAIConfig, GenAIProviderEnum
 from frigate.const import CLIPS_DIR
@@ -151,50 +153,6 @@ Each line represents a detection state, not necessarily unique individuals. The
         if "other_concerns" in schema.get("required", []):
             schema["required"].remove("other_concerns")
 
-        # Length hints injected into the schema as suggestions to the model
-        # (enforced by grammar-based providers like llama.cpp) but kept off the
-        # Pydantic model so a non-compliant response does not fail validation.
-        length_hints = {
-            "scene": {"minLength": 120, "maxLength": 600},
-            "shortSummary": {"minLength": 70, "maxLength": 100},
-        }
-        for field, hints in length_hints.items():
-            prop = schema.get("properties", {}).get(field)
-            if prop is not None:
-                prop.update(hints)
-
-        # observations is a chain-of-thought-by-schema field: forcing the model
-        # to enumerate concrete facts before writing scene/title surfaces details
-        # the narrative would otherwise gloss past (e.g. brief vehicle arrivals
-        # overshadowed by a longer activity). The minItems floor scales with
-        # event duration so longer clips get more observations.
-        observations_prop = schema.get("properties", {}).get("observations")
-        if observations_prop is not None:
-            duration_seconds = float(review_data.get("duration") or 0)
-            min_observations = max(3, round(duration_seconds / 5))
-            max_observations = min_observations + 8
-            observations_prop["description"] = (
-                "Enumerate the significant observations across all frames, in "
-                "chronological order, BEFORE composing the scene narrative. "
-                "Include the very start of the activity — for example, a "
-                "vehicle entering the frame or pulling into the driveway — "
-                "even if it lasts only a few frames and the rest of the clip "
-                "is dominated by a longer activity. Include each arrival, "
-                "departure, motion event, object handled, and notable change "
-                "in position or state. Each item is a single concrete fact "
-                "written as a complete sentence (e.g., 'A blue sedan turns "
-                "from the street into the driveway', 'Nick exits the driver "
-                "side carrying a plant pot'). Do not summarize, interpret, or "
-                "assign meaning here — that belongs in the scene field."
-            )
-            observations_prop["minItems"] = min_observations
-            observations_prop["maxItems"] = max_observations
-            observations_prop["items"] = {"type": "string", "minLength": 20}
-
-            required = schema.setdefault("required", [])
-            if "observations" not in required:
-                required.append("observations")
-
         # OpenAI strict mode requires additionalProperties: false on all objects
         schema["additionalProperties"] = False
@@ -225,7 +183,35 @@ Each line represents a detection state, not necessarily unique individuals. The
             try:
                 metadata = ReviewMetadata.model_validate_json(clean_json)
+            except ValidationError as ve:
+                # Constraint violations (length, item count, ranges) are logged
+                # at debug and the response is kept anyway — a slightly
+                # off-spec answer is still usable, and dropping the whole
+                # response loses the narrative content the model produced.
+                for err in ve.errors():
+                    loc = ".".join(str(p) for p in err["loc"]) or "<root>"
+                    logger.debug(
+                        "Review metadata soft validation: %s: %s (input: %r)",
+                        loc,
+                        err["msg"],
+                        err.get("input"),
+                    )
+
+                try:
+                    raw = json.loads(clean_json)
+                except json.JSONDecodeError as je:
+                    logger.error("Failed to parse review description JSON: %s", je)
+                    return None
+
+                # observations is required on the model; fill an empty default
+                # if the response omitted it so attribute access stays safe.
+                raw.setdefault("observations", [])
+                metadata = ReviewMetadata.model_construct(**raw)
+            except Exception as e:
+                logger.error(
+                    f"Failed to parse review description as the response did not match expected format. {e}"
+                )
+                return None
+
+            try:
                 # Normalize confidence if model returned a percentage (e.g. 85 instead of 0.85)
                 if metadata.confidence > 1.0:
                     metadata.confidence = min(metadata.confidence / 100.0, 1.0)
@@ -238,10 +224,7 @@ Each line represents a detection state, not necessarily unique individuals. The
                 metadata.time = review_data["start"]
 
                 return metadata
             except Exception as e:
-                # rarely LLMs can fail to follow directions on output format
-                logger.warning(
-                    f"Failed to parse review description as the response did not match expected format. {e}"
-                )
+                logger.error(f"Failed to post-process review metadata: {e}")
                 return None
         else:
             logger.debug(

View File

@@ -123,6 +123,15 @@ def get_detector_temperature(
             if index < len(hailo_device_names):
                 device_name = hailo_device_names[index]
                 return hailo_temps[device_name]
+    elif detector_type == "hailo10h":
+        # Get temperatures for Hailo devices
+        hailo_temps = get_hailo_temps()
+        if hailo_temps:
+            hailo_device_names = sorted(hailo_temps.keys())
+            index = detector_index_by_type.get("hailo10h", 0)
+            if index < len(hailo_device_names):
+                device_name = hailo_device_names[index]
+                return hailo_temps[device_name]
     elif detector_type == "rknn":
         # Rockchip temperatures are handled by the GPU / NPU stats
         # as there are not detector specific temperatures

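When several Hailo devices report temperatures, the detector's position within its type selects the matching device from the sorted name list. A small sketch of that lookup, with invented device names:

    # Invented device names; mirrors the index-by-type lookup above.
    hailo_temps = {"hailo1": 52.0, "hailo0": 48.5}
    detector_index_by_type = {"hailo10h": 1}  # second hailo10h detector

    hailo_device_names = sorted(hailo_temps.keys())  # ["hailo0", "hailo1"]
    index = detector_index_by_type.get("hailo10h", 0)
    if index < len(hailo_device_names):
        print(hailo_temps[hailo_device_names[index]])  # 52.0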
View File

@@ -397,6 +397,14 @@
           "description": "The device to use for Hailo inference (e.g. 'PCIe', 'M.2')."
         }
       },
+      "hailo10h": {
+        "label": "Hailo-10H",
+        "description": "Hailo-10H detector using HEF models and the HailoRT SDK for inference on Hailo hardware.",
+        "device": {
+          "label": "Device Type",
+          "description": "The device to use for Hailo inference (e.g. 'PCIe', 'M.2')."
+        }
+      },
       "memryx": {
         "label": "MemryX",
         "description": "MemryX MX3 detector that runs compiled DFP models on MemryX accelerators.",

View File

@@ -457,7 +457,13 @@
       "enableDesc": "Temporarily disable an enabled camera until Frigate restarts. Disabling a camera completely stops Frigate's processing of this camera's streams. Detection, recording, and debugging will be unavailable.<br /> <em>Note: This does not disable go2rtc restreams.</em>",
       "disableLabel": "Disabled cameras",
       "disableDesc": "Enable a camera that is currently not visible in the UI and disabled in the configuration. A restart of Frigate is required after enabling.",
-      "enableSuccess": "Enabled {{cameraName}} in configuration. Restart Frigate to apply the changes."
+      "enableSuccess": "Enabled {{cameraName}} in configuration. Restart Frigate to apply the changes.",
+      "friendlyName": {
+        "edit": "Edit camera display name",
+        "title": "Edit Display Name",
+        "description": "Set the friendly name shown for this camera throughout the Frigate UI. Leave blank to use the camera ID.",
+        "rename": "Rename"
+      }
     },
     "cameraConfig": {
       "add": "Add Camera",

View File

@@ -65,10 +65,14 @@ import {
   globalCameraDefaultSections,
   buildOverrides,
   buildConfigDataForPath,
+  flattenOverrides,
   getBaseCameraSectionValue,
   sanitizeSectionData as sharedSanitizeSectionData,
   requiresRestartForOverrides as sharedRequiresRestartForOverrides,
 } from "@/utils/configUtil";
+import SaveAllPreviewPopover, {
+  type SaveAllPreviewItem,
+} from "@/components/overlay/detail/SaveAllPreviewPopover";
 import RestartDialog from "@/components/overlay/dialog/RestartDialog";
 import { useRestart } from "@/api/ws";
 import type {
@@ -913,6 +917,34 @@ export function ConfigSection({
     );
   }, [sectionConfig?.renderers, sectionPath, cameraName, setPendingData]);
 
+  // Build a flat list of pending field changes for this section only.
+  // Mirrors the global Save All preview but scoped to the current section so
+  // users can inspect what will be saved without leaving the section.
+  const sectionPreviewItems = useMemo<SaveAllPreviewItem[]>(() => {
+    if (!hasChanges) return [];
+    if (!effectiveOverrides || typeof effectiveOverrides !== "object") {
+      return [];
+    }
+    const flattened = flattenOverrides(effectiveOverrides as JsonValue);
+    return flattened.map(({ path, value }) => ({
+      scope: effectiveLevel,
+      cameraName,
+      profileName: profileName
+        ? (profileFriendlyName ?? profileName)
+        : undefined,
+      fieldPath: path ? `${sectionPath}.${path}` : sectionPath,
+      value,
+    }));
+  }, [
+    hasChanges,
+    effectiveOverrides,
+    effectiveLevel,
+    cameraName,
+    profileName,
+    profileFriendlyName,
+    sectionPath,
+  ]);
+
   if (!modifiedSchema) {
     return null;
   }
@@ -1018,6 +1050,12 @@
                   defaultValue: "You have unsaved changes",
                 })}
               </span>
+              <SaveAllPreviewPopover
+                items={sectionPreviewItems}
+                className="h-7 w-7"
+                align="start"
+                side="top"
+              />
             </div>
           )}
           <div className="flex w-full flex-col gap-2 sm:flex-row sm:items-center md:w-auto">

View File

@@ -1,3 +1,4 @@
+import ActivityIndicator from "@/components/indicators/activity-indicator";
 import TextEntry from "@/components/input/TextEntry";
 import { Button } from "@/components/ui/button";
 import {
@@ -19,7 +20,9 @@ type TextEntryDialogProps = {
   setOpen: (open: boolean) => void;
   onSave: (text: string) => void;
   defaultValue?: string;
+  placeholder?: string;
   allowEmpty?: boolean;
+  isSaving?: boolean;
   regexPattern?: RegExp;
   regexErrorMessage?: string;
   forbiddenPattern?: RegExp;
@@ -33,7 +36,9 @@ export default function TextEntryDialog({
   setOpen,
   onSave,
   defaultValue = "",
+  placeholder,
   allowEmpty = false,
+  isSaving = false,
   regexPattern,
   regexErrorMessage,
   forbiddenPattern,
@@ -50,6 +55,7 @@
       </DialogHeader>
       <TextEntry
         defaultValue={defaultValue}
+        placeholder={placeholder}
         allowEmpty={allowEmpty}
         onSave={onSave}
         regexPattern={regexPattern}
@@ -58,11 +64,22 @@
         forbiddenErrorMessage={forbiddenErrorMessage}
       >
         <DialogFooter className={cn("pt-4", isMobile && "gap-2")}>
-          <Button type="button" onClick={() => setOpen(false)}>
+          <Button
+            type="button"
+            disabled={isSaving}
+            onClick={() => setOpen(false)}
+          >
             {t("button.cancel")}
           </Button>
-          <Button variant="select" type="submit">
-            {t("button.save")}
+          <Button variant="select" type="submit" disabled={isSaving}>
+            {isSaving ? (
+              <div className="flex flex-row items-center gap-2">
+                <ActivityIndicator className="size-4" />
+                <span>{t("button.saving")}</span>
+              </div>
+            ) : (
+              t("button.save")
+            )}
           </Button>
         </DialogFooter>
       </TextEntry>

View File

@@ -28,11 +28,7 @@ import useOptimisticState from "@/hooks/use-optimistic-state";
 import { isMobile } from "react-device-detect";
 import { FaVideo } from "react-icons/fa";
 import { CameraConfig, FrigateConfig } from "@/types/frigateConfig";
-import type {
-  ConfigSectionData,
-  JsonObject,
-  JsonValue,
-} from "@/types/configForm";
+import type { ConfigSectionData, JsonObject } from "@/types/configForm";
 import useSWR from "swr";
 import FilterSwitch from "@/components/filter/FilterSwitch";
 import { ZoneMaskFilterButton } from "@/components/filter/ZoneMaskFilter";
@@ -93,6 +89,7 @@ import { mutate } from "swr";
 import { RJSFSchema } from "@rjsf/utils";
 import {
   buildConfigDataForPath,
+  flattenOverrides,
   parseProfileFromSectionPath,
   prepareSectionSavePayload,
   PROFILE_ELIGIBLE_SECTIONS,
@@ -190,25 +187,6 @@ const parsePendingDataKey = (pendingDataKey: string) => {
   };
 };
 
-const flattenOverrides = (
-  value: JsonValue | undefined,
-  path: string[] = [],
-): Array<{ path: string; value: JsonValue }> => {
-  if (value === undefined) return [];
-
-  if (value === null || typeof value !== "object" || Array.isArray(value)) {
-    return [{ path: path.join("."), value }];
-  }
-
-  const entries = Object.entries(value);
-  if (entries.length === 0) {
-    return [{ path: path.join("."), value: {} }];
-  }
-
-  return entries.flatMap(([key, entryValue]) =>
-    flattenOverrides(entryValue, [...path, key]),
-  );
-};
-
 const createSectionPage = (
   sectionKey: string,
   level: "global" | "camera",

View File

@@ -219,6 +219,32 @@ export function buildOverrides(
   return current;
 }
 
+// ---------------------------------------------------------------------------
+// flattenOverrides — turn an overrides object into a list of leaf paths
+// ---------------------------------------------------------------------------
+// Walks a nested overrides value and produces a flat list of `{ path, value }`
+// entries, one per leaf. Used by save/preview UIs to enumerate the individual
+// fields that will be changed.
+export function flattenOverrides(
+  value: JsonValue | undefined,
+  path: string[] = [],
+): Array<{ path: string; value: JsonValue }> {
+  if (value === undefined) return [];
+
+  if (value === null || typeof value !== "object" || Array.isArray(value)) {
+    return [{ path: path.join("."), value }];
+  }
+
+  const entries = Object.entries(value);
+
+  if (entries.length === 0) {
+    return [{ path: path.join("."), value: {} }];
+  }
+
+  return entries.flatMap(([key, entryValue]) =>
+    flattenOverrides(entryValue, [...path, key]),
+  );
+}
+
 // ---------------------------------------------------------------------------
 // sanitizeSectionData — normalize config values and strip hidden fields
 // ---------------------------------------------------------------------------

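As an example, an overrides object of { detect: { width: 1280, height: 720 } } flattens to the entries detect.width = 1280 and detect.height = 720, which is exactly the per-field list the section save preview renders.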
View File

@@ -14,7 +14,7 @@ import { useTranslation } from "react-i18next";
 import CameraEditForm from "@/components/settings/CameraEditForm";
 import CameraWizardDialog from "@/components/settings/CameraWizardDialog";
 import DeleteCameraDialog from "@/components/overlay/dialog/DeleteCameraDialog";
-import { LuPlus, LuTrash2 } from "react-icons/lu";
+import { LuPencil, LuPlus, LuTrash2 } from "react-icons/lu";
 import { IoMdArrowRoundBack } from "react-icons/io";
 import { isDesktop } from "react-device-detect";
 import { CameraNameLabel } from "@/components/camera/FriendlyNameLabel";
@@ -26,6 +26,12 @@ import axios from "axios";
 import ActivityIndicator from "@/components/indicators/activity-indicator";
 import RestartDialog from "@/components/overlay/dialog/RestartDialog";
 import RestartRequiredIndicator from "@/components/indicators/RestartRequiredIndicator";
+import TextEntryDialog from "@/components/overlay/dialog/TextEntryDialog";
+import {
+  Tooltip,
+  TooltipContent,
+  TooltipTrigger,
+} from "@/components/ui/tooltip";
 import type { ProfileState } from "@/types/profile";
 import { getProfileColor } from "@/utils/profileColors";
 import { cn } from "@/lib/utils";
@@ -161,7 +167,13 @@ export default function CameraManagementView({
                   key={camera}
                   className="flex flex-row items-center justify-between"
                 >
-                  <CameraNameLabel camera={camera} />
+                  <div className="flex items-center gap-1">
+                    <CameraNameLabel camera={camera} />
+                    <CameraFriendlyNameEditor
+                      cameraName={camera}
+                      onConfigChanged={updateConfig}
+                    />
+                  </div>
                   <CameraEnableSwitch cameraName={camera} />
                 </div>
               ))}
@@ -297,6 +309,103 @@ function CameraEnableSwitch({ cameraName }: CameraEnableSwitchProps) {
   );
 }
 
+type CameraFriendlyNameEditorProps = {
+  cameraName: string;
+  onConfigChanged: () => Promise<unknown>;
+};
+
+function CameraFriendlyNameEditor({
+  cameraName,
+  onConfigChanged,
+}: CameraFriendlyNameEditorProps) {
+  const { t } = useTranslation(["views/settings", "common"]);
+  const { data: config } = useSWR<FrigateConfig>("config");
+  const [open, setOpen] = useState(false);
+  const [isSaving, setIsSaving] = useState(false);
+
+  const currentFriendlyName = config?.cameras?.[cameraName]?.friendly_name;
+
+  const onSave = useCallback(
+    async (text: string) => {
+      if (isSaving) return;
+      setIsSaving(true);
+      try {
+        await axios.put("config/set", {
+          requires_restart: 0,
+          config_data: {
+            cameras: {
+              [cameraName]: {
+                friendly_name: text.trim() || null,
+              },
+            },
+          },
+        });
+        await onConfigChanged();
+        setOpen(false);
+        toast.success(t("toast.save.success", { ns: "common" }), {
+          position: "top-center",
+        });
+      } catch (error) {
+        const errorMessage =
+          axios.isAxiosError(error) &&
+          (error.response?.data?.message || error.response?.data?.detail)
+            ? error.response?.data?.message || error.response?.data?.detail
+            : t("toast.save.error.noMessage", { ns: "common" });
+        toast.error(
+          t("toast.save.error.title", { errorMessage, ns: "common" }),
+          { position: "top-center" },
+        );
+      } finally {
+        setIsSaving(false);
+      }
+    },
+    [cameraName, isSaving, onConfigChanged, t],
+  );
+
+  const renameLabel = t("cameraManagement.streams.friendlyName.rename", {
+    ns: "views/settings",
+  });
+
+  return (
+    <>
+      <Tooltip>
+        <TooltipTrigger asChild>
+          <Button
+            variant="ghost"
+            size="icon"
+            className="size-7"
+            aria-label={renameLabel}
+            onClick={() => setOpen(true)}
+            disabled={isSaving}
+          >
+            <LuPencil className="size-3.5" />
+          </Button>
+        </TooltipTrigger>
+        <TooltipContent>{renameLabel}</TooltipContent>
+      </Tooltip>
+      <TextEntryDialog
+        open={open}
+        setOpen={setOpen}
+        title={t("cameraManagement.streams.friendlyName.title", {
+          ns: "views/settings",
+        })}
+        description={t("cameraManagement.streams.friendlyName.description", {
+          ns: "views/settings",
+        })}
+        defaultValue={currentFriendlyName ?? ""}
+        placeholder={currentFriendlyName ? undefined : cameraName}
+        allowEmpty
+        isSaving={isSaving}
+        onSave={onSave}
+      />
+    </>
+  );
+}
+
 type CameraConfigEnableSwitchProps = {
   cameraName: string;
   setRestartDialogOpen: React.Dispatch<React.SetStateAction<boolean>>;