Compare commits


No commits in common. "acd10d0e08267d2af227defcf92320d36e96829e" and "cedcbdba078cb32305ce5553254ce09565b6f3ca" have entirely different histories.

56 changed files with 719 additions and 3017 deletions

.vscode/launch.json vendored

@@ -6,23 +6,6 @@
             "type": "debugpy",
             "request": "launch",
             "module": "frigate"
-        },
-        {
-            "type": "editor-browser",
-            "request": "launch",
-            "name": "Vite: Launch in integrated browser",
-            "url": "http://localhost:5173"
-        },
-        {
-            "type": "editor-browser",
-            "request": "launch",
-            "name": "Nginx: Launch in integrated browser",
-            "url": "http://localhost:5000"
-        },
-        {
-            "type": "editor-browser",
-            "request": "attach",
-            "name": "Attach to integrated browser"
         }
     ]
 }


@@ -122,17 +122,6 @@ docs/ # Documentation site
 migrations/ # Database migrations
 ```
 
-## Translations
-
-Frigate uses [Weblate](https://hosted.weblate.org/projects/frigate-nvr/) for managing language translations. If you'd like to help translate Frigate into your language:
-
-1. Visit the [Frigate project on Weblate](https://hosted.weblate.org/projects/frigate-nvr/).
-2. Create an account or log in.
-3. Browse the available languages and select the one you'd like to contribute to, or request a new language.
-4. Translate strings directly in the Weblate interface — no code changes or pull requests needed.
-
-Translation contributions through Weblate are automatically synced to the repository. Please do not submit pull requests for translation changes — use Weblate instead so that translations are properly tracked and coordinated.
-
 ## Resources
 
 - [Documentation](https://docs.frigate.video)


@@ -52,7 +52,7 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
     tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1 amd64/bin/ffmpeg amd64/bin/ffprobe
     rm -rf ffmpeg.tar.xz
     mkdir -p /usr/lib/ffmpeg/7.0
-    wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2026-03-19-13-03/ffmpeg-n7.1.3-43-g5a1f107b4c-linux64-gpl-7.1.tar.xz"
+    wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linux64-gpl-7.0.tar.xz"
     tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1 amd64/bin/ffmpeg amd64/bin/ffprobe
     rm -rf ffmpeg.tar.xz
 fi
@@ -64,7 +64,7 @@ if [[ "${TARGETARCH}" == "arm64" ]]; then
     tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1 arm64/bin/ffmpeg arm64/bin/ffprobe
     rm -f ffmpeg.tar.xz
     mkdir -p /usr/lib/ffmpeg/7.0
-    wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2026-03-19-13-03/ffmpeg-n7.1.3-43-g5a1f107b4c-linuxarm64-gpl-7.1.tar.xz"
+    wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linuxarm64-gpl-7.0.tar.xz"
     tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1 arm64/bin/ffmpeg arm64/bin/ffprobe
     rm -f ffmpeg.tar.xz
 fi

File diff suppressed because it is too large.


@@ -201,9 +201,10 @@ def get_tool_definitions() -> List[Dict[str, Any]]:
         "function": {
             "name": "get_live_context",
             "description": (
-                "Get the current live image and detection information for a camera: objects being tracked, "
+                "Get the current detection information for a camera: objects being tracked, "
                 "zones, timestamps. Use this to understand what is visible in the live view. "
-                "Call this when answering questions about what is happening right now on a specific camera."
+                "Call this when the user has included a live image (via include_live_image) or "
+                "when answering questions about what is happening right now on a specific camera."
             ),
             "parameters": {
                 "type": "object",
@@ -383,54 +384,12 @@ async def _execute_get_live_context(
                 "stationary": obj_dict.get("stationary", False),
             }
 
-        result: Dict[str, Any] = {
+        return {
             "camera": camera,
             "timestamp": frame_time,
             "detections": list(tracked_objects_dict.values()),
         }
-
-        # Grab live frame and handle based on provider configuration
-        image_url = await _get_live_frame_image_url(request, camera, allowed_cameras)
-        if image_url:
-            genai_manager = request.app.genai_manager
-            if genai_manager.tool_client is genai_manager.vision_client:
-                # Same provider handles both roles — pass image URL so it can
-                # be injected as a user message (images can't be in tool results)
-                result["_image_url"] = image_url
-            elif genai_manager.vision_client is not None:
-                # Separate vision provider — have it describe the image,
-                # providing detection context so it knows what to focus on
-                frame_bytes = _decode_data_url(image_url)
-                if frame_bytes:
-                    detections = result.get("detections", [])
-                    if detections:
-                        detection_lines = []
-                        for d in detections:
-                            parts = [d.get("label", "unknown")]
-                            if d.get("sub_label"):
-                                parts.append(f"({d['sub_label']})")
-                            if d.get("zones"):
-                                parts.append(f"in {', '.join(d['zones'])}")
-                            detection_lines.append(" ".join(parts))
-                        context = (
-                            "The following objects are currently being tracked: "
-                            + "; ".join(detection_lines)
-                            + "."
-                        )
-                    else:
-                        context = "No objects are currently being tracked."
-                    description = genai_manager.vision_client._send(
-                        f"Describe what you see in this security camera image. "
-                        f"{context} Focus on the scene, any visible activity, "
-                        f"and details about the tracked objects.",
-                        [frame_bytes],
-                    )
-                    if description:
-                        result["image_description"] = description
-
-        return result
     except Exception as e:
         logger.error(f"Error executing get_live_context: {e}", exc_info=True)
         return {
@@ -446,8 +405,8 @@ async def _get_live_frame_image_url(
     """
     Fetch the current live frame for a camera as a base64 data URL.
 
-    Returns None if the frame cannot be retrieved. Used by get_live_context
-    to attach the live image to the conversation.
+    Returns None if the frame cannot be retrieved. Used when include_live_image
+    is set to attach the image to the first user message.
     """
     if (
         camera not in allowed_cameras
@@ -462,12 +421,12 @@ async def _get_live_frame_image_url(
     if frame is None:
         return None
     height, width = frame.shape[:2]
-    target_height = 480
-    if height > target_height:
-        scale = target_height / height
+    max_dimension = 1024
+    if height > max_dimension or width > max_dimension:
+        scale = max_dimension / max(height, width)
         frame = cv2.resize(
             frame,
-            (int(width * scale), target_height),
+            (int(width * scale), int(height * scale)),
             interpolation=cv2.INTER_AREA,
         )
    _, img_encoded = cv2.imencode(".jpg", frame, [cv2.IMWRITE_JPEG_QUALITY, 85])
@@ -478,17 +437,6 @@ async def _get_live_frame_image_url(
         return None
 
 
-def _decode_data_url(data_url: str) -> Optional[bytes]:
-    """Decode a base64 data URL to raw bytes."""
-    try:
-        # Format: data:image/jpeg;base64,<data>
-        _, encoded = data_url.split(",", 1)
-        return base64.b64decode(encoded)
-    except (ValueError, Exception) as e:
-        logger.debug("Failed to decode data URL: %s", e)
-        return None
-
-
 async def _execute_set_camera_state(
     request: Request,
     arguments: Dict[str, Any],
@@ -579,18 +527,12 @@ async def _execute_pending_tools(
     pending_tool_calls: List[Dict[str, Any]],
     request: Request,
     allowed_cameras: List[str],
-) -> tuple[List[ToolCall], List[Dict[str, Any]], List[Dict[str, Any]]]:
+) -> tuple[List[ToolCall], List[Dict[str, Any]]]:
     """
-    Execute a list of tool calls.
-
-    Returns:
-        (ToolCall list for API response,
-         tool result dicts for conversation,
-         extra messages to inject after tool results e.g. user messages with images)
+    Execute a list of tool calls; return (ToolCall list for API response, tool result dicts for conversation).
     """
     tool_calls_out: List[ToolCall] = []
     tool_results: List[Dict[str, Any]] = []
-    extra_messages: List[Dict[str, Any]] = []
 
     for tool_call in pending_tool_calls:
         tool_name = tool_call["name"]
         tool_args = tool_call.get("arguments") or {}
@@ -627,27 +569,6 @@ async def _execute_pending_tools(
                         for evt in tool_result
                         if isinstance(evt, dict)
                     ]
 
-                # Extract _image_url from get_live_context results — images can
-                # only be sent in user messages, not tool results
-                if isinstance(tool_result, dict) and "_image_url" in tool_result:
-                    image_url = tool_result.pop("_image_url")
-                    extra_messages.append(
-                        {
-                            "role": "user",
-                            "content": [
-                                {
-                                    "type": "text",
-                                    "text": f"Here is the current live image from camera '{tool_result.get('camera', 'unknown')}'.",
-                                },
-                                {
-                                    "type": "image_url",
-                                    "image_url": {"url": image_url},
-                                },
-                            ],
-                        }
-                    )
-
                 result_content = (
                     json.dumps(tool_result)
                     if isinstance(tool_result, (dict, list))
@@ -683,7 +604,7 @@ async def _execute_pending_tools(
                     "content": error_content,
                 }
             )
 
-    return (tool_calls_out, tool_results, extra_messages)
+    return (tool_calls_out, tool_results)
 
 
 @router.post(
@@ -739,13 +660,7 @@ async def chat_completion(
                 if camera_config.friendly_name
                 else camera_id.replace("_", " ").title()
             )
-            zone_names = list(camera_config.zones.keys())
-            if zone_names:
-                cameras_info.append(
-                    f" - {friendly_name} (ID: {camera_id}, zones: {', '.join(zone_names)})"
-                )
-            else:
-                cameras_info.append(f" - {friendly_name} (ID: {camera_id})")
+            cameras_info.append(f" - {friendly_name} (ID: {camera_id})")
 
     cameras_section = ""
     if cameras_info:
@@ -755,6 +670,14 @@ async def chat_completion(
             + "\n\nWhen users refer to cameras by their friendly name (e.g., 'Back Deck Camera'), use the corresponding camera ID (e.g., 'back_deck_cam') in tool calls."
         )
 
+    live_image_note = ""
+    if body.include_live_image:
+        live_image_note = (
+            f"\n\nThe first user message includes a live image from camera "
+            f"'{body.include_live_image}'. Use get_live_context for that camera to get "
+            "current detection details (objects, zones) to aid in understanding the image."
+        )
+
     system_prompt = f"""You are a helpful assistant for Frigate, a security camera NVR system. You help users answer questions about their cameras, detected objects, and events.

 Current server local date and time: {current_date_str} at {current_time_str}
@@ -764,7 +687,7 @@ Do not start your response with phrases like "I will check...", "Let me see...",
 Always present times to the user in the server's local timezone. When tool results include start_time_local and end_time_local, use those exact strings when listing or describing detection times—do not convert or invent timestamps. Do not use UTC or ISO format with Z for the user-facing answer unless the tool result only provides Unix timestamps without local time fields.
 When users ask about "today", "yesterday", "this week", etc., use the current date above as reference.
 When searching for objects or events, use ISO 8601 format for dates (e.g., {current_date_str}T00:00:00Z for the start of today).
-Always be accurate with time calculations based on the current date provided.{cameras_section}"""
+Always be accurate with time calculations based on the current date provided.{cameras_section}{live_image_note}"""
 
     conversation.append(
         {
@@ -773,6 +696,7 @@ Always be accurate with time calculations based on the current date provided.{ca
         }
     )
 
+    first_user_message_seen = False
     for msg in body.messages:
         msg_dict = {
             "role": msg.role,
@@ -783,6 +707,21 @@ Always be accurate with time calculations based on the current date provided.{ca
         if msg.name:
             msg_dict["name"] = msg.name
 
+        if (
+            msg.role == "user"
+            and not first_user_message_seen
+            and body.include_live_image
+        ):
+            first_user_message_seen = True
+            image_url = await _get_live_frame_image_url(
+                request, body.include_live_image, allowed_cameras
+            )
+            if image_url:
+                msg_dict["content"] = [
+                    {"type": "text", "text": msg.content},
+                    {"type": "image_url", "image_url": {"url": image_url}},
+                ]
+
         conversation.append(msg_dict)
@@ -840,16 +779,11 @@ Always be accurate with time calculations based on the current date provided.{ca
                             msg.get("content"), pending
                         )
                     )
-                    (
-                        executed_calls,
-                        tool_results,
-                        extra_msgs,
-                    ) = await _execute_pending_tools(
+                    executed_calls, tool_results = await _execute_pending_tools(
                         pending, request, allowed_cameras
                     )
                     stream_tool_calls.extend(executed_calls)
                    conversation.extend(tool_results)
-                    conversation.extend(extra_msgs)
                     yield (
                         json.dumps(
                             {
@@ -956,12 +890,11 @@ Always be accurate with time calculations based on the current date provided.{ca
                 f"Tool calls detected (iteration {tool_iterations}/{max_iterations}): "
                 f"{len(pending_tool_calls)} tool(s) to execute"
             )
-            executed_calls, tool_results, extra_msgs = await _execute_pending_tools(
+            executed_calls, tool_results = await _execute_pending_tools(
                pending_tool_calls, request, allowed_cameras
            )
             tool_calls.extend(executed_calls)
             conversation.extend(tool_results)
-            conversation.extend(extra_msgs)
             logger.debug(
                 f"Added {len(tool_results)} tool result(s) to conversation. "
                 f"Continuing with next LLM call..."


@@ -32,6 +32,13 @@ class ChatCompletionRequest(BaseModel):
         le=10,
         description="Maximum number of tool call iterations (default: 5)",
     )
+    include_live_image: Optional[str] = Field(
+        default=None,
+        description=(
+            "If set, the current live frame from this camera is attached to the first "
+            "user message as multimodal content. Use with get_live_context for detection info."
+        ),
+    )
     stream: bool = Field(
         default=False,
         description="If true, stream the final assistant response in the body as newline-delimited JSON.",


@@ -49,8 +49,8 @@ class StationaryConfig(FrigateBaseModel):
 class DetectConfig(FrigateBaseModel):
     enabled: bool = Field(
         default=False,
-        title="Enable object detection",
-        description="Enable or disable object detection for all cameras; can be overridden per-camera.",
+        title="Detection enabled",
+        description="Enable or disable object detection for all cameras; can be overridden per-camera. Detection must be enabled for object tracking to run.",
     )
     height: Optional[int] = Field(
         default=None,


@@ -29,7 +29,7 @@ class RetainConfig(FrigateBaseModel):
 class SnapshotsConfig(FrigateBaseModel):
     enabled: bool = Field(
         default=False,
-        title="Enable snapshots",
+        title="Snapshots enabled",
         description="Enable or disable saving snapshots for all cameras; can be overridden per-camera.",
     )
     clean_copy: bool = Field(


@@ -444,7 +444,7 @@ class FrigateConfig(FrigateBaseModel):
     # GenAI config (named provider configs: name -> GenAIConfig)
     genai: Dict[str, GenAIConfig] = Field(
         default_factory=dict,
-        title="Generative AI configuration",
+        title="Generative AI configuration (named providers).",
         description="Settings for integrated generative AI providers used to generate object descriptions and review summaries.",
     )


@@ -4,6 +4,7 @@ import re
 import urllib.request
 from typing import Literal
 
+import axengine as axe
 from pydantic import ConfigDict
 
 from frigate.const import MODEL_CACHE_DIR
@@ -36,12 +37,6 @@ class Axengine(DetectionApi):
     type_key = DETECTOR_KEY
 
     def __init__(self, config: AxengineDetectorConfig):
-        try:
-            import axengine as axe
-        except ModuleNotFoundError:
-            raise ImportError("AXEngine is not installed.")
-            return
-
         logger.info("__init__ axengine")
         super().__init__(config)
         self.height = config.model.height


@@ -120,10 +120,10 @@ PRESETS_HW_ACCEL_DECODE["preset-rk-h265"] = PRESETS_HW_ACCEL_DECODE[
 PRESETS_HW_ACCEL_SCALE = {
     "preset-rpi-64-h264": "-r {0} -vf fps={0},scale={1}:{2}",
     "preset-rpi-64-h265": "-r {0} -vf fps={0},scale={1}:{2}",
-    FFMPEG_HWACCEL_VAAPI: "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=nv12",
-    "preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=w={1}:h={2}:format=nv12,hwdownload,format=nv12,fps={0},format=yuv420p",
-    "preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=w={1}:h={2}:format=nv12,hwdownload,format=nv12,fps={0},format=yuv420p",
-    FFMPEG_HWACCEL_NVIDIA: "-r {0} -vf fps={0},scale_cuda=w={1}:h={2},hwdownload,format=nv12",
+    FFMPEG_HWACCEL_VAAPI: "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=nv12,eq=gamma=1.4:gamma_weight=0.5",
+    "preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
+    "preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
+    FFMPEG_HWACCEL_NVIDIA: "-r {0} -vf fps={0},scale_cuda=w={1}:h={2},hwdownload,format=nv12,eq=gamma=1.4:gamma_weight=0.5",
     "preset-jetson-h264": "-r {0}",  # scaled in decoder
     "preset-jetson-h265": "-r {0}",  # scaled in decoder
     FFMPEG_HWACCEL_RKMPP: "-r {0} -vf scale_rkrga=w={1}:h={2}:format=yuv420p:force_original_aspect_ratio=0,hwmap=mode=read,format=yuv420p",
@@ -242,6 +242,15 @@ def parse_preset_hardware_acceleration_scale(
     else:
         scale = PRESETS_HW_ACCEL_SCALE.get(arg, PRESETS_HW_ACCEL_SCALE["default"])
 
+    if (
+        ",hwdownload,format=nv12,eq=gamma=1.4:gamma_weight=0.5" in scale
+        and os.environ.get("FFMPEG_DISABLE_GAMMA_EQUALIZER") is not None
+    ):
+        scale = scale.replace(
+            ",hwdownload,format=nv12,eq=gamma=1.4:gamma_weight=0.5",
+            ":format=nv12,hwdownload,format=nv12,format=yuv420p",
+        )
+
     scale = scale.format(fps, width, height).split(" ")
     scale.extend(detect_args)
     return scale
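A minimal standalone sketch of the opt-out added above: the check only tests that FFMPEG_DISABLE_GAMMA_EQUALIZER is present in the environment (any value), in which case the gamma-equalizer filter chain is swapped for a plain format chain. The constants mirror the strings in the hunk; the preset string itself is illustrative:

```python
import os

# The two filter-chain fragments from the hunk above.
GAMMA_CHAIN = ",hwdownload,format=nv12,eq=gamma=1.4:gamma_weight=0.5"
PLAIN_CHAIN = ":format=nv12,hwdownload,format=nv12,format=yuv420p"

scale = "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}" + GAMMA_CHAIN

# Presence is all that is checked; any value disables the equalizer.
if GAMMA_CHAIN in scale and os.environ.get("FFMPEG_DISABLE_GAMMA_EQUALIZER") is not None:
    scale = scale.replace(GAMMA_CHAIN, PLAIN_CHAIN)

# With the variable set, this prints:
# -r 10 -vf fps=10,scale_cuda=w=1280:h=720:format=nv12,hwdownload,format=nv12,format=yuv420p
print(scale.format(10, 1280, 720))
```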


@@ -73,8 +73,9 @@ class TestFfmpegPresets(unittest.TestCase):
         assert "preset-nvidia-h264" not in (
             " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
         )
-        assert "fps=10,scale_cuda=w=2560:h=1920,hwdownload,format=nv12" in (
-            " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
+        assert (
+            "fps=10,scale_cuda=w=2560:h=1920,hwdownload,format=nv12,eq=gamma=1.4:gamma_weight=0.5"
+            in (" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]))
         )
 
     def test_default_ffmpeg_input_arg_preset(self):


@@ -518,15 +518,6 @@ def main():
         sanitize_camera_descriptions(camera_translations)
 
-        # Profiles contain the same sections as the camera itself; only keep
-        # label and description to avoid duplicating every camera section.
-        if "profiles" in camera_translations:
-            camera_translations["profiles"] = {
-                k: v
-                for k, v in camera_translations["profiles"].items()
-                if k in ("label", "description")
-            }
-
         with open(cameras_file, "w", encoding="utf-8") as f:
             json.dump(camera_translations, f, indent=2, ensure_ascii=False)
             f.write("\n")


@@ -79,8 +79,8 @@
     "label": "Object Detection",
     "description": "Settings for the detection/detect role used to run object detection and initialize trackers.",
     "enabled": {
-      "label": "Enable object detection",
-      "description": "Enable or disable object detection for this camera."
+      "label": "Detection enabled",
+      "description": "Enable or disable object detection for this camera. Detection must be enabled for object tracking to run."
     },
     "height": {
       "label": "Detect height",
@@ -628,7 +628,7 @@
     "label": "Snapshots",
     "description": "Settings for saved JPEG snapshots of tracked objects for this camera.",
     "enabled": {
-      "label": "Enable snapshots",
+      "label": "Snapshots enabled",
       "description": "Enable or disable saving snapshots for this camera."
     },
     "clean_copy": {
@@ -860,10 +860,6 @@
     "label": "Camera URL",
     "description": "URL to visit the camera directly from system page"
   },
-  "profiles": {
-    "label": "Profiles",
-    "description": "Named config profiles with partial overrides that can be activated at runtime."
-  },
   "zones": {
     "label": "Zones",
     "description": "Zones allow you to define a specific area of the frame so you can determine whether or not an object is within a particular area.",


@@ -1174,7 +1174,7 @@
     }
   },
   "genai": {
-    "label": "Generative AI configuration",
+    "label": "Generative AI configuration (named providers).",
     "description": "Settings for integrated generative AI providers used to generate object descriptions and review summaries.",
     "api_key": {
       "label": "API key",
@@ -1293,8 +1293,8 @@
     "label": "Object Detection",
     "description": "Settings for the detection/detect role used to run object detection and initialize trackers.",
     "enabled": {
-      "label": "Enable object detection",
-      "description": "Enable or disable object detection for all cameras; can be overridden per-camera."
+      "label": "Detection enabled",
+      "description": "Enable or disable object detection for all cameras; can be overridden per-camera. Detection must be enabled for object tracking to run."
     },
     "height": {
       "label": "Detect height",
@@ -1778,7 +1778,7 @@
     "label": "Snapshots",
     "description": "Settings for saved JPEG snapshots of tracked objects for all cameras; can be overridden per-camera.",
     "enabled": {
-      "label": "Enable snapshots",
+      "label": "Snapshots enabled",
       "description": "Enable or disable saving snapshots for all cameras; can be overridden per-camera."
     },
     "clean_copy": {
@@ -2128,18 +2128,6 @@
       "description": "Numeric order used to sort camera groups in the UI; larger numbers appear later."
     }
   },
-  "profiles": {
-    "label": "Profiles",
-    "description": "Named profile definitions with friendly names. Camera profiles must reference names defined here.",
-    "friendly_name": {
-      "label": "Friendly name",
-      "description": "Display name for this profile shown in the UI."
-    }
-  },
-  "active_profile": {
-    "label": "Active profile",
-    "description": "Currently active profile name. Runtime-only, not persisted in YAML."
-  },
   "camera_mqtt": {
     "label": "MQTT",
     "description": "MQTT image publishing settings.",


@@ -1402,18 +1402,6 @@
         "audio": "Audio"
       }
     },
-    "genaiRoles": {
-      "options": {
-        "embeddings": "Embedding",
-        "vision": "Vision",
-        "tools": "Tools"
-      }
-    },
-    "semanticSearchModel": {
-      "placeholder": "Select model…",
-      "builtIn": "Built-in Models",
-      "genaiProviders": "GenAI Providers"
-    },
     "review": {
       "title": "Review Settings"
     },


@@ -13,7 +13,7 @@ const audio: SectionConfigOverrides = {
       "num_threads",
     ],
     fieldGroups: {
-      detection: ["listen", "filters"],
+      detection: ["enabled", "listen", "filters"],
       sensitivity: ["min_volume", "max_not_heard"],
     },
     hiddenFields: ["enabled_in_config"],


@@ -18,7 +18,7 @@ const detect: SectionConfigOverrides = {
     ],
     restartRequired: [],
     fieldGroups: {
-      resolution: ["width", "height", "fps"],
+      resolution: ["enabled", "width", "height", "fps"],
       tracking: ["min_initialized", "max_disappeared"],
     },
     hiddenFields: ["enabled_in_config"],


@@ -6,7 +6,7 @@ const faceRecognition: SectionConfigOverrides = {
     restartRequired: [],
     fieldOrder: ["enabled", "min_area"],
     hiddenFields: [],
-    advancedFields: [],
+    advancedFields: ["min_area"],
     overrideFields: ["enabled", "min_area"],
   },
   global: {


@@ -4,50 +4,39 @@ const genai: SectionConfigOverrides = {
   base: {
     sectionDocs: "/configuration/genai/config",
     restartRequired: [
-      "*.provider",
-      "*.api_key",
-      "*.base_url",
-      "*.model",
-      "*.provider_options",
-      "*.runtime_options",
+      "provider",
+      "api_key",
+      "base_url",
+      "model",
+      "provider_options",
+      "runtime_options",
     ],
-    advancedFields: ["*.base_url", "*.provider_options", "*.runtime_options"],
+    fieldOrder: [
+      "provider",
+      "api_key",
+      "base_url",
+      "model",
+      "provider_options",
+      "runtime_options",
+    ],
+    advancedFields: ["base_url", "provider_options", "runtime_options"],
     hiddenFields: ["genai.enabled_in_config"],
     uiSchema: {
-      "ui:options": { disableNestedCard: true },
-      "*": {
-        "ui:options": { disableNestedCard: true },
-        "ui:order": [
-          "provider",
-          "api_key",
-          "base_url",
-          "model",
-          "provider_options",
-          "runtime_options",
-          "*",
-        ],
+      api_key: {
+        "ui:options": { size: "md" },
       },
-      "*.roles": {
-        "ui:widget": "genaiRoles",
-      },
-      "*.api_key": {
+      base_url: {
         "ui:options": { size: "lg" },
       },
-      "*.base_url": {
-        "ui:options": { size: "lg" },
+      model: {
+        "ui:options": { size: "md" },
       },
-      "*.model": {
-        "ui:options": { size: "xs" },
-      },
-      "*.provider": {
-        "ui:options": { size: "xs" },
-      },
-      "*.provider_options": {
+      provider_options: {
         additionalProperties: {
           "ui:options": { size: "lg" },
         },
       },
-      "*.runtime_options": {
+      runtime_options: {
         additionalProperties: {
           "ui:options": { size: "lg" },
         },


@@ -7,9 +7,9 @@ const lpr: SectionConfigOverrides = {
       enhancement: "/configuration/license_plate_recognition#enhancement",
     },
     restartRequired: [],
-    fieldOrder: ["enabled", "min_area", "enhancement", "expire_time"],
+    fieldOrder: ["enabled", "expire_time", "min_area", "enhancement"],
     hiddenFields: [],
-    advancedFields: ["expire_time", "enhancement"],
+    advancedFields: ["expire_time", "min_area", "enhancement"],
     overrideFields: ["enabled", "min_area", "enhancement"],
   },
   global: {


@@ -23,7 +23,7 @@ const motion: SectionConfigOverrides = {
       "mqtt_off_delay",
     ],
     fieldGroups: {
-      sensitivity: ["threshold", "contour_area"],
+      sensitivity: ["enabled", "threshold", "contour_area"],
       algorithm: ["improve_contrast", "delta_alpha", "frame_alpha"],
     },
     uiSchema: {


@@ -15,7 +15,7 @@ const record: SectionConfigOverrides = {
       "export",
     ],
     fieldGroups: {
-      retention: ["continuous", "motion"],
+      retention: ["enabled", "continuous", "motion"],
       events: ["alerts", "detections"],
     },
     hiddenFields: ["enabled_in_config", "sync_recordings"],


@@ -18,11 +18,6 @@ const semanticSearch: SectionConfigOverrides = {
     advancedFields: ["reindex", "device"],
     restartRequired: ["enabled", "model", "model_size", "device"],
     hiddenFields: ["reindex"],
-    uiSchema: {
-      model: {
-        "ui:widget": "semanticSearchModel",
-      },
-    },
   },
 };


@@ -13,7 +13,7 @@ const snapshots: SectionConfigOverrides = {
       "retain",
     ],
     fieldGroups: {
-      display: ["bounding_box", "crop", "quality", "timestamp"],
+      display: ["enabled", "bounding_box", "crop", "quality", "timestamp"],
     },
     hiddenFields: ["enabled_in_config"],
     advancedFields: ["height", "quality", "retain"],


@@ -936,7 +936,7 @@ export function ConfigSection({
               </span>
             </div>
           )}
-          <div className="flex w-full flex-col gap-2 sm:flex-row sm:items-center md:w-auto">
+          <div className="flex w-full items-center gap-2 md:w-auto">
             {((effectiveLevel === "camera" && isOverridden) ||
               effectiveLevel === "global") &&
               !hasChanges &&


@@ -23,12 +23,10 @@ import { AudioLabelSwitchesWidget } from "./widgets/AudioLabelSwitchesWidget";
 import { ZoneSwitchesWidget } from "./widgets/ZoneSwitchesWidget";
 import { ArrayAsTextWidget } from "./widgets/ArrayAsTextWidget";
 import { FfmpegArgsWidget } from "./widgets/FfmpegArgsWidget";
-import { GenAIRolesWidget } from "./widgets/GenAIRolesWidget";
 import { InputRolesWidget } from "./widgets/InputRolesWidget";
 import { TimezoneSelectWidget } from "./widgets/TimezoneSelectWidget";
 import { CameraPathWidget } from "./widgets/CameraPathWidget";
 import { OptionalFieldWidget } from "./widgets/OptionalFieldWidget";
-import { SemanticSearchModelWidget } from "./widgets/SemanticSearchModelWidget";
 
 import { FieldTemplate } from "./templates/FieldTemplate";
 import { ObjectFieldTemplate } from "./templates/ObjectFieldTemplate";
@@ -62,7 +60,6 @@ export const frigateTheme: FrigateTheme = {
     ArrayAsTextWidget: ArrayAsTextWidget,
     FfmpegArgsWidget: FfmpegArgsWidget,
     CameraPathWidget: CameraPathWidget,
-    genaiRoles: GenAIRolesWidget,
     inputRoles: InputRolesWidget,
     // Custom widgets
     switch: SwitchWidget,
@@ -78,7 +75,6 @@ export const frigateTheme: FrigateTheme = {
     zoneNames: ZoneSwitchesWidget,
     timezoneSelect: TimezoneSelectWidget,
     optionalField: OptionalFieldWidget,
-    semanticSearchModel: SemanticSearchModelWidget,
   },
   templates: {
     FieldTemplate: FieldTemplate as React.ComponentType<FieldTemplateProps>,


@@ -311,54 +311,51 @@ export function ObjectFieldTemplate(props: ObjectFieldTemplateProps) {
       return null;
     }
 
-    // Build a lookup: field name → group info
-    const fieldToGroup = new Map<
-      string,
-      { groupKey: string; label: string; items: (typeof properties)[number][] }
-    >();
-    const hasGroups = Object.keys(groupDefinitions).length > 0;
-    for (const [groupKey, fields] of Object.entries(groupDefinitions)) {
-      const ordered = fields
-        .map((field) => items.find((item) => item.name === field))
-        .filter(Boolean) as (typeof properties)[number][];
-      if (ordered.length === 0) continue;
+    const grouped = new Set<string>();
+    const groups = Object.entries(groupDefinitions)
+      .map(([groupKey, fields]) => {
+        const ordered = fields
+          .map((field) => items.find((item) => item.name === field))
+          .filter(Boolean) as (typeof properties)[number][];
+        if (ordered.length === 0) {
+          return null;
+        }
+
+        ordered.forEach((item) => grouped.add(item.name));
 
-      const label = domain
-        ? t(`${sectionI18nPrefix}.${domain}.${groupKey}`, {
-            ns: "config/groups",
-            defaultValue: humanizeKey(groupKey),
-          })
-        : t(`groups.${groupKey}`, {
-            defaultValue: humanizeKey(groupKey),
-          });
-
-      const groupInfo = { groupKey, label, items: ordered };
-      for (const item of ordered) {
-        fieldToGroup.set(item.name, groupInfo);
-      }
-    }
+        const label = domain
+          ? t(`${sectionI18nPrefix}.${domain}.${groupKey}`, {
+              ns: "config/groups",
+              defaultValue: humanizeKey(groupKey),
+            })
+          : t(`groups.${groupKey}`, {
+              defaultValue: humanizeKey(groupKey),
+            });
+
+        return {
+          key: groupKey,
+          label,
+          items: ordered,
+        };
+      })
+      .filter(Boolean) as Array<{
+      key: string;
+      label: string;
+      items: (typeof properties)[number][];
+    }>;
+
+    const ungrouped = items.filter((item) => !grouped.has(item.name));
 
     const isObjectLikeField = (item: (typeof properties)[number]) => {
       const fieldSchema = (item.content.props as RjsfElementProps)?.schema;
       return fieldSchema?.type === "object";
     };
 
-    // Walk items in order (respects fieldOrder / ui:order).
-    // When we hit the first field of a group, render the whole group block.
-    // Skip subsequent fields that belong to an already-rendered group.
-    const renderedGroups = new Set<string>();
-    const elements: React.ReactNode[] = [];
-    for (const item of items) {
-      const group = fieldToGroup.get(item.name);
-      if (group) {
-        if (renderedGroups.has(group.groupKey)) continue;
-        renderedGroups.add(group.groupKey);
-        elements.push(
+    return (
+      <div className="space-y-6">
+        {groups.map((group) => (
           <div
-            key={group.groupKey}
+            key={group.key}
             className="space-y-4 rounded-lg border border-border/70 bg-card/30 p-4"
           >
             <div className="text-md border-b border-border/60 pb-4 font-semibold text-primary-variant">
@@ -369,21 +366,25 @@ export function ObjectFieldTemplate(props: ObjectFieldTemplateProps) {
               <div key={element.name}>{element.content}</div>
             ))}
             </div>
-          </div>,
-        );
-      } else {
-        elements.push(
-          <div
-            key={item.name}
-            className={cn(hasGroups && !isObjectLikeField(item) && "px-4")}
-          >
-            {item.content}
-          </div>,
-        );
-      }
-    }
+          </div>
+        ))}
 
-    return <div className="space-y-6">{elements}</div>;
+        {ungrouped.length > 0 && (
+          <div className={cn("space-y-6", groups.length > 0 && "pt-2")}>
+            {ungrouped.map((element) => (
+              <div
+                key={element.name}
+                className={cn(
+                  groups.length > 0 && !isObjectLikeField(element) && "px-4",
+                )}
+              >
+                {element.content}
+              </div>
+            ))}
+          </div>
+        )}
+      </div>
+    );
   };
 
   // Root level renders children directly
@@ -455,7 +456,7 @@ export function ObjectFieldTemplate(props: ObjectFieldTemplateProps) {
         <CollapsibleTrigger asChild>
           <CardHeader className="cursor-pointer p-4 transition-colors hover:bg-muted/50">
             <div className="flex items-center justify-between">
-              <div className="min-w-0 pr-3">
+              <div>
                 <CardTitle
                   className={cn(
                     "flex items-center text-sm",
@@ -474,9 +475,9 @@ export function ObjectFieldTemplate(props: ObjectFieldTemplateProps) {
               )}
             </div>
             {isOpen ? (
-              <LuChevronDown className="h-4 w-4 shrink-0" />
+              <LuChevronDown className="h-4 w-4" />
             ) : (
-              <LuChevronRight className="h-4 w-4 shrink-0" />
+              <LuChevronRight className="h-4 w-4" />
             )}
           </div>
         </CardHeader>


@@ -1,109 +0,0 @@
-import type { WidgetProps } from "@rjsf/utils";
-import { useMemo } from "react";
-import { useTranslation } from "react-i18next";
-
-import { Switch } from "@/components/ui/switch";
-import type { ConfigFormContext } from "@/types/configForm";
-
-const GENAI_ROLES = ["embeddings", "vision", "tools"] as const;
-
-function normalizeValue(value: unknown): string[] {
-  if (Array.isArray(value)) {
-    return value.filter((item): item is string => typeof item === "string");
-  }
-  if (typeof value === "string" && value.trim()) {
-    return [value.trim()];
-  }
-  return [];
-}
-
-function getProviderKey(widgetId: string): string | undefined {
-  const prefix = "root_";
-  const suffix = "_roles";
-  if (!widgetId.startsWith(prefix) || !widgetId.endsWith(suffix)) {
-    return undefined;
-  }
-  return widgetId.slice(prefix.length, -suffix.length) || undefined;
-}
-
-export function GenAIRolesWidget(props: WidgetProps) {
-  const { id, value, disabled, readonly, onChange, registry } = props;
-  const { t } = useTranslation(["views/settings"]);
-  const formContext = registry?.formContext as ConfigFormContext | undefined;
-
-  const selectedRoles = useMemo(() => normalizeValue(value), [value]);
-  const providerKey = useMemo(() => getProviderKey(id), [id]);
-
-  // Compute occupied roles directly from formData. The computation is
-  // trivially cheap (iterate providers × 3 roles max) so we skip an
-  // intermediate memoization layer whose formData dependency would
-  // never produce a cache hit (new object reference on every change).
-  const occupiedRoles = useMemo(() => {
-    const occupied = new Set<string>();
-    const fd = formContext?.formData;
-    if (!fd || typeof fd !== "object") return occupied;
-    for (const [provider, config] of Object.entries(
-      fd as Record<string, unknown>,
-    )) {
-      if (provider === providerKey) continue;
-      if (!config || typeof config !== "object" || Array.isArray(config))
-        continue;
-      for (const role of normalizeValue(
-        (config as Record<string, unknown>).roles,
-      )) {
-        occupied.add(role);
-      }
-    }
-    return occupied;
-  }, [formContext?.formData, providerKey]);
-
-  const toggleRole = (role: string, enabled: boolean) => {
-    if (enabled) {
-      if (!selectedRoles.includes(role)) {
-        onChange([...selectedRoles, role]);
-      }
-      return;
-    }
-    onChange(selectedRoles.filter((item) => item !== role));
-  };
-
-  return (
-    <div className="rounded-lg border border-secondary-highlight bg-background_alt p-2 pr-0 md:max-w-md">
-      <div className="grid gap-2">
-        {GENAI_ROLES.map((role) => {
-          const checked = selectedRoles.includes(role);
-          const roleDisabled = !checked && occupiedRoles.has(role);
-          const label = t(`configForm.genaiRoles.options.${role}`, {
-            ns: "views/settings",
-            defaultValue: role,
-          });
-          return (
-            <div
-              key={role}
-              className="flex items-center justify-between rounded-md px-3 py-0"
-            >
-              <label htmlFor={`${id}-${role}`} className="text-sm">
-                {label}
-              </label>
-              <Switch
-                id={`${id}-${role}`}
-                checked={checked}
-                disabled={disabled || readonly || roleDisabled}
-                onCheckedChange={(enabled) => toggleRole(role, !!enabled)}
-              />
-            </div>
-          );
-        })}
-      </div>
-    </div>
-  );
-}


@@ -1,159 +0,0 @@
-// Combobox widget for semantic_search.model field.
-// Shows built-in model enum values and GenAI providers with the embeddings role.
-import { useState, useMemo } from "react";
-import type { WidgetProps } from "@rjsf/utils";
-import { useTranslation } from "react-i18next";
-import { Check, ChevronsUpDown } from "lucide-react";
-
-import { cn } from "@/lib/utils";
-import { Button } from "@/components/ui/button";
-import {
-  Command,
-  CommandGroup,
-  CommandItem,
-  CommandList,
-} from "@/components/ui/command";
-import {
-  Popover,
-  PopoverContent,
-  PopoverTrigger,
-} from "@/components/ui/popover";
-import type { ConfigFormContext } from "@/types/configForm";
-
-import { getSizedFieldClassName } from "../utils";
-
-interface ProviderOption {
-  value: string;
-  label: string;
-}
-
-export function SemanticSearchModelWidget(props: WidgetProps) {
-  const { id, value, disabled, readonly, onChange, schema, registry, options } =
-    props;
-  const { t } = useTranslation(["views/settings"]);
-  const [open, setOpen] = useState(false);
-  const formContext = registry?.formContext as ConfigFormContext | undefined;
-  const fieldClassName = getSizedFieldClassName(options, "sm");
-
-  // Built-in model options from schema.examples (populated by transformer
-  // collapsing the anyOf enum+string union)
-  const builtInModels: ProviderOption[] = useMemo(() => {
-    const examples = (schema as Record<string, unknown>).examples;
-    if (!Array.isArray(examples)) return [];
-    return examples
-      .filter((v): v is string => typeof v === "string")
-      .map((v) => ({ value: v, label: v }));
-  }, [schema]);
-
-  // GenAI providers that have the "embeddings" role
-  const embeddingsProviders: ProviderOption[] = useMemo(() => {
-    const genai = (
-      formContext?.fullConfig as Record<string, unknown> | undefined
-    )?.genai;
-    if (!genai || typeof genai !== "object" || Array.isArray(genai)) return [];
-    const providers: ProviderOption[] = [];
-    for (const [key, config] of Object.entries(
-      genai as Record<string, unknown>,
-    )) {
-      if (!config || typeof config !== "object" || Array.isArray(config))
-        continue;
-      const roles = (config as Record<string, unknown>).roles;
-      if (Array.isArray(roles) && roles.includes("embeddings")) {
-        providers.push({ value: key, label: key });
-      }
-    }
-    return providers;
-  }, [formContext?.fullConfig]);
-
-  const currentLabel =
-    builtInModels.find((m) => m.value === value)?.label ??
-    embeddingsProviders.find((p) => p.value === value)?.label ??
-    (typeof value === "string" && value ? value : undefined);
-
-  return (
-    <Popover open={open} onOpenChange={setOpen}>
-      <PopoverTrigger asChild>
-        <Button
-          id={id}
-          type="button"
-          variant="outline"
-          role="combobox"
-          aria-expanded={open}
-          disabled={disabled || readonly}
-          className={cn(
-            "justify-between font-normal",
-            !currentLabel && "text-muted-foreground",
-            fieldClassName,
-          )}
-        >
-          {currentLabel ??
-            t("configForm.semanticSearchModel.placeholder", {
-              ns: "views/settings",
-              defaultValue: "Select model…",
-            })}
-          <ChevronsUpDown className="ml-2 h-4 w-4 shrink-0 opacity-50" />
-        </Button>
-      </PopoverTrigger>
-      <PopoverContent className="w-[--radix-popover-trigger-width] p-0">
-        <Command>
-          <CommandList>
-            {builtInModels.length > 0 && (
-              <CommandGroup
-                heading={t("configForm.semanticSearchModel.builtIn", {
-                  ns: "views/settings",
-                  defaultValue: "Built-in Models",
-                })}
-              >
-                {builtInModels.map((model) => (
-                  <CommandItem
-                    key={model.value}
-                    value={model.value}
-                    onSelect={() => {
-                      onChange(model.value);
-                      setOpen(false);
-                    }}
-                  >
-                    <Check
-                      className={cn(
-                        "mr-2 h-4 w-4",
-                        value === model.value ? "opacity-100" : "opacity-0",
-                      )}
-                    />
-                    {model.label}
-                  </CommandItem>
-                ))}
-              </CommandGroup>
-            )}
-            {embeddingsProviders.length > 0 && (
-              <CommandGroup
-                heading={t("configForm.semanticSearchModel.genaiProviders", {
-                  ns: "views/settings",
-                  defaultValue: "GenAI Providers",
-                })}
-              >
-                {embeddingsProviders.map((provider) => (
-                  <CommandItem
-                    key={provider.value}
-                    value={provider.value}
-                    onSelect={() => {
-                      onChange(provider.value);
-                      setOpen(false);
-                    }}
-                  >
-                    <Check
-                      className={cn(
-                        "mr-2 h-4 w-4",
-                        value === provider.value ? "opacity-100" : "opacity-0",
-                      )}
-                    />
-                    {provider.label}
-                  </CommandItem>
-                ))}
-              </CommandGroup>
-            )}
-          </CommandList>
-        </Command>
-      </PopoverContent>
-    </Popover>
-  );
-}


@@ -98,8 +98,8 @@ function normalizeNullableSchema(schema: RJSFSchema): RJSFSchema {
         : ["null"];
       const { anyOf: _anyOf, oneOf: _oneOf, ...rest } = schemaObj;
       const merged: Record<string, unknown> = {
-        ...normalizedNonNullObj,
         ...rest,
+        ...normalizedNonNullObj,
         type: mergedType,
       };
       // When unwrapping a nullable enum, add null to the enum list so
@@ -110,39 +110,6 @@ function normalizeNullableSchema(schema: RJSFSchema): RJSFSchema {
       return merged as RJSFSchema;
     }
 
-    // Handle anyOf where a plain string branch subsumes a string-enum branch
-    // (e.g. Union[StrEnum, str] or Union[StrEnum, str, None]).
-    // Collapse to a single string type with enum values preserved as `examples`.
-    const stringBranches = anyOf.filter(
-      (item) =>
-        isSchemaObject(item) &&
-        (item as Record<string, unknown>).type === "string",
-    );
-    const enumBranch = stringBranches.find((item) =>
-      Array.isArray((item as Record<string, unknown>).enum),
-    );
-    const plainStringBranch = stringBranches.find(
-      (item) => !Array.isArray((item as Record<string, unknown>).enum),
-    );
-    if (
-      enumBranch &&
-      plainStringBranch &&
-      anyOf.length === stringBranches.length + (hasNull ? 1 : 0)
-    ) {
-      const enumValues = (enumBranch as Record<string, unknown>).enum as
-        | unknown[]
-        | undefined;
-      const { anyOf: _anyOf, oneOf: _oneOf, ...rest } = schemaObj;
-      return {
-        ...rest,
-        type: hasNull ? ["string", "null"] : "string",
-        ...(enumValues && enumValues.length > 0
-          ? { examples: enumValues }
-          : {}),
-      } as RJSFSchema;
-    }
-
     return {
       ...schemaObj,
       anyOf: anyOf
@@ -175,8 +142,8 @@ function normalizeNullableSchema(schema: RJSFSchema): RJSFSchema {
         : ["null"];
       const { anyOf: _anyOf, oneOf: _oneOf, ...rest } = schemaObj;
       const merged: Record<string, unknown> = {
-        ...normalizedNonNullObj,
         ...rest,
+        ...normalizedNonNullObj,
         type: mergedType,
       };
       // When unwrapping a nullable oneOf enum, add null to the enum list.


@@ -24,8 +24,8 @@ export const EmbeddingThreshold = {
 } as Threshold;
 
 export const GenAIThreshold = {
-  warning: 60,
-  error: 120,
+  warning: 30000,
+  error: 60000,
 } as Threshold;
 
 export const DetectorTempThreshold = {


@@ -372,25 +372,24 @@ export default function MotionSearchROICanvas({
               />
             )}
 
-            {/* Vertex points (only shown in interactive/dialog mode) */}
-            {isInteractive &&
-              scaledPoints.map((point, index) => (
-                <Circle
-                  key={index}
-                  name={`point-${index}`}
-                  x={point[0]}
-                  y={point[1]}
-                  radius={vertexRadius}
-                  fill={polygonColorString}
-                  stroke="white"
-                  strokeWidth={2}
-                  draggable={!isDrawing}
-                  onDragMove={(e) => handlePointDragMove(e, index)}
-                  onMouseOver={(e) => handleMouseOverPoint(e, index)}
-                  onMouseOut={(e) => handleMouseOutPoint(e, index)}
-                  onContextMenu={(e) => handleContextMenu(e, index)}
-                />
-              ))}
+            {/* Vertex points */}
+            {scaledPoints.map((point, index) => (
+              <Circle
+                key={index}
+                name={`point-${index}`}
+                x={point[0]}
+                y={point[1]}
+                radius={vertexRadius}
+                fill={polygonColorString}
+                stroke="white"
+                strokeWidth={2}
+                draggable={!isDrawing && isInteractive}
+                onDragMove={(e) => handlePointDragMove(e, index)}
+                onMouseOver={(e) => handleMouseOverPoint(e, index)}
+                onMouseOut={(e) => handleMouseOutPoint(e, index)}
+                onContextMenu={(e) => handleContextMenu(e, index)}
+              />
+            ))}
           </Layer>
         </Stage>
       )}


@@ -385,7 +385,7 @@ export default function ProfilesView({
       {/* Active Profile + Add Profile bar */}
       {(hasProfiles || profilesUIEnabled) && (
-        <div className="my-4 flex flex-col gap-3 rounded-lg border border-border/70 bg-card/30 p-4 sm:flex-row sm:items-center sm:justify-between">
+        <div className="my-4 flex items-center justify-between rounded-lg border border-border/70 bg-card/30 p-4">
           {hasProfiles && (
             <div className="flex items-center gap-3">
               <span className="text-sm font-semibold text-primary-variant">
@@ -470,12 +470,12 @@ export default function ProfilesView({
                 )}
               >
                 <CollapsibleTrigger asChild>
-                  <div className="flex cursor-pointer flex-wrap items-center gap-y-2 px-4 py-3 hover:bg-secondary/30">
-                    <div className="flex min-w-0 items-center gap-3">
+                  <div className="flex cursor-pointer items-center justify-between px-4 py-3 hover:bg-secondary/30">
+                    <div className="flex items-center gap-3">
                      {isExpanded ? (
-                        <LuChevronDown className="size-4 shrink-0 text-muted-foreground" />
+                        <LuChevronDown className="size-4 text-muted-foreground" />
                       ) : (
-                        <LuChevronRight className="size-4 shrink-0 text-muted-foreground" />
+                        <LuChevronRight className="size-4 text-muted-foreground" />
                       )}
                       <span
                         className={cn(
@@ -483,13 +483,13 @@ export default function ProfilesView({
                           color.dot,
                         )}
                       />
-                      <span className="truncate font-medium">
+                      <span className="font-medium">
                         {profileFriendlyNames?.get(profile) ?? profile}
                       </span>
                       <Button
                         variant="ghost"
                         size="icon"
-                        className="size-6 shrink-0 text-muted-foreground hover:text-primary"
+                        className="size-6 text-muted-foreground hover:text-primary"
                         onClick={(e) => {
                           e.stopPropagation();
                           setRenameProfile(profile);
@@ -500,8 +500,6 @@ export default function ProfilesView({
                       >
                         <Pencil className="size-3" />
                       </Button>
-                    </div>
-                    <div className="ml-auto flex items-center gap-3">
                       {isActive && (
                         <Badge
                           variant="secondary"
@@ -510,6 +508,8 @@ export default function ProfilesView({
                           {t("profiles.active", { ns: "views/settings" })}
                         </Badge>
                       )}
+                    </div>
+                    <div className="flex items-center gap-3">
                       <span className="text-sm text-muted-foreground">
                         {cameras.length > 0
                           ? t("profiles.cameraCount", {
@@ -523,7 +523,7 @@ export default function ProfilesView({
                       <Button
                         variant="ghost"
                         size="icon"
-                        className="size-7 shrink-0 text-muted-foreground hover:text-destructive"
+                        className="size-7 text-muted-foreground hover:text-destructive"
                         disabled={deleting && deleteProfile === profile}
                         onClick={(e) => {
                           e.stopPropagation();


@@ -131,35 +131,34 @@ export function SingleSectionPage({
   return (
     <div className="flex size-full flex-col lg:pr-2">
-      <div className="mb-5 flex flex-col gap-2">
-        <div className="flex items-center justify-between gap-4">
-          <div className="flex flex-col">
-            <Heading as="h4">
-              {t(`${sectionKey}.label`, { ns: sectionNamespace })}
-            </Heading>
-            {i18n.exists(`${sectionKey}.description`, {
-              ns: sectionNamespace,
-            }) && (
-              <div className="my-1 text-sm text-muted-foreground">
-                {t(`${sectionKey}.description`, { ns: sectionNamespace })}
-              </div>
-            )}
-            {sectionDocsUrl && (
-              <div className="flex items-center text-sm text-primary-variant">
-                <Link
-                  to={sectionDocsUrl}
-                  target="_blank"
-                  rel="noopener noreferrer"
-                  className="inline"
-                >
-                  {t("readTheDocumentation", { ns: "common" })}
-                  <LuExternalLink className="ml-2 inline-flex size-3" />
-                </Link>
-              </div>
-            )}
-          </div>
-          {/* Desktop: badge inline next to title */}
-          <div className="hidden shrink-0 sm:flex sm:flex-wrap sm:items-center sm:gap-2">
+      <div className="mb-5 flex items-center justify-between gap-4">
+        <div className="flex flex-col">
+          <Heading as="h4">
+            {t(`${sectionKey}.label`, { ns: sectionNamespace })}
+          </Heading>
+          {i18n.exists(`${sectionKey}.description`, {
+            ns: sectionNamespace,
+          }) && (
+            <div className="my-1 text-sm text-muted-foreground">
+              {t(`${sectionKey}.description`, { ns: sectionNamespace })}
+            </div>
+          )}
+          {sectionDocsUrl && (
+            <div className="flex items-center text-sm text-primary-variant">
+              <Link
+                to={sectionDocsUrl}
+                target="_blank"
+                rel="noopener noreferrer"
+                className="inline"
+              >
+                {t("readTheDocumentation", { ns: "common" })}
+                <LuExternalLink className="ml-2 inline-flex size-3" />
+              </Link>
+            </div>
+          )}
+        </div>
+        <div className="flex flex-col items-end gap-2 md:flex-row md:items-center">
+          <div className="flex flex-wrap items-center justify-end gap-2">
             {level === "camera" &&
               showOverrideIndicator &&
               sectionStatus.isOverridden && (
@@ -212,40 +211,6 @@ export function SingleSectionPage({
              )}
           </div>
         </div>
-        {/* Mobile: badge below title/description */}
-        <div className="flex flex-wrap items-center gap-2 sm:hidden">
-          {level === "camera" &&
-            showOverrideIndicator &&
-            sectionStatus.isOverridden && (
-              <Badge
-                variant="secondary"
-                className={cn(
-                  "cursor-default border-2 text-center text-xs text-primary-variant",
-                  sectionStatus.overrideSource === "profile" && profileColor
-                    ? profileColor.border
-                    : "border-selected",
-                )}
-              >
-                {sectionStatus.overrideSource === "profile"
-                  ? t("button.overriddenBaseConfig", {
-                      ns: "views/settings",
-                      defaultValue: "Overridden (Base Config)",
-                    })
-                  : t("button.overriddenGlobal", {
-                      ns: "views/settings",
-                      defaultValue: "Overridden (Global)",
-                    })}
-              </Badge>
-            )}
-          {sectionStatus.hasChanges && (
-            <Badge
-              variant="secondary"
-              className="cursor-default bg-danger text-xs text-white hover:bg-danger"
-            >
-              {t("modified", { ns: "common", defaultValue: "Modified" })}
-            </Badge>
-          )}
-        </div>
       </div>
 
       <ConfigSectionTemplate
         sectionKey={sectionKey}


@@ -107,10 +107,7 @@ export default function EnrichmentMetrics({
           };
         }
 
-        series[key].data.push({
-          x: statsIdx + 1,
-          y: rawKey.includes("description_speed") ? stat / 1000 : stat,
-        });
+        series[key].data.push({ x: statsIdx + 1, y: stat });
       });
     });
 
@@ -118,7 +115,6 @@ export default function EnrichmentMetrics({
     const grouped: {
      [category: string]: {
         categoryName: string;
-        unit: string;
         speedSeries?: {
           name: string;
           metrics: Threshold;
@@ -158,7 +154,6 @@ export default function EnrichmentMetrics({
       if (!(categoryKey in grouped)) {
         grouped[categoryKey] = {
           categoryName,
-          unit: categoryKey.includes("description") ? "s" : "ms",
           speedSeries: undefined,
           eventsSeries: undefined,
         };
@@ -201,7 +196,7 @@ export default function EnrichmentMetrics({
             key={`${group.categoryName}-speed`}
             graphId={`${group.categoryName}-inference`}
             name={t("enrichments.averageInf")}
-            unit={group.unit}
+            unit="ms"
             threshold={group.speedSeries.metrics}
             updateTimes={updateTimes}
             data={[group.speedSeries]}