Merge branch 'blakeblackshear:dev' into dev

This commit is contained in:
ibs0d 2026-03-21 12:42:30 +11:00 committed by GitHub
commit 1e10a2f746
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
60 changed files with 3826 additions and 726 deletions

17
.vscode/launch.json vendored
View File

@ -6,6 +6,23 @@
"type": "debugpy",
"request": "launch",
"module": "frigate"
},
{
"type": "editor-browser",
"request": "launch",
"name": "Vite: Launch in integrated browser",
"url": "http://localhost:5173"
},
{
"type": "editor-browser",
"request": "launch",
"name": "Nginx: Launch in integrated browser",
"url": "http://localhost:5000"
},
{
"type": "editor-browser",
"request": "attach",
"name": "Attach to integrated browser"
}
]
}

View File

@ -122,6 +122,17 @@ docs/ # Documentation site
migrations/ # Database migrations
```
## Translations
Frigate uses [Weblate](https://hosted.weblate.org/projects/frigate-nvr/) for managing language translations. If you'd like to help translate Frigate into your language:
1. Visit the [Frigate project on Weblate](https://hosted.weblate.org/projects/frigate-nvr/).
2. Create an account or log in.
3. Browse the available languages and select the one you'd like to contribute to, or request a new language.
4. Translate strings directly in the Weblate interface — no code changes or pull requests needed.
Translation contributions through Weblate are automatically synced to the repository. Please do not submit pull requests for translation changes — use Weblate instead so that translations are properly tracked and coordinated.
## Resources
- [Documentation](https://docs.frigate.video)

View File

@ -52,7 +52,7 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1 amd64/bin/ffmpeg amd64/bin/ffprobe
rm -rf ffmpeg.tar.xz
mkdir -p /usr/lib/ffmpeg/7.0
wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linux64-gpl-7.0.tar.xz"
wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2026-03-19-13-03/ffmpeg-n7.1.3-43-g5a1f107b4c-linux64-gpl-7.1.tar.xz"
tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1 amd64/bin/ffmpeg amd64/bin/ffprobe
rm -rf ffmpeg.tar.xz
fi
@ -64,7 +64,7 @@ if [[ "${TARGETARCH}" == "arm64" ]]; then
tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1 arm64/bin/ffmpeg arm64/bin/ffprobe
rm -f ffmpeg.tar.xz
mkdir -p /usr/lib/ffmpeg/7.0
wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linuxarm64-gpl-7.0.tar.xz"
wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2026-03-19-13-03/ffmpeg-n7.1.3-43-g5a1f107b4c-linuxarm64-gpl-7.1.tar.xz"
tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1 arm64/bin/ffmpeg arm64/bin/ffprobe
rm -f ffmpeg.tar.xz
fi

File diff suppressed because it is too large Load Diff

View File

@ -26,6 +26,11 @@ from frigate.api.defs.response.chat_response import (
from frigate.api.defs.tags import Tags
from frigate.api.event import events
from frigate.genai.utils import build_assistant_message_for_conversation
from frigate.jobs.vlm_watch import (
get_vlm_watch_job,
start_vlm_watch_job,
stop_vlm_watch_job,
)
logger = logging.getLogger(__name__)
@ -82,6 +87,16 @@ class ToolExecuteRequest(BaseModel):
arguments: Dict[str, Any]
class VLMMonitorRequest(BaseModel):
    """Request model for starting a VLM watch job."""

    # Camera ID (config key) to monitor.
    camera: str
    # Natural-language condition to watch for, e.g. "a person arrives".
    condition: str
    # Maximum watch time in minutes before the job gives up.
    max_duration_minutes: int = 60
    # Object labels that should trigger a VLM check; empty means any detection.
    labels: List[str] = []
    # Zone names to filter by; empty means no zone filtering.
    zones: List[str] = []
def get_tool_definitions() -> List[Dict[str, Any]]:
"""
Get OpenAI-compatible tool definitions for Frigate.
@ -95,9 +110,11 @@ def get_tool_definitions() -> List[Dict[str, Any]]:
"function": {
"name": "search_objects",
"description": (
"Search for detected objects in Frigate by camera, object label, time range, "
"zones, and other filters. Use this to answer questions about when "
"objects were detected, what objects appeared, or to find specific object detections. "
"Search the historical record of detected objects in Frigate. "
"Use this ONLY for questions about the PAST — e.g. 'did anyone come by today?', "
"'when was the last car?', 'show me detections from yesterday'. "
"Do NOT use this for monitoring or alerting requests about future events — "
"use start_camera_watch instead for those. "
"An 'object' in Frigate represents a tracked detection (e.g., a person, package, car). "
"When the user asks about a specific name (person, delivery company, animal, etc.), "
"filter by sub_label only and do not set label."
@ -201,10 +218,9 @@ def get_tool_definitions() -> List[Dict[str, Any]]:
"function": {
"name": "get_live_context",
"description": (
"Get the current detection information for a camera: objects being tracked, "
"Get the current live image and detection information for a camera: objects being tracked, "
"zones, timestamps. Use this to understand what is visible in the live view. "
"Call this when the user has included a live image (via include_live_image) or "
"when answering questions about what is happening right now on a specific camera."
"Call this when answering questions about what is happening right now on a specific camera."
),
"parameters": {
"type": "object",
@ -218,6 +234,65 @@ def get_tool_definitions() -> List[Dict[str, Any]]:
},
},
},
{
"type": "function",
"function": {
"name": "start_camera_watch",
"description": (
"Start a continuous VLM watch job that monitors a camera and sends a notification "
"when a specified condition is met. Use this when the user wants to be alerted about "
"a future event, e.g. 'tell me when guests arrive' or 'notify me when the package is picked up'. "
"Only one watch job can run at a time. Returns a job ID."
),
"parameters": {
"type": "object",
"properties": {
"camera": {
"type": "string",
"description": "Camera ID to monitor.",
},
"condition": {
"type": "string",
"description": (
"Natural-language description of the condition to watch for, "
"e.g. 'a person arrives at the front door'."
),
},
"max_duration_minutes": {
"type": "integer",
"description": "Maximum time to watch before giving up (minutes, default 60).",
"default": 60,
},
"labels": {
"type": "array",
"items": {"type": "string"},
"description": "Object labels that should trigger a VLM check (e.g. ['person', 'car']). If omitted, any detection on the camera triggers a check.",
},
"zones": {
"type": "array",
"items": {"type": "string"},
"description": "Zone names to filter by. If specified, only detections in these zones trigger a VLM check.",
},
},
"required": ["camera", "condition"],
},
},
},
{
"type": "function",
"function": {
"name": "stop_camera_watch",
"description": (
"Cancel the currently running VLM watch job. Use this when the user wants to "
"stop a previously started watch, e.g. 'stop watching the front door'."
),
"parameters": {
"type": "object",
"properties": {},
"required": [],
},
},
},
]
@ -384,12 +459,54 @@ async def _execute_get_live_context(
"stationary": obj_dict.get("stationary", False),
}
return {
result: Dict[str, Any] = {
"camera": camera,
"timestamp": frame_time,
"detections": list(tracked_objects_dict.values()),
}
# Grab live frame and handle based on provider configuration
image_url = await _get_live_frame_image_url(request, camera, allowed_cameras)
if image_url:
genai_manager = request.app.genai_manager
if genai_manager.tool_client is genai_manager.vision_client:
# Same provider handles both roles — pass image URL so it can
# be injected as a user message (images can't be in tool results)
result["_image_url"] = image_url
elif genai_manager.vision_client is not None:
# Separate vision provider — have it describe the image,
# providing detection context so it knows what to focus on
frame_bytes = _decode_data_url(image_url)
if frame_bytes:
detections = result.get("detections", [])
if detections:
detection_lines = []
for d in detections:
parts = [d.get("label", "unknown")]
if d.get("sub_label"):
parts.append(f"({d['sub_label']})")
if d.get("zones"):
parts.append(f"in {', '.join(d['zones'])}")
detection_lines.append(" ".join(parts))
context = (
"The following objects are currently being tracked: "
+ "; ".join(detection_lines)
+ "."
)
else:
context = "No objects are currently being tracked."
description = genai_manager.vision_client._send(
f"Describe what you see in this security camera image. "
f"{context} Focus on the scene, any visible activity, "
f"and details about the tracked objects.",
[frame_bytes],
)
if description:
result["image_description"] = description
return result
except Exception as e:
logger.error(f"Error executing get_live_context: {e}", exc_info=True)
return {
@ -405,8 +522,8 @@ async def _get_live_frame_image_url(
"""
Fetch the current live frame for a camera as a base64 data URL.
Returns None if the frame cannot be retrieved. Used when include_live_image
is set to attach the image to the first user message.
Returns None if the frame cannot be retrieved. Used by get_live_context
to attach the live image to the conversation.
"""
if (
camera not in allowed_cameras
@ -421,12 +538,12 @@ async def _get_live_frame_image_url(
if frame is None:
return None
height, width = frame.shape[:2]
max_dimension = 1024
if height > max_dimension or width > max_dimension:
scale = max_dimension / max(height, width)
target_height = 480
if height > target_height:
scale = target_height / height
frame = cv2.resize(
frame,
(int(width * scale), int(height * scale)),
(int(width * scale), target_height),
interpolation=cv2.INTER_AREA,
)
_, img_encoded = cv2.imencode(".jpg", frame, [cv2.IMWRITE_JPEG_QUALITY, 85])
@ -437,6 +554,17 @@ async def _get_live_frame_image_url(
return None
def _decode_data_url(data_url: str) -> Optional[bytes]:
"""Decode a base64 data URL to raw bytes."""
try:
# Format: data:image/jpeg;base64,<data>
_, encoded = data_url.split(",", 1)
return base64.b64decode(encoded)
except (ValueError, Exception) as e:
logger.debug("Failed to decode data URL: %s", e)
return None
async def _execute_set_camera_state(
request: Request,
arguments: Dict[str, Any],
@ -513,26 +641,91 @@ async def _execute_tool_internal(
)
return {"error": "Camera parameter is required"}
return await _execute_get_live_context(request, camera, allowed_cameras)
elif tool_name == "start_camera_watch":
return await _execute_start_camera_watch(request, arguments)
elif tool_name == "stop_camera_watch":
return _execute_stop_camera_watch()
else:
logger.error(
"Tool call failed: unknown tool %r. Expected one of: search_objects, get_live_context. "
"Arguments received: %s",
"Tool call failed: unknown tool %r. Expected one of: search_objects, get_live_context, "
"start_camera_watch, stop_camera_watch. Arguments received: %s",
tool_name,
json.dumps(arguments),
)
return {"error": f"Unknown tool: {tool_name}"}
async def _execute_start_camera_watch(
    request: Request,
    arguments: Dict[str, Any],
) -> Dict[str, Any]:
    """Tool handler: start a VLM watch job from LLM-provided arguments.

    Returns a result dict for the tool call — either an ``error`` entry or
    ``success``/``job_id``/``message`` on a successful start.
    """
    cam = arguments.get("camera", "").strip()
    watch_condition = arguments.get("condition", "").strip()
    timeout_minutes = int(arguments.get("max_duration_minutes", 60))
    trigger_labels = arguments.get("labels") or []
    trigger_zones = arguments.get("zones") or []

    # Both identifiers are mandatory for a meaningful watch.
    if not (cam and watch_condition):
        return {"error": "camera and condition are required."}

    app_config = request.app.frigate_config
    if cam not in app_config.cameras:
        return {"error": f"Camera '{cam}' not found."}

    manager = request.app.genai_manager
    # A watch needs a provider that can analyze frames; the tool client
    # doubles as the vision client when none is configured separately.
    if (manager.vision_client or manager.tool_client) is None:
        return {"error": "No vision/GenAI provider configured."}

    try:
        new_job_id = start_vlm_watch_job(
            camera=cam,
            condition=watch_condition,
            max_duration_minutes=timeout_minutes,
            config=app_config,
            frame_processor=request.app.detected_frames_processor,
            genai_manager=manager,
            dispatcher=request.app.dispatcher,
            labels=trigger_labels,
            zones=trigger_zones,
        )
    except RuntimeError as e:
        logger.error("Failed to start VLM watch job: %s", e, exc_info=True)
        return {"error": "Failed to start VLM watch job."}

    return {
        "success": True,
        "job_id": new_job_id,
        "message": (
            f"Now watching '{cam}' for: {watch_condition}. "
            f"You'll receive a notification when the condition is met (timeout: {timeout_minutes} min)."
        ),
    }
def _execute_stop_camera_watch() -> Dict[str, Any]:
    """Tool handler: cancel the active VLM watch job, if any."""
    if stop_vlm_watch_job():
        return {"success": True, "message": "Watch job cancelled."}
    return {"success": False, "message": "No active watch job to cancel."}
async def _execute_pending_tools(
pending_tool_calls: List[Dict[str, Any]],
request: Request,
allowed_cameras: List[str],
) -> tuple[List[ToolCall], List[Dict[str, Any]]]:
) -> tuple[List[ToolCall], List[Dict[str, Any]], List[Dict[str, Any]]]:
"""
Execute a list of tool calls; return (ToolCall list for API response, tool result dicts for conversation).
Execute a list of tool calls.
Returns:
(ToolCall list for API response,
tool result dicts for conversation,
extra messages to inject after tool results e.g. user messages with images)
"""
tool_calls_out: List[ToolCall] = []
tool_results: List[Dict[str, Any]] = []
extra_messages: List[Dict[str, Any]] = []
for tool_call in pending_tool_calls:
tool_name = tool_call["name"]
tool_args = tool_call.get("arguments") or {}
@ -569,6 +762,27 @@ async def _execute_pending_tools(
for evt in tool_result
if isinstance(evt, dict)
]
# Extract _image_url from get_live_context results — images can
# only be sent in user messages, not tool results
if isinstance(tool_result, dict) and "_image_url" in tool_result:
image_url = tool_result.pop("_image_url")
extra_messages.append(
{
"role": "user",
"content": [
{
"type": "text",
"text": f"Here is the current live image from camera '{tool_result.get('camera', 'unknown')}'.",
},
{
"type": "image_url",
"image_url": {"url": image_url},
},
],
}
)
result_content = (
json.dumps(tool_result)
if isinstance(tool_result, (dict, list))
@ -604,7 +818,7 @@ async def _execute_pending_tools(
"content": error_content,
}
)
return (tool_calls_out, tool_results)
return (tool_calls_out, tool_results, extra_messages)
@router.post(
@ -660,7 +874,13 @@ async def chat_completion(
if camera_config.friendly_name
else camera_id.replace("_", " ").title()
)
cameras_info.append(f" - {friendly_name} (ID: {camera_id})")
zone_names = list(camera_config.zones.keys())
if zone_names:
cameras_info.append(
f" - {friendly_name} (ID: {camera_id}, zones: {', '.join(zone_names)})"
)
else:
cameras_info.append(f" - {friendly_name} (ID: {camera_id})")
cameras_section = ""
if cameras_info:
@ -670,14 +890,6 @@ async def chat_completion(
+ "\n\nWhen users refer to cameras by their friendly name (e.g., 'Back Deck Camera'), use the corresponding camera ID (e.g., 'back_deck_cam') in tool calls."
)
live_image_note = ""
if body.include_live_image:
live_image_note = (
f"\n\nThe first user message includes a live image from camera "
f"'{body.include_live_image}'. Use get_live_context for that camera to get "
"current detection details (objects, zones) to aid in understanding the image."
)
system_prompt = f"""You are a helpful assistant for Frigate, a security camera NVR system. You help users answer questions about their cameras, detected objects, and events.
Current server local date and time: {current_date_str} at {current_time_str}
@ -687,7 +899,7 @@ Do not start your response with phrases like "I will check...", "Let me see...",
Always present times to the user in the server's local timezone. When tool results include start_time_local and end_time_local, use those exact strings when listing or describing detection times—do not convert or invent timestamps. Do not use UTC or ISO format with Z for the user-facing answer unless the tool result only provides Unix timestamps without local time fields.
When users ask about "today", "yesterday", "this week", etc., use the current date above as reference.
When searching for objects or events, use ISO 8601 format for dates (e.g., {current_date_str}T00:00:00Z for the start of today).
Always be accurate with time calculations based on the current date provided.{cameras_section}{live_image_note}"""
Always be accurate with time calculations based on the current date provided.{cameras_section}"""
conversation.append(
{
@ -696,7 +908,6 @@ Always be accurate with time calculations based on the current date provided.{ca
}
)
first_user_message_seen = False
for msg in body.messages:
msg_dict = {
"role": msg.role,
@ -707,21 +918,6 @@ Always be accurate with time calculations based on the current date provided.{ca
if msg.name:
msg_dict["name"] = msg.name
if (
msg.role == "user"
and not first_user_message_seen
and body.include_live_image
):
first_user_message_seen = True
image_url = await _get_live_frame_image_url(
request, body.include_live_image, allowed_cameras
)
if image_url:
msg_dict["content"] = [
{"type": "text", "text": msg.content},
{"type": "image_url", "image_url": {"url": image_url}},
]
conversation.append(msg_dict)
tool_iterations = 0
@ -779,11 +975,16 @@ Always be accurate with time calculations based on the current date provided.{ca
msg.get("content"), pending
)
)
executed_calls, tool_results = await _execute_pending_tools(
(
executed_calls,
tool_results,
extra_msgs,
) = await _execute_pending_tools(
pending, request, allowed_cameras
)
stream_tool_calls.extend(executed_calls)
conversation.extend(tool_results)
conversation.extend(extra_msgs)
yield (
json.dumps(
{
@ -890,11 +1091,12 @@ Always be accurate with time calculations based on the current date provided.{ca
f"Tool calls detected (iteration {tool_iterations}/{max_iterations}): "
f"{len(pending_tool_calls)} tool(s) to execute"
)
executed_calls, tool_results = await _execute_pending_tools(
executed_calls, tool_results, extra_msgs = await _execute_pending_tools(
pending_tool_calls, request, allowed_cameras
)
tool_calls.extend(executed_calls)
conversation.extend(tool_results)
conversation.extend(extra_msgs)
logger.debug(
f"Added {len(tool_results)} tool result(s) to conversation. "
f"Continuing with next LLM call..."
@ -924,3 +1126,95 @@ Always be accurate with time calculations based on the current date provided.{ca
},
status_code=500,
)
# ---------------------------------------------------------------------------
# VLM Monitor endpoints
# ---------------------------------------------------------------------------
@router.post(
    "/vlm/monitor",
    dependencies=[Depends(allow_any_authenticated())],
    summary="Start a VLM watch job",
    description=(
        "Start monitoring a camera with the vision provider. "
        "The VLM analyzes live frames until the specified condition is met, "
        "then sends a notification. Only one watch job can run at a time."
    ),
)
async def start_vlm_monitor(
    request: Request,
    body: VLMMonitorRequest,
) -> JSONResponse:
    """HTTP endpoint: start a VLM watch job for a camera."""
    frigate_config = request.app.frigate_config
    manager = request.app.genai_manager

    # Reject unknown cameras up front.
    if body.camera not in frigate_config.cameras:
        return JSONResponse(
            content={"success": False, "message": f"Camera '{body.camera}' not found."},
            status_code=404,
        )

    # The watch needs a provider that can analyze frames; the tool client
    # doubles as the vision client when none is configured separately.
    if (manager.vision_client or manager.tool_client) is None:
        return JSONResponse(
            content={
                "success": False,
                "message": "No vision/GenAI provider configured.",
            },
            status_code=400,
        )

    try:
        job_id = start_vlm_watch_job(
            camera=body.camera,
            condition=body.condition,
            max_duration_minutes=body.max_duration_minutes,
            config=frigate_config,
            frame_processor=request.app.detected_frames_processor,
            genai_manager=manager,
            dispatcher=request.app.dispatcher,
            labels=body.labels,
            zones=body.zones,
        )
    except RuntimeError as e:
        # 409: per the endpoint contract only one watch job may run at a time.
        logger.error("Failed to start VLM watch job: %s", e, exc_info=True)
        return JSONResponse(
            content={"success": False, "message": "Failed to start VLM watch job."},
            status_code=409,
        )

    return JSONResponse(content={"success": True, "job_id": job_id}, status_code=201)
@router.get(
    "/vlm/monitor",
    dependencies=[Depends(allow_any_authenticated())],
    summary="Get current VLM watch job",
    description="Returns the current (or most recently completed) VLM watch job.",
)
async def get_vlm_monitor() -> JSONResponse:
    """HTTP endpoint: report the state of the VLM watch job."""
    current = get_vlm_watch_job()
    if current is None:
        # No job has ever been started (or state was cleared).
        return JSONResponse(content={"active": False}, status_code=200)
    return JSONResponse(content={"active": True, **current.to_dict()}, status_code=200)
@router.delete(
    "/vlm/monitor",
    dependencies=[Depends(allow_any_authenticated())],
    summary="Cancel the current VLM watch job",
    description="Cancels the running watch job if one exists.",
)
async def cancel_vlm_monitor() -> JSONResponse:
    """HTTP endpoint: cancel the running VLM watch job."""
    if stop_vlm_watch_job():
        return JSONResponse(content={"success": True}, status_code=200)
    return JSONResponse(
        content={"success": False, "message": "No active watch job to cancel."},
        status_code=404,
    )

View File

@ -32,13 +32,6 @@ class ChatCompletionRequest(BaseModel):
le=10,
description="Maximum number of tool call iterations (default: 5)",
)
include_live_image: Optional[str] = Field(
default=None,
description=(
"If set, the current live frame from this camera is attached to the first "
"user message as multimodal content. Use with get_live_context for detection info."
),
)
stream: bool = Field(
default=False,
description="If true, stream the final assistant response in the body as newline-delimited JSON.",

View File

@ -8,7 +8,7 @@ from multiprocessing import Queue
from multiprocessing.managers import DictProxy, SyncManager
from multiprocessing.synchronize import Event as MpEvent
from pathlib import Path
from typing import Optional
from typing import Callable, Optional
import psutil
import uvicorn
@ -81,6 +81,7 @@ from frigate.timeline import TimelineProcessor
from frigate.track.object_processing import TrackedObjectProcessor
from frigate.util.builtin import empty_and_close_queue
from frigate.util.image import UntrackedSharedMemory
from frigate.util.process import FrigateProcess
from frigate.util.services import set_file_limit
from frigate.version import VERSION
from frigate.watchdog import FrigateWatchdog
@ -499,6 +500,47 @@ class FrigateApp:
def start_watchdog(self) -> None:
    """Create the watchdog and register restartable child processes with it."""
    self.frigate_watchdog = FrigateWatchdog(self.detectors, self.stop_event)

    # (attribute on self, key in self.processes, factory)
    specs: list[tuple[str, str, Callable[[], FrigateProcess]]] = [
        (
            "embedding_process",
            "embeddings",
            lambda: EmbeddingProcess(
                self.config, self.embeddings_metrics, self.stop_event
            ),
        ),
        (
            "recording_process",
            "recording",
            lambda: RecordProcess(self.config, self.stop_event),
        ),
        (
            "review_segment_process",
            "review_segment",
            lambda: ReviewProcess(self.config, self.stop_event),
        ),
        (
            "output_processor",
            "output",
            lambda: OutputProcess(self.config, self.stop_event),
        ),
    ]

    for attr, key, factory in specs:
        # Optional subsystems may never have been created on this app
        # instance; skip registration for those.
        if not hasattr(self, attr):
            continue

        # Default-arg binding (_attr=attr, _key=key) freezes the loop
        # variables so each callback refers to its own spec rather than
        # the last iteration's (late-binding closure pitfall).
        def on_restart(
            proc: FrigateProcess, _attr: str = attr, _key: str = key
        ) -> None:
            # Keep the app's attribute and the pid registry pointing at
            # the replacement process after a watchdog restart.
            setattr(self, _attr, proc)
            self.processes[_key] = proc.pid or 0

        self.frigate_watchdog.register(
            key, getattr(self, attr), factory, on_restart
        )

    self.frigate_watchdog.start()
def init_auth(self) -> None:

View File

@ -210,6 +210,15 @@ class WebPushClient(Communicator):
logger.debug(f"Notifications for {camera} are currently suspended.")
return
self.send_trigger(decoded)
elif topic == "camera_monitoring":
decoded = json.loads(payload)
camera = decoded["camera"]
if not self.config.cameras[camera].notifications.enabled:
return
if self.is_camera_suspended(camera):
logger.debug(f"Notifications for {camera} are currently suspended.")
return
self.send_camera_monitoring(decoded)
elif topic == "notification_test":
if not self.config.notifications.enabled and not any(
cam.notifications.enabled for cam in self.config.cameras.values()
@ -477,6 +486,30 @@ class WebPushClient(Communicator):
self.cleanup_registrations()
def send_camera_monitoring(self, payload: dict[str, Any]) -> None:
    """Push a camera-monitoring alert to all registered web-push users."""
    cam_id: str = payload["camera"]
    # Prefer the configured friendly name; fall back to a title-cased
    # version of the camera ID.
    display_name: str = getattr(
        self.config.cameras[cam_id], "friendly_name", None
    ) or titlecase(cam_id.replace("_", " "))

    self.check_registrations()

    reasoning: str = payload.get("reasoning", "")
    # Keep the notification body within 200 characters (197 + "...").
    if len(reasoning) > 200:
        message = reasoning[:197] + "..."
    else:
        message = reasoning
    title = f"{display_name}: Monitoring Alert"

    logger.debug(f"Sending camera monitoring push notification for {display_name}")
    for recipient in self.web_pushers:
        self.send_push_notification(
            user=recipient,
            payload=payload,
            title=title,
            message=message,
        )
    self.cleanup_registrations()
def stop(self) -> None:
logger.info("Closing notification queue")
self.notification_thread.join()

View File

@ -49,8 +49,8 @@ class StationaryConfig(FrigateBaseModel):
class DetectConfig(FrigateBaseModel):
enabled: bool = Field(
default=False,
title="Detection enabled",
description="Enable or disable object detection for all cameras; can be overridden per-camera. Detection must be enabled for object tracking to run.",
title="Enable object detection",
description="Enable or disable object detection for all cameras; can be overridden per-camera.",
)
height: Optional[int] = Field(
default=None,

View File

@ -29,7 +29,7 @@ class RetainConfig(FrigateBaseModel):
class SnapshotsConfig(FrigateBaseModel):
enabled: bool = Field(
default=False,
title="Snapshots enabled",
title="Enable snapshots",
description="Enable or disable saving snapshots for all cameras; can be overridden per-camera.",
)
clean_copy: bool = Field(

View File

@ -444,7 +444,7 @@ class FrigateConfig(FrigateBaseModel):
# GenAI config (named provider configs: name -> GenAIConfig)
genai: Dict[str, GenAIConfig] = Field(
default_factory=dict,
title="Generative AI configuration (named providers).",
title="Generative AI configuration",
description="Settings for integrated generative AI providers used to generate object descriptions and review summaries.",
)

View File

@ -4,7 +4,6 @@ import re
import urllib.request
from typing import Literal
import axengine as axe
from pydantic import ConfigDict
from frigate.const import MODEL_CACHE_DIR
@ -37,6 +36,12 @@ class Axengine(DetectionApi):
type_key = DETECTOR_KEY
def __init__(self, config: AxengineDetectorConfig):
try:
import axengine as axe
except ModuleNotFoundError:
raise ImportError("AXEngine is not installed.")
return
logger.info("__init__ axengine")
super().__init__(config)
self.height = config.model.height

View File

@ -120,10 +120,10 @@ PRESETS_HW_ACCEL_DECODE["preset-rk-h265"] = PRESETS_HW_ACCEL_DECODE[
PRESETS_HW_ACCEL_SCALE = {
"preset-rpi-64-h264": "-r {0} -vf fps={0},scale={1}:{2}",
"preset-rpi-64-h265": "-r {0} -vf fps={0},scale={1}:{2}",
FFMPEG_HWACCEL_VAAPI: "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=nv12,eq=gamma=1.4:gamma_weight=0.5",
"preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
FFMPEG_HWACCEL_NVIDIA: "-r {0} -vf fps={0},scale_cuda=w={1}:h={2},hwdownload,format=nv12,eq=gamma=1.4:gamma_weight=0.5",
FFMPEG_HWACCEL_VAAPI: "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=nv12",
"preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=w={1}:h={2}:format=nv12,hwdownload,format=nv12,fps={0},format=yuv420p",
"preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=w={1}:h={2}:format=nv12,hwdownload,format=nv12,fps={0},format=yuv420p",
FFMPEG_HWACCEL_NVIDIA: "-r {0} -vf fps={0},scale_cuda=w={1}:h={2},hwdownload,format=nv12",
"preset-jetson-h264": "-r {0}", # scaled in decoder
"preset-jetson-h265": "-r {0}", # scaled in decoder
FFMPEG_HWACCEL_RKMPP: "-r {0} -vf scale_rkrga=w={1}:h={2}:format=yuv420p:force_original_aspect_ratio=0,hwmap=mode=read,format=yuv420p",
@ -242,15 +242,6 @@ def parse_preset_hardware_acceleration_scale(
else:
scale = PRESETS_HW_ACCEL_SCALE.get(arg, PRESETS_HW_ACCEL_SCALE["default"])
if (
",hwdownload,format=nv12,eq=gamma=1.4:gamma_weight=0.5" in scale
and os.environ.get("FFMPEG_DISABLE_GAMMA_EQUALIZER") is not None
):
scale = scale.replace(
",hwdownload,format=nv12,eq=gamma=1.4:gamma_weight=0.5",
":format=nv12,hwdownload,format=nv12,format=yuv420p",
)
scale = scale.format(fps, width, height).split(" ")
scale.extend(detect_args)
return scale

405
frigate/jobs/vlm_watch.py Normal file
View File

@ -0,0 +1,405 @@
"""VLM watch job: continuously monitors a camera and notifies when a condition is met."""
import base64
import json
import logging
import re
import threading
import time
from dataclasses import asdict, dataclass, field
from datetime import datetime
from typing import Any, Optional
import cv2
from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import FrigateConfig
from frigate.const import UPDATE_JOB_STATE
from frigate.jobs.job import Job
from frigate.types import JobStatusTypesEnum
logger = logging.getLogger(__name__)
# Polling interval bounds (seconds)
_MIN_INTERVAL = 1
_MAX_INTERVAL = 300
# Max user/assistant turn pairs to keep in conversation history
_MAX_HISTORY = 10
@dataclass
class VLMWatchJob(Job):
    """Job state for a VLM watch monitor."""

    # Discriminator identifying this job type in status payloads.
    job_type: str = "vlm_watch"
    # Camera ID being monitored.
    camera: str = ""
    # Natural-language condition the VLM is watching for.
    condition: str = ""
    # Maximum watch duration (minutes) before the job times out.
    max_duration_minutes: int = 60
    # Object labels that trigger a VLM check; empty means any detection.
    labels: list = field(default_factory=list)
    # Zone names filtering which detections trigger a check; empty means all.
    zones: list = field(default_factory=list)
    # Most recent reasoning text produced by the vision model.
    last_reasoning: str = ""
    # Number of VLM analysis iterations run so far.
    iteration_count: int = 0

    def to_dict(self) -> dict[str, Any]:
        """Serialize the full job state (including Job base fields) to a dict."""
        return asdict(self)
class VLMWatchRunner(threading.Thread):
"""Background thread that polls a camera with the vision client until a condition is met."""
def __init__(
    self,
    job: VLMWatchJob,
    config: FrigateConfig,
    cancel_event: threading.Event,
    frame_processor,
    genai_manager,
    dispatcher,
) -> None:
    """Set up the watch thread.

    Args:
        job: Mutable job state, updated as the watch progresses.
        config: Full Frigate configuration.
        cancel_event: Set externally to stop the watch loop.
        frame_processor: Source of current camera frames.
        genai_manager: Provides the vision/tool GenAI clients.
        dispatcher: Frigate dispatcher, stored for use by the watch
            loop (exact usage not shown in this chunk — see run logic).
    """
    # Daemon thread so an in-flight watch never blocks process shutdown.
    super().__init__(daemon=True, name=f"vlm_watch_{job.id}")
    self.job = job
    self.config = config
    self.cancel_event = cancel_event
    self.frame_processor = frame_processor
    self.genai_manager = genai_manager
    self.dispatcher = dispatcher
    self.requestor = InterProcessRequestor()
    self.detection_subscriber = DetectionSubscriber(DetectionTypeEnum.video.value)
    # Rolling chat history sent to the vision client each iteration.
    self.conversation: list[dict[str, Any]] = []
def run(self) -> None:
    """Main watch loop: poll the VLM until the condition is met, the
    timeout elapses, or the job is cancelled."""
    self.job.status = JobStatusTypesEnum.running
    self.job.start_time = time.time()
    self._broadcast_status()

    # Seed the conversation with the system prompt; each iteration
    # appends a frame and the model's reply.
    self.conversation = [{"role": "system", "content": self._build_system_prompt()}]

    max_end_time = self.job.start_time + self.job.max_duration_minutes * 60

    try:
        while not self.cancel_event.is_set():
            # Hard timeout: mark as failed with an explanatory message.
            if time.time() > max_end_time:
                logger.debug(
                    "VLM watch job %s timed out after %d minutes",
                    self.job.id,
                    self.job.max_duration_minutes,
                )
                self.job.status = JobStatusTypesEnum.failed
                self.job.error_message = f"Monitor timed out after {self.job.max_duration_minutes} minutes"
                break

            # _run_iteration returns the delay (seconds) before the next
            # analysis; it flips status to success when the condition is met.
            next_run_in = self._run_iteration()

            if self.job.status == JobStatusTypesEnum.success:
                break

            self._wait_for_trigger(next_run_in)
    except Exception as e:
        logger.exception("VLM watch job %s failed: %s", self.job.id, e)
        self.job.status = JobStatusTypesEnum.failed
        self.job.error_message = str(e)
    finally:
        # Still "running" here means the loop exited via cancellation.
        if self.job.status == JobStatusTypesEnum.running:
            self.job.status = JobStatusTypesEnum.cancelled
        self.job.end_time = time.time()
        self._broadcast_status()
        # Best-effort cleanup of IPC resources; never raise from teardown.
        try:
            self.detection_subscriber.stop()
        except Exception:
            pass
        try:
            self.requestor.stop()
        except Exception:
            pass
def _run_iteration(self) -> float:
"""Run one VLM analysis iteration. Returns seconds until next run."""
vision_client = (
self.genai_manager.vision_client or self.genai_manager.tool_client
)
if vision_client is None:
logger.warning("VLM watch job %s: no vision client available", self.job.id)
return 30
frame = self.frame_processor.get_current_frame(self.job.camera, {})
if frame is None:
logger.debug(
"VLM watch job %s: frame unavailable for camera %s",
self.job.id,
self.job.camera,
)
self.job.last_reasoning = "Camera frame unavailable"
return 10
# Downscale frame to 480p max height
h, w = frame.shape[:2]
if h > 480:
scale = 480.0 / h
frame = cv2.resize(
frame, (int(w * scale), 480), interpolation=cv2.INTER_AREA
)
_, enc = cv2.imencode(".jpg", frame, [cv2.IMWRITE_JPEG_QUALITY, 85])
b64 = base64.b64encode(enc.tobytes()).decode()
timestamp = datetime.now().strftime("%H:%M:%S")
self.conversation.append(
{
"role": "user",
"content": [
{"type": "text", "text": f"Frame captured at {timestamp}."},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{b64}"},
},
],
}
)
response = vision_client.chat_with_tools(
messages=self.conversation,
tools=None,
tool_choice=None,
)
response_str = response.get("content") or ""
if not response_str:
logger.warning(
"VLM watch job %s: empty response from vision client", self.job.id
)
# Remove the user message we just added so we don't leave a dangling turn
self.conversation.pop()
return 30
logger.debug("VLM watch job %s response: %s", self.job.id, response_str)
self.conversation.append({"role": "assistant", "content": response_str})
# Keep system prompt + last _MAX_HISTORY user/assistant pairs
max_msgs = 1 + _MAX_HISTORY * 2
if len(self.conversation) > max_msgs:
self.conversation = [self.conversation[0]] + self.conversation[
-(max_msgs - 1) :
]
try:
clean = re.sub(
r"\n?```$", "", re.sub(r"^```[a-zA-Z0-9]*\n?", "", response_str)
)
parsed = json.loads(clean)
condition_met = bool(parsed.get("condition_met", False))
next_run_in = max(
_MIN_INTERVAL,
min(_MAX_INTERVAL, int(parsed.get("next_run_in", 30))),
)
reasoning = str(parsed.get("reasoning", ""))
except (json.JSONDecodeError, ValueError, TypeError) as e:
logger.warning(
"VLM watch job %s: failed to parse VLM response: %s", self.job.id, e
)
return 30
self.job.last_reasoning = reasoning
self.job.iteration_count += 1
self._broadcast_status()
if condition_met:
logger.debug(
"VLM watch job %s: condition met on camera %s%s",
self.job.id,
self.job.camera,
reasoning,
)
self._send_notification(reasoning)
self.job.status = JobStatusTypesEnum.success
return 0
return next_run_in
def _wait_for_trigger(self, max_wait: float) -> None:
"""Wait up to max_wait seconds, returning early if a relevant detection fires on the target camera."""
deadline = time.time() + max_wait
while not self.cancel_event.is_set():
remaining = deadline - time.time()
if remaining <= 0:
break
topic, payload = self.detection_subscriber.check_for_update(
timeout=min(1.0, remaining)
)
if topic is None or payload is None:
continue
# payload = (camera, frame_name, frame_time, tracked_objects, motion_boxes, regions)
cam = payload[0]
tracked_objects = payload[3]
logger.debug(
"VLM watch job %s: detection event cam=%s (want %s), objects=%s",
self.job.id,
cam,
self.job.camera,
[
{"label": o.get("label"), "zones": o.get("current_zones")}
for o in (tracked_objects or [])
],
)
if cam != self.job.camera or not tracked_objects:
continue
if self._detection_matches_filters(tracked_objects):
logger.debug(
"VLM watch job %s: woken early by detection event on %s",
self.job.id,
self.job.camera,
)
break
def _detection_matches_filters(self, tracked_objects: list) -> bool:
"""Return True if any tracked object passes the label and zone filters."""
labels = self.job.labels
zones = self.job.zones
for obj in tracked_objects:
label_ok = not labels or obj.get("label") in labels
zone_ok = not zones or bool(set(obj.get("current_zones", [])) & set(zones))
if label_ok and zone_ok:
return True
return False
def _build_system_prompt(self) -> str:
focus_text = ""
if self.job.labels or self.job.zones:
parts = []
if self.job.labels:
parts.append(f"object types: {', '.join(self.job.labels)}")
if self.job.zones:
parts.append(f"zones: {', '.join(self.job.zones)}")
focus_text = f"\nFocus on {' and '.join(parts)}.\n"
return (
f'You are monitoring a security camera. Your task: determine when "{self.job.condition}" occurs.\n'
f"{focus_text}\n"
f"You will receive a sequence of frames over time. Use the conversation history to understand "
f"what is stationary vs. actively changing.\n\n"
f"For each frame respond with JSON only:\n"
f'{{"condition_met": <true/false>, "next_run_in": <integer seconds 1-300>, "reasoning": "<brief explanation>"}}\n\n'
f"Guidelines for next_run_in:\n"
f"- Scene is empty / nothing of interest visible: 60-300.\n"
f"- Relevant object(s) visible anywhere in frame (even outside the target zone): 3-10. "
f"They may be moving toward the zone.\n"
f"- Condition is actively forming (object approaching zone or threshold): 1-5.\n"
f"- Set condition_met to true only when you are confident the condition is currently met.\n"
f"- Keep reasoning to 1-2 sentences."
)
def _send_notification(self, reasoning: str) -> None:
"""Publish a camera_monitoring event so downstream handlers (web push, MQTT) can notify users."""
payload = {
"camera": self.job.camera,
"condition": self.job.condition,
"reasoning": reasoning,
"job_id": self.job.id,
}
if self.dispatcher:
try:
self.dispatcher.publish("camera_monitoring", json.dumps(payload))
except Exception as e:
logger.warning(
"VLM watch job %s: failed to publish alert: %s", self.job.id, e
)
def _broadcast_status(self) -> None:
try:
self.requestor.send_data(UPDATE_JOB_STATE, self.job.to_dict())
except Exception as e:
logger.warning(
"VLM watch job %s: failed to broadcast status: %s", self.job.id, e
)
# Module-level singleton (only one watch job at a time)
_current_job: Optional[VLMWatchJob] = None  # most recently created job, if any
_cancel_event: Optional[threading.Event] = None  # cooperatively stops the active runner
_job_lock = threading.Lock()  # guards mutation of _current_job / _cancel_event
def start_vlm_watch_job(
    camera: str,
    condition: str,
    max_duration_minutes: int,
    config: FrigateConfig,
    frame_processor,
    genai_manager,
    dispatcher,
    labels: list[str] | None = None,
    zones: list[str] | None = None,
) -> str:
    """Create and launch a new VLM watch job, returning its ID.

    Only one watch job may be active at a time.

    Raises:
        RuntimeError: if a job is already queued or running.
    """
    global _current_job, _cancel_event
    with _job_lock:
        # Refuse to start while another job is still live.
        already_active = _current_job is not None and _current_job.status in (
            JobStatusTypesEnum.queued,
            JobStatusTypesEnum.running,
        )
        if already_active:
            raise RuntimeError(
                f"A VLM watch job is already running (id={_current_job.id}). "
                "Cancel it before starting a new one."
            )
        new_job = VLMWatchJob(
            camera=camera,
            condition=condition,
            max_duration_minutes=max_duration_minutes,
            labels=labels or [],
            zones=zones or [],
        )
        stop_signal = threading.Event()
        # Publish the singleton state before the thread starts so that
        # status queries and cancellation see the new job immediately.
        _current_job = new_job
        _cancel_event = stop_signal
        VLMWatchRunner(
            job=new_job,
            config=config,
            cancel_event=stop_signal,
            frame_processor=frame_processor,
            genai_manager=genai_manager,
            dispatcher=dispatcher,
        ).start()
        logger.debug(
            "Started VLM watch job %s: camera=%s, condition=%r, max_duration=%dm",
            new_job.id,
            camera,
            condition,
            max_duration_minutes,
        )
        return new_job.id
def stop_vlm_watch_job() -> bool:
    """Signal cancellation of the active VLM watch job.

    Returns:
        True when a queued/running job was told to stop; False when no
        job was active.
    """
    global _current_job, _cancel_event
    with _job_lock:
        job = _current_job
        is_active = job is not None and job.status in (
            JobStatusTypesEnum.queued,
            JobStatusTypesEnum.running,
        )
        if not is_active:
            return False
        if _cancel_event is not None:
            _cancel_event.set()
        # Mark cancelled up front; the runner thread finalizes timestamps.
        job.status = JobStatusTypesEnum.cancelled
        logger.debug("Cancelled VLM watch job %s", job.id)
        return True
def get_vlm_watch_job() -> Optional[VLMWatchJob]:
    """Return the current (or most recent) VLM watch job, or None if none was ever started."""
    return _current_job

View File

@ -73,9 +73,8 @@ class TestFfmpegPresets(unittest.TestCase):
assert "preset-nvidia-h264" not in (
" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
)
assert (
"fps=10,scale_cuda=w=2560:h=1920,hwdownload,format=nv12,eq=gamma=1.4:gamma_weight=0.5"
in (" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]))
assert "fps=10,scale_cuda=w=2560:h=1920,hwdownload,format=nv12" in (
" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
)
def test_default_ffmpeg_input_arg_preset(self):

View File

@ -2,19 +2,111 @@ import datetime
import logging
import threading
import time
from collections import deque
from dataclasses import dataclass, field
from multiprocessing.synchronize import Event as MpEvent
from typing import Callable
from frigate.object_detection.base import ObjectDetectProcess
from frigate.util.process import FrigateProcess
from frigate.util.services import restart_frigate
logger = logging.getLogger(__name__)
MAX_RESTARTS = 5
RESTART_WINDOW_S = 60
@dataclass
class MonitoredProcess:
"""A process monitored by the watchdog for automatic restart."""
name: str
process: FrigateProcess
factory: Callable[[], FrigateProcess]
on_restart: Callable[[FrigateProcess], None] | None = None
restart_timestamps: deque[float] = field(
default_factory=lambda: deque(maxlen=MAX_RESTARTS)
)
def is_restarting_too_fast(self, now: float) -> bool:
while (
self.restart_timestamps
and now - self.restart_timestamps[0] > RESTART_WINDOW_S
):
self.restart_timestamps.popleft()
return len(self.restart_timestamps) >= MAX_RESTARTS
class FrigateWatchdog(threading.Thread):
def __init__(self, detectors: dict[str, ObjectDetectProcess], stop_event: MpEvent):
def __init__(
self,
detectors: dict[str, ObjectDetectProcess],
stop_event: MpEvent,
):
super().__init__(name="frigate_watchdog")
self.detectors = detectors
self.stop_event = stop_event
self._monitored: list[MonitoredProcess] = []
def register(
self,
name: str,
process: FrigateProcess,
factory: Callable[[], FrigateProcess],
on_restart: Callable[[FrigateProcess], None] | None = None,
) -> None:
"""Register a FrigateProcess for monitoring and automatic restart."""
self._monitored.append(
MonitoredProcess(
name=name,
process=process,
factory=factory,
on_restart=on_restart,
)
)
def _check_process(self, entry: MonitoredProcess) -> None:
if entry.process.is_alive():
return
exitcode = entry.process.exitcode
if exitcode == 0:
logger.info("Process %s exited cleanly, not restarting", entry.name)
return
logger.warning(
"Process %s (PID %s) exited with code %s",
entry.name,
entry.process.pid,
exitcode,
)
now = datetime.datetime.now().timestamp()
if entry.is_restarting_too_fast(now):
logger.error(
"Process %s restarting too frequently (%d times in %ds), backing off",
entry.name,
MAX_RESTARTS,
RESTART_WINDOW_S,
)
return
try:
entry.process.close()
new_process = entry.factory()
new_process.start()
entry.process = new_process
entry.restart_timestamps.append(now)
if entry.on_restart:
entry.on_restart(new_process)
logger.info("Restarted %s (PID %s)", entry.name, new_process.pid)
except Exception:
logger.exception("Failed to restart %s", entry.name)
def run(self) -> None:
time.sleep(10)
@ -38,4 +130,7 @@ class FrigateWatchdog(threading.Thread):
logger.info("Detection appears to have stopped. Exiting Frigate...")
restart_frigate()
for entry in self._monitored:
self._check_process(entry)
logger.info("Exiting watchdog...")

View File

@ -518,6 +518,15 @@ def main():
sanitize_camera_descriptions(camera_translations)
# Profiles contain the same sections as the camera itself; only keep
# label and description to avoid duplicating every camera section.
if "profiles" in camera_translations:
camera_translations["profiles"] = {
k: v
for k, v in camera_translations["profiles"].items()
if k in ("label", "description")
}
with open(cameras_file, "w", encoding="utf-8") as f:
json.dump(camera_translations, f, indent=2, ensure_ascii=False)
f.write("\n")

View File

@ -79,8 +79,8 @@
"label": "Object Detection",
"description": "Settings for the detection/detect role used to run object detection and initialize trackers.",
"enabled": {
"label": "Detection enabled",
"description": "Enable or disable object detection for this camera. Detection must be enabled for object tracking to run."
"label": "Enable object detection",
"description": "Enable or disable object detection for this camera."
},
"height": {
"label": "Detect height",
@ -628,7 +628,7 @@
"label": "Snapshots",
"description": "Settings for saved JPEG snapshots of tracked objects for this camera.",
"enabled": {
"label": "Snapshots enabled",
"label": "Enable snapshots",
"description": "Enable or disable saving snapshots for this camera."
},
"clean_copy": {
@ -860,6 +860,10 @@
"label": "Camera URL",
"description": "URL to visit the camera directly from system page"
},
"profiles": {
"label": "Profiles",
"description": "Named config profiles with partial overrides that can be activated at runtime."
},
"zones": {
"label": "Zones",
"description": "Zones allow you to define a specific area of the frame so you can determine whether or not an object is within a particular area.",

View File

@ -1174,7 +1174,7 @@
}
},
"genai": {
"label": "Generative AI configuration (named providers).",
"label": "Generative AI configuration",
"description": "Settings for integrated generative AI providers used to generate object descriptions and review summaries.",
"api_key": {
"label": "API key",
@ -1293,8 +1293,8 @@
"label": "Object Detection",
"description": "Settings for the detection/detect role used to run object detection and initialize trackers.",
"enabled": {
"label": "Detection enabled",
"description": "Enable or disable object detection for all cameras; can be overridden per-camera. Detection must be enabled for object tracking to run."
"label": "Enable object detection",
"description": "Enable or disable object detection for all cameras; can be overridden per-camera."
},
"height": {
"label": "Detect height",
@ -1778,7 +1778,7 @@
"label": "Snapshots",
"description": "Settings for saved JPEG snapshots of tracked objects for all cameras; can be overridden per-camera.",
"enabled": {
"label": "Snapshots enabled",
"label": "Enable snapshots",
"description": "Enable or disable saving snapshots for all cameras; can be overridden per-camera."
},
"clean_copy": {
@ -2128,6 +2128,18 @@
"description": "Numeric order used to sort camera groups in the UI; larger numbers appear later."
}
},
"profiles": {
"label": "Profiles",
"description": "Named profile definitions with friendly names. Camera profiles must reference names defined here.",
"friendly_name": {
"label": "Friendly name",
"description": "Display name for this profile shown in the UI."
}
},
"active_profile": {
"label": "Active profile",
"description": "Currently active profile name. Runtime-only, not persisted in YAML."
},
"camera_mqtt": {
"label": "MQTT",
"description": "MQTT image publishing settings.",

View File

@ -1402,6 +1402,18 @@
"audio": "Audio"
}
},
"genaiRoles": {
"options": {
"embeddings": "Embedding",
"vision": "Vision",
"tools": "Tools"
}
},
"semanticSearchModel": {
"placeholder": "Select model…",
"builtIn": "Built-in Models",
"genaiProviders": "GenAI Providers"
},
"review": {
"title": "Review Settings"
},

View File

@ -13,7 +13,7 @@ const audio: SectionConfigOverrides = {
"num_threads",
],
fieldGroups: {
detection: ["enabled", "listen", "filters"],
detection: ["listen", "filters"],
sensitivity: ["min_volume", "max_not_heard"],
},
hiddenFields: ["enabled_in_config"],

View File

@ -18,7 +18,7 @@ const detect: SectionConfigOverrides = {
],
restartRequired: [],
fieldGroups: {
resolution: ["enabled", "width", "height", "fps"],
resolution: ["width", "height", "fps"],
tracking: ["min_initialized", "max_disappeared"],
},
hiddenFields: ["enabled_in_config"],

View File

@ -6,7 +6,7 @@ const faceRecognition: SectionConfigOverrides = {
restartRequired: [],
fieldOrder: ["enabled", "min_area"],
hiddenFields: [],
advancedFields: ["min_area"],
advancedFields: [],
overrideFields: ["enabled", "min_area"],
},
global: {

View File

@ -4,39 +4,50 @@ const genai: SectionConfigOverrides = {
base: {
sectionDocs: "/configuration/genai/config",
restartRequired: [
"provider",
"api_key",
"base_url",
"model",
"provider_options",
"runtime_options",
"*.provider",
"*.api_key",
"*.base_url",
"*.model",
"*.provider_options",
"*.runtime_options",
],
fieldOrder: [
"provider",
"api_key",
"base_url",
"model",
"provider_options",
"runtime_options",
],
advancedFields: ["base_url", "provider_options", "runtime_options"],
advancedFields: ["*.base_url", "*.provider_options", "*.runtime_options"],
hiddenFields: ["genai.enabled_in_config"],
uiSchema: {
api_key: {
"ui:options": { size: "md" },
"ui:options": { disableNestedCard: true },
"*": {
"ui:options": { disableNestedCard: true },
"ui:order": [
"provider",
"api_key",
"base_url",
"model",
"provider_options",
"runtime_options",
"*",
],
},
base_url: {
"*.roles": {
"ui:widget": "genaiRoles",
},
"*.api_key": {
"ui:options": { size: "lg" },
},
model: {
"ui:options": { size: "md" },
"*.base_url": {
"ui:options": { size: "lg" },
},
provider_options: {
"*.model": {
"ui:options": { size: "xs" },
},
"*.provider": {
"ui:options": { size: "xs" },
},
"*.provider_options": {
additionalProperties: {
"ui:options": { size: "lg" },
},
},
runtime_options: {
"*.runtime_options": {
additionalProperties: {
"ui:options": { size: "lg" },
},

View File

@ -7,9 +7,9 @@ const lpr: SectionConfigOverrides = {
enhancement: "/configuration/license_plate_recognition#enhancement",
},
restartRequired: [],
fieldOrder: ["enabled", "expire_time", "min_area", "enhancement"],
fieldOrder: ["enabled", "min_area", "enhancement", "expire_time"],
hiddenFields: [],
advancedFields: ["expire_time", "min_area", "enhancement"],
advancedFields: ["expire_time", "enhancement"],
overrideFields: ["enabled", "min_area", "enhancement"],
},
global: {

View File

@ -23,7 +23,7 @@ const motion: SectionConfigOverrides = {
"mqtt_off_delay",
],
fieldGroups: {
sensitivity: ["enabled", "threshold", "contour_area"],
sensitivity: ["threshold", "contour_area"],
algorithm: ["improve_contrast", "delta_alpha", "frame_alpha"],
},
uiSchema: {

View File

@ -15,7 +15,7 @@ const record: SectionConfigOverrides = {
"export",
],
fieldGroups: {
retention: ["enabled", "continuous", "motion"],
retention: ["continuous", "motion"],
events: ["alerts", "detections"],
},
hiddenFields: ["enabled_in_config", "sync_recordings"],

View File

@ -18,6 +18,11 @@ const semanticSearch: SectionConfigOverrides = {
advancedFields: ["reindex", "device"],
restartRequired: ["enabled", "model", "model_size", "device"],
hiddenFields: ["reindex"],
uiSchema: {
model: {
"ui:widget": "semanticSearchModel",
},
},
},
};

View File

@ -13,7 +13,7 @@ const snapshots: SectionConfigOverrides = {
"retain",
],
fieldGroups: {
display: ["enabled", "bounding_box", "crop", "quality", "timestamp"],
display: ["bounding_box", "crop", "quality", "timestamp"],
},
hiddenFields: ["enabled_in_config"],
advancedFields: ["height", "quality", "retain"],

View File

@ -936,7 +936,7 @@ export function ConfigSection({
</span>
</div>
)}
<div className="flex w-full items-center gap-2 md:w-auto">
<div className="flex w-full flex-col gap-2 sm:flex-row sm:items-center md:w-auto">
{((effectiveLevel === "camera" && isOverridden) ||
effectiveLevel === "global") &&
!hasChanges &&

View File

@ -23,10 +23,12 @@ import { AudioLabelSwitchesWidget } from "./widgets/AudioLabelSwitchesWidget";
import { ZoneSwitchesWidget } from "./widgets/ZoneSwitchesWidget";
import { ArrayAsTextWidget } from "./widgets/ArrayAsTextWidget";
import { FfmpegArgsWidget } from "./widgets/FfmpegArgsWidget";
import { GenAIRolesWidget } from "./widgets/GenAIRolesWidget";
import { InputRolesWidget } from "./widgets/InputRolesWidget";
import { TimezoneSelectWidget } from "./widgets/TimezoneSelectWidget";
import { CameraPathWidget } from "./widgets/CameraPathWidget";
import { OptionalFieldWidget } from "./widgets/OptionalFieldWidget";
import { SemanticSearchModelWidget } from "./widgets/SemanticSearchModelWidget";
import { FieldTemplate } from "./templates/FieldTemplate";
import { ObjectFieldTemplate } from "./templates/ObjectFieldTemplate";
@ -60,6 +62,7 @@ export const frigateTheme: FrigateTheme = {
ArrayAsTextWidget: ArrayAsTextWidget,
FfmpegArgsWidget: FfmpegArgsWidget,
CameraPathWidget: CameraPathWidget,
genaiRoles: GenAIRolesWidget,
inputRoles: InputRolesWidget,
// Custom widgets
switch: SwitchWidget,
@ -75,6 +78,7 @@ export const frigateTheme: FrigateTheme = {
zoneNames: ZoneSwitchesWidget,
timezoneSelect: TimezoneSelectWidget,
optionalField: OptionalFieldWidget,
semanticSearchModel: SemanticSearchModelWidget,
},
templates: {
FieldTemplate: FieldTemplate as React.ComponentType<FieldTemplateProps>,

View File

@ -311,51 +311,54 @@ export function ObjectFieldTemplate(props: ObjectFieldTemplateProps) {
return null;
}
const grouped = new Set<string>();
const groups = Object.entries(groupDefinitions)
.map(([groupKey, fields]) => {
const ordered = fields
.map((field) => items.find((item) => item.name === field))
.filter(Boolean) as (typeof properties)[number][];
// Build a lookup: field name → group info
const fieldToGroup = new Map<
string,
{ groupKey: string; label: string; items: (typeof properties)[number][] }
>();
const hasGroups = Object.keys(groupDefinitions).length > 0;
if (ordered.length === 0) {
return null;
}
for (const [groupKey, fields] of Object.entries(groupDefinitions)) {
const ordered = fields
.map((field) => items.find((item) => item.name === field))
.filter(Boolean) as (typeof properties)[number][];
ordered.forEach((item) => grouped.add(item.name));
if (ordered.length === 0) continue;
const label = domain
? t(`${sectionI18nPrefix}.${domain}.${groupKey}`, {
ns: "config/groups",
defaultValue: humanizeKey(groupKey),
})
: t(`groups.${groupKey}`, {
defaultValue: humanizeKey(groupKey),
});
const label = domain
? t(`${sectionI18nPrefix}.${domain}.${groupKey}`, {
ns: "config/groups",
defaultValue: humanizeKey(groupKey),
})
: t(`groups.${groupKey}`, {
defaultValue: humanizeKey(groupKey),
});
return {
key: groupKey,
label,
items: ordered,
};
})
.filter(Boolean) as Array<{
key: string;
label: string;
items: (typeof properties)[number][];
}>;
const groupInfo = { groupKey, label, items: ordered };
for (const item of ordered) {
fieldToGroup.set(item.name, groupInfo);
}
}
const ungrouped = items.filter((item) => !grouped.has(item.name));
const isObjectLikeField = (item: (typeof properties)[number]) => {
const fieldSchema = (item.content.props as RjsfElementProps)?.schema;
return fieldSchema?.type === "object";
};
return (
<div className="space-y-6">
{groups.map((group) => (
// Walk items in order (respects fieldOrder / ui:order).
// When we hit the first field of a group, render the whole group block.
// Skip subsequent fields that belong to an already-rendered group.
const renderedGroups = new Set<string>();
const elements: React.ReactNode[] = [];
for (const item of items) {
const group = fieldToGroup.get(item.name);
if (group) {
if (renderedGroups.has(group.groupKey)) continue;
renderedGroups.add(group.groupKey);
elements.push(
<div
key={group.key}
key={group.groupKey}
className="space-y-4 rounded-lg border border-border/70 bg-card/30 p-4"
>
<div className="text-md border-b border-border/60 pb-4 font-semibold text-primary-variant">
@ -366,25 +369,21 @@ export function ObjectFieldTemplate(props: ObjectFieldTemplateProps) {
<div key={element.name}>{element.content}</div>
))}
</div>
</div>
))}
</div>,
);
} else {
elements.push(
<div
key={item.name}
className={cn(hasGroups && !isObjectLikeField(item) && "px-4")}
>
{item.content}
</div>,
);
}
}
{ungrouped.length > 0 && (
<div className={cn("space-y-6", groups.length > 0 && "pt-2")}>
{ungrouped.map((element) => (
<div
key={element.name}
className={cn(
groups.length > 0 && !isObjectLikeField(element) && "px-4",
)}
>
{element.content}
</div>
))}
</div>
)}
</div>
);
return <div className="space-y-6">{elements}</div>;
};
// Root level renders children directly
@ -456,7 +455,7 @@ export function ObjectFieldTemplate(props: ObjectFieldTemplateProps) {
<CollapsibleTrigger asChild>
<CardHeader className="cursor-pointer p-4 transition-colors hover:bg-muted/50">
<div className="flex items-center justify-between">
<div>
<div className="min-w-0 pr-3">
<CardTitle
className={cn(
"flex items-center text-sm",
@ -475,9 +474,9 @@ export function ObjectFieldTemplate(props: ObjectFieldTemplateProps) {
)}
</div>
{isOpen ? (
<LuChevronDown className="h-4 w-4" />
<LuChevronDown className="h-4 w-4 shrink-0" />
) : (
<LuChevronRight className="h-4 w-4" />
<LuChevronRight className="h-4 w-4 shrink-0" />
)}
</div>
</CardHeader>

View File

@ -0,0 +1,109 @@
import type { WidgetProps } from "@rjsf/utils";
import { useMemo } from "react";
import { useTranslation } from "react-i18next";
import { Switch } from "@/components/ui/switch";
import type { ConfigFormContext } from "@/types/configForm";
const GENAI_ROLES = ["embeddings", "vision", "tools"] as const;
function normalizeValue(value: unknown): string[] {
if (Array.isArray(value)) {
return value.filter((item): item is string => typeof item === "string");
}
if (typeof value === "string" && value.trim()) {
return [value.trim()];
}
return [];
}
function getProviderKey(widgetId: string): string | undefined {
const prefix = "root_";
const suffix = "_roles";
if (!widgetId.startsWith(prefix) || !widgetId.endsWith(suffix)) {
return undefined;
}
return widgetId.slice(prefix.length, -suffix.length) || undefined;
}
export function GenAIRolesWidget(props: WidgetProps) {
const { id, value, disabled, readonly, onChange, registry } = props;
const { t } = useTranslation(["views/settings"]);
const formContext = registry?.formContext as ConfigFormContext | undefined;
const selectedRoles = useMemo(() => normalizeValue(value), [value]);
const providerKey = useMemo(() => getProviderKey(id), [id]);
// Compute occupied roles directly from formData. The computation is
// trivially cheap (iterate providers × 3 roles max) so we skip an
// intermediate memoization layer whose formData dependency would
// never produce a cache hit (new object reference on every change).
const occupiedRoles = useMemo(() => {
const occupied = new Set<string>();
const fd = formContext?.formData;
if (!fd || typeof fd !== "object") return occupied;
for (const [provider, config] of Object.entries(
fd as Record<string, unknown>,
)) {
if (provider === providerKey) continue;
if (!config || typeof config !== "object" || Array.isArray(config))
continue;
for (const role of normalizeValue(
(config as Record<string, unknown>).roles,
)) {
occupied.add(role);
}
}
return occupied;
}, [formContext?.formData, providerKey]);
const toggleRole = (role: string, enabled: boolean) => {
if (enabled) {
if (!selectedRoles.includes(role)) {
onChange([...selectedRoles, role]);
}
return;
}
onChange(selectedRoles.filter((item) => item !== role));
};
return (
<div className="rounded-lg border border-secondary-highlight bg-background_alt p-2 pr-0 md:max-w-md">
<div className="grid gap-2">
{GENAI_ROLES.map((role) => {
const checked = selectedRoles.includes(role);
const roleDisabled = !checked && occupiedRoles.has(role);
const label = t(`configForm.genaiRoles.options.${role}`, {
ns: "views/settings",
defaultValue: role,
});
return (
<div
key={role}
className="flex items-center justify-between rounded-md px-3 py-0"
>
<label htmlFor={`${id}-${role}`} className="text-sm">
{label}
</label>
<Switch
id={`${id}-${role}`}
checked={checked}
disabled={disabled || readonly || roleDisabled}
onCheckedChange={(enabled) => toggleRole(role, !!enabled)}
/>
</div>
);
})}
</div>
</div>
);
}

View File

@ -0,0 +1,159 @@
// Combobox widget for semantic_search.model field.
// Shows built-in model enum values and GenAI providers with the embeddings role.
import { useState, useMemo } from "react";
import type { WidgetProps } from "@rjsf/utils";
import { useTranslation } from "react-i18next";
import { Check, ChevronsUpDown } from "lucide-react";
import { cn } from "@/lib/utils";
import { Button } from "@/components/ui/button";
import {
Command,
CommandGroup,
CommandItem,
CommandList,
} from "@/components/ui/command";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/ui/popover";
import type { ConfigFormContext } from "@/types/configForm";
import { getSizedFieldClassName } from "../utils";
interface ProviderOption {
value: string;
label: string;
}
export function SemanticSearchModelWidget(props: WidgetProps) {
const { id, value, disabled, readonly, onChange, schema, registry, options } =
props;
const { t } = useTranslation(["views/settings"]);
const [open, setOpen] = useState(false);
const formContext = registry?.formContext as ConfigFormContext | undefined;
const fieldClassName = getSizedFieldClassName(options, "sm");
// Built-in model options from schema.examples (populated by transformer
// collapsing the anyOf enum+string union)
const builtInModels: ProviderOption[] = useMemo(() => {
const examples = (schema as Record<string, unknown>).examples;
if (!Array.isArray(examples)) return [];
return examples
.filter((v): v is string => typeof v === "string")
.map((v) => ({ value: v, label: v }));
}, [schema]);
// GenAI providers that have the "embeddings" role
const embeddingsProviders: ProviderOption[] = useMemo(() => {
const genai = (
formContext?.fullConfig as Record<string, unknown> | undefined
)?.genai;
if (!genai || typeof genai !== "object" || Array.isArray(genai)) return [];
const providers: ProviderOption[] = [];
for (const [key, config] of Object.entries(
genai as Record<string, unknown>,
)) {
if (!config || typeof config !== "object" || Array.isArray(config))
continue;
const roles = (config as Record<string, unknown>).roles;
if (Array.isArray(roles) && roles.includes("embeddings")) {
providers.push({ value: key, label: key });
}
}
return providers;
}, [formContext?.fullConfig]);
const currentLabel =
builtInModels.find((m) => m.value === value)?.label ??
embeddingsProviders.find((p) => p.value === value)?.label ??
(typeof value === "string" && value ? value : undefined);
return (
<Popover open={open} onOpenChange={setOpen}>
<PopoverTrigger asChild>
<Button
id={id}
type="button"
variant="outline"
role="combobox"
aria-expanded={open}
disabled={disabled || readonly}
className={cn(
"justify-between font-normal",
!currentLabel && "text-muted-foreground",
fieldClassName,
)}
>
{currentLabel ??
t("configForm.semanticSearchModel.placeholder", {
ns: "views/settings",
defaultValue: "Select model…",
})}
<ChevronsUpDown className="ml-2 h-4 w-4 shrink-0 opacity-50" />
</Button>
</PopoverTrigger>
<PopoverContent className="w-[--radix-popover-trigger-width] p-0">
<Command>
<CommandList>
{builtInModels.length > 0 && (
<CommandGroup
heading={t("configForm.semanticSearchModel.builtIn", {
ns: "views/settings",
defaultValue: "Built-in Models",
})}
>
{builtInModels.map((model) => (
<CommandItem
key={model.value}
value={model.value}
onSelect={() => {
onChange(model.value);
setOpen(false);
}}
>
<Check
className={cn(
"mr-2 h-4 w-4",
value === model.value ? "opacity-100" : "opacity-0",
)}
/>
{model.label}
</CommandItem>
))}
</CommandGroup>
)}
{embeddingsProviders.length > 0 && (
<CommandGroup
heading={t("configForm.semanticSearchModel.genaiProviders", {
ns: "views/settings",
defaultValue: "GenAI Providers",
})}
>
{embeddingsProviders.map((provider) => (
<CommandItem
key={provider.value}
value={provider.value}
onSelect={() => {
onChange(provider.value);
setOpen(false);
}}
>
<Check
className={cn(
"mr-2 h-4 w-4",
value === provider.value ? "opacity-100" : "opacity-0",
)}
/>
{provider.label}
</CommandItem>
))}
</CommandGroup>
)}
</CommandList>
</Command>
</PopoverContent>
</Popover>
);
}

View File

@ -98,8 +98,8 @@ function normalizeNullableSchema(schema: RJSFSchema): RJSFSchema {
: ["null"];
const { anyOf: _anyOf, oneOf: _oneOf, ...rest } = schemaObj;
const merged: Record<string, unknown> = {
...rest,
...normalizedNonNullObj,
...rest,
type: mergedType,
};
// When unwrapping a nullable enum, add null to the enum list so
@ -110,6 +110,39 @@ function normalizeNullableSchema(schema: RJSFSchema): RJSFSchema {
return merged as RJSFSchema;
}
// Handle anyOf where a plain string branch subsumes a string-enum branch
// (e.g. Union[StrEnum, str] or Union[StrEnum, str, None]).
// Collapse to a single string type with enum values preserved as `examples`.
const stringBranches = anyOf.filter(
(item) =>
isSchemaObject(item) &&
(item as Record<string, unknown>).type === "string",
);
const enumBranch = stringBranches.find((item) =>
Array.isArray((item as Record<string, unknown>).enum),
);
const plainStringBranch = stringBranches.find(
(item) => !Array.isArray((item as Record<string, unknown>).enum),
);
if (
enumBranch &&
plainStringBranch &&
anyOf.length === stringBranches.length + (hasNull ? 1 : 0)
) {
const enumValues = (enumBranch as Record<string, unknown>).enum as
| unknown[]
| undefined;
const { anyOf: _anyOf, oneOf: _oneOf, ...rest } = schemaObj;
return {
...rest,
type: hasNull ? ["string", "null"] : "string",
...(enumValues && enumValues.length > 0
? { examples: enumValues }
: {}),
} as RJSFSchema;
}
return {
...schemaObj,
anyOf: anyOf
@ -142,8 +175,8 @@ function normalizeNullableSchema(schema: RJSFSchema): RJSFSchema {
: ["null"];
const { anyOf: _anyOf, oneOf: _oneOf, ...rest } = schemaObj;
const merged: Record<string, unknown> = {
...rest,
...normalizedNonNullObj,
...rest,
type: mergedType,
};
// When unwrapping a nullable oneOf enum, add null to the enum list.

View File

@ -24,8 +24,8 @@ export const EmbeddingThreshold = {
} as Threshold;
export const GenAIThreshold = {
warning: 30000,
error: 60000,
warning: 60,
error: 120,
} as Threshold;
export const DetectorTempThreshold = {

View File

@ -372,24 +372,25 @@ export default function MotionSearchROICanvas({
/>
)}
{/* Vertex points */}
{scaledPoints.map((point, index) => (
<Circle
key={index}
name={`point-${index}`}
x={point[0]}
y={point[1]}
radius={vertexRadius}
fill={polygonColorString}
stroke="white"
strokeWidth={2}
draggable={!isDrawing && isInteractive}
onDragMove={(e) => handlePointDragMove(e, index)}
onMouseOver={(e) => handleMouseOverPoint(e, index)}
onMouseOut={(e) => handleMouseOutPoint(e, index)}
onContextMenu={(e) => handleContextMenu(e, index)}
/>
))}
{/* Vertex points (only shown in interactive/dialog mode) */}
{isInteractive &&
scaledPoints.map((point, index) => (
<Circle
key={index}
name={`point-${index}`}
x={point[0]}
y={point[1]}
radius={vertexRadius}
fill={polygonColorString}
stroke="white"
strokeWidth={2}
draggable={!isDrawing}
onDragMove={(e) => handlePointDragMove(e, index)}
onMouseOver={(e) => handleMouseOverPoint(e, index)}
onMouseOut={(e) => handleMouseOutPoint(e, index)}
onContextMenu={(e) => handleContextMenu(e, index)}
/>
))}
</Layer>
</Stage>
)}

View File

@ -385,7 +385,7 @@ export default function ProfilesView({
{/* Active Profile + Add Profile bar */}
{(hasProfiles || profilesUIEnabled) && (
<div className="my-4 flex items-center justify-between rounded-lg border border-border/70 bg-card/30 p-4">
<div className="my-4 flex flex-col gap-3 rounded-lg border border-border/70 bg-card/30 p-4 sm:flex-row sm:items-center sm:justify-between">
{hasProfiles && (
<div className="flex items-center gap-3">
<span className="text-sm font-semibold text-primary-variant">
@ -470,12 +470,12 @@ export default function ProfilesView({
)}
>
<CollapsibleTrigger asChild>
<div className="flex cursor-pointer items-center justify-between px-4 py-3 hover:bg-secondary/30">
<div className="flex items-center gap-3">
<div className="flex cursor-pointer flex-wrap items-center gap-y-2 px-4 py-3 hover:bg-secondary/30">
<div className="flex min-w-0 items-center gap-3">
{isExpanded ? (
<LuChevronDown className="size-4 text-muted-foreground" />
<LuChevronDown className="size-4 shrink-0 text-muted-foreground" />
) : (
<LuChevronRight className="size-4 text-muted-foreground" />
<LuChevronRight className="size-4 shrink-0 text-muted-foreground" />
)}
<span
className={cn(
@ -483,13 +483,13 @@ export default function ProfilesView({
color.dot,
)}
/>
<span className="font-medium">
<span className="truncate font-medium">
{profileFriendlyNames?.get(profile) ?? profile}
</span>
<Button
variant="ghost"
size="icon"
className="size-6 text-muted-foreground hover:text-primary"
className="size-6 shrink-0 text-muted-foreground hover:text-primary"
onClick={(e) => {
e.stopPropagation();
setRenameProfile(profile);
@ -500,6 +500,8 @@ export default function ProfilesView({
>
<Pencil className="size-3" />
</Button>
</div>
<div className="ml-auto flex items-center gap-3">
{isActive && (
<Badge
variant="secondary"
@ -508,8 +510,6 @@ export default function ProfilesView({
{t("profiles.active", { ns: "views/settings" })}
</Badge>
)}
</div>
<div className="flex items-center gap-3">
<span className="text-sm text-muted-foreground">
{cameras.length > 0
? t("profiles.cameraCount", {
@ -523,7 +523,7 @@ export default function ProfilesView({
<Button
variant="ghost"
size="icon"
className="size-7 text-muted-foreground hover:text-destructive"
className="size-7 shrink-0 text-muted-foreground hover:text-destructive"
disabled={deleting && deleteProfile === profile}
onClick={(e) => {
e.stopPropagation();

View File

@ -131,34 +131,35 @@ export function SingleSectionPage({
return (
<div className="flex size-full flex-col lg:pr-2">
<div className="mb-5 flex items-center justify-between gap-4">
<div className="flex flex-col">
<Heading as="h4">
{t(`${sectionKey}.label`, { ns: sectionNamespace })}
</Heading>
{i18n.exists(`${sectionKey}.description`, {
ns: sectionNamespace,
}) && (
<div className="my-1 text-sm text-muted-foreground">
{t(`${sectionKey}.description`, { ns: sectionNamespace })}
</div>
)}
{sectionDocsUrl && (
<div className="flex items-center text-sm text-primary-variant">
<Link
to={sectionDocsUrl}
target="_blank"
rel="noopener noreferrer"
className="inline"
>
{t("readTheDocumentation", { ns: "common" })}
<LuExternalLink className="ml-2 inline-flex size-3" />
</Link>
</div>
)}
</div>
<div className="flex flex-col items-end gap-2 md:flex-row md:items-center">
<div className="flex flex-wrap items-center justify-end gap-2">
<div className="mb-5 flex flex-col gap-2">
<div className="flex items-center justify-between gap-4">
<div className="flex flex-col">
<Heading as="h4">
{t(`${sectionKey}.label`, { ns: sectionNamespace })}
</Heading>
{i18n.exists(`${sectionKey}.description`, {
ns: sectionNamespace,
}) && (
<div className="my-1 text-sm text-muted-foreground">
{t(`${sectionKey}.description`, { ns: sectionNamespace })}
</div>
)}
{sectionDocsUrl && (
<div className="flex items-center text-sm text-primary-variant">
<Link
to={sectionDocsUrl}
target="_blank"
rel="noopener noreferrer"
className="inline"
>
{t("readTheDocumentation", { ns: "common" })}
<LuExternalLink className="ml-2 inline-flex size-3" />
</Link>
</div>
)}
</div>
{/* Desktop: badge inline next to title */}
<div className="hidden shrink-0 sm:flex sm:flex-wrap sm:items-center sm:gap-2">
{level === "camera" &&
showOverrideIndicator &&
sectionStatus.isOverridden && (
@ -211,6 +212,40 @@ export function SingleSectionPage({
)}
</div>
</div>
{/* Mobile: badge below title/description */}
<div className="flex flex-wrap items-center gap-2 sm:hidden">
{level === "camera" &&
showOverrideIndicator &&
sectionStatus.isOverridden && (
<Badge
variant="secondary"
className={cn(
"cursor-default border-2 text-center text-xs text-primary-variant",
sectionStatus.overrideSource === "profile" && profileColor
? profileColor.border
: "border-selected",
)}
>
{sectionStatus.overrideSource === "profile"
? t("button.overriddenBaseConfig", {
ns: "views/settings",
defaultValue: "Overridden (Base Config)",
})
: t("button.overriddenGlobal", {
ns: "views/settings",
defaultValue: "Overridden (Global)",
})}
</Badge>
)}
{sectionStatus.hasChanges && (
<Badge
variant="secondary"
className="cursor-default bg-danger text-xs text-white hover:bg-danger"
>
{t("modified", { ns: "common", defaultValue: "Modified" })}
</Badge>
)}
</div>
</div>
<ConfigSectionTemplate
sectionKey={sectionKey}

View File

@ -107,7 +107,10 @@ export default function EnrichmentMetrics({
};
}
series[key].data.push({ x: statsIdx + 1, y: stat });
series[key].data.push({
x: statsIdx + 1,
y: rawKey.includes("description_speed") ? stat / 1000 : stat,
});
});
});
@ -115,6 +118,7 @@ export default function EnrichmentMetrics({
const grouped: {
[category: string]: {
categoryName: string;
unit: string;
speedSeries?: {
name: string;
metrics: Threshold;
@ -154,6 +158,7 @@ export default function EnrichmentMetrics({
if (!(categoryKey in grouped)) {
grouped[categoryKey] = {
categoryName,
unit: categoryKey.includes("description") ? "s" : "ms",
speedSeries: undefined,
eventsSeries: undefined,
};
@ -196,7 +201,7 @@ export default function EnrichmentMetrics({
key={`${group.categoryName}-speed`}
graphId={`${group.categoryName}-inference`}
name={t("enrichments.averageInf")}
unit="ms"
unit={group.unit}
threshold={group.speedSeries.metrics}
updateTimes={updateTimes}
data={[group.speedSeries]}