Miscellaneous fixes (#23124)
Some checks are pending
CI / AMD64 Build (push) Waiting to run
CI / ARM Build (push) Waiting to run
CI / Jetson Jetpack 6 (push) Waiting to run
CI / AMD64 Extra Build (push) Blocked by required conditions
CI / ARM Extra Build (push) Blocked by required conditions
CI / Synaptics Build (push) Blocked by required conditions
CI / Assemble and push default build (push) Blocked by required conditions

* use continuous expire date when loading reviews for recording cleanup

* reset heatmap filter when motion preview camera changes

* Add note about speed zones unit when enabled

* don't display fps warning for dedicated LPR cameras

* language tweaks

* allow changing camera type from management UI

* i18n

* fix ollama tool calling failure when conversation contains multimodal content from live frame tool results

* fix mypy

---------

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
This commit is contained in:
Josh Hawkins 2026-05-06 11:01:50 -05:00 committed by GitHub
parent 704ee9667c
commit 5211590866
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7 changed files with 288 additions and 10 deletions

View File

@ -37,6 +37,7 @@ from frigate.api.defs.response.chat_response import (
from frigate.api.defs.tags import Tags
from frigate.api.event import events
from frigate.config import FrigateConfig
from frigate.config.ui import UnitSystemEnum
from frigate.genai.utils import build_assistant_message_for_conversation
from frigate.jobs.vlm_watch import (
get_vlm_watch_job,
@ -1301,6 +1302,7 @@ async def chat_completion(
cameras_info = []
config = request.app.frigate_config
has_speed_zone = False
for camera_id in allowed_cameras:
if camera_id not in config.cameras:
continue
@ -1311,6 +1313,10 @@ async def chat_completion(
else camera_id.replace("_", " ").title()
)
zone_names = list(camera_config.zones.keys())
if not has_speed_zone:
has_speed_zone = any(
zone.distances for zone in camera_config.zones.values()
)
if zone_names:
cameras_info.append(
f" - {friendly_name} (ID: {camera_id}, zones: {', '.join(zone_names)})"
@ -1326,6 +1332,13 @@ async def chat_completion(
+ "\n\nWhen users refer to cameras by their friendly name (e.g., 'Back Deck Camera'), use the corresponding camera ID (e.g., 'back_deck_cam') in tool calls."
)
speed_units_section = ""
if has_speed_zone:
speed_unit = (
"mph" if config.ui.unit_system == UnitSystemEnum.imperial else "km/h"
)
speed_units_section = f"\n\nReport object speeds to the user in {speed_unit}."
system_prompt = f"""You are a helpful assistant for Frigate, a security camera NVR system. You help users answer questions about their cameras, detected objects, and events.
Current server local date and time: {current_date_str} at {current_time_str}
@ -1337,7 +1350,7 @@ When users ask about "today", "yesterday", "this week", etc., use the current da
When searching for objects or events, use ISO 8601 format for dates (e.g., {current_date_str}T00:00:00Z for the start of today).
Always be accurate with time calculations based on the current date provided.
When a user refers to a specific object they have seen or describe with identifying details ("that green car", "the person in the red jacket", "a package left today"), prefer the find_similar_objects tool over search_objects. Use search_objects first only to locate the anchor event, then pass its id to find_similar_objects. For generic queries like "show me all cars today", keep using search_objects. If a user message begins with [attached_event:<id>], treat that event id as the anchor for any similarity or "tell me more" request in the same message and call find_similar_objects with that id.{cameras_section}"""
When a user refers to a specific object they have seen or describe with identifying details ("that green car", "the person in the red jacket", "a package left today"), prefer the find_similar_objects tool over search_objects. Use search_objects first only to locate the anchor event, then pass its id to find_similar_objects. For generic queries like "show me all cars today", keep using search_objects. If a user message begins with [attached_event:<id>], treat that event id as the anchor for any similarity or "tell me more" request in the same message and call find_similar_objects with that id.{cameras_section}{speed_units_section}"""
conversation.append(
{

View File

@ -1,5 +1,7 @@
"""Ollama Provider for Frigate AI."""
import base64
import binascii
import json
import logging
from typing import Any, AsyncGenerator, Optional
@ -16,6 +18,41 @@ from frigate.genai.utils import parse_tool_calls_from_message
logger = logging.getLogger(__name__)
def _normalize_multimodal_content(
content: Any,
) -> tuple[Optional[str], Optional[list[bytes]]]:
"""Convert OpenAI-style multimodal content to Ollama's (text, images) shape.
The chat API constructs user messages with content as a list of
``{"type": "text"}`` and ``{"type": "image_url"}`` parts when a tool
returns a live frame. Ollama's SDK requires content to be a string and
images to be passed in a separate field, so we extract each.
"""
if not isinstance(content, list):
return content, None
text_parts: list[str] = []
images: list[bytes] = []
for part in content:
if not isinstance(part, dict):
continue
part_type = part.get("type")
if part_type == "text":
text = part.get("text")
if text:
text_parts.append(str(text))
elif part_type == "image_url":
url = (part.get("image_url") or {}).get("url", "")
if isinstance(url, str) and url.startswith("data:"):
try:
encoded = url.split(",", 1)[1]
images.append(base64.b64decode(encoded, validate=True))
except (ValueError, IndexError, binascii.Error) as e:
logger.debug("Failed to decode multimodal image url: %s", e)
return ("\n".join(text_parts) if text_parts else None), (images or None)
@register_genai_provider(GenAIProviderEnum.ollama)
class OllamaClient(GenAIClient):
"""Generative AI client for Frigate using Ollama."""
@ -207,10 +244,13 @@ class OllamaClient(GenAIClient):
"""Build request_messages and params for chat (sync or stream)."""
request_messages = []
for msg in messages:
msg_dict = {
content, images = _normalize_multimodal_content(msg.get("content", ""))
msg_dict: dict[str, Any] = {
"role": msg.get("role"),
"content": msg.get("content", ""),
"content": content if content is not None else "",
}
if images:
msg_dict["images"] = images
if msg.get("tool_call_id"):
msg_dict["tool_call_id"] = msg["tool_call_id"]
if msg.get("name"):

View File

@ -351,9 +351,11 @@ class RecordingCleanup(threading.Thread):
)
.where(
ReviewSegment.camera == camera,
# need to ensure segments for all reviews starting
# before the expire date are included
ReviewSegment.start_time < motion_expire_date,
# candidate recordings can extend up to continuous_expire_date
# (the no-motion no-audio branch of the recordings query),
# so reviews must cover that full range to avoid deleting
# segments that overlap recent alerts/detections.
ReviewSegment.start_time < continuous_expire_date,
)
.order_by(ReviewSegment.start_time)
.namedtuples()

View File

@ -512,6 +512,14 @@
"inherit": "Inherit",
"enabled": "Enabled",
"disabled": "Disabled"
},
"cameraType": {
"title": "Camera Type",
"label": "Camera type",
"description": "Set the type for each camera. Dedicated LPR cameras are single-purpose cameras with powerful optical zoom to capture license plates on distant vehicles. Most cameras should use the normal camera type unless the camera is specifically for LPR and has a tightly focused view on license plates.",
"normal": "Normal",
"dedicatedLpr": "Dedicated LPR",
"saveSuccess": "Updated camera type for {{cameraName}}. Restart Frigate to apply the changes."
}
},
"cameraReview": {
@ -1652,14 +1660,14 @@
"audioDetectionDisabled": "Audio detection is not enabled for this camera. Audio transcription requires audio detection to be active."
},
"detect": {
"fpsGreaterThanFive": "Setting the detect FPS higher than 5 is not recommended."
"fpsGreaterThanFive": "Setting the detect FPS higher than 5 is not recommended. Higher values may cause performance issues and will not provide any benefit."
},
"faceRecognition": {
"globalDisabled": "Face recognition is not enabled at the global level. Enable it in global settings for camera-level face recognition to function.",
"globalDisabled": "Face recognition is not enabled at the global level. Enable it in Enrichments for camera-level face recognition to function.",
"personNotTracked": "Face recognition requires the 'person' object to be tracked. Ensure 'person' is in the object tracking list."
},
"lpr": {
"globalDisabled": "License plate recognition is not enabled at the global level. Enable it in global settings for camera-level LPR to function.",
"globalDisabled": "License plate recognition is not enabled at the global level. Enable it in Enrichments for camera-level LPR to function.",
"vehicleNotTracked": "License plate recognition requires 'car' or 'motorcycle' to be tracked."
},
"record": {

View File

@ -12,6 +12,7 @@ const detect: SectionConfigOverrides = {
position: "after",
condition: (ctx) => {
if (ctx.level !== "camera" || !ctx.fullCameraConfig) return false;
if (ctx.fullCameraConfig.type === "lpr") return false;
const detectFps = ctx.formData?.fps as number | undefined;
const streamFps = ctx.fullCameraConfig.detect?.fps;
return detectFps != null && streamFps != null && detectFps > 5;

View File

@ -1136,6 +1136,12 @@ function MotionReview({
);
const [isRegionFilterOpen, setIsRegionFilterOpen] = useState(false);
// reset filter when camera changes
useEffect(() => {
setMotionFilterCells(new Set());
setPendingFilterCells(new Set());
}, [motionPreviewsCamera]);
const objectReviewItems = useMemo(
() =>
(overlapReviewSegments ?? []).filter(

View File

@ -14,8 +14,10 @@ import { useTranslation } from "react-i18next";
import CameraEditForm from "@/components/settings/CameraEditForm";
import CameraWizardDialog from "@/components/settings/CameraWizardDialog";
import DeleteCameraDialog from "@/components/overlay/dialog/DeleteCameraDialog";
import { LuPencil, LuPlus, LuTrash2 } from "react-icons/lu";
import { LuExternalLink, LuPencil, LuPlus, LuTrash2 } from "react-icons/lu";
import { IoMdArrowRoundBack } from "react-icons/io";
import { Link } from "react-router-dom";
import { useDocDomain } from "@/hooks/use-doc-domain";
import { isDesktop } from "react-device-detect";
import { CameraNameLabel } from "@/components/camera/FriendlyNameLabel";
import { Switch } from "@/components/ui/switch";
@ -89,6 +91,13 @@ export default function CameraManagementView({
return [];
}, [config]);
const allCameras = useMemo(() => {
if (config) {
return Object.keys(config.cameras).sort();
}
return [];
}, [config]);
useEffect(() => {
document.title = t("documentTitle.cameraManagement");
}, [t]);
@ -235,6 +244,15 @@ export default function CameraManagementView({
onConfigChanged={updateConfig}
/>
)}
{config?.lpr?.enabled && allCameras.length > 0 && (
<CameraTypeSection
cameras={allCameras}
config={config}
onConfigChanged={updateConfig}
setRestartDialogOpen={setRestartDialogOpen}
/>
)}
</div>
</>
) : (
@ -497,6 +515,196 @@ function CameraConfigEnableSwitch({
);
}
// Props for the per-camera type selector card on the camera management page.
type CameraTypeSectionProps = {
  // Sorted ids of all configured cameras.
  cameras: string[];
  config: FrigateConfig | undefined;
  // Re-fetches the config after a successful save.
  onConfigChanged: () => Promise<unknown>;
  // Opens the restart dialog so the user can apply the change immediately.
  setRestartDialogOpen: React.Dispatch<React.SetStateAction<boolean>>;
};
// Settings card listing every camera with a "normal" / "lpr" (dedicated LPR)
// type selector. Saving PUTs a partial config update with requires_restart
// and keeps an optimistic local override until Frigate is restarted.
function CameraTypeSection({
  cameras,
  config,
  onConfigChanged,
  setRestartDialogOpen,
}: CameraTypeSectionProps) {
  const { t } = useTranslation([
    "views/settings",
    "common",
    "components/dialog",
  ]);
  const { getLocaleDocUrl } = useDocDomain();
  // Camera id currently being saved; its row shows a spinner instead of the select.
  const [savingCamera, setSavingCamera] = useState<string | null>(null);
  // Optimistic local state: the parsed config API doesn't reflect type
  // changes until Frigate restarts, so we track saved values locally.
  const [localOverrides, setLocalOverrides] = useState<Record<string, string>>(
    {},
  );
  // Persist the selected type for one camera.
  // NOTE(review): "normal" is sent as null — presumably this clears the
  // camera's `type` override rather than writing it; confirm against the
  // config/set endpoint's semantics.
  const handleTypeChange = useCallback(
    async (camera: string, value: string) => {
      setSavingCamera(camera);
      try {
        const typeValue = value === "lpr" ? "lpr" : null;
        await axios.put("config/set", {
          requires_restart: 1,
          config_data: {
            cameras: {
              [camera]: {
                type: typeValue,
              },
            },
          },
        });
        await onConfigChanged();
        // Remember the saved value locally so the UI reflects it pre-restart.
        setLocalOverrides((prev) => ({
          ...prev,
          [camera]: value,
        }));
        toast.success(
          t("cameraManagement.cameraType.saveSuccess", {
            ns: "views/settings",
            cameraName: camera,
          }),
          {
            position: "top-center",
            // Offer a one-click restart from the success toast.
            action: (
              <a onClick={() => setRestartDialogOpen(true)}>
                <Button>
                  {t("restart.button", { ns: "components/dialog" })}
                </Button>
              </a>
            ),
          },
        );
      } catch (error) {
        // Prefer the server-provided message/detail; fall back to a generic one.
        const errorMessage =
          axios.isAxiosError(error) &&
          (error.response?.data?.message || error.response?.data?.detail)
            ? error.response?.data?.message || error.response?.data?.detail
            : t("toast.save.error.noMessage", { ns: "common" });
        toast.error(
          t("toast.save.error.title", { errorMessage, ns: "common" }),
          { position: "top-center" },
        );
      } finally {
        setSavingCamera(null);
      }
    },
    [onConfigChanged, setRestartDialogOpen, t],
  );
  // Effective type for a camera: optimistic local override wins, then the
  // parsed config; anything other than "lpr" is treated as "normal".
  const getCameraType = useCallback(
    (camera: string): string => {
      const localValue = localOverrides[camera];
      if (localValue) return localValue;
      const type = config?.cameras?.[camera]?.type;
      return type === "lpr" ? "lpr" : "normal";
    },
    [config, localOverrides],
  );
  // The description and documentation link are rendered twice: once for
  // desktop (md:block / md:flex) and once for mobile (md:hidden).
  return (
    <SettingsGroupCard
      title={t("cameraManagement.cameraType.title", {
        ns: "views/settings",
      })}
    >
      <div className={SPLIT_ROW_CLASS_NAME}>
        <div className="space-y-1.5">
          <Label>
            {t("cameraManagement.cameraType.label", {
              ns: "views/settings",
            })}
            <RestartRequiredIndicator className="ml-1" />
          </Label>
          <p className="hidden text-sm text-muted-foreground md:block">
            {t("cameraManagement.cameraType.description", {
              ns: "views/settings",
            })}
          </p>
          <div className="hidden items-center text-sm text-primary md:flex">
            <Link
              to={getLocaleDocUrl(
                "configuration/license_plate_recognition#dedicated-lpr-cameras",
              )}
              target="_blank"
              rel="noopener noreferrer"
              className="inline"
            >
              {t("readTheDocumentation", { ns: "common" })}
              <LuExternalLink className="ml-2 inline-flex size-3" />
            </Link>
          </div>
        </div>
        <div className={`${CONTROL_COLUMN_CLASS_NAME} space-y-1.5`}>
          <div className="max-w-md space-y-2 rounded-lg bg-secondary p-4">
            {cameras.map((camera) => {
              const currentType = getCameraType(camera);
              const isSaving = savingCamera === camera;
              return (
                <div
                  key={camera}
                  className="flex flex-row items-center justify-between"
                >
                  <CameraNameLabel camera={camera} />
                  {isSaving ? (
                    <ActivityIndicator className="h-5 w-20" size={16} />
                  ) : (
                    <Select
                      value={currentType}
                      onValueChange={(v) => handleTypeChange(camera, v)}
                    >
                      <SelectTrigger className="h-7 w-full max-w-[140px] text-xs">
                        <SelectValue />
                      </SelectTrigger>
                      <SelectContent>
                        <SelectItem value="normal">
                          {t("cameraManagement.cameraType.normal", {
                            ns: "views/settings",
                          })}
                        </SelectItem>
                        <SelectItem value="lpr">
                          {t("cameraManagement.cameraType.dedicatedLpr", {
                            ns: "views/settings",
                          })}
                        </SelectItem>
                      </SelectContent>
                    </Select>
                  )}
                </div>
              );
            })}
          </div>
          <p className="text-sm text-muted-foreground md:hidden">
            {t("cameraManagement.cameraType.description", {
              ns: "views/settings",
            })}
          </p>
          <div className="flex items-center text-sm text-primary md:hidden">
            <Link
              to={getLocaleDocUrl(
                "configuration/license_plate_recognition#dedicated-lpr-cameras",
              )}
              target="_blank"
              rel="noopener noreferrer"
              className="inline"
            >
              {t("readTheDocumentation", { ns: "common" })}
              <LuExternalLink className="ml-2 inline-flex size-3" />
            </Link>
          </div>
        </div>
      </div>
    </SettingsGroupCard>
  );
}
type ProfileCameraEnableSectionProps = {
profileState: ProfileState;
cameras: string[];