Compare commits

...

6 Commits

Author SHA1 Message Date
Josh Hawkins
d014a3412e fix mypy 2026-05-06 09:26:31 -05:00
Josh Hawkins
d5effc52c0 fix ollama tool calling failure when conversation contains multimodal content from live frame tool results 2026-05-06 09:16:23 -05:00
Josh Hawkins
1972ba41fa i18n 2026-05-06 08:55:00 -05:00
Josh Hawkins
f23e09d107 allow changing camera type from management UI 2026-05-06 08:52:02 -05:00
Josh Hawkins
2e2956db13 language tweaks 2026-05-06 07:56:57 -05:00
Josh Hawkins
b1aad2bc6d don't display fps warning for dedicated LPR cameras 2026-05-06 07:56:44 -05:00
4 changed files with 263 additions and 6 deletions

View File

@@ -1,5 +1,7 @@
"""Ollama Provider for Frigate AI."""
import base64
import binascii
import json
import logging
from typing import Any, AsyncGenerator, Optional
@@ -16,6 +18,41 @@ from frigate.genai.utils import parse_tool_calls_from_message
logger = logging.getLogger(__name__)
def _normalize_multimodal_content(
content: Any,
) -> tuple[Optional[str], Optional[list[bytes]]]:
"""Convert OpenAI-style multimodal content to Ollama's (text, images) shape.
The chat API constructs user messages with content as a list of
``{"type": "text"}`` and ``{"type": "image_url"}`` parts when a tool
returns a live frame. Ollama's SDK requires content to be a string and
images to be passed in a separate field, so we extract each.
"""
if not isinstance(content, list):
return content, None
text_parts: list[str] = []
images: list[bytes] = []
for part in content:
if not isinstance(part, dict):
continue
part_type = part.get("type")
if part_type == "text":
text = part.get("text")
if text:
text_parts.append(str(text))
elif part_type == "image_url":
url = (part.get("image_url") or {}).get("url", "")
if isinstance(url, str) and url.startswith("data:"):
try:
encoded = url.split(",", 1)[1]
images.append(base64.b64decode(encoded, validate=True))
except (ValueError, IndexError, binascii.Error) as e:
logger.debug("Failed to decode multimodal image url: %s", e)
return ("\n".join(text_parts) if text_parts else None), (images or None)
@register_genai_provider(GenAIProviderEnum.ollama)
class OllamaClient(GenAIClient):
"""Generative AI client for Frigate using Ollama."""
@@ -207,10 +244,13 @@ class OllamaClient(GenAIClient):
"""Build request_messages and params for chat (sync or stream)."""
request_messages = []
for msg in messages:
msg_dict = {
content, images = _normalize_multimodal_content(msg.get("content", ""))
msg_dict: dict[str, Any] = {
"role": msg.get("role"),
"content": msg.get("content", ""),
"content": content if content is not None else "",
}
if images:
msg_dict["images"] = images
if msg.get("tool_call_id"):
msg_dict["tool_call_id"] = msg["tool_call_id"]
if msg.get("name"):

View File

@@ -501,6 +501,14 @@
"inherit": "Inherit",
"enabled": "Enabled",
"disabled": "Disabled"
},
"cameraType": {
"title": "Camera Type",
"label": "Camera type",
"description": "Set the type for each camera. Dedicated LPR cameras are single-purpose cameras with powerful optical zoom to capture license plates on distant vehicles. Most cameras should use the normal camera type unless the camera is specifically for LPR and has a tightly focused view on license plates.",
"normal": "Normal",
"dedicatedLpr": "Dedicated LPR",
"saveSuccess": "Updated camera type for {{cameraName}}. Restart Frigate to apply the changes."
}
},
"cameraReview": {
@@ -1641,14 +1649,14 @@
"audioDetectionDisabled": "Audio detection is not enabled for this camera. Audio transcription requires audio detection to be active."
},
"detect": {
"fpsGreaterThanFive": "Setting the detect FPS higher than 5 is not recommended."
"fpsGreaterThanFive": "Setting the detect FPS higher than 5 is not recommended. Higher values may cause performance issues and will not provide any benefit."
},
"faceRecognition": {
"globalDisabled": "Face recognition is not enabled at the global level. Enable it in global settings for camera-level face recognition to function.",
"globalDisabled": "Face recognition is not enabled at the global level. Enable it in Enrichments for camera-level face recognition to function.",
"personNotTracked": "Face recognition requires the 'person' object to be tracked. Ensure 'person' is in the object tracking list."
},
"lpr": {
"globalDisabled": "License plate recognition is not enabled at the global level. Enable it in global settings for camera-level LPR to function.",
"globalDisabled": "License plate recognition is not enabled at the global level. Enable it in Enrichments for camera-level LPR to function.",
"vehicleNotTracked": "License plate recognition requires 'car' or 'motorcycle' to be tracked."
},
"record": {

View File

@@ -12,6 +12,7 @@ const detect: SectionConfigOverrides = {
position: "after",
condition: (ctx) => {
if (ctx.level !== "camera" || !ctx.fullCameraConfig) return false;
if (ctx.fullCameraConfig.type === "lpr") return false;
const detectFps = ctx.formData?.fps as number | undefined;
const streamFps = ctx.fullCameraConfig.detect?.fps;
return detectFps != null && streamFps != null && detectFps > 5;

View File

@ -14,8 +14,10 @@ import { useTranslation } from "react-i18next";
import CameraEditForm from "@/components/settings/CameraEditForm";
import CameraWizardDialog from "@/components/settings/CameraWizardDialog";
import DeleteCameraDialog from "@/components/overlay/dialog/DeleteCameraDialog";
import { LuPencil, LuPlus, LuTrash2 } from "react-icons/lu";
import { LuExternalLink, LuPencil, LuPlus, LuTrash2 } from "react-icons/lu";
import { IoMdArrowRoundBack } from "react-icons/io";
import { Link } from "react-router-dom";
import { useDocDomain } from "@/hooks/use-doc-domain";
import { isDesktop } from "react-device-detect";
import { CameraNameLabel } from "@/components/camera/FriendlyNameLabel";
import { Switch } from "@/components/ui/switch";
@@ -89,6 +91,13 @@ export default function CameraManagementView({
return [];
}, [config]);
const allCameras = useMemo(() => {
if (config) {
return Object.keys(config.cameras).sort();
}
return [];
}, [config]);
useEffect(() => {
document.title = t("documentTitle.cameraManagement");
}, [t]);
@@ -235,6 +244,15 @@
onConfigChanged={updateConfig}
/>
)}
{config?.lpr?.enabled && allCameras.length > 0 && (
<CameraTypeSection
cameras={allCameras}
config={config}
onConfigChanged={updateConfig}
setRestartDialogOpen={setRestartDialogOpen}
/>
)}
</div>
</>
) : (
@@ -497,6 +515,196 @@
);
}
/** Props for {@link CameraTypeSection}. */
type CameraTypeSectionProps = {
  /** Camera names to render one type selector row for. */
  cameras: string[];
  /** Full Frigate config; used to read each camera's persisted type. */
  config: FrigateConfig | undefined;
  /** Revalidates the config after a successful save. */
  onConfigChanged: () => Promise<unknown>;
  /** Opens the restart-Frigate confirmation dialog from the success toast. */
  setRestartDialogOpen: React.Dispatch<React.SetStateAction<boolean>>;
};
/**
 * Settings card listing every camera with a selector to switch it between
 * the "normal" and "lpr" (dedicated license plate reader) camera types.
 *
 * Saving writes the type via the `config/set` endpoint with
 * `requires_restart: 1`; the success toast offers a restart action because
 * the change only takes effect after Frigate restarts.
 */
function CameraTypeSection({
  cameras,
  config,
  onConfigChanged,
  setRestartDialogOpen,
}: CameraTypeSectionProps) {
  const { t } = useTranslation([
    "views/settings",
    "common",
    "components/dialog",
  ]);
  const { getLocaleDocUrl } = useDocDomain();
  // Camera whose save is in flight; its selector is swapped for a spinner.
  const [savingCamera, setSavingCamera] = useState<string | null>(null);
  // Optimistic local state: the parsed config API doesn't reflect type
  // changes until Frigate restarts, so we track saved values locally.
  const [localOverrides, setLocalOverrides] = useState<Record<string, string>>(
    {},
  );

  const handleTypeChange = useCallback(
    async (camera: string, value: string) => {
      setSavingCamera(camera);
      try {
        // "normal" is persisted by clearing the field (null) rather than
        // writing a literal value; only "lpr" is stored explicitly.
        const typeValue = value === "lpr" ? "lpr" : null;
        await axios.put("config/set", {
          requires_restart: 1,
          config_data: {
            cameras: {
              [camera]: {
                type: typeValue,
              },
            },
          },
        });
        await onConfigChanged();
        // Record the chosen value so the UI shows it before the restart.
        setLocalOverrides((prev) => ({
          ...prev,
          [camera]: value,
        }));
        toast.success(
          t("cameraManagement.cameraType.saveSuccess", {
            ns: "views/settings",
            cameraName: camera,
          }),
          {
            position: "top-center",
            // Toast action button opens the restart confirmation dialog.
            action: (
              <a onClick={() => setRestartDialogOpen(true)}>
                <Button>
                  {t("restart.button", { ns: "components/dialog" })}
                </Button>
              </a>
            ),
          },
        );
      } catch (error) {
        // Prefer the API's message/detail field; fall back to a generic one.
        const errorMessage =
          axios.isAxiosError(error) &&
          (error.response?.data?.message || error.response?.data?.detail)
            ? error.response?.data?.message || error.response?.data?.detail
            : t("toast.save.error.noMessage", { ns: "common" });
        toast.error(
          t("toast.save.error.title", { errorMessage, ns: "common" }),
          { position: "top-center" },
        );
      } finally {
        setSavingCamera(null);
      }
    },
    [onConfigChanged, setRestartDialogOpen, t],
  );

  // Resolve a camera's displayed type: optimistic local override first,
  // then the persisted config value; anything other than "lpr" is "normal".
  const getCameraType = useCallback(
    (camera: string): string => {
      const localValue = localOverrides[camera];
      if (localValue) return localValue;
      const type = config?.cameras?.[camera]?.type;
      return type === "lpr" ? "lpr" : "normal";
    },
    [config, localOverrides],
  );

  return (
    <SettingsGroupCard
      title={t("cameraManagement.cameraType.title", {
        ns: "views/settings",
      })}
    >
      <div className={SPLIT_ROW_CLASS_NAME}>
        <div className="space-y-1.5">
          <Label>
            {t("cameraManagement.cameraType.label", {
              ns: "views/settings",
            })}
            <RestartRequiredIndicator className="ml-1" />
          </Label>
          {/* Description + docs link: shown here on desktop only; a
              duplicate below handles the mobile layout. */}
          <p className="hidden text-sm text-muted-foreground md:block">
            {t("cameraManagement.cameraType.description", {
              ns: "views/settings",
            })}
          </p>
          <div className="hidden items-center text-sm text-primary md:flex">
            <Link
              to={getLocaleDocUrl(
                "configuration/license_plate_recognition#dedicated-lpr-cameras",
              )}
              target="_blank"
              rel="noopener noreferrer"
              className="inline"
            >
              {t("readTheDocumentation", { ns: "common" })}
              <LuExternalLink className="ml-2 inline-flex size-3" />
            </Link>
          </div>
        </div>
        <div className={`${CONTROL_COLUMN_CLASS_NAME} space-y-1.5`}>
          <div className="max-w-md space-y-2 rounded-lg bg-secondary p-4">
            {cameras.map((camera) => {
              const currentType = getCameraType(camera);
              const isSaving = savingCamera === camera;
              return (
                <div
                  key={camera}
                  className="flex flex-row items-center justify-between"
                >
                  <CameraNameLabel camera={camera} />
                  {isSaving ? (
                    <ActivityIndicator className="h-5 w-20" size={16} />
                  ) : (
                    <Select
                      value={currentType}
                      onValueChange={(v) => handleTypeChange(camera, v)}
                    >
                      <SelectTrigger className="h-7 w-full max-w-[140px] text-xs">
                        <SelectValue />
                      </SelectTrigger>
                      <SelectContent>
                        <SelectItem value="normal">
                          {t("cameraManagement.cameraType.normal", {
                            ns: "views/settings",
                          })}
                        </SelectItem>
                        <SelectItem value="lpr">
                          {t("cameraManagement.cameraType.dedicatedLpr", {
                            ns: "views/settings",
                          })}
                        </SelectItem>
                      </SelectContent>
                    </Select>
                  )}
                </div>
              );
            })}
          </div>
          {/* Mobile-only copies of the description and docs link. */}
          <p className="text-sm text-muted-foreground md:hidden">
            {t("cameraManagement.cameraType.description", {
              ns: "views/settings",
            })}
          </p>
          <div className="flex items-center text-sm text-primary md:hidden">
            <Link
              to={getLocaleDocUrl(
                "configuration/license_plate_recognition#dedicated-lpr-cameras",
              )}
              target="_blank"
              rel="noopener noreferrer"
              className="inline"
            >
              {t("readTheDocumentation", { ns: "common" })}
              <LuExternalLink className="ml-2 inline-flex size-3" />
            </Link>
          </div>
        </div>
      </div>
    </SettingsGroupCard>
  );
}
type ProfileCameraEnableSectionProps = {
profileState: ProfileState;
cameras: string[];