Improvements

This commit is contained in:
Nicolas Mowen 2026-02-12 19:35:40 -07:00
parent c9591225ba
commit d5aa2a0341
5 changed files with 171 additions and 16 deletions

View File

@ -3,7 +3,7 @@
import base64
import json
import logging
from datetime import datetime, timezone
from datetime import datetime
from typing import Any, Dict, List, Optional
import cv2
@ -20,6 +20,7 @@ from frigate.api.defs.request.chat_body import ChatCompletionRequest
from frigate.api.defs.response.chat_response import (
ChatCompletionResponse,
ChatMessageResponse,
ToolCall,
)
from frigate.api.defs.tags import Tags
from frigate.api.event import events
@ -29,6 +30,29 @@ logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.chat])
def _format_events_with_local_time(events_list: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Add human-readable local start/end times to each event for the LLM."""
result = []
for evt in events_list:
if not isinstance(evt, dict):
result.append(evt)
continue
copy_evt = dict(evt)
try:
start_ts = evt.get("start_time")
end_ts = evt.get("end_time")
if start_ts is not None:
dt_start = datetime.fromtimestamp(start_ts)
copy_evt["start_time_local"] = dt_start.strftime("%Y-%m-%d %H:%M:%S %Z")
if end_ts is not None:
dt_end = datetime.fromtimestamp(end_ts)
copy_evt["end_time_local"] = dt_end.strftime("%Y-%m-%d %H:%M:%S %Z")
except (TypeError, ValueError, OSError):
pass
result.append(copy_evt)
return result
class ToolExecuteRequest(BaseModel):
"""Request model for tool execution."""
@ -394,7 +418,7 @@ async def chat_completion(
tools = get_tool_definitions()
conversation = []
current_datetime = datetime.now(timezone.utc)
current_datetime = datetime.now()
current_date_str = current_datetime.strftime("%Y-%m-%d")
current_time_str = current_datetime.strftime("%H:%M:%S %Z")
@ -429,9 +453,10 @@ async def chat_completion(
system_prompt = f"""You are a helpful assistant for Frigate, a security camera NVR system. You help users answer questions about their cameras, detected objects, and events.
Current date and time: {current_date_str} at {current_time_str} (UTC)
Current server local date and time: {current_date_str} at {current_time_str}
When users ask questions about "today", "yesterday", "this week", etc., use the current date above as reference.
Always present times to the user in the server's local timezone. When tool results include start_time_local and end_time_local, use those exact strings when listing or describing detection times—do not convert or invent timestamps. Do not use UTC or ISO format with Z for the user-facing answer unless the tool result only provides Unix timestamps without local time fields.
When users ask about "today", "yesterday", "this week", etc., use the current date above as reference.
When searching for objects or events, use ISO 8601 format for dates (e.g., {current_date_str}T00:00:00Z for the start of today).
Always be accurate with time calculations based on the current date provided.{cameras_section}{live_image_note}"""
@ -471,6 +496,7 @@ Always be accurate with time calculations based on the current date provided.{ca
conversation.append(msg_dict)
tool_iterations = 0
tool_calls: List[ToolCall] = []
max_iterations = body.max_tool_iterations
logger.debug(
@ -517,8 +543,8 @@ Always be accurate with time calculations based on the current date provided.{ca
]
conversation.append(assistant_message)
tool_calls = response.get("tool_calls")
if not tool_calls:
pending_tool_calls = response.get("tool_calls")
if not pending_tool_calls:
logger.debug(
f"Chat completion finished with final answer (iterations: {tool_iterations})"
)
@ -531,6 +557,7 @@ Always be accurate with time calculations based on the current date provided.{ca
),
finish_reason=response.get("finish_reason", "stop"),
tool_iterations=tool_iterations,
tool_calls=tool_calls,
).model_dump(),
)
@ -538,11 +565,11 @@ Always be accurate with time calculations based on the current date provided.{ca
tool_iterations += 1
logger.debug(
f"Tool calls detected (iteration {tool_iterations}/{max_iterations}): "
f"{len(tool_calls)} tool(s) to execute"
f"{len(pending_tool_calls)} tool(s) to execute"
)
tool_results = []
for tool_call in tool_calls:
for tool_call in pending_tool_calls:
tool_name = tool_call["name"]
tool_args = tool_call["arguments"]
tool_call_id = tool_call["id"]
@ -556,6 +583,12 @@ Always be accurate with time calculations based on the current date provided.{ca
tool_name, tool_args, request, allowed_cameras
)
# Add local time fields to search_objects results so the LLM doesn't hallucinate timestamps
if tool_name == "search_objects" and isinstance(
tool_result, list
):
tool_result = _format_events_with_local_time(tool_result)
if isinstance(tool_result, dict):
result_content = json.dumps(tool_result)
result_summary = tool_result
@ -573,6 +606,12 @@ Always be accurate with time calculations based on the current date provided.{ca
f"Tool {tool_name} (id: {tool_call_id}) completed successfully. "
f"Result: {json.dumps(result_summary, indent=2)}"
)
elif isinstance(tool_result, list):
result_content = json.dumps(tool_result)
logger.debug(
f"Tool {tool_name} (id: {tool_call_id}) completed successfully. "
f"Result: {len(tool_result)} item(s)"
)
elif isinstance(tool_result, str):
result_content = tool_result
logger.debug(
@ -586,6 +625,13 @@ Always be accurate with time calculations based on the current date provided.{ca
f"Result type: {type(tool_result).__name__}"
)
tool_calls.append(
ToolCall(
name=tool_name,
arguments=tool_args or {},
response=result_content,
)
)
tool_results.append(
{
"role": "tool",
@ -599,6 +645,13 @@ Always be accurate with time calculations based on the current date provided.{ca
exc_info=True,
)
error_content = json.dumps({"error": "Tool execution failed"})
tool_calls.append(
ToolCall(
name=tool_name,
arguments=tool_args or {},
response=error_content,
)
)
tool_results.append(
{
"role": "tool",
@ -628,6 +681,7 @@ Always be accurate with time calculations based on the current date provided.{ca
),
finish_reason="length",
tool_iterations=tool_iterations,
tool_calls=tool_calls,
).model_dump(),
)

View File

@ -5,8 +5,8 @@ from typing import Any, Optional
from pydantic import BaseModel, Field
class ToolCall(BaseModel):
"""A tool call from the LLM."""
class ToolCallInvocation(BaseModel):
"""A tool call requested by the LLM (before execution)."""
id: str = Field(description="Unique identifier for this tool call")
name: str = Field(description="Tool name to call")
@ -20,11 +20,24 @@ class ChatMessageResponse(BaseModel):
content: Optional[str] = Field(
default=None, description="Message content (None if tool calls present)"
)
tool_calls: Optional[list[ToolCall]] = Field(
tool_calls: Optional[list[ToolCallInvocation]] = Field(
default=None, description="Tool calls if LLM wants to call tools"
)
class ToolCall(BaseModel):
    """A tool that was executed during the completion, with its response.

    Execution record returned to the client in ``ChatCompletionResponse``.
    NOTE(review): this module appears to also define ``ToolCallInvocation``
    (the pre-execution request shape) — confirm the two names stay distinct
    so callers don't confuse request vs. result models.
    """

    # Name of the tool that was invoked.
    name: str = Field(description="Tool name that was called")
    # Arguments the LLM supplied; defaults to an empty dict when none given.
    arguments: dict[str, Any] = Field(
        default_factory=dict, description="Arguments passed to the tool"
    )
    # Serialized tool output (JSON string or plain text); empty if no result.
    response: str = Field(
        default="",
        description="The response or result returned from the tool execution",
    )
class ChatCompletionResponse(BaseModel):
"""Response from chat completion."""
@ -35,3 +48,7 @@ class ChatCompletionResponse(BaseModel):
tool_iterations: int = Field(
default=0, description="Number of tool call iterations performed"
)
tool_calls: list[ToolCall] = Field(
default_factory=list,
description="List of tool calls that were executed during this completion",
)

View File

@ -1,5 +1,8 @@
{
"placeholder": "Ask anything...",
"error": "Something went wrong. Please try again.",
"processing": "Processing..."
"processing": "Processing...",
"toolsUsed": "Used: {{tools}}",
"showTools": "Show tools ({{count}})",
"hideTools": "Hide tools"
}

View File

@ -0,0 +1,66 @@
import { useState } from "react";
import { useTranslation } from "react-i18next";
import ReactMarkdown from "react-markdown";
import { Button } from "@/components/ui/button";
import {
Collapsible,
CollapsibleContent,
CollapsibleTrigger,
} from "@/components/ui/collapsible";
export type ToolCall = {
  name: string;
  arguments?: Record<string, unknown>;
  response?: string;
};

type AssistantMessageProps = {
  content: string;
  toolCalls?: ToolCall[];
};

// Renders an assistant chat message as markdown, with an optional collapsible
// list of the tool calls (and their raw responses) that produced the answer.
export function AssistantMessage({
  content,
  toolCalls,
}: AssistantMessageProps) {
  const { t } = useTranslation(["views/chat"]);
  const [expanded, setExpanded] = useState(false);

  // Normalize to an array so the render guard below is a simple length check.
  const calls = toolCalls ?? [];

  return (
    <div className="flex flex-col gap-2">
      <ReactMarkdown>{content}</ReactMarkdown>
      {calls.length > 0 && (
        <Collapsible open={expanded} onOpenChange={setExpanded}>
          <CollapsibleTrigger asChild>
            <Button
              variant="ghost"
              size="sm"
              className="h-auto py-1 text-xs text-muted-foreground hover:text-foreground"
            >
              {expanded
                ? t("hideTools")
                : t("showTools", { count: calls.length })}
            </Button>
          </CollapsibleTrigger>
          <CollapsibleContent>
            <ul className="mt-2 space-y-2 border-l-2 border-muted-foreground/30 pl-3">
              {calls.map((call, index) => (
                <li key={index} className="text-xs">
                  <span className="font-medium text-muted-foreground">
                    {call.name}
                  </span>
                  {/* Only show the raw response when there is something to show. */}
                  {call.response != null && call.response !== "" && (
                    <pre className="mt-1 max-h-32 overflow-auto rounded bg-muted/50 p-2 text-[10px]">
                      {call.response}
                    </pre>
                  )}
                </li>
              ))}
            </ul>
          </CollapsibleContent>
        </Collapsible>
      )}
    </div>
  );
}

View File

@ -4,9 +4,16 @@ import { FaArrowUpLong } from "react-icons/fa6";
import { useTranslation } from "react-i18next";
import { useState, useCallback } from "react";
import axios from "axios";
import ReactMarkdown from "react-markdown";
import {
AssistantMessage,
type ToolCall,
} from "@/components/chat/AssistantMessage";
type ChatMessage = { role: "user" | "assistant"; content: string };
type ChatMessage = {
role: "user" | "assistant";
content: string;
toolCalls?: ToolCall[];
};
export default function ChatPage() {
const { t } = useTranslation(["views/chat"]);
@ -32,12 +39,17 @@ export default function ChatPage() {
}));
const { data } = await axios.post<{
message: { role: string; content: string | null };
tool_calls?: ToolCall[];
}>("chat/completion", { messages: apiMessages });
const content = data.message?.content ?? "";
setMessages((prev) => [
...prev,
{ role: "assistant", content: content || " " },
{
role: "assistant",
content: content || " ",
toolCalls: data.tool_calls?.length ? data.tool_calls : undefined,
},
]);
} catch {
setError(t("error"));
@ -59,7 +71,10 @@ export default function ChatPage() {
}
>
{msg.role === "assistant" ? (
<ReactMarkdown>{msg.content}</ReactMarkdown>
<AssistantMessage
content={msg.content}
toolCalls={msg.toolCalls}
/>
) : (
msg.content
)}