Compare commits

...

8 Commits

Author SHA1 Message Date
mathieu-d
802493ce6c
Merge 0f3dd097ec into 01a7ec1060 2026-05-01 04:05:36 +10:00
Nicolas Mowen
01a7ec1060
Miscellaneous fixes (#23044)
* Move openai specific workaround so it doesn't apply to other providers

* Fix gemini tool calling

* Improve efficiency of frame listing for previews

* debug replay fixes

- initial selection without changing the radio button in the dialog would select 1 hour (rather than 1 minute)
- use CLIPS_DIR instead of CACHE_DIR so that longer replay clips don't cause tmpfs cache overflows

* don't re-render the tracking details overlay on every video time tick

* change pinned to planned

---------

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
2026-04-30 12:53:34 -05:00
Josh Hawkins
95b5b89ed9
Miscellaneous fixes (#23032)
* ensure embeddings process restarts after maintainer thread crash

* add docs link to media sync settings

* fix color

Co-authored-by: Copilot <copilot@github.com>

* match link color with other sections

* ensure recording staleness threshold scales with segment_time

* docs tweak

* Fix llama.cpp media marker

* Fix gemini tools call

---------

Co-authored-by: Copilot <copilot@github.com>
Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
2026-04-29 16:20:19 -06:00
matieu-d
0f3dd097ec Prepare for pull request. Remove specific configurations 2026-04-17 22:25:46 +02:00
matieu-d
2a4d7e4766 Prepare for pull request. Remove specific configurations 2026-04-14 23:14:31 +02:00
matieu-d
46415ffeb5 Add Hailo-10H detector configuration to global.json 2026-04-14 22:54:58 +02:00
matieu-d
e35ab0b8a1 Add support of temperature reading for hailo 10H 2026-04-14 22:54:58 +02:00
matieu-d
837373547d H10 support patch 2026-04-14 22:54:58 +02:00
27 changed files with 647 additions and 71 deletions

View File

@@ -26,7 +26,7 @@ _Please read the [contributing guidelines](https://github.com/blakeblackshear/fr
- This PR fixes or closes issue: fixes #
- This PR is related to issue:
- Link to discussion with maintainers (**required** for large/pinned features):
- Link to discussion with maintainers (**required** for any large or "planned" features):
## For new features

View File

@@ -19,8 +19,8 @@ jobs:
days-before-stale: 30
days-before-close: 3
exempt-draft-pr: true
exempt-issue-labels: "pinned,security"
exempt-pr-labels: "pinned,security,dependencies"
exempt-issue-labels: "planned,security"
exempt-pr-labels: "planned,security,dependencies"
operations-per-run: 120
- name: Print outputs
env:

View File

@@ -12,7 +12,7 @@ If you've found a bug and want to fix it, go for it. Link to the relevant issue
Every new feature adds scope that the maintainers must test, maintain, and support long-term. Before writing code for a new feature:
1. **Check for existing discussion.** Search [feature requests](https://github.com/blakeblackshear/frigate/issues) and [discussions](https://github.com/blakeblackshear/frigate/discussions) to see if it's been proposed or discussed. Pinned feature requests are on our radar — we plan to get to them, but we don't maintain a public roadmap or timeline. Check in with us first if you have interest in contributing to one.
1. **Check for existing discussion.** Search [feature requests](https://github.com/blakeblackshear/frigate/issues) and [discussions](https://github.com/blakeblackshear/frigate/discussions) to see if it's been proposed or discussed. Feature requests tagged with "planned" are on our radar — we plan to get to them, but we don't maintain a public roadmap or timeline. Check in with us first if you have interest in contributing to one.
2. **Start a discussion or feature request first.** This helps ensure your idea aligns with Frigate's direction before you invest time building it. Community interest in a feature request helps us gauge demand, though a great idea is a great idea even without a crowd behind it.
3. **Be open to "no".** We try to be thoughtful about what we take on, and sometimes that means saying no to good code if the feature isn't the right fit for the project. These calls are sometimes subjective, and we won't always get them right. We're happy to discuss and reconsider.

View File

@@ -21,6 +21,13 @@ local: version
--tag frigate:latest \
--load
localh10: version
docker buildx build --target=frigate --file docker/main/Dockerfile . \
--build-arg HAILORT_VERSION=5.1.1 \
--build-arg HAILORT_GIT_REPO=mathieu-d/hailort \
--tag frigate:latest \
--load
debug: version
docker buildx build --target=frigate --file docker/main/Dockerfile . \
--build-arg DEBUG=true \

View File

@@ -12,6 +12,11 @@ services:
build:
context: .
dockerfile: docker/main/Dockerfile
# Use args to specify hailort version and location
# args:
# HAILORT_VERSION: "5.1.1"
# HAILORT_GIT_REPO: "mathieu-d/hailort"
# Use target devcontainer-trt for TensorRT dev
target: devcontainer
cache_from:
@@ -29,6 +34,7 @@ services:
# devices:
# - /dev/bus/usb:/dev/bus/usb # Uncomment for Google Coral USB
# - /dev/dri:/dev/dri # for intel hwaccel, needs to be updated for your hardware
volumes:
- .:/workspace/frigate:cached
- ./web/dist:/opt/frigate/web:cached

View File

@@ -0,0 +1,7 @@
#!/bin/bash
# Update package list and install hailo driver version 5.1.1 for Hailo-10H
sudo apt update
sudo apt install -y hailo-h10-all=5.1.1

View File

@@ -157,6 +157,8 @@ FROM base AS wheels
ARG DEBIAN_FRONTEND
ARG TARGETARCH
ARG DEBUG=false
ARG HAILORT_VERSION=4.21.0
ARG HAILORT_GIT_REPO=frigate-nvr/hailort
# Use a separate container to build wheels to prevent build dependencies in final image
RUN apt-get -qq update \

View File

@@ -2,13 +2,11 @@
set -euxo pipefail
hailo_version="4.21.0"
if [[ "${TARGETARCH}" == "amd64" ]]; then
arch="x86_64"
elif [[ "${TARGETARCH}" == "arm64" ]]; then
arch="aarch64"
fi
wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-debian12-${TARGETARCH}.tar.gz" | tar -C / -xzf -
wget -P /wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp311-cp311-linux_${arch}.whl"
wget -qO- "https://github.com/${HAILORT_GIT_REPO}/releases/download/v${HAILORT_VERSION}/hailort-debian12-${TARGETARCH}.tar.gz" | tar -C / -xzf -
wget -P /wheels/ "https://github.com/${HAILORT_GIT_REPO}/releases/download/v${HAILORT_VERSION}/hailort-${HAILORT_VERSION}-cp311-cp311-linux_${arch}.whl"
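With the hardcoded version replaced by build args, this one script serves both the stock HailoRT build and the Hailo-10H build driven by the new localh10 Makefile target. A sketch of how the variables expand, using the Dockerfile defaults on amd64; the H10 build would substitute the Makefile's --build-arg values instead:

# Illustrative expansion of the parameterized download URLs above, using
# the Dockerfile defaults (HAILORT_VERSION=4.21.0,
# HAILORT_GIT_REPO=frigate-nvr/hailort) on amd64. The localh10 target
# overrides these via --build-arg.
env = {
    "HAILORT_VERSION": "4.21.0",
    "HAILORT_GIT_REPO": "frigate-nvr/hailort",
    "TARGETARCH": "amd64",
}
arch = {"amd64": "x86_64", "arm64": "aarch64"}[env["TARGETARCH"]]
base = f"https://github.com/{env['HAILORT_GIT_REPO']}/releases/download/v{env['HAILORT_VERSION']}"
print(f"{base}/hailort-debian12-{env['TARGETARCH']}.tar.gz")
print(f"{base}/hailort-{env['HAILORT_VERSION']}-cp311-cp311-linux_{arch}.whl")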

View File

@@ -171,7 +171,7 @@ When choosing images to include in the face training set it is recommended to al
- If it is difficult to make out details in a person's face it will not be helpful in training.
- Avoid images with extreme under/over-exposure.
- Avoid blurry / pixelated images.
- Avoid training on infrared (gray-scale). The models are trained on color images and will be able to extract features from gray-scale images.
- Avoid training on infrared (gray-scale). The models are trained on color images and will not be able to extract features from gray-scale images.
- Using images of people wearing hats / sunglasses may confuse the model.
- Do not upload too many similar images at the same time; it is recommended to train no more than 4-6 similar images for each person to avoid over-fitting.

View File

@@ -1368,12 +1368,17 @@ def preview_gif(
file_start = f"preview_{camera_name}-"
start_file = f"{file_start}{start_ts}.{PREVIEW_FRAME_TYPE}"
end_file = f"{file_start}{end_ts}.{PREVIEW_FRAME_TYPE}"
camera_files = [
entry.name
for entry in os.scandir(preview_dir)
if entry.name.startswith(file_start)
]
camera_files.sort()
selected_previews = []
for file in sorted(os.listdir(preview_dir)):
if not file.startswith(file_start):
continue
for file in camera_files:
if file < start_file:
continue
@@ -1550,12 +1555,17 @@ def preview_mp4(
file_start = f"preview_{camera_name}-"
start_file = f"{file_start}{start_ts}.{PREVIEW_FRAME_TYPE}"
end_file = f"{file_start}{end_ts}.{PREVIEW_FRAME_TYPE}"
camera_files = [
entry.name
for entry in os.scandir(preview_dir)
if entry.name.startswith(file_start)
]
camera_files.sort()
selected_previews = []
for file in sorted(os.listdir(preview_dir)):
if not file.startswith(file_start):
continue
for file in camera_files:
if file < start_file:
continue
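
The same listing change appears in both hunks above (preview_gif and preview_mp4) and recurs in the preview cache helper and review post-processor diffs below. A standalone sketch of the pattern with an illustrative directory and camera prefix: os.scandir streams directory entries instead of materializing the full listing, and filtering to one camera's prefix before sorting keeps the sort proportional to that camera's files rather than to every file in the shared preview directory.

import os

preview_dir = "/tmp/preview_frames"  # illustrative path
file_start = "preview_front-"        # illustrative camera prefix
os.makedirs(preview_dir, exist_ok=True)

# Single filtered pass over a streamed listing, then sort the small subset.
camera_files = [
    entry.name
    for entry in os.scandir(preview_dir)
    if entry.name.startswith(file_start)
]
camera_files.sort()
print(camera_files)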

View File

@@ -148,12 +148,17 @@ def get_preview_frames_from_cache(camera_name: str, start_ts: float, end_ts: flo
file_start = f"preview_{camera_name}-"
start_file = f"{file_start}{start_ts}.{PREVIEW_FRAME_TYPE}"
end_file = f"{file_start}{end_ts}.{PREVIEW_FRAME_TYPE}"
camera_files = [
entry.name
for entry in os.scandir(preview_dir)
if entry.name.startswith(file_start)
]
camera_files.sort()
selected_previews = []
for file in sorted(os.listdir(preview_dir)):
if not file.startswith(file_start):
continue
for file in camera_files:
if file < start_file:
continue

View File

@@ -15,7 +15,7 @@ TRIGGER_DIR = f"{CLIPS_DIR}/triggers"
BIRDSEYE_PIPE = "/tmp/cache/birdseye"
CACHE_DIR = "/tmp/cache"
REPLAY_CAMERA_PREFIX = "_replay_"
REPLAY_DIR = os.path.join(CACHE_DIR, "replay")
REPLAY_DIR = os.path.join(CLIPS_DIR, "replay")
PLUS_ENV_VAR = "PLUS_API_KEY"
PLUS_API_HOST = "https://api.frigate.video"

View File

@@ -366,12 +366,17 @@ class ReviewDescriptionProcessor(PostProcessorApi):
file_start = f"preview_{camera}-"
start_file = f"{file_start}{start_time}.webp"
end_file = f"{file_start}{end_time}.webp"
camera_files = [
entry.name
for entry in os.scandir(preview_dir)
if entry.name.startswith(file_start)
]
camera_files.sort()
all_frames: list[str] = []
for file in sorted(os.listdir(preview_dir)):
if not file.startswith(file_start):
continue
for file in camera_files:
if file < start_file:
if len(all_frames):
all_frames[0] = os.path.join(preview_dir, file)

View File

@@ -0,0 +1,415 @@
import logging
import os
import subprocess
import threading
import urllib.request
from functools import partial
from typing import Dict, List, Optional, Tuple
import cv2
import numpy as np
from pydantic import ConfigDict, Field
from typing_extensions import Literal
from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import (
BaseDetectorConfig,
)
from frigate.object_detection.util import RequestStore, ResponseStore
logger = logging.getLogger(__name__)
# ----------------- Utility Functions ----------------- #
def preprocess_tensor(image: np.ndarray, model_w: int, model_h: int) -> np.ndarray:
"""
Resize an image with unchanged aspect ratio using padding.
Assumes input image shape is (H, W, 3).
"""
if image.ndim == 4 and image.shape[0] == 1:
image = image[0]
h, w = image.shape[:2]
scale = min(model_w / w, model_h / h)
new_w, new_h = int(w * scale), int(h * scale)
resized_image = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_CUBIC)
padded_image = np.full((model_h, model_w, 3), 114, dtype=image.dtype)
x_offset = (model_w - new_w) // 2
y_offset = (model_h - new_h) // 2
padded_image[y_offset : y_offset + new_h, x_offset : x_offset + new_w] = (
resized_image
)
return padded_image
# ----------------- Global Constants ----------------- #
DETECTOR_KEY = "hailo10h"
ARCH = None
H10H_DEFAULT_MODEL = "yolov6n.hef"
H10H_DEFAULT_URL = "https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/ModelZoo/Compiled/v5.2.0/hailo10h/yolov6n.hef"
def detect_hailo_arch():
try:
result = subprocess.run(
["hailortcli", "fw-control", "identify"], capture_output=True, text=True
)
if result.returncode != 0:
logger.error(f"Inference error: {result.stderr}")
return None
for line in result.stdout.split("\n"):
if "Device Architecture" in line:
if "HAILO10H" in line:
return "hailo10h"
logger.error("Inference error: Could not determine Hailo architecture.")
return None
except Exception as e:
logger.error(f"Inference error: {e}")
return None
# ----------------- HailoAsyncInference Class ----------------- #
class HailoAsyncInference:
def __init__(
self,
hef_path: str,
input_store: RequestStore,
output_store: ResponseStore,
batch_size: int = 1,
input_type: Optional[str] = None,
output_type: Optional[Dict[str, str]] = None,
send_original_frame: bool = False,
) -> None:
# when importing hailo it activates the driver
# which leaves processes running even though it may not be used.
try:
from hailo_platform import (
HEF,
FormatType,
HailoSchedulingAlgorithm,
VDevice,
)
except ModuleNotFoundError:
pass
self.input_store = input_store
self.output_store = output_store
params = VDevice.create_params()
params.scheduling_algorithm = HailoSchedulingAlgorithm.ROUND_ROBIN
self.hef = HEF(hef_path)
self.target = VDevice(params)
self.infer_model = self.target.create_infer_model(hef_path)
self.infer_model.set_batch_size(batch_size)
if input_type is not None:
self.infer_model.input().set_format_type(getattr(FormatType, input_type))
if output_type is not None:
for output_name, output_type in output_type.items():
self.infer_model.output(output_name).set_format_type(
getattr(FormatType, output_type)
)
self.output_type = output_type
self.send_original_frame = send_original_frame
def callback(
self,
completion_info,
bindings_list: List,
input_batch: List,
request_ids: List[int],
):
if completion_info.exception:
logger.error(f"Inference error: {completion_info.exception}")
else:
for i, bindings in enumerate(bindings_list):
if len(bindings._output_names) == 1:
result = bindings.output().get_buffer()
else:
result = {
name: np.expand_dims(bindings.output(name).get_buffer(), axis=0)
for name in bindings._output_names
}
self.output_store.put(request_ids[i], (input_batch[i], result))
def _create_bindings(self, configured_infer_model) -> object:
if self.output_type is None:
output_buffers = {
output_info.name: np.empty(
self.infer_model.output(output_info.name).shape,
dtype=getattr(
np, str(output_info.format.type).split(".")[1].lower()
),
)
for output_info in self.hef.get_output_vstream_infos()
}
else:
output_buffers = {
name: np.empty(
self.infer_model.output(name).shape,
dtype=getattr(np, self.output_type[name].lower()),
)
for name in self.output_type
}
return configured_infer_model.create_bindings(output_buffers=output_buffers)
def get_input_shape(self) -> Tuple[int, ...]:
return self.hef.get_input_vstream_infos()[0].shape
def run(self) -> None:
job = None
with self.infer_model.configure() as configured_infer_model:
while True:
batch_data = self.input_store.get()
if batch_data is None:
break
request_id, frame_data = batch_data
preprocessed_batch = [frame_data]
request_ids = [request_id]
input_batch = preprocessed_batch # non-send_original_frame mode
bindings_list = []
for frame in preprocessed_batch:
bindings = self._create_bindings(configured_infer_model)
bindings.input().set_buffer(np.array(frame))
bindings_list.append(bindings)
configured_infer_model.wait_for_async_ready(timeout_ms=10000)
job = configured_infer_model.run_async(
bindings_list,
partial(
self.callback,
input_batch=input_batch,
request_ids=request_ids,
bindings_list=bindings_list,
),
)
if job is not None:
job.wait(100)
# ----------------- HailoDetector Class ----------------- #
class HailoDetector(DetectionApi):
type_key = DETECTOR_KEY
def __init__(self, detector_config: "HailoDetectorConfig"):
global ARCH
ARCH = detect_hailo_arch()
self.cache_dir = MODEL_CACHE_DIR
self.device_type = detector_config.device
self.model_height = (
detector_config.model.height
if hasattr(detector_config.model, "height")
else None
)
self.model_width = (
detector_config.model.width
if hasattr(detector_config.model, "width")
else None
)
self.model_type = (
detector_config.model.model_type
if hasattr(detector_config.model, "model_type")
else None
)
self.tensor_format = (
detector_config.model.input_tensor
if hasattr(detector_config.model, "input_tensor")
else None
)
self.pixel_format = (
detector_config.model.input_pixel_format
if hasattr(detector_config.model, "input_pixel_format")
else None
)
self.input_dtype = (
detector_config.model.input_dtype
if hasattr(detector_config.model, "input_dtype")
else None
)
self.output_type = "FLOAT32"
self.set_path_and_url(detector_config.model.path)
self.working_model_path = self.check_and_prepare()
self.batch_size = 1
self.input_store = RequestStore()
self.response_store = ResponseStore()
try:
logger.debug(f"[INIT] Loading HEF model from {self.working_model_path}")
self.inference_engine = HailoAsyncInference(
self.working_model_path,
self.input_store,
self.response_store,
self.batch_size,
)
self.input_shape = self.inference_engine.get_input_shape()
logger.debug(f"[INIT] Model input shape: {self.input_shape}")
self.inference_thread = threading.Thread(
target=self.inference_engine.run, daemon=True
)
self.inference_thread.start()
except Exception as e:
logger.error(f"[INIT] Failed to initialize HailoAsyncInference: {e}")
raise
def set_path_and_url(self, path: str = None):
if not path:
self.model_path = None
self.url = None
return
if self.is_url(path):
self.url = path
self.model_path = None
else:
self.model_path = path
self.url = None
def is_url(self, url: str) -> bool:
return (
url.startswith("http://")
or url.startswith("https://")
or url.startswith("www.")
)
@staticmethod
def extract_model_name(path: str = None, url: str = None) -> str:
if path and path.endswith(".hef"):
return os.path.basename(path)
elif url and url.endswith(".hef"):
return os.path.basename(url)
else:
return H10H_DEFAULT_MODEL
@staticmethod
def download_model(url: str, destination: str):
if not url.endswith(".hef"):
raise ValueError("Invalid model URL. Only .hef files are supported.")
try:
urllib.request.urlretrieve(url, destination)
logger.debug(f"Downloaded model to {destination}")
except Exception as e:
raise RuntimeError(f"Failed to download model from {url}: {str(e)}")
def check_and_prepare(self) -> str:
if not os.path.exists(self.cache_dir):
os.makedirs(self.cache_dir)
model_name = self.extract_model_name(self.model_path, self.url)
cached_model_path = os.path.join(self.cache_dir, model_name)
if not self.model_path and not self.url:
if os.path.exists(cached_model_path):
logger.debug(f"Model found in cache: {cached_model_path}")
return cached_model_path
else:
logger.debug(f"Downloading default model: {model_name}")
self.download_model(H10H_DEFAULT_URL, cached_model_path)
elif self.url:
logger.debug(f"Downloading model from URL: {self.url}")
self.download_model(self.url, cached_model_path)
elif self.model_path:
if os.path.exists(self.model_path):
logger.debug(f"Using existing model at: {self.model_path}")
return self.model_path
else:
raise FileNotFoundError(f"Model file not found at: {self.model_path}")
return cached_model_path
def detect_raw(self, tensor_input):
tensor_input = self.preprocess(tensor_input)
if isinstance(tensor_input, np.ndarray) and len(tensor_input.shape) == 3:
tensor_input = np.expand_dims(tensor_input, axis=0)
request_id = self.input_store.put(tensor_input)
try:
_, infer_results = self.response_store.get(request_id, timeout=1.0)
except TimeoutError:
logger.error(
f"Timeout waiting for inference results for request {request_id}"
)
if not self.inference_thread.is_alive():
raise RuntimeError(
"HailoRT inference thread has stopped, restart required."
)
return np.zeros((20, 6), dtype=np.float32)
if isinstance(infer_results, list) and len(infer_results) == 1:
infer_results = infer_results[0]
threshold = 0.4
all_detections = []
for class_id, detection_set in enumerate(infer_results):
if not isinstance(detection_set, np.ndarray) or detection_set.size == 0:
continue
for det in detection_set:
if det.shape[0] < 5:
continue
score = float(det[4])
if score < threshold:
continue
all_detections.append([class_id, score, det[0], det[1], det[2], det[3]])
if len(all_detections) == 0:
detections_array = np.zeros((20, 6), dtype=np.float32)
else:
detections_array = np.array(all_detections, dtype=np.float32)
if detections_array.shape[0] > 20:
detections_array = detections_array[:20, :]
elif detections_array.shape[0] < 20:
pad = np.zeros((20 - detections_array.shape[0], 6), dtype=np.float32)
detections_array = np.vstack((detections_array, pad))
return detections_array
def preprocess(self, image):
if isinstance(image, np.ndarray):
processed = preprocess_tensor(
image, self.input_shape[1], self.input_shape[0]
)
return np.expand_dims(processed, axis=0)
else:
raise ValueError("Unsupported image format for preprocessing")
def close(self):
"""Properly shuts down the inference engine and releases the VDevice."""
logger.debug("[CLOSE] Closing HailoDetector")
try:
if hasattr(self, "inference_engine"):
if hasattr(self.inference_engine, "target"):
self.inference_engine.target.release()
logger.debug("Hailo VDevice released successfully")
except Exception as e:
logger.error(f"Failed to close Hailo device: {e}")
raise
def __del__(self):
"""Destructor to ensure cleanup when the object is deleted."""
self.close()
# ----------------- HailoDetectorConfig Class ----------------- #
class HailoDetectorConfig(BaseDetectorConfig):
"""Hailo10H detector using HEF models and the HailoRT SDK for inference on Hailo hardware."""
model_config = ConfigDict(
title="Hailo-10H",
)
type: Literal[DETECTOR_KEY]
device: str = Field(
default="PCIe",
title="Device Type",
description="The device to use for Hailo inference (e.g. 'PCIe', 'M.2').",
)
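
One detail from the new detector worth making concrete: preprocess_tensor letterboxes the frame into the model square rather than stretching it. A worked example of that arithmetic, assuming a 1280x720 frame and a 640x640 model input:

# Worked example of the preprocess_tensor letterbox math above, assuming
# a 1280x720 frame and a 640x640 model input.
model_w = model_h = 640
w, h = 1280, 720
scale = min(model_w / w, model_h / h)          # 0.5
new_w, new_h = int(w * scale), int(h * scale)  # 640, 360
x_offset = (model_w - new_w) // 2              # 0
y_offset = (model_h - new_h) // 2              # 140 rows of value-114 gray above and below
print(scale, (new_w, new_h), (x_offset, y_offset))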

View File

@@ -4,6 +4,7 @@ import base64
import json
import logging
import os
import sys
import threading
from json.decoder import JSONDecodeError
from multiprocessing.synchronize import Event as MpEvent
@@ -52,6 +53,14 @@ class EmbeddingProcess(FrigateProcess):
self.stop_event,
)
maintainer.start()
maintainer.join()
# If the maintainer thread exited but no shutdown was requested, it
# crashed. Surface as a non-zero exit so the watchdog restarts us
# instead of treating the silent thread death as a clean shutdown.
if not self.stop_event.is_set():
logger.error("Embeddings maintainer thread exited unexpectedly")
sys.exit(1)
class EmbeddingsContext:
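
A minimal, self-contained sketch of the pattern this hunk implements; the worker below is a stand-in, not Frigate's actual EmbeddingProcess, and it fakes the thread death to show the exit-code path:

import multiprocessing as mp
import sys
from multiprocessing.synchronize import Event as MpEvent

def embeddings_process(stop_event: MpEvent) -> None:
    # Stand-in for EmbeddingProcess.run(): the maintainer thread has
    # exited; if no shutdown was requested, surface it as a crash.
    maintainer_alive = False  # pretend the joined thread just died
    if not maintainer_alive and not stop_event.is_set():
        sys.exit(1)

if __name__ == "__main__":
    stop = mp.Event()
    proc = mp.Process(target=embeddings_process, args=(stop,))
    proc.start()
    proc.join()
    print(proc.exitcode)  # 1 -> the watchdog restarts instead of standing down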

View File

@@ -153,9 +153,6 @@ Each line represents a detection state, not necessarily unique individuals. The
if "other_concerns" in schema.get("required", []):
schema["required"].remove("other_concerns")
# OpenAI strict mode requires additionalProperties: false on all objects
schema["additionalProperties"] = False
response_format = {
"type": "json_schema",
"json_schema": {

View File

@@ -136,22 +136,44 @@ class GeminiClient(GenAIClient):
)
)
elif role == "assistant":
gemini_messages.append(
types.Content(
role="model", parts=[types.Part.from_text(text=content)]
)
)
parts: list[types.Part] = []
if content:
parts.append(types.Part.from_text(text=content))
for tc in msg.get("tool_calls") or []:
func = tc.get("function") or {}
tc_name = func.get("name") or ""
tc_args: Any = func.get("arguments")
if isinstance(tc_args, str):
try:
tc_args = json.loads(tc_args)
except (json.JSONDecodeError, TypeError):
tc_args = {}
if not isinstance(tc_args, dict):
tc_args = {}
if tc_name:
parts.append(
types.Part.from_function_call(
name=tc_name, args=tc_args
)
)
if not parts:
parts.append(types.Part.from_text(text=" "))
gemini_messages.append(types.Content(role="model", parts=parts))
elif role == "tool":
# Handle tool response
function_response = {
"name": msg.get("name", ""),
"response": content,
}
response_payload = (
content if isinstance(content, dict) else {"result": content}
)
gemini_messages.append(
types.Content(
role="function",
parts=[
types.Part.from_function_response(function_response) # type: ignore[misc,call-arg,arg-type]
types.Part.from_function_response(
name=msg.get("name")
or msg.get("tool_call_id")
or "",
response=response_payload,
)
],
)
)
@@ -343,22 +365,44 @@
)
)
elif role == "assistant":
gemini_messages.append(
types.Content(
role="model", parts=[types.Part.from_text(text=content)]
)
)
parts: list[types.Part] = []
if content:
parts.append(types.Part.from_text(text=content))
for tc in msg.get("tool_calls") or []:
func = tc.get("function") or {}
tc_name = func.get("name") or ""
tc_args: Any = func.get("arguments")
if isinstance(tc_args, str):
try:
tc_args = json.loads(tc_args)
except (json.JSONDecodeError, TypeError):
tc_args = {}
if not isinstance(tc_args, dict):
tc_args = {}
if tc_name:
parts.append(
types.Part.from_function_call(
name=tc_name, args=tc_args
)
)
if not parts:
parts.append(types.Part.from_text(text=" "))
gemini_messages.append(types.Content(role="model", parts=parts))
elif role == "tool":
# Handle tool response
function_response = {
"name": msg.get("name", ""),
"response": content,
}
response_payload = (
content if isinstance(content, dict) else {"result": content}
)
gemini_messages.append(
types.Content(
role="function",
parts=[
types.Part.from_function_response(function_response) # type: ignore[misc,call-arg,arg-type]
types.Part.from_function_response(
name=msg.get("name")
or msg.get("tool_call_id")
or "",
response=response_payload,
)
],
)
)
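
Both hunks normalize tool-call arguments the same way before handing them to types.Part.from_function_call, since providers serialize arguments as a JSON string while Gemini expects a dict. A standalone sketch of that normalization; the tool name and arguments are hypothetical, and the google-genai types are omitted so it runs on its own:

import json
from typing import Any

# Hypothetical OpenAI-style assistant message as stored in chat history.
msg = {
    "role": "assistant",
    "content": "",
    "tool_calls": [
        {"function": {"name": "get_camera_state", "arguments": '{"camera": "front"}'}}
    ],
}

for tc in msg.get("tool_calls") or []:
    func = tc.get("function") or {}
    tc_args: Any = func.get("arguments")
    if isinstance(tc_args, str):
        try:
            tc_args = json.loads(tc_args)  # Gemini wants a dict, not a JSON string
        except (json.JSONDecodeError, TypeError):
            tc_args = {}
    if not isinstance(tc_args, dict):
        tc_args = {}
    print(func.get("name"), tc_args)  # get_camera_state {'camera': 'front'}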

View File

@@ -44,6 +44,7 @@ class LlamaCppClient(GenAIClient):
_supports_tools: bool
_image_token_cache: dict[tuple[int, int], int]
_text_baseline_tokens: int | None
_media_marker: str
def _init_provider(self) -> str | None:
"""Initialize the client and query model metadata from the server."""
@@ -56,6 +57,7 @@
self._supports_tools = False
self._image_token_cache = {}
self._text_baseline_tokens = None
self._media_marker = "<__media__>"
base_url = (
self.genai_config.base_url.rstrip("/")
@@ -141,6 +143,13 @@
chat_caps = props.get("chat_template_caps", {})
self._supports_tools = chat_caps.get("supports_tools", False)
# Media marker for multimodal embeddings; the server randomizes this
# per startup unless LLAMA_MEDIA_MARKER is set, so we must read it
# from /props rather than hardcoding "<__media__>".
media_marker = props.get("media_marker")
if isinstance(media_marker, str) and media_marker:
self._media_marker = media_marker
logger.info(
"llama.cpp model '%s' initialized — context: %s, vision: %s, audio: %s, tools: %s",
configured_model,
@@ -465,10 +474,11 @@
jpeg_bytes = _to_jpeg(img)
to_encode = jpeg_bytes if jpeg_bytes is not None else img
encoded = base64.b64encode(to_encode).decode("utf-8")
# prompt_string must contain <__media__> placeholder for image tokenization
# prompt_string must contain the server's media marker placeholder.
# The marker is randomized per server startup (read from /props).
content.append(
{
"prompt_string": "<__media__>\n",
"prompt_string": f"{self._media_marker}\n",
"multimodal_data": [encoded], # type: ignore[dict-item]
}
)
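
A sketch of reading the marker the way the hunk above describes, assuming a llama.cpp server on a local port; the URL is illustrative and the /props key follows the code above:

import json
import urllib.request

# Sketch: fetch the per-startup media marker from llama.cpp's /props
# endpoint, falling back to the historical default. The base URL is an
# assumption for illustration.
base_url = "http://127.0.0.1:8080"
media_marker = "<__media__>"  # default; randomized unless LLAMA_MEDIA_MARKER is set
try:
    with urllib.request.urlopen(f"{base_url}/props", timeout=5) as resp:
        props = json.load(resp)
    marker = props.get("media_marker")
    if isinstance(marker, str) and marker:
        media_marker = marker
except OSError:
    pass  # server unavailable; keep the default
prompt_string = f"{media_marker}\n"  # placeholder the server swaps for image tokens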

View File

@@ -73,8 +73,17 @@
**self.genai_config.runtime_options,
}
if response_format:
# OpenAI strict mode requires additionalProperties: false on the schema
if response_format.get("type") == "json_schema" and response_format.get(
"json_schema", {}
).get("strict"):
schema = response_format.get("json_schema", {}).get("schema")
if isinstance(schema, dict):
schema["additionalProperties"] = False
request_params["response_format"] = response_format
result = self.provider.chat.completions.create(**request_params)
if (
result is not None
and hasattr(result, "choices")
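
A self-contained sketch of that fixup with an illustrative schema; after it runs, strict mode's additionalProperties requirement is satisfied without the schema generator needing to know which provider it targets (the companion hunk above removes the same line from the provider-agnostic code):

# Sketch of the strict-mode fixup above; the schema fields are illustrative.
response_format = {
    "type": "json_schema",
    "json_schema": {
        "name": "review_description",  # hypothetical schema name
        "strict": True,
        "schema": {"type": "object", "properties": {"title": {"type": "string"}}},
    },
}

if response_format.get("type") == "json_schema" and response_format.get(
    "json_schema", {}
).get("strict"):
    schema = response_format.get("json_schema", {}).get("schema")
    if isinstance(schema, dict):
        schema["additionalProperties"] = False  # required by OpenAI strict mode

assert response_format["json_schema"]["schema"]["additionalProperties"] is False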

View File

@@ -123,6 +123,15 @@ def get_detector_temperature(
if index < len(hailo_device_names):
device_name = hailo_device_names[index]
return hailo_temps[device_name]
elif detector_type == "hailo10h":
# Get temperatures for Hailo devices
hailo_temps = get_hailo_temps()
if hailo_temps:
hailo_device_names = sorted(hailo_temps.keys())
index = detector_index_by_type.get("hailo10h", 0)
if index < len(hailo_device_names):
device_name = hailo_device_names[index]
return hailo_temps[device_name]
elif detector_type == "rknn":
# Rockchip temperatures are handled by the GPU / NPU stats
# as there are not detector specific temperatures

View File

@@ -24,7 +24,7 @@ from frigate.config.camera.updater import (
)
from frigate.const import PROCESS_PRIORITY_HIGH
from frigate.log import LogPipe
from frigate.util.builtin import EventsPerSecond
from frigate.util.builtin import EventsPerSecond, get_ffmpeg_arg_list
from frigate.util.ffmpeg import start_or_restart_ffmpeg, stop_ffmpeg
from frigate.util.image import (
FrameManager,
@@ -34,6 +34,23 @@ from frigate.util.process import FrigateProcess
logger = logging.getLogger(__name__)
# all built-in record presets use this segment_time
DEFAULT_RECORD_SEGMENT_TIME = 10
def _get_record_segment_time(config: CameraConfig) -> int:
"""Extract -segment_time from the camera's record output args."""
record_args = get_ffmpeg_arg_list(config.ffmpeg.output_args.record)
if record_args and record_args[0].startswith("preset"):
return DEFAULT_RECORD_SEGMENT_TIME
try:
idx = record_args.index("-segment_time")
return int(record_args[idx + 1])
except (ValueError, IndexError):
return DEFAULT_RECORD_SEGMENT_TIME
def capture_frames(
ffmpeg_process: sp.Popen[Any],
@@ -164,6 +181,12 @@ class CameraWatchdog(threading.Thread):
self.latest_cache_segment_time: float = 0
self.record_enable_time: datetime | None = None
# `valid` segments are published with the segment's start time, so the
# gap between consecutive publishes can reach 2 * segment_time. Pad the
# staleness threshold so it's never tighter than that worst case.
segment_time = _get_record_segment_time(self.config)
self.record_stale_threshold = max(120, 2 * segment_time + 30)
# Stall tracking (based on last processed frame)
self._stall_timestamps: deque[float] = deque()
self._stall_active: bool = False
@@ -413,16 +436,17 @@
# ensure segments are still being created and that they have valid video data
# Skip checks during grace period to allow segments to start being created
stale_window = timedelta(seconds=self.record_stale_threshold)
cache_stale = not in_grace_period and now_utc > (
latest_cache_dt + timedelta(seconds=120)
latest_cache_dt + stale_window
)
valid_stale = not in_grace_period and now_utc > (
latest_valid_dt + timedelta(seconds=120)
latest_valid_dt + stale_window
)
invalid_stale_condition = (
self.latest_invalid_segment_time > 0
and not in_grace_period
and now_utc > (latest_invalid_dt + timedelta(seconds=120))
and now_utc > (latest_invalid_dt + stale_window)
and self.latest_valid_segment_time
<= self.latest_invalid_segment_time
)
@@ -439,7 +463,7 @@
)
self.logger.error(
f"{reason} for {self.config.name} in the last 120s. Restarting the ffmpeg record process..."
f"{reason} for {self.config.name} in the last {self.record_stale_threshold}s. Restarting the ffmpeg record process..."
)
p["process"] = start_or_restart_ffmpeg(
p["cmd"],

View File

@@ -28,6 +28,7 @@ class MonitoredProcess:
restart_timestamps: deque[float] = field(
default_factory=lambda: deque(maxlen=MAX_RESTARTS)
)
clean_exit_logged: bool = False
def is_restarting_too_fast(self, now: float) -> bool:
while (
@@ -72,7 +73,9 @@
exitcode = entry.process.exitcode
if exitcode == 0:
logger.info("Process %s exited cleanly, not restarting", entry.name)
if not entry.clean_exit_logged:
logger.info("Process %s exited cleanly, not restarting", entry.name)
entry.clean_exit_logged = True
return
logger.warning(

View File

@@ -397,6 +397,14 @@
"description": "The device to use for Hailo inference (e.g. 'PCIe', 'M.2')."
}
},
"hailo10h": {
"label": "Hailo-10H",
"description": "Hailo-10H detector using HEF models and the HailoRT SDK for inference on Hailo hardware.",
"device": {
"label": "Device Type",
"description": "The device to use for Hailo inference (e.g. 'PCIe', 'M.2')."
}
},
"memryx": {
"label": "MemryX",
"description": "MemryX MX3 detector that runs compiled DFP models on MemryX accelerators.",

View File

@@ -391,10 +391,8 @@ export default function MobileReviewSettingsDrawer({
className="flex w-full items-center justify-center gap-2"
aria-label={t("title", { ns: "views/replay" })}
onClick={() => {
const now = new Date(latestTime * 1000);
now.setHours(now.getHours() - 1);
setDebugReplayRange({
after: now.getTime() / 1000,
after: latestTime - 60,
before: latestTime,
});
setSelectedReplayOption("1");
@@ -541,11 +539,9 @@
return;
}
const hours = parseInt(option);
const minutes = parseInt(option, 10);
const end = latestTime;
const now = new Date(end * 1000);
now.setHours(now.getHours() - hours);
setDebugReplayRange({ after: now.getTime() / 1000, before: end });
setDebugReplayRange({ after: end - minutes * 60, before: end });
};
content = (

View File

@@ -396,7 +396,6 @@ export default function HlsVideoPlayer({
}}
>
<ObjectTrackOverlay
key={`overlay-${currentTime}`}
camera={camera}
showBoundingBoxes={!isPlaying}
currentTime={currentTime}

View File

@@ -728,10 +728,8 @@ export function RecordingView({
setShareTimestampOpen(true);
}}
onDebugReplayClick={() => {
const now = new Date(timeRange.before * 1000);
now.setHours(now.getHours() - 1);
setDebugReplayRange({
after: now.getTime() / 1000,
after: timeRange.before - 60,
before: timeRange.before,
});
setDebugReplayMode("select");

View File

@@ -10,13 +10,16 @@ import axios from "axios";
import { toast } from "sonner";
import { useJobStatus } from "@/api/ws";
import { Switch } from "@/components/ui/switch";
import { LuCheck, LuX } from "react-icons/lu";
import { LuCheck, LuExternalLink, LuX } from "react-icons/lu";
import { cn } from "@/lib/utils";
import { formatUnixTimestampToDateTime } from "@/utils/dateUtil";
import { MediaSyncResults, MediaSyncStats } from "@/types/ws";
import { useDocDomain } from "@/hooks/use-doc-domain";
import { Link } from "react-router-dom";
export default function MediaSyncSettingsView() {
const { t } = useTranslation("views/settings");
const { getLocaleDocUrl } = useDocDomain();
const [selectedMediaTypes, setSelectedMediaTypes] = useState<string[]>([
"all",
]);
@@ -109,13 +112,25 @@
<Heading as="h4" className="mb-2 hidden md:block">
{t("maintenance.sync.title")}
</Heading>
<div className="max-w-6xl">
<div className="mb-5 mt-2 flex max-w-5xl flex-col gap-2 text-sm text-muted-foreground">
<p>{t("maintenance.sync.desc")}</p>
<div className="flex items-center text-primary-variant">
<Link
to={getLocaleDocUrl(
"configuration/record#syncing-media-files-with-disk",
)}
target="_blank"
rel="noopener noreferrer"
className="inline"
>
{t("readTheDocumentation", { ns: "common" })}
<LuExternalLink className="ml-2 inline-flex size-3" />
</Link>
</div>
</div>
</div>
<div className="space-y-6">
{/* Media Types Selection */}
<div>