mirror of
https://github.com/blakeblackshear/frigate.git
synced 2026-03-10 02:29:19 +03:00
Compare commits
4 Commits
ca4485754e
...
dd22831422
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
dd22831422 | ||
|
|
c2e667c0dd | ||
|
|
c9bd907721 | ||
|
|
a2c43ad8bb |
@ -589,23 +589,38 @@ def config_set(request: Request, body: AppConfigSetBody):
|
||||
request.app.frigate_config = config
|
||||
request.app.genai_manager.update_config(config)
|
||||
|
||||
if request.app.stats_emitter is not None:
|
||||
request.app.stats_emitter.config = config
|
||||
|
||||
if body.update_topic:
|
||||
if body.update_topic.startswith("config/cameras/"):
|
||||
_, _, camera, field = body.update_topic.split("/")
|
||||
|
||||
if field == "add":
|
||||
settings = config.cameras[camera]
|
||||
elif field == "remove":
|
||||
settings = old_config.cameras[camera]
|
||||
if camera == "*":
|
||||
# Wildcard: fan out update to all cameras
|
||||
enum_value = CameraConfigUpdateEnum[field]
|
||||
for camera_name in config.cameras:
|
||||
settings = config.get_nested_object(
|
||||
f"config/cameras/{camera_name}/{field}"
|
||||
)
|
||||
request.app.config_publisher.publish_update(
|
||||
CameraConfigUpdateTopic(enum_value, camera_name),
|
||||
settings,
|
||||
)
|
||||
else:
|
||||
settings = config.get_nested_object(body.update_topic)
|
||||
if field == "add":
|
||||
settings = config.cameras[camera]
|
||||
elif field == "remove":
|
||||
settings = old_config.cameras[camera]
|
||||
else:
|
||||
settings = config.get_nested_object(body.update_topic)
|
||||
|
||||
request.app.config_publisher.publish_update(
|
||||
CameraConfigUpdateTopic(
|
||||
CameraConfigUpdateEnum[field], camera
|
||||
),
|
||||
settings,
|
||||
)
|
||||
request.app.config_publisher.publish_update(
|
||||
CameraConfigUpdateTopic(
|
||||
CameraConfigUpdateEnum[field], camera
|
||||
),
|
||||
settings,
|
||||
)
|
||||
else:
|
||||
# Generic handling for global config updates
|
||||
settings = config.get_nested_object(body.update_topic)
|
||||
|
||||
@ -1281,6 +1281,13 @@ def preview_gif(
|
||||
else:
|
||||
# need to generate from existing images
|
||||
preview_dir = os.path.join(CACHE_DIR, "preview_frames")
|
||||
|
||||
if not os.path.isdir(preview_dir):
|
||||
return JSONResponse(
|
||||
content={"success": False, "message": "Preview not found"},
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
file_start = f"preview_{camera_name}"
|
||||
start_file = f"{file_start}-{start_ts}.{PREVIEW_FRAME_TYPE}"
|
||||
end_file = f"{file_start}-{end_ts}.{PREVIEW_FRAME_TYPE}"
|
||||
@ -1456,6 +1463,13 @@ def preview_mp4(
|
||||
else:
|
||||
# need to generate from existing images
|
||||
preview_dir = os.path.join(CACHE_DIR, "preview_frames")
|
||||
|
||||
if not os.path.isdir(preview_dir):
|
||||
return JSONResponse(
|
||||
content={"success": False, "message": "Preview not found"},
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
file_start = f"preview_{camera_name}"
|
||||
start_file = f"{file_start}-{start_ts}.{PREVIEW_FRAME_TYPE}"
|
||||
end_file = f"{file_start}-{end_ts}.{PREVIEW_FRAME_TYPE}"
|
||||
|
||||
@ -242,6 +242,14 @@ class CameraConfig(FrigateBaseModel):
|
||||
    def create_ffmpeg_cmds(self):
        """Build the ffmpeg command list for this camera, at most once.

        Acts as an idempotent initializer: if commands were already generated,
        this is a no-op; otherwise it delegates to _build_ffmpeg_cmds().

        NOTE(review): the ``"_ffmpeg_cmds" in self`` membership test relies on the
        container semantics of FrigateBaseModel — confirm it actually reports a
        previously built attribute rather than always evaluating falsy.
        """
        if "_ffmpeg_cmds" in self:
            return
        self._build_ffmpeg_cmds()
|
||||
|
||||
    def recreate_ffmpeg_cmds(self):
        """Force regeneration of ffmpeg commands from current config.

        Unlike create_ffmpeg_cmds(), this rebuilds unconditionally — used when
        the ffmpeg config has been replaced at runtime and cached commands
        would otherwise be stale.
        """
        self._build_ffmpeg_cmds()
|
||||
|
||||
def _build_ffmpeg_cmds(self):
|
||||
"""Build ffmpeg commands from the current ffmpeg config."""
|
||||
ffmpeg_cmds = []
|
||||
for ffmpeg_input in self.ffmpeg.inputs:
|
||||
ffmpeg_cmd = self._get_ffmpeg_cmd(ffmpeg_input)
|
||||
|
||||
@ -17,6 +17,7 @@ class CameraConfigUpdateEnum(str, Enum):
|
||||
birdseye = "birdseye"
|
||||
detect = "detect"
|
||||
enabled = "enabled"
|
||||
ffmpeg = "ffmpeg"
|
||||
motion = "motion" # includes motion and motion masks
|
||||
notifications = "notifications"
|
||||
objects = "objects"
|
||||
@ -91,6 +92,9 @@ class CameraConfigUpdateSubscriber:
|
||||
|
||||
if update_type == CameraConfigUpdateEnum.audio:
|
||||
config.audio = updated_config
|
||||
elif update_type == CameraConfigUpdateEnum.ffmpeg:
|
||||
config.ffmpeg = updated_config
|
||||
config.recreate_ffmpeg_cmds()
|
||||
elif update_type == CameraConfigUpdateEnum.audio_transcription:
|
||||
config.audio_transcription = updated_config
|
||||
elif update_type == CameraConfigUpdateEnum.birdseye:
|
||||
|
||||
@ -12,6 +12,7 @@ from frigate.comms.embeddings_updater import EmbeddingsRequestEnum
|
||||
from frigate.comms.event_metadata_updater import EventMetadataPublisher
|
||||
from frigate.comms.inter_process import InterProcessRequestor
|
||||
from frigate.config import FrigateConfig
|
||||
from frigate.config.classification import LicensePlateRecognitionConfig
|
||||
from frigate.data_processing.common.license_plate.mixin import (
|
||||
WRITE_DEBUG_IMAGES,
|
||||
LicensePlateProcessingMixin,
|
||||
@ -47,6 +48,11 @@ class LicensePlatePostProcessor(LicensePlateProcessingMixin, PostProcessorApi):
|
||||
self.sub_label_publisher = sub_label_publisher
|
||||
super().__init__(config, metrics, model_runner)
|
||||
|
||||
    def update_config(self, lpr_config: LicensePlateRecognitionConfig) -> None:
        """Update LPR config at runtime.

        Replaces the stored config object wholesale; subsequent post-processing
        calls read the new values. No validation or diffing is performed here.
        """
        self.lpr_config = lpr_config
        logger.debug("LPR config updated dynamically")
|
||||
|
||||
def process_data(
|
||||
self, data: dict[str, Any], data_type: PostProcessDataEnum
|
||||
) -> None:
|
||||
|
||||
@ -19,6 +19,7 @@ from frigate.comms.event_metadata_updater import (
|
||||
)
|
||||
from frigate.comms.inter_process import InterProcessRequestor
|
||||
from frigate.config import FrigateConfig
|
||||
from frigate.config.classification import FaceRecognitionConfig
|
||||
from frigate.const import FACE_DIR, MODEL_CACHE_DIR
|
||||
from frigate.data_processing.common.face.model import (
|
||||
ArcFaceRecognizer,
|
||||
@ -95,6 +96,11 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
|
||||
|
||||
self.recognizer.build()
|
||||
|
||||
    def update_config(self, face_config: FaceRecognitionConfig) -> None:
        """Update face recognition config at runtime.

        Replaces the stored config object wholesale; subsequent frame
        processing reads the new values. The recognizer model itself is not
        rebuilt here.
        """
        self.face_config = face_config
        logger.debug("Face recognition config updated dynamically")
|
||||
|
||||
def __download_models(self, path: str) -> None:
|
||||
try:
|
||||
file_name = os.path.basename(path)
|
||||
|
||||
@ -8,6 +8,7 @@ import numpy as np
|
||||
from frigate.comms.event_metadata_updater import EventMetadataPublisher
|
||||
from frigate.comms.inter_process import InterProcessRequestor
|
||||
from frigate.config import FrigateConfig
|
||||
from frigate.config.classification import LicensePlateRecognitionConfig
|
||||
from frigate.data_processing.common.license_plate.mixin import (
|
||||
LicensePlateProcessingMixin,
|
||||
)
|
||||
@ -40,6 +41,11 @@ class LicensePlateRealTimeProcessor(LicensePlateProcessingMixin, RealTimeProcess
|
||||
self.camera_current_cars: dict[str, list[str]] = {}
|
||||
super().__init__(config, metrics)
|
||||
|
||||
    def update_config(self, lpr_config: LicensePlateRecognitionConfig) -> None:
        """Update LPR config at runtime.

        Replaces the stored config object wholesale; subsequent real-time
        frame processing reads the new values.
        """
        self.lpr_config = lpr_config
        logger.debug("LPR config updated dynamically")
|
||||
|
||||
def process_frame(
|
||||
self,
|
||||
obj_data: dict[str, Any],
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
"""Base runner implementation for ONNX models."""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import platform
|
||||
@ -10,6 +11,11 @@ from typing import Any
|
||||
import numpy as np
|
||||
import onnxruntime as ort
|
||||
|
||||
try:
|
||||
import zmq as _zmq
|
||||
except ImportError:
|
||||
_zmq = None
|
||||
|
||||
from frigate.util.model import get_ort_providers
|
||||
from frigate.util.rknn_converter import auto_convert_model, is_rknn_compatible
|
||||
|
||||
@ -546,12 +552,213 @@ class RKNNModelRunner(BaseModelRunner):
|
||||
pass
|
||||
|
||||
|
||||
class ZmqEmbeddingRunner(BaseModelRunner):
    """Send preprocessed embedding tensors over ZMQ to an external inference service.

    This enables offloading ONNX embedding inference (e.g. ArcFace face recognition,
    Jina semantic search) to a native host process that has access to hardware
    acceleration unavailable inside Docker, such as CoreML/ANE on Apple Silicon.

    Protocol:
        - Request is a multipart message: [ header_json_bytes, tensor_bytes ]
          where header is:
            {
                "shape": List[int],   # e.g. [1, 3, 112, 112]
                "dtype": str,         # numpy dtype, e.g. "float32"
                "model_type": str,    # e.g. "arcface"
            }
          tensor_bytes are the raw C-order bytes of the input tensor.

        - Response is either:
            a) Multipart [ header_json_bytes, embedding_bytes ] with header specifying
               shape and dtype of the returned embedding; or
            b) Single frame of raw float32 bytes (embedding vector, batch-first).

    On timeout or error, a zero embedding is returned so the caller can degrade
    gracefully (the face will simply not be recognized for that frame).

    Configuration example (face_recognition.device):
        face_recognition:
          enabled: true
          model_size: large
          device: "zmq://host.docker.internal:5556"
    """

    # Model type → primary input name (used to answer get_input_names())
    _INPUT_NAMES: dict[str, list[str]] = {}

    # Model type → model input spatial width
    _INPUT_WIDTHS: dict[str, int] = {}

    # Model type → embedding output dimensionality (used for zero-fallback shape)
    _OUTPUT_DIMS: dict[str, int] = {}

    @classmethod
    def _init_model_maps(cls) -> None:
        """Populate the model maps lazily to avoid circular imports at module load."""
        if cls._INPUT_NAMES:
            return
        from frigate.embeddings.types import EnrichmentModelTypeEnum

        cls._INPUT_NAMES = {
            EnrichmentModelTypeEnum.arcface.value: ["data"],
            EnrichmentModelTypeEnum.facenet.value: ["data"],
            EnrichmentModelTypeEnum.jina_v1.value: ["pixel_values"],
            EnrichmentModelTypeEnum.jina_v2.value: ["pixel_values"],
        }
        cls._INPUT_WIDTHS = {
            EnrichmentModelTypeEnum.arcface.value: 112,
            EnrichmentModelTypeEnum.facenet.value: 160,
            EnrichmentModelTypeEnum.jina_v1.value: 224,
            EnrichmentModelTypeEnum.jina_v2.value: 224,
        }
        cls._OUTPUT_DIMS = {
            EnrichmentModelTypeEnum.arcface.value: 512,
            EnrichmentModelTypeEnum.facenet.value: 128,
            EnrichmentModelTypeEnum.jina_v1.value: 768,
            EnrichmentModelTypeEnum.jina_v2.value: 768,
        }

    def __init__(
        self,
        endpoint: str,
        model_type: str,
        request_timeout_ms: int = 60000,
        linger_ms: int = 0,
    ):
        """Connect a REQ socket to the external inference service.

        Args:
            endpoint: Frigate-style endpoint, e.g. "zmq://host.docker.internal:5556".
            model_type: One of the EnrichmentModelTypeEnum values (e.g. "arcface").
            request_timeout_ms: Send/receive timeout for each request.
            linger_ms: ZMQ linger applied when closing sockets.

        Raises:
            ImportError: if pyzmq is not installed.
        """
        if _zmq is None:
            raise ImportError(
                "pyzmq is required for ZmqEmbeddingRunner. Install it with: pip install pyzmq"
            )
        self._init_model_maps()
        # "zmq://host:port" is the Frigate config sentinel; ZMQ sockets need "tcp://host:port"
        self._endpoint = endpoint.replace("zmq://", "tcp://", 1)
        self._model_type = model_type
        self._request_timeout_ms = request_timeout_ms
        self._linger_ms = linger_ms
        self._context = _zmq.Context()
        self._socket = None
        self._needs_reset = False
        self._lock = threading.Lock()
        self._create_socket()
        logger.info(
            f"ZmqEmbeddingRunner({model_type}): connected to {endpoint}"
        )

    def _create_socket(self) -> None:
        """(Re)create the REQ socket, closing any existing one first."""
        if self._socket is not None:
            try:
                self._socket.close(linger=self._linger_ms)
            except Exception:
                pass
        self._socket = self._context.socket(_zmq.REQ)
        self._socket.setsockopt(_zmq.RCVTIMEO, self._request_timeout_ms)
        self._socket.setsockopt(_zmq.SNDTIMEO, self._request_timeout_ms)
        self._socket.setsockopt(_zmq.LINGER, self._linger_ms)
        self._socket.connect(self._endpoint)

    def get_input_names(self) -> list[str]:
        """Return the primary input tensor name(s) for this model type."""
        return self._INPUT_NAMES.get(self._model_type, ["data"])

    def get_input_width(self) -> int:
        """Return the expected input spatial width, or -1 if unknown."""
        return self._INPUT_WIDTHS.get(self._model_type, -1)

    def run(self, inputs: dict[str, Any]) -> list[np.ndarray]:
        """Send the primary input tensor over ZMQ and return the embedding.

        For single-input models (ArcFace, FaceNet) the entire inputs dict maps to
        one tensor. For multi-input models only the first tensor is sent; those
        models are not yet supported for ZMQ offload.

        Returns a single-element list containing the embedding array; on timeout
        or error a zero-filled (batch, dim) float32 array is returned instead.
        """
        tensor_input = np.ascontiguousarray(next(iter(inputs.values())))
        batch_size = tensor_input.shape[0]

        with self._lock:
            # Lazy reset: if a previous call errored, reset the socket now — before any
            # ZMQ operations — so we don't manipulate sockets inside an error handler where
            # Frigate's own ZMQ threads may be polling and could hit a libzmq assertion.
            # The lock ensures only one thread touches the socket at a time (ZMQ REQ
            # sockets are not thread-safe; concurrent calls from the reindex thread and
            # the normal embedding maintainer thread would corrupt the socket state).
            if self._needs_reset:
                self._reset_socket()
                self._needs_reset = False

            try:
                header = {
                    "shape": list(tensor_input.shape),
                    "dtype": str(tensor_input.dtype.name),
                    "model_type": self._model_type,
                }
                header_bytes = json.dumps(header).encode("utf-8")
                # Zero-copy: the array is already C-contiguous, so a memoryview of it
                # is the same bytes tobytes() would produce, without the extra copy.
                payload = memoryview(tensor_input)

                self._socket.send_multipart([header_bytes, payload])
                reply_frames = self._socket.recv_multipart()
                return self._decode_response(reply_frames)

            except _zmq.Again:
                logger.warning(
                    f"ZmqEmbeddingRunner({self._model_type}): request timed out, will reset socket before next call"
                )
                self._needs_reset = True
                return [np.zeros((batch_size, self._get_output_dim()), dtype=np.float32)]
            except _zmq.ZMQError as exc:
                logger.error(
                    f"ZmqEmbeddingRunner({self._model_type}) ZMQError: {exc}, will reset socket before next call"
                )
                self._needs_reset = True
                return [np.zeros((batch_size, self._get_output_dim()), dtype=np.float32)]
            except Exception as exc:
                logger.error(
                    f"ZmqEmbeddingRunner({self._model_type}) unexpected error: {exc}"
                )
                return [np.zeros((batch_size, self._get_output_dim()), dtype=np.float32)]

    def _reset_socket(self) -> None:
        """Best-effort socket recreation; failures are deferred to the next request."""
        try:
            self._create_socket()
        except Exception:
            pass

    def _decode_response(self, frames: list[bytes]) -> list[np.ndarray]:
        """Decode a service reply into an embedding array.

        Supports the two documented reply forms (header+payload, or a raw
        float32 frame); anything else yields a zero embedding.
        """
        try:
            if len(frames) >= 2:
                header = json.loads(frames[0].decode("utf-8"))
                shape = tuple(header.get("shape", []))
                dtype = np.dtype(header.get("dtype", "float32"))
                return [np.frombuffer(frames[1], dtype=dtype).reshape(shape)]
            elif len(frames) == 1:
                # Raw float32 bytes — reshape to (1, embedding_dim)
                arr = np.frombuffer(frames[0], dtype=np.float32)
                return [arr.reshape((1, -1))]
            else:
                logger.warning(f"ZmqEmbeddingRunner({self._model_type}): empty reply")
                return [np.zeros((1, self._get_output_dim()), dtype=np.float32)]
        except Exception as exc:
            logger.error(
                f"ZmqEmbeddingRunner({self._model_type}): failed to decode response: {exc}"
            )
            return [np.zeros((1, self._get_output_dim()), dtype=np.float32)]

    def _get_output_dim(self) -> int:
        """Return the embedding dimensionality for zero-fallback arrays."""
        return self._OUTPUT_DIMS.get(self._model_type, 512)

    def __del__(self) -> None:
        # Close the socket AND terminate the context: closing only the socket
        # leaks the context's background I/O thread for the process lifetime.
        try:
            if getattr(self, "_socket", None) is not None:
                self._socket.close(linger=self._linger_ms)
        except Exception:
            pass
        try:
            if getattr(self, "_context", None) is not None:
                self._context.term()
        except Exception:
            pass
|
||||
|
||||
|
||||
def get_optimized_runner(
|
||||
model_path: str, device: str | None, model_type: str, **kwargs
|
||||
) -> BaseModelRunner:
|
||||
"""Get an optimized runner for the hardware."""
|
||||
device = device or "AUTO"
|
||||
|
||||
# ZMQ embedding runner — offloads ONNX inference to a native host process.
|
||||
# Triggered when device is a ZMQ endpoint, e.g. "zmq://host.docker.internal:5556".
|
||||
if device.startswith("zmq://"):
|
||||
return ZmqEmbeddingRunner(endpoint=device, model_type=model_type)
|
||||
|
||||
if device != "CPU" and is_rknn_compatible(model_path):
|
||||
rknn_path = auto_convert_model(model_path)
|
||||
|
||||
|
||||
@ -99,6 +99,13 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
self.classification_config_subscriber = ConfigSubscriber(
|
||||
"config/classification/custom/"
|
||||
)
|
||||
self.bird_classification_config_subscriber = ConfigSubscriber(
|
||||
"config/classification", exact=True
|
||||
)
|
||||
self.face_recognition_config_subscriber = ConfigSubscriber(
|
||||
"config/face_recognition", exact=True
|
||||
)
|
||||
self.lpr_config_subscriber = ConfigSubscriber("config/lpr", exact=True)
|
||||
|
||||
# Configure Frigate DB
|
||||
db = SqliteVecQueueDatabase(
|
||||
@ -273,6 +280,9 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
while not self.stop_event.is_set():
|
||||
self.config_updater.check_for_updates()
|
||||
self._check_classification_config_updates()
|
||||
self._check_bird_classification_config_updates()
|
||||
self._check_face_recognition_config_updates()
|
||||
self._check_lpr_config_updates()
|
||||
self._process_requests()
|
||||
self._process_updates()
|
||||
self._process_recordings_updates()
|
||||
@ -284,6 +294,9 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
|
||||
self.config_updater.stop()
|
||||
self.classification_config_subscriber.stop()
|
||||
self.bird_classification_config_subscriber.stop()
|
||||
self.face_recognition_config_subscriber.stop()
|
||||
self.lpr_config_subscriber.stop()
|
||||
self.event_subscriber.stop()
|
||||
self.event_end_subscriber.stop()
|
||||
self.recordings_subscriber.stop()
|
||||
@ -356,6 +369,62 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
f"Added classification processor for model: {model_name} (type: {type(processor).__name__})"
|
||||
)
|
||||
|
||||
def _check_bird_classification_config_updates(self) -> None:
|
||||
"""Check for bird classification config updates."""
|
||||
topic, classification_config = (
|
||||
self.bird_classification_config_subscriber.check_for_update()
|
||||
)
|
||||
|
||||
if topic is None:
|
||||
return
|
||||
|
||||
self.config.classification = classification_config
|
||||
logger.debug("Applied dynamic bird classification config update")
|
||||
|
||||
def _check_face_recognition_config_updates(self) -> None:
|
||||
"""Check for face recognition config updates."""
|
||||
topic, face_config = self.face_recognition_config_subscriber.check_for_update()
|
||||
|
||||
if topic is None:
|
||||
return
|
||||
|
||||
previous_min_area = self.config.face_recognition.min_area
|
||||
self.config.face_recognition = face_config
|
||||
|
||||
for camera_config in self.config.cameras.values():
|
||||
if camera_config.face_recognition.min_area == previous_min_area:
|
||||
camera_config.face_recognition.min_area = face_config.min_area
|
||||
|
||||
for processor in self.realtime_processors:
|
||||
if isinstance(processor, FaceRealTimeProcessor):
|
||||
processor.update_config(face_config)
|
||||
|
||||
logger.debug("Applied dynamic face recognition config update")
|
||||
|
||||
def _check_lpr_config_updates(self) -> None:
|
||||
"""Check for LPR config updates."""
|
||||
topic, lpr_config = self.lpr_config_subscriber.check_for_update()
|
||||
|
||||
if topic is None:
|
||||
return
|
||||
|
||||
previous_min_area = self.config.lpr.min_area
|
||||
self.config.lpr = lpr_config
|
||||
|
||||
for camera_config in self.config.cameras.values():
|
||||
if camera_config.lpr.min_area == previous_min_area:
|
||||
camera_config.lpr.min_area = lpr_config.min_area
|
||||
|
||||
for processor in self.realtime_processors:
|
||||
if isinstance(processor, LicensePlateRealTimeProcessor):
|
||||
processor.update_config(lpr_config)
|
||||
|
||||
for processor in self.post_processors:
|
||||
if isinstance(processor, LicensePlatePostProcessor):
|
||||
processor.update_config(lpr_config)
|
||||
|
||||
logger.debug("Applied dynamic LPR config update")
|
||||
|
||||
def _process_requests(self) -> None:
|
||||
"""Process embeddings requests"""
|
||||
|
||||
|
||||
@ -273,17 +273,13 @@ class BirdsEyeFrameManager:
|
||||
stop_event: mp.Event,
|
||||
):
|
||||
self.config = config
|
||||
self.mode = config.birdseye.mode
|
||||
width, height = get_canvas_shape(config.birdseye.width, config.birdseye.height)
|
||||
self.frame_shape = (height, width)
|
||||
self.yuv_shape = (height * 3 // 2, width)
|
||||
self.frame = np.ndarray(self.yuv_shape, dtype=np.uint8)
|
||||
self.canvas = Canvas(width, height, config.birdseye.layout.scaling_factor)
|
||||
self.stop_event = stop_event
|
||||
self.inactivity_threshold = config.birdseye.inactivity_threshold
|
||||
|
||||
if config.birdseye.layout.max_cameras:
|
||||
self.last_refresh_time = 0
|
||||
self.last_refresh_time = 0
|
||||
|
||||
# initialize the frame as black and with the Frigate logo
|
||||
self.blank_frame = np.zeros(self.yuv_shape, np.uint8)
|
||||
@ -426,7 +422,7 @@ class BirdsEyeFrameManager:
|
||||
and self.config.cameras[cam].enabled
|
||||
and cam_data["last_active_frame"] > 0
|
||||
and cam_data["current_frame_time"] - cam_data["last_active_frame"]
|
||||
< self.inactivity_threshold
|
||||
< self.config.birdseye.inactivity_threshold
|
||||
]
|
||||
)
|
||||
logger.debug(f"Active cameras: {active_cameras}")
|
||||
|
||||
@ -15,6 +15,7 @@ from ws4py.server.wsgirefserver import (
|
||||
)
|
||||
from ws4py.server.wsgiutils import WebSocketWSGIApplication
|
||||
|
||||
from frigate.comms.config_updater import ConfigSubscriber
|
||||
from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum
|
||||
from frigate.comms.ws import WebSocket
|
||||
from frigate.config import FrigateConfig
|
||||
@ -138,6 +139,7 @@ class OutputProcess(FrigateProcess):
|
||||
CameraConfigUpdateEnum.record,
|
||||
],
|
||||
)
|
||||
birdseye_config_subscriber = ConfigSubscriber("config/birdseye", exact=True)
|
||||
|
||||
jsmpeg_cameras: dict[str, JsmpegCamera] = {}
|
||||
birdseye: Birdseye | None = None
|
||||
@ -167,6 +169,20 @@ class OutputProcess(FrigateProcess):
|
||||
websocket_thread.start()
|
||||
|
||||
while not self.stop_event.is_set():
|
||||
update_topic, birdseye_config = (
|
||||
birdseye_config_subscriber.check_for_update()
|
||||
)
|
||||
|
||||
if update_topic is not None:
|
||||
previous_global_mode = self.config.birdseye.mode
|
||||
self.config.birdseye = birdseye_config
|
||||
|
||||
for camera_config in self.config.cameras.values():
|
||||
if camera_config.birdseye.mode == previous_global_mode:
|
||||
camera_config.birdseye.mode = birdseye_config.mode
|
||||
|
||||
logger.debug("Applied dynamic birdseye config update")
|
||||
|
||||
# check if there is an updated config
|
||||
updates = config_subscriber.check_for_updates()
|
||||
|
||||
@ -297,6 +313,7 @@ class OutputProcess(FrigateProcess):
|
||||
birdseye.stop()
|
||||
|
||||
config_subscriber.stop()
|
||||
birdseye_config_subscriber.stop()
|
||||
websocket_server.manager.close_all()
|
||||
websocket_server.manager.stop()
|
||||
websocket_server.manager.join()
|
||||
|
||||
261
frigate/test/http_api/test_http_config_set.py
Normal file
261
frigate/test/http_api/test_http_config_set.py
Normal file
@ -0,0 +1,261 @@
|
||||
"""Tests for the config_set endpoint's wildcard camera propagation."""
|
||||
|
||||
import os
|
||||
import tempfile
|
||||
import unittest
|
||||
from unittest.mock import MagicMock, Mock, patch
|
||||
|
||||
import ruamel.yaml
|
||||
|
||||
from frigate.config import FrigateConfig
|
||||
from frigate.config.camera.updater import (
|
||||
CameraConfigUpdateEnum,
|
||||
CameraConfigUpdatePublisher,
|
||||
CameraConfigUpdateTopic,
|
||||
)
|
||||
from frigate.models import Event, Recordings, ReviewSegment
|
||||
from frigate.test.http_api.base_http_test import AuthTestClient, BaseTestHttp
|
||||
|
||||
|
||||
class TestConfigSetWildcardPropagation(BaseTestHttp):
    """Test that wildcard camera updates fan out to all cameras."""

    def setUp(self):
        super().setUp(models=[Event, Recordings, ReviewSegment])
        # Two-camera config so fan-out behavior is observable.
        self.minimal_config = {
            "mqtt": {"host": "mqtt"},
            "cameras": {
                "front_door": {
                    "ffmpeg": {
                        "inputs": [
                            {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                        ]
                    },
                    "detect": {
                        "height": 1080,
                        "width": 1920,
                        "fps": 5,
                    },
                },
                "back_yard": {
                    "ffmpeg": {
                        "inputs": [
                            {"path": "rtsp://10.0.0.2:554/video", "roles": ["detect"]}
                        ]
                    },
                    "detect": {
                        "height": 720,
                        "width": 1280,
                        "fps": 10,
                    },
                },
            },
        }

    def _create_app_with_publisher(self):
        """Create app with a mocked config publisher."""
        # Imported locally to avoid importing the full app stack at module load.
        from fastapi import Request

        from frigate.api.auth import get_allowed_cameras_for_filter, get_current_user
        from frigate.api.fastapi_app import create_fastapi_app

        mock_publisher = Mock(spec=CameraConfigUpdatePublisher)
        mock_publisher.publisher = MagicMock()

        app = create_fastapi_app(
            FrigateConfig(**self.minimal_config),
            self.db,
            None,
            None,
            None,
            None,
            None,
            None,
            mock_publisher,
            None,
            enforce_default_admin=False,
        )

        # Bypass real auth: user/role come straight from request headers.
        async def mock_get_current_user(request: Request):
            username = request.headers.get("remote-user")
            role = request.headers.get("remote-role")
            return {"username": username, "role": role}

        async def mock_get_allowed_cameras_for_filter(request: Request):
            return list(self.minimal_config.get("cameras", {}).keys())

        app.dependency_overrides[get_current_user] = mock_get_current_user
        app.dependency_overrides[get_allowed_cameras_for_filter] = (
            mock_get_allowed_cameras_for_filter
        )

        return app, mock_publisher

    def _write_config_file(self):
        """Write the minimal config to a temp YAML file and return the path."""
        yaml = ruamel.yaml.YAML()
        # delete=False: callers unlink the file themselves in a finally block.
        f = tempfile.NamedTemporaryFile(mode="w", suffix=".yml", delete=False)
        yaml.dump(self.minimal_config, f)
        f.close()
        return f.name

    @patch("frigate.api.app.find_config_file")
    def test_wildcard_detect_update_fans_out_to_all_cameras(self, mock_find_config):
        """config/cameras/*/detect fans out to all cameras."""
        config_path = self._write_config_file()
        mock_find_config.return_value = config_path

        try:
            app, mock_publisher = self._create_app_with_publisher()
            with AuthTestClient(app) as client:
                resp = client.put(
                    "/config/set",
                    json={
                        "config_data": {"detect": {"fps": 15}},
                        "update_topic": "config/cameras/*/detect",
                        "requires_restart": 0,
                    },
                )

                self.assertEqual(resp.status_code, 200)
                data = resp.json()
                self.assertTrue(data["success"])

                # Verify publish_update called for each camera
                self.assertEqual(mock_publisher.publish_update.call_count, 2)

                published_cameras = set()
                for c in mock_publisher.publish_update.call_args_list:
                    topic = c[0][0]
                    self.assertIsInstance(topic, CameraConfigUpdateTopic)
                    self.assertEqual(topic.update_type, CameraConfigUpdateEnum.detect)
                    published_cameras.add(topic.camera)

                self.assertEqual(published_cameras, {"front_door", "back_yard"})

                # Global publisher should NOT be called for wildcard
                mock_publisher.publisher.publish.assert_not_called()
        finally:
            os.unlink(config_path)

    @patch("frigate.api.app.find_config_file")
    def test_wildcard_motion_update_fans_out(self, mock_find_config):
        """config/cameras/*/motion fans out to all cameras."""
        config_path = self._write_config_file()
        mock_find_config.return_value = config_path

        try:
            app, mock_publisher = self._create_app_with_publisher()
            with AuthTestClient(app) as client:
                resp = client.put(
                    "/config/set",
                    json={
                        "config_data": {"motion": {"threshold": 30}},
                        "update_topic": "config/cameras/*/motion",
                        "requires_restart": 0,
                    },
                )

                self.assertEqual(resp.status_code, 200)

                published_cameras = set()
                for c in mock_publisher.publish_update.call_args_list:
                    topic = c[0][0]
                    self.assertEqual(topic.update_type, CameraConfigUpdateEnum.motion)
                    published_cameras.add(topic.camera)

                self.assertEqual(published_cameras, {"front_door", "back_yard"})
        finally:
            os.unlink(config_path)

    @patch("frigate.api.app.find_config_file")
    def test_camera_specific_topic_only_updates_one_camera(self, mock_find_config):
        """config/cameras/front_door/detect only updates front_door."""
        config_path = self._write_config_file()
        mock_find_config.return_value = config_path

        try:
            app, mock_publisher = self._create_app_with_publisher()
            with AuthTestClient(app) as client:
                resp = client.put(
                    "/config/set",
                    json={
                        "config_data": {
                            "cameras": {"front_door": {"detect": {"fps": 20}}}
                        },
                        "update_topic": "config/cameras/front_door/detect",
                        "requires_restart": 0,
                    },
                )

                self.assertEqual(resp.status_code, 200)

                # Only one camera updated
                self.assertEqual(mock_publisher.publish_update.call_count, 1)
                topic = mock_publisher.publish_update.call_args[0][0]
                self.assertEqual(topic.camera, "front_door")
                self.assertEqual(topic.update_type, CameraConfigUpdateEnum.detect)

                # Global publisher should NOT be called
                mock_publisher.publisher.publish.assert_not_called()
        finally:
            os.unlink(config_path)

    @patch("frigate.api.app.find_config_file")
    def test_wildcard_sends_merged_per_camera_config(self, mock_find_config):
        """Wildcard fan-out sends each camera's own merged config."""
        config_path = self._write_config_file()
        mock_find_config.return_value = config_path

        try:
            app, mock_publisher = self._create_app_with_publisher()
            with AuthTestClient(app) as client:
                resp = client.put(
                    "/config/set",
                    json={
                        "config_data": {"detect": {"fps": 15}},
                        "update_topic": "config/cameras/*/detect",
                        "requires_restart": 0,
                    },
                )

                self.assertEqual(resp.status_code, 200)

                # Each published payload should be a per-camera detect config.
                for c in mock_publisher.publish_update.call_args_list:
                    camera_detect_config = c[0][1]
                    self.assertIsNotNone(camera_detect_config)
                    self.assertTrue(hasattr(camera_detect_config, "fps"))
        finally:
            os.unlink(config_path)

    @patch("frigate.api.app.find_config_file")
    def test_non_camera_global_topic_uses_generic_publish(self, mock_find_config):
        """Non-camera topics (e.g. config/live) use the generic publisher."""
        config_path = self._write_config_file()
        mock_find_config.return_value = config_path

        try:
            app, mock_publisher = self._create_app_with_publisher()
            with AuthTestClient(app) as client:
                resp = client.put(
                    "/config/set",
                    json={
                        "config_data": {"live": {"height": 720}},
                        "update_topic": "config/live",
                        "requires_restart": 0,
                    },
                )

                self.assertEqual(resp.status_code, 200)

                # Global topic publisher called
                mock_publisher.publisher.publish.assert_called_once()

                # Camera-level publish_update NOT called
                mock_publisher.publish_update.assert_not_called()
        finally:
            os.unlink(config_path)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
@ -151,7 +151,9 @@ def sync_recordings(
|
||||
|
||||
max_inserts = 1000
|
||||
for batch in chunked(recordings_to_delete, max_inserts):
|
||||
RecordingsToDelete.insert_many(batch).execute()
|
||||
RecordingsToDelete.insert_many(
|
||||
[{"id": r["id"]} for r in batch]
|
||||
).execute()
|
||||
|
||||
try:
|
||||
deleted = (
|
||||
|
||||
@ -214,7 +214,11 @@ class CameraWatchdog(threading.Thread):
|
||||
self.config_subscriber = CameraConfigUpdateSubscriber(
|
||||
None,
|
||||
{config.name: config},
|
||||
[CameraConfigUpdateEnum.enabled, CameraConfigUpdateEnum.record],
|
||||
[
|
||||
CameraConfigUpdateEnum.enabled,
|
||||
CameraConfigUpdateEnum.ffmpeg,
|
||||
CameraConfigUpdateEnum.record,
|
||||
],
|
||||
)
|
||||
self.requestor = InterProcessRequestor()
|
||||
self.was_enabled = self.config.enabled
|
||||
@ -254,9 +258,13 @@ class CameraWatchdog(threading.Thread):
|
||||
self._last_record_status = status
|
||||
self._last_status_update_time = now
|
||||
|
||||
def _check_config_updates(self) -> dict[str, list[str]]:
|
||||
"""Check for config updates and return the update dict."""
|
||||
return self.config_subscriber.check_for_updates()
|
||||
|
||||
def _update_enabled_state(self) -> bool:
|
||||
"""Fetch the latest config and update enabled state."""
|
||||
self.config_subscriber.check_for_updates()
|
||||
self._check_config_updates()
|
||||
return self.config.enabled
|
||||
|
||||
def reset_capture_thread(
|
||||
@ -317,7 +325,24 @@ class CameraWatchdog(threading.Thread):
|
||||
|
||||
# 1 second watchdog loop
|
||||
while not self.stop_event.wait(1):
|
||||
enabled = self._update_enabled_state()
|
||||
updates = self._check_config_updates()
|
||||
|
||||
# Handle ffmpeg config changes by restarting all ffmpeg processes
|
||||
if "ffmpeg" in updates and self.config.enabled:
|
||||
self.logger.debug(
|
||||
"FFmpeg config updated for %s, restarting ffmpeg processes",
|
||||
self.config.name,
|
||||
)
|
||||
self.stop_all_ffmpeg()
|
||||
self.start_all_ffmpeg()
|
||||
self.latest_valid_segment_time = 0
|
||||
self.latest_invalid_segment_time = 0
|
||||
self.latest_cache_segment_time = 0
|
||||
self.record_enable_time = datetime.now().astimezone(timezone.utc)
|
||||
last_restart_time = datetime.now().timestamp()
|
||||
continue
|
||||
|
||||
enabled = self.config.enabled
|
||||
if enabled != self.was_enabled:
|
||||
if enabled:
|
||||
self.logger.debug(f"Enabling camera {self.config.name}")
|
||||
|
||||
275
tools/zmq_embedding_server.py
Normal file
275
tools/zmq_embedding_server.py
Normal file
@ -0,0 +1,275 @@
|
||||
"""ZMQ Embedding Server — native Mac (Apple Silicon) inference service.
|
||||
|
||||
Runs ONNX models using hardware acceleration unavailable inside Docker on macOS,
|
||||
specifically CoreML and the Apple Neural Engine. Frigate's Docker container
|
||||
connects to this server over ZMQ TCP, sends preprocessed tensors, and receives
|
||||
embedding vectors back.
|
||||
|
||||
Supported models:
|
||||
- ArcFace (face recognition, 512-dim output)
|
||||
- FaceNet (face recognition, 128-dim output)
|
||||
- Jina V1/V2 vision (semantic search, 768-dim output)
|
||||
|
||||
Requirements (install outside Docker, on the Mac host):
|
||||
pip install onnxruntime pyzmq numpy
|
||||
|
||||
Usage:
|
||||
# ArcFace face recognition (port 5556):
|
||||
python tools/zmq_embedding_server.py \\
|
||||
--model /config/model_cache/facedet/arcface.onnx \\
|
||||
--model-type arcface \\
|
||||
--port 5556
|
||||
|
||||
# Jina V1 vision semantic search (port 5557):
|
||||
python tools/zmq_embedding_server.py \\
|
||||
--model /config/model_cache/jinaai/jina-clip-v1/vision_model_quantized.onnx \\
|
||||
--model-type jina_v1 \\
|
||||
--port 5557
|
||||
|
||||
Frigate config (docker-compose / config.yaml):
|
||||
face_recognition:
|
||||
enabled: true
|
||||
model_size: large
|
||||
device: "zmq://host.docker.internal:5556"
|
||||
|
||||
semantic_search:
|
||||
enabled: true
|
||||
model_size: small
|
||||
device: "zmq://host.docker.internal:5557"
|
||||
|
||||
Protocol (REQ/REP):
|
||||
Request: multipart [ header_json_bytes, tensor_bytes ]
|
||||
header = {
|
||||
"shape": [batch, channels, height, width], # e.g. [1, 3, 112, 112]
|
||||
"dtype": "float32",
|
||||
"model_type": "arcface",
|
||||
}
|
||||
tensor_bytes = raw C-order numpy bytes
|
||||
|
||||
Response: multipart [ header_json_bytes, embedding_bytes ]
|
||||
header = {
|
||||
"shape": [batch, embedding_dim], # e.g. [1, 512]
|
||||
"dtype": "float32",
|
||||
}
|
||||
embedding_bytes = raw C-order numpy bytes
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
|
||||
import numpy as np
|
||||
import zmq
|
||||
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format="%(asctime)s %(levelname)s %(name)s: %(message)s",
|
||||
)
|
||||
logger = logging.getLogger("zmq_embedding_server")
|
||||
|
||||
|
||||
# Models that require ORT_ENABLE_BASIC optimization to avoid graph fusion issues
|
||||
# (e.g. SimplifiedLayerNormFusion creates nodes that some providers can't handle).
|
||||
_COMPLEX_MODELS = {"jina_v1", "jina_v2"}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# ONNX Runtime session (CoreML preferred on Apple Silicon)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def build_ort_session(model_path: str, model_type: str = ""):
    """Create an ONNX Runtime InferenceSession, preferring CoreML on macOS.

    Jina V1/V2 models use ORT_ENABLE_BASIC graph optimization to avoid
    fusion passes (e.g. SimplifiedLayerNormFusion) that produce unsupported
    nodes. All other models use the default ORT_ENABLE_ALL.
    """
    # Imported lazily so the module can be introspected without onnxruntime.
    import onnxruntime as ort

    detected = ort.get_available_providers()
    logger.info(f"Available ORT providers: {detected}")

    # Prefer CoreMLExecutionProvider on Apple Silicon for ANE/GPU acceleration.
    # CPUExecutionProvider is always appended last as the automatic fallback.
    providers = []
    if "CoreMLExecutionProvider" in detected:
        providers.append("CoreMLExecutionProvider")
        logger.info("Using CoreMLExecutionProvider (Apple Neural Engine / GPU)")
    else:
        logger.warning(
            "CoreMLExecutionProvider not available — falling back to CPU. "
            "Install onnxruntime-silicon or a CoreML-enabled onnxruntime build."
        )
    providers.append("CPUExecutionProvider")

    # Only the "complex" models need the reduced optimization level.
    opts = None
    if model_type in _COMPLEX_MODELS:
        opts = ort.SessionOptions()
        opts.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_BASIC
        logger.info(f"Using ORT_ENABLE_BASIC optimization for {model_type}")

    session = ort.InferenceSession(
        model_path, sess_options=opts, providers=providers
    )

    input_names = [inp.name for inp in session.get_inputs()]
    output_names = [out.name for out in session.get_outputs()]
    logger.info(f"Model loaded: inputs={input_names}, outputs={output_names}")
    return session
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Inference helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def run_arcface(session, tensor: np.ndarray) -> np.ndarray:
    """Run ArcFace — input (1, 3, 112, 112) float32, output (1, 512) float32."""
    # ArcFace exports name their single input "data", so bind it explicitly
    # instead of discovering it like the generic runner does.
    embedding = session.run(None, {"data": tensor})[0]
    return embedding  # shape (1, 512)
|
||||
|
||||
|
||||
def run_generic(session, tensor: np.ndarray) -> np.ndarray:
    """Generic single-input ONNX model runner."""
    # Discover the model's sole input name at call time rather than
    # hard-coding it, so any single-input export works.
    feed = {session.get_inputs()[0].name: tensor}
    return session.run(None, feed)[0]
|
||||
|
||||
|
||||
# Model-type key → inference runner. Only ArcFace needs a dedicated runner
# (fixed "data" input name); everything else uses the generic one.
_RUNNERS = dict(
    arcface=run_arcface,
    facenet=run_generic,
    jina_v1=run_generic,
    jina_v2=run_generic,
)

# Model type → input shape for warmup inference (triggers CoreML JIT compilation
# before the first real request arrives, avoiding a ZMQ timeout on cold start).
_WARMUP_SHAPES = dict(
    arcface=(1, 3, 112, 112),
    facenet=(1, 3, 160, 160),
    jina_v1=(1, 3, 224, 224),
    jina_v2=(1, 3, 224, 224),
)
|
||||
|
||||
|
||||
def warmup(session, model_type: str) -> None:
    """Run a dummy inference to trigger CoreML JIT compilation."""
    shape = _WARMUP_SHAPES.get(model_type)
    if shape is None:
        # Unknown model type — no sensible input shape to warm up with.
        return
    logger.info(f"Warming up CoreML model ({model_type})…")
    try:
        runner = _RUNNERS.get(model_type, run_generic)
        runner(session, np.zeros(shape, dtype=np.float32))
        logger.info("Warmup complete")
    except Exception as exc:
        # A failed warmup only costs latency on the first real request.
        logger.warning(f"Warmup failed (non-fatal): {exc}")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# ZMQ server loop
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def serve(session, port: int, model_type: str) -> None:
    """Serve embedding requests over a ZMQ REP socket until terminated.

    Protocol (REQ/REP, multipart):
        request  = [header_json_bytes, tensor_bytes]
        response = [header_json_bytes, embedding_bytes]

    Malformed requests are answered with a single ``b"{}"`` frame. On
    inference errors a zero embedding is returned so the client can degrade
    gracefully.

    Args:
        session: an ONNX Runtime InferenceSession (see build_ort_session).
        port: TCP port to bind on all interfaces.
        model_type: key into _RUNNERS selecting the inference function.
    """
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind(f"tcp://0.0.0.0:{port}")
    logger.info(f"Listening on tcp://0.0.0.0:{port} (model_type={model_type})")

    runner = _RUNNERS.get(model_type, run_generic)

    # Output dimension per model type, used for the error-path zero embedding.
    # Previously hard-coded to 512, which returned a wrongly-shaped vector to
    # facenet (128-dim) and jina (768-dim) clients; dims per module docstring.
    error_dim = {
        "arcface": 512,
        "facenet": 128,
        "jina_v1": 768,
        "jina_v2": 768,
    }.get(model_type, 512)

    def _shutdown(sig, frame):
        # Close the socket without lingering so SIGINT/SIGTERM exit promptly.
        logger.info("Shutting down…")
        socket.close(linger=0)
        context.term()
        sys.exit(0)

    signal.signal(signal.SIGINT, _shutdown)
    signal.signal(signal.SIGTERM, _shutdown)

    while True:
        try:
            frames = socket.recv_multipart()
        except zmq.ZMQError as exc:
            logger.error(f"recv error: {exc}")
            continue

        # REP protocol requires exactly one reply per request, so every
        # branch below must send something before looping.
        if len(frames) < 2:
            logger.warning(f"Received unexpected frame count: {len(frames)}, ignoring")
            socket.send_multipart([b"{}"])
            continue

        try:
            header = json.loads(frames[0].decode("utf-8"))
            shape = tuple(header["shape"])
            dtype = np.dtype(header.get("dtype", "float32"))
            tensor = np.frombuffer(frames[1], dtype=dtype).reshape(shape)
        except Exception as exc:
            logger.error(f"Failed to decode request: {exc}")
            socket.send_multipart([b"{}"])
            continue

        try:
            t0 = time.monotonic()
            embedding = runner(session, tensor)
            elapsed_ms = (time.monotonic() - t0) * 1000
            if elapsed_ms > 2000:
                logger.warning(f"slow inference {elapsed_ms:.1f}ms shape={shape}")
            resp_header = json.dumps(
                {"shape": list(embedding.shape), "dtype": str(embedding.dtype.name)}
            ).encode("utf-8")
            # ascontiguousarray guarantees C-order bytes as the protocol requires.
            resp_payload = memoryview(np.ascontiguousarray(embedding).tobytes())
            socket.send_multipart([resp_header, resp_payload])
        except Exception as exc:
            logger.error(f"Inference error: {exc}")
            # Return a zero embedding so the client can degrade gracefully
            zero = np.zeros((1, error_dim), dtype=np.float32)
            resp_header = json.dumps(
                {"shape": list(zero.shape), "dtype": "float32"}
            ).encode("utf-8")
            socket.send_multipart([resp_header, memoryview(zero.tobytes())])
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Entry point
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def main():
    """CLI entry point: parse args, load the model, warm it up, serve forever."""
    parser = argparse.ArgumentParser(description="ZMQ Embedding Server for Frigate")
    parser.add_argument(
        "--model",
        required=True,
        help="Path to the ONNX model file (e.g. /config/model_cache/facedet/arcface.onnx)",
    )
    parser.add_argument(
        "--model-type",
        default="arcface",
        choices=list(_RUNNERS.keys()),
        help="Model type key (default: arcface)",
    )
    parser.add_argument(
        "--port", type=int, default=5556, help="TCP port to listen on (default: 5556)"
    )
    args = parser.parse_args()

    # Fail fast with a clear message rather than an ORT load error later.
    if not os.path.exists(args.model):
        logger.error(f"Model file not found: {args.model}")
        sys.exit(1)

    logger.info(f"Loading model: {args.model}")
    session = build_ort_session(args.model, model_type=args.model_type)
    warmup(session, model_type=args.model_type)
    serve(session, port=args.port, model_type=args.model_type)
|
||||
|
||||
|
||||
# Script entry point — run the server only when executed directly.
if __name__ == "__main__":
    main()
|
||||
@ -25,14 +25,7 @@ const audio: SectionConfigOverrides = {
|
||||
},
|
||||
},
|
||||
global: {
|
||||
restartRequired: [
|
||||
"enabled",
|
||||
"listen",
|
||||
"filters",
|
||||
"min_volume",
|
||||
"max_not_heard",
|
||||
"num_threads",
|
||||
],
|
||||
restartRequired: ["num_threads"],
|
||||
},
|
||||
camera: {
|
||||
restartRequired: ["num_threads"],
|
||||
|
||||
@ -28,10 +28,7 @@ const birdseye: SectionConfigOverrides = {
|
||||
"width",
|
||||
"height",
|
||||
"quality",
|
||||
"mode",
|
||||
"layout.scaling_factor",
|
||||
"inactivity_threshold",
|
||||
"layout.max_cameras",
|
||||
"idle_heartbeat_fps",
|
||||
],
|
||||
uiSchema: {
|
||||
|
||||
@ -3,7 +3,7 @@ import type { SectionConfigOverrides } from "./types";
|
||||
const classification: SectionConfigOverrides = {
|
||||
base: {
|
||||
sectionDocs: "/configuration/custom_classification/object_classification",
|
||||
restartRequired: ["bird.enabled", "bird.threshold"],
|
||||
restartRequired: ["bird.enabled"],
|
||||
hiddenFields: ["custom"],
|
||||
advancedFields: [],
|
||||
},
|
||||
|
||||
@ -30,16 +30,7 @@ const detect: SectionConfigOverrides = {
|
||||
],
|
||||
},
|
||||
global: {
|
||||
restartRequired: [
|
||||
"enabled",
|
||||
"width",
|
||||
"height",
|
||||
"fps",
|
||||
"min_initialized",
|
||||
"max_disappeared",
|
||||
"annotation_offset",
|
||||
"stationary",
|
||||
],
|
||||
restartRequired: ["width", "height", "min_initialized", "max_disappeared"],
|
||||
},
|
||||
camera: {
|
||||
restartRequired: ["width", "height", "min_initialized", "max_disappeared"],
|
||||
|
||||
@ -32,18 +32,7 @@ const faceRecognition: SectionConfigOverrides = {
|
||||
"blur_confidence_filter",
|
||||
"device",
|
||||
],
|
||||
restartRequired: [
|
||||
"enabled",
|
||||
"model_size",
|
||||
"unknown_score",
|
||||
"detection_threshold",
|
||||
"recognition_threshold",
|
||||
"min_area",
|
||||
"min_faces",
|
||||
"save_attempts",
|
||||
"blur_confidence_filter",
|
||||
"device",
|
||||
],
|
||||
restartRequired: ["enabled", "model_size", "device"],
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
@ -116,16 +116,7 @@ const ffmpeg: SectionConfigOverrides = {
|
||||
},
|
||||
},
|
||||
global: {
|
||||
restartRequired: [
|
||||
"path",
|
||||
"global_args",
|
||||
"hwaccel_args",
|
||||
"input_args",
|
||||
"output_args",
|
||||
"retry_interval",
|
||||
"apple_compatibility",
|
||||
"gpu",
|
||||
],
|
||||
restartRequired: [],
|
||||
fieldOrder: [
|
||||
"hwaccel_args",
|
||||
"path",
|
||||
@ -162,17 +153,7 @@ const ffmpeg: SectionConfigOverrides = {
|
||||
fieldGroups: {
|
||||
cameraFfmpeg: ["input_args", "hwaccel_args", "output_args"],
|
||||
},
|
||||
restartRequired: [
|
||||
"inputs",
|
||||
"path",
|
||||
"global_args",
|
||||
"hwaccel_args",
|
||||
"input_args",
|
||||
"output_args",
|
||||
"retry_interval",
|
||||
"apple_compatibility",
|
||||
"gpu",
|
||||
],
|
||||
restartRequired: [],
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
@ -40,21 +40,7 @@ const lpr: SectionConfigOverrides = {
|
||||
"device",
|
||||
"replace_rules",
|
||||
],
|
||||
restartRequired: [
|
||||
"enabled",
|
||||
"model_size",
|
||||
"detection_threshold",
|
||||
"min_area",
|
||||
"recognition_threshold",
|
||||
"min_plate_length",
|
||||
"format",
|
||||
"match_distance",
|
||||
"known_plates",
|
||||
"enhancement",
|
||||
"debug_save_plates",
|
||||
"device",
|
||||
"replace_rules",
|
||||
],
|
||||
restartRequired: ["model_size", "enhancement", "device"],
|
||||
uiSchema: {
|
||||
format: {
|
||||
"ui:options": { size: "md" },
|
||||
|
||||
@ -31,18 +31,7 @@ const motion: SectionConfigOverrides = {
|
||||
],
|
||||
},
|
||||
global: {
|
||||
restartRequired: [
|
||||
"enabled",
|
||||
"threshold",
|
||||
"lightning_threshold",
|
||||
"skip_motion_threshold",
|
||||
"improve_contrast",
|
||||
"contour_area",
|
||||
"delta_alpha",
|
||||
"frame_alpha",
|
||||
"frame_height",
|
||||
"mqtt_off_delay",
|
||||
],
|
||||
restartRequired: ["frame_height"],
|
||||
},
|
||||
camera: {
|
||||
restartRequired: ["frame_height"],
|
||||
|
||||
@ -83,7 +83,7 @@ const objects: SectionConfigOverrides = {
|
||||
},
|
||||
},
|
||||
global: {
|
||||
restartRequired: ["track", "alert", "detect", "filters", "genai"],
|
||||
restartRequired: [],
|
||||
hiddenFields: [
|
||||
"enabled_in_config",
|
||||
"mask",
|
||||
|
||||
@ -29,16 +29,7 @@ const record: SectionConfigOverrides = {
|
||||
},
|
||||
},
|
||||
global: {
|
||||
restartRequired: [
|
||||
"enabled",
|
||||
"expire_interval",
|
||||
"continuous",
|
||||
"motion",
|
||||
"alerts",
|
||||
"detections",
|
||||
"preview",
|
||||
"export",
|
||||
],
|
||||
restartRequired: [],
|
||||
},
|
||||
camera: {
|
||||
restartRequired: [],
|
||||
|
||||
@ -44,7 +44,7 @@ const review: SectionConfigOverrides = {
|
||||
},
|
||||
},
|
||||
global: {
|
||||
restartRequired: ["alerts", "detections", "genai"],
|
||||
restartRequired: [],
|
||||
},
|
||||
camera: {
|
||||
restartRequired: [],
|
||||
|
||||
@ -27,14 +27,7 @@ const snapshots: SectionConfigOverrides = {
|
||||
},
|
||||
},
|
||||
global: {
|
||||
restartRequired: [
|
||||
"enabled",
|
||||
"bounding_box",
|
||||
"crop",
|
||||
"quality",
|
||||
"timestamp",
|
||||
"retain",
|
||||
],
|
||||
restartRequired: [],
|
||||
hiddenFields: ["enabled_in_config", "required_zones"],
|
||||
},
|
||||
camera: {
|
||||
|
||||
@ -3,14 +3,7 @@ import type { SectionConfigOverrides } from "./types";
|
||||
const telemetry: SectionConfigOverrides = {
|
||||
base: {
|
||||
sectionDocs: "/configuration/reference",
|
||||
restartRequired: [
|
||||
"network_interfaces",
|
||||
"stats.amd_gpu_stats",
|
||||
"stats.intel_gpu_stats",
|
||||
"stats.intel_gpu_device",
|
||||
"stats.network_bandwidth",
|
||||
"version_check",
|
||||
],
|
||||
restartRequired: ["version_check"],
|
||||
fieldOrder: ["network_interfaces", "stats", "version_check"],
|
||||
advancedFields: [],
|
||||
},
|
||||
|
||||
@ -56,6 +56,7 @@ import ActivityIndicator from "@/components/indicators/activity-indicator";
|
||||
import { StatusBarMessagesContext } from "@/context/statusbar-provider";
|
||||
import {
|
||||
cameraUpdateTopicMap,
|
||||
globalCameraDefaultSections,
|
||||
buildOverrides,
|
||||
buildConfigDataForPath,
|
||||
sanitizeSectionData as sharedSanitizeSectionData,
|
||||
@ -234,7 +235,10 @@ export function ConfigSection({
|
||||
? cameraUpdateTopicMap[sectionPath]
|
||||
? `config/cameras/${cameraName}/${cameraUpdateTopicMap[sectionPath]}`
|
||||
: undefined
|
||||
: `config/${sectionPath}`;
|
||||
: globalCameraDefaultSections.has(sectionPath) &&
|
||||
cameraUpdateTopicMap[sectionPath]
|
||||
? `config/cameras/*/${cameraUpdateTopicMap[sectionPath]}`
|
||||
: `config/${sectionPath}`;
|
||||
// Default: show title for camera level (since it might be collapsible), hide for global
|
||||
const shouldShowTitle = showTitle ?? effectiveLevel === "camera";
|
||||
|
||||
@ -827,7 +831,7 @@ export function ConfigSection({
|
||||
|
||||
<div
|
||||
className={cn(
|
||||
"w-full border-t border-secondary bg-background pb-5 pt-0",
|
||||
"w-full border-t border-secondary bg-background pt-0",
|
||||
!noStickyButtons && "sticky bottom-0 z-50",
|
||||
)}
|
||||
>
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
import { useCallback, useContext, useEffect, useMemo } from "react";
|
||||
import { useCallback, useContext, useEffect, useMemo, useRef } from "react";
|
||||
import { useLocation, useNavigate, useSearchParams } from "react-router-dom";
|
||||
import { usePersistence } from "./use-persistence";
|
||||
import { useUserPersistence } from "./use-user-persistence";
|
||||
@ -12,20 +12,28 @@ export function useOverlayState<S>(
|
||||
const location = useLocation();
|
||||
const navigate = useNavigate();
|
||||
|
||||
const currentLocationState = useMemo(() => location.state, [location]);
|
||||
const locationRef = useRef(location);
|
||||
locationRef.current = location;
|
||||
|
||||
const setOverlayStateValue = useCallback(
|
||||
(value: S, replace: boolean = false) => {
|
||||
const newLocationState = { ...currentLocationState };
|
||||
const loc = locationRef.current;
|
||||
const currentValue = loc.state?.[key] as S | undefined;
|
||||
|
||||
if (Object.is(currentValue, value)) {
|
||||
return;
|
||||
}
|
||||
|
||||
const newLocationState = { ...loc.state };
|
||||
newLocationState[key] = value;
|
||||
navigate(location.pathname + (preserveSearch ? location.search : ""), {
|
||||
navigate(loc.pathname + (preserveSearch ? loc.search : ""), {
|
||||
state: newLocationState,
|
||||
replace,
|
||||
});
|
||||
},
|
||||
// we know that these deps are correct
|
||||
// locationRef is stable so we don't need it in deps
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
[key, currentLocationState, navigate],
|
||||
[key, navigate, preserveSearch],
|
||||
);
|
||||
|
||||
const overlayStateValue = useMemo<S | undefined>(
|
||||
@ -47,7 +55,9 @@ export function usePersistedOverlayState<S extends string>(
|
||||
] {
|
||||
const location = useLocation();
|
||||
const navigate = useNavigate();
|
||||
const currentLocationState = useMemo(() => location.state, [location]);
|
||||
|
||||
const locationRef = useRef(location);
|
||||
locationRef.current = location;
|
||||
|
||||
// currently selected value
|
||||
|
||||
@ -63,14 +73,21 @@ export function usePersistedOverlayState<S extends string>(
|
||||
|
||||
const setOverlayStateValue = useCallback(
|
||||
(value: S | undefined, replace: boolean = false) => {
|
||||
const loc = locationRef.current;
|
||||
const currentValue = loc.state?.[key] as S | undefined;
|
||||
|
||||
if (Object.is(currentValue, value)) {
|
||||
return;
|
||||
}
|
||||
|
||||
setPersistedValue(value);
|
||||
const newLocationState = { ...currentLocationState };
|
||||
const newLocationState = { ...loc.state };
|
||||
newLocationState[key] = value;
|
||||
navigate(location.pathname, { state: newLocationState, replace });
|
||||
navigate(loc.pathname, { state: newLocationState, replace });
|
||||
},
|
||||
// we know that these deps are correct
|
||||
// locationRef is stable so we don't need it in deps
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
[key, currentLocationState, navigate],
|
||||
[key, navigate, setPersistedValue],
|
||||
);
|
||||
|
||||
return [
|
||||
@ -98,7 +115,9 @@ export function useUserPersistedOverlayState<S extends string>(
|
||||
const { auth } = useContext(AuthContext);
|
||||
const location = useLocation();
|
||||
const navigate = useNavigate();
|
||||
const currentLocationState = useMemo(() => location.state, [location]);
|
||||
|
||||
const locationRef = useRef(location);
|
||||
locationRef.current = location;
|
||||
|
||||
// currently selected value from URL state
|
||||
const overlayStateValue = useMemo<S | undefined>(
|
||||
@ -112,14 +131,21 @@ export function useUserPersistedOverlayState<S extends string>(
|
||||
|
||||
const setOverlayStateValue = useCallback(
|
||||
(value: S | undefined, replace: boolean = false) => {
|
||||
const loc = locationRef.current;
|
||||
const currentValue = loc.state?.[key] as S | undefined;
|
||||
|
||||
if (Object.is(currentValue, value)) {
|
||||
return;
|
||||
}
|
||||
|
||||
setPersistedValue(value);
|
||||
const newLocationState = { ...currentLocationState };
|
||||
const newLocationState = { ...loc.state };
|
||||
newLocationState[key] = value;
|
||||
navigate(location.pathname, { state: newLocationState, replace });
|
||||
navigate(loc.pathname, { state: newLocationState, replace });
|
||||
},
|
||||
// we know that these deps are correct
|
||||
// locationRef is stable so we don't need it in deps
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
[key, currentLocationState, navigate, setPersistedValue],
|
||||
[key, navigate, setPersistedValue],
|
||||
);
|
||||
|
||||
// Don't return a value until auth has finished loading
|
||||
@ -142,17 +168,21 @@ export function useHashState<S extends string>(): [
|
||||
const location = useLocation();
|
||||
const navigate = useNavigate();
|
||||
|
||||
const locationRef = useRef(location);
|
||||
locationRef.current = location;
|
||||
|
||||
const setHash = useCallback(
|
||||
(value: S | undefined) => {
|
||||
const loc = locationRef.current;
|
||||
if (!value) {
|
||||
navigate(location.pathname);
|
||||
navigate(loc.pathname);
|
||||
} else {
|
||||
navigate(`${location.pathname}#${value}`, { state: location.state });
|
||||
navigate(`${loc.pathname}#${value}`, { state: loc.state });
|
||||
}
|
||||
},
|
||||
// we know that these deps are correct
|
||||
// locationRef is stable so we don't need it in deps
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
[location, navigate],
|
||||
[navigate],
|
||||
);
|
||||
|
||||
const hash = useMemo(
|
||||
|
||||
@ -479,14 +479,7 @@ const CAMERA_SELECT_BUTTON_PAGES = [
|
||||
"regionGrid",
|
||||
];
|
||||
|
||||
const ALLOWED_VIEWS_FOR_VIEWER = ["ui", "debug", "notifications"];
|
||||
|
||||
const LARGE_BOTTOM_MARGIN_PAGES = [
|
||||
"masksAndZones",
|
||||
"motionTuner",
|
||||
"mediaSync",
|
||||
"regionGrid",
|
||||
];
|
||||
const ALLOWED_VIEWS_FOR_VIEWER = ["profileSettings", "notifications"];
|
||||
|
||||
// keys for camera sections
|
||||
const CAMERA_SECTION_MAPPING: Record<string, SettingsType> = {
|
||||
@ -1362,9 +1355,9 @@ export default function Settings() {
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
<SidebarProvider>
|
||||
<Sidebar variant="inset" className="relative mb-8 pl-0 pt-0">
|
||||
<SidebarContent className="scrollbar-container mb-24 overflow-y-auto border-r-[1px] border-secondary bg-background py-2">
|
||||
<SidebarProvider className="relative h-full min-h-0 flex-1">
|
||||
<Sidebar variant="inset" className="absolute h-full pl-0 pt-0">
|
||||
<SidebarContent className="scrollbar-container overflow-y-auto border-r-[1px] border-secondary bg-background py-2">
|
||||
<SidebarMenu>
|
||||
{settingsGroups.map((group) => {
|
||||
const filteredItems = group.items.filter((item) =>
|
||||
@ -1452,8 +1445,7 @@ export default function Settings() {
|
||||
<SidebarInset>
|
||||
<div
|
||||
className={cn(
|
||||
"scrollbar-container mb-16 flex-1 overflow-y-auto p-2 pr-0",
|
||||
LARGE_BOTTOM_MARGIN_PAGES.includes(pageToggle) && "mb-24",
|
||||
"scrollbar-container flex-1 overflow-y-auto pl-2 pr-0 pt-2",
|
||||
)}
|
||||
>
|
||||
{(() => {
|
||||
|
||||
@ -54,6 +54,20 @@ export const cameraUpdateTopicMap: Record<string, string> = {
|
||||
ui: "ui",
|
||||
};
|
||||
|
||||
// Sections where global config serves as the default for per-camera config.
|
||||
// Global updates to these sections are fanned out to all cameras via wildcard.
|
||||
export const globalCameraDefaultSections = new Set([
|
||||
"detect",
|
||||
"objects",
|
||||
"motion",
|
||||
"record",
|
||||
"snapshots",
|
||||
"review",
|
||||
"audio",
|
||||
"notifications",
|
||||
"ffmpeg",
|
||||
]);
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// buildOverrides — pure recursive diff of current vs stored config & defaults
|
||||
// ---------------------------------------------------------------------------
|
||||
@ -476,6 +490,9 @@ export function prepareSectionSavePayload(opts: {
|
||||
if (level === "camera" && cameraName) {
|
||||
const topic = cameraUpdateTopicMap[sectionPath];
|
||||
updateTopic = topic ? `config/cameras/${cameraName}/${topic}` : undefined;
|
||||
} else if (globalCameraDefaultSections.has(sectionPath)) {
|
||||
const topic = cameraUpdateTopicMap[sectionPath];
|
||||
updateTopic = topic ? `config/cameras/*/${topic}` : `config/${sectionPath}`;
|
||||
} else {
|
||||
updateTopic = `config/${sectionPath}`;
|
||||
}
|
||||
|
||||
@ -632,9 +632,10 @@ export default function DraggableGridLayout({
|
||||
toggleStats={() => toggleStats(camera.name)}
|
||||
volumeState={volumeStates[camera.name]}
|
||||
setVolumeState={(value) =>
|
||||
setVolumeStates({
|
||||
setVolumeStates((prev) => ({
|
||||
...prev,
|
||||
[camera.name]: value,
|
||||
})
|
||||
}))
|
||||
}
|
||||
muteAll={muteAll}
|
||||
unmuteAll={unmuteAll}
|
||||
|
||||
@ -131,12 +131,10 @@ export default function MotionSearchView({
|
||||
);
|
||||
|
||||
// Camera previews – defer until dialog is closed
|
||||
const allPreviews = useCameraPreviews(
|
||||
isSearchDialogOpen ? { after: 0, before: 0 } : timeRange,
|
||||
{
|
||||
camera: selectedCamera ?? undefined,
|
||||
},
|
||||
);
|
||||
const allPreviews = useCameraPreviews(timeRange, {
|
||||
camera: selectedCamera ?? undefined,
|
||||
fetchPreviews: !isSearchDialogOpen,
|
||||
});
|
||||
|
||||
// ROI state
|
||||
const [polygonPoints, setPolygonPoints] = useState<number[][]>([]);
|
||||
|
||||
@ -210,7 +210,7 @@ export default function UiSettingsView() {
|
||||
];
|
||||
|
||||
return (
|
||||
<div className="flex size-full flex-col md:pb-8">
|
||||
<div className="flex size-full flex-col">
|
||||
<Toaster position="top-center" closeButton={true} />
|
||||
<Heading as="h4" className="mb-3">
|
||||
{t("general.title")}
|
||||
|
||||
Loading…
Reference in New Issue
Block a user