mirror of
https://github.com/blakeblackshear/frigate.git
synced 2026-03-21 07:38:22 +03:00
Compare commits
6 Commits
d4208eba15
...
1e449de06b
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1e449de06b | ||
|
|
192aba901a | ||
|
|
947ddfa542 | ||
|
|
9eb037c369 | ||
|
|
a0b8271532 | ||
|
|
a2c43ad8bb |
@ -266,6 +266,12 @@ RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
|
||||
RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
|
||||
pip3 install -U /deps/wheels/*.whl
|
||||
|
||||
# Install Axera Engine
|
||||
RUN pip3 install https://github.com/AXERA-TECH/pyaxengine/releases/download/0.1.3-frigate/axengine-0.1.3-py3-none-any.whl
|
||||
|
||||
ENV PATH="${PATH}:/usr/bin/axcl"
|
||||
ENV LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/usr/lib/axcl"
|
||||
|
||||
# Install MemryX runtime (requires libgomp (OpenMP) in the final docker image)
|
||||
RUN --mount=type=bind,source=docker/main/install_memryx.sh,target=/deps/install_memryx.sh \
|
||||
bash -c "bash /deps/install_memryx.sh"
|
||||
|
||||
@ -73,6 +73,7 @@ cd /tmp/nginx
|
||||
--with-file-aio \
|
||||
--with-http_sub_module \
|
||||
--with-http_ssl_module \
|
||||
--with-http_v2_module \
|
||||
--with-http_auth_request_module \
|
||||
--with-http_realip_module \
|
||||
--with-threads \
|
||||
|
||||
@ -63,6 +63,9 @@ http {
|
||||
server {
|
||||
include listen.conf;
|
||||
|
||||
# enable HTTP/2 for TLS connections to eliminate browser 6-connection limit
|
||||
http2 on;
|
||||
|
||||
# vod settings
|
||||
vod_base_url '';
|
||||
vod_segments_base_url '';
|
||||
|
||||
@ -49,6 +49,11 @@ Frigate supports multiple different detectors that work on different types of ha
|
||||
|
||||
- [Synaptics](#synaptics): synap models can run on Synaptics devices (e.g. astra machina) with included NPUs.
|
||||
|
||||
**AXERA** <CommunityBadge />
|
||||
|
||||
- [AXEngine](#axera): axmodels can run on AXERA AI acceleration.
|
||||
|
||||
|
||||
**For Testing**
|
||||
|
||||
- [CPU Detector (not recommended for actual use)](#cpu-detector-not-recommended): Use a CPU to run tflite model, this is not recommended and in most cases OpenVINO can be used in CPU mode with better results.
|
||||
@ -1478,6 +1483,41 @@ model:
|
||||
input_pixel_format: rgb/bgr # look at the model.json to figure out which to put here
|
||||
```
|
||||
|
||||
## AXERA
|
||||
|
||||
Hardware accelerated object detection is supported on the following SoCs:
|
||||
|
||||
- AX650N
|
||||
- AX8850N
|
||||
|
||||
This implementation uses the [AXera Pulsar2 Toolchain](https://huggingface.co/AXERA-TECH/Pulsar2).
|
||||
|
||||
See the [installation docs](../frigate/installation.md#axera) for information on configuring the AXEngine hardware.
|
||||
|
||||
### Configuration
|
||||
|
||||
When configuring the AXEngine detector, you have to specify the model name.
|
||||
|
||||
#### yolov9
|
||||
|
||||
A yolov9 model is provided in the container at `/axmodels` and is used by this detector type by default.
|
||||
|
||||
Use the model configuration shown below when using the axengine detector with the default axmodel:
|
||||
|
||||
```yaml
|
||||
detectors:
|
||||
axengine:
|
||||
type: axengine
|
||||
|
||||
model:
|
||||
path: frigate-yolov9-tiny
|
||||
model_type: yolo-generic
|
||||
width: 320
|
||||
height: 320
|
||||
tensor_format: bgr
|
||||
labelmap_path: /labelmap/coco-80.txt
|
||||
```
|
||||
|
||||
# Models
|
||||
|
||||
Some model types are not included in Frigate by default.
|
||||
@ -1571,12 +1611,12 @@ YOLOv9 model can be exported as ONNX using the command below. You can copy and p
|
||||
```sh
|
||||
docker build . --build-arg MODEL_SIZE=t --build-arg IMG_SIZE=320 --output . -f- <<'EOF'
|
||||
FROM python:3.11 AS build
|
||||
RUN apt-get update && apt-get install --no-install-recommends -y libgl1 && rm -rf /var/lib/apt/lists/*
|
||||
COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/
|
||||
RUN apt-get update && apt-get install --no-install-recommends -y cmake libgl1 && rm -rf /var/lib/apt/lists/*
|
||||
COPY --from=ghcr.io/astral-sh/uv:0.10.4 /uv /bin/
|
||||
WORKDIR /yolov9
|
||||
ADD https://github.com/WongKinYiu/yolov9.git .
|
||||
RUN uv pip install --system -r requirements.txt
|
||||
RUN uv pip install --system onnx==1.18.0 onnxruntime onnx-simplifier>=0.4.1 onnxscript
|
||||
RUN uv pip install --system onnx==1.18.0 onnxruntime onnx-simplifier==0.4.* onnxscript
|
||||
ARG MODEL_SIZE
|
||||
ARG IMG_SIZE
|
||||
ADD https://github.com/WongKinYiu/yolov9/releases/download/v0.1/yolov9-${MODEL_SIZE}-converted.pt yolov9-${MODEL_SIZE}.pt
|
||||
|
||||
@ -103,6 +103,10 @@ Frigate supports multiple different detectors that work on different types of ha
|
||||
|
||||
- [Synaptics](#synaptics): synap models can run on Synaptics devices (e.g. astra machina) with included NPUs to provide efficient object detection.
|
||||
|
||||
**AXERA** <CommunityBadge />
|
||||
|
||||
- [AXEngine](#axera): axera models can run on AXERA NPUs via AXEngine, delivering highly efficient object detection.
|
||||
|
||||
:::
|
||||
|
||||
### Hailo-8
|
||||
@ -288,6 +292,14 @@ The inference time of a rk3588 with all 3 cores enabled is typically 25-30 ms fo
|
||||
| ssd mobilenet | ~ 25 ms |
|
||||
| yolov5m | ~ 118 ms |
|
||||
|
||||
### AXERA
|
||||
|
||||
- **AXEngine** Default model is **yolov9**
|
||||
|
||||
| Name | AXERA AX650N/AX8850N Inference Time |
|
||||
| ---------------- | ----------------------------------- |
|
||||
| yolov9-tiny | ~ 4 ms |
|
||||
|
||||
## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version)
|
||||
|
||||
This is taken from a [user question on reddit](https://www.reddit.com/r/homeassistant/comments/q8mgau/comment/hgqbxh5/?utm_source=share&utm_medium=web2x&context=3). Modified slightly for clarity.
|
||||
@ -308,4 +320,4 @@ Basically - When you increase the resolution and/or the frame rate of the stream
|
||||
|
||||
YES! The Coral does not help with decoding video streams.
|
||||
|
||||
Decompressing video streams takes a significant amount of CPU power. Video compression uses key frames (also known as I-frames) to send a full frame in the video stream. The following frames only include the difference from the key frame, and the CPU has to compile each frame by merging the differences with the key frame. [More detailed explanation](https://support.video.ibm.com/hc/en-us/articles/18106203580316-Keyframes-InterFrame-Video-Compression). Higher resolutions and frame rates mean more processing power is needed to decode the video stream, so try and set them on the camera to avoid unnecessary decoding work.
|
||||
Decompressing video streams takes a significant amount of CPU power. Video compression uses key frames (also known as I-frames) to send a full frame in the video stream. The following frames only include the difference from the key frame, and the CPU has to compile each frame by merging the differences with the key frame. [More detailed explanation](https://support.video.ibm.com/hc/en-us/articles/18106203580316-Keyframes-InterFrame-Video-Compression). Higher resolutions and frame rates mean more processing power is needed to decode the video stream, so try and set them on the camera to avoid unnecessary decoding work.
|
||||
@ -439,6 +439,39 @@ or add these options to your `docker run` command:
|
||||
|
||||
Next, you should configure [hardware object detection](/configuration/object_detectors#synaptics) and [hardware video processing](/configuration/hardware_acceleration_video#synaptics).
|
||||
|
||||
### AXERA
|
||||
|
||||
AXERA accelerators are available in an M.2 form factor, compatible with both Raspberry Pi and Orange Pi. This form factor has also been successfully tested on x86 platforms, making it a versatile choice for various computing environments.
|
||||
|
||||
#### Installation
|
||||
|
||||
Using AXERA accelerators requires the installation of the AXCL driver. We provide a convenient Linux script to complete this installation.
|
||||
|
||||
Follow these steps for installation:
|
||||
|
||||
1. Copy or download [this script](https://github.com/ivanshi1108/assets/releases/download/v0.16.2/user_installation.sh).
|
||||
2. Ensure it has execution permissions with `sudo chmod +x user_installation.sh`
|
||||
3. Run the script with `./user_installation.sh`
|
||||
|
||||
#### Setup
|
||||
|
||||
To set up Frigate, follow the default installation instructions, for example: `ghcr.io/blakeblackshear/frigate:stable`
|
||||
|
||||
Next, grant Docker permissions to access your hardware by adding the following lines to your `docker-compose.yml` file:
|
||||
|
||||
```yaml
|
||||
devices:
|
||||
- /dev/axcl_host
|
||||
- /dev/ax_mmb_dev
|
||||
- /dev/msg_userdev
|
||||
```
|
||||
|
||||
If you are using `docker run`, add this option to your command `--device /dev/axcl_host --device /dev/ax_mmb_dev --device /dev/msg_userdev`
|
||||
|
||||
#### Configuration
|
||||
|
||||
Finally, configure [hardware object detection](/configuration/object_detectors#axera) to complete the setup.
|
||||
|
||||
## Docker
|
||||
|
||||
Running through Docker with Docker Compose is the recommended install method.
|
||||
|
||||
@ -15,7 +15,6 @@ class ReviewMetadata(BaseModel):
|
||||
)
|
||||
confidence: float = Field(
|
||||
ge=0.0,
|
||||
le=1.0,
|
||||
description="Confidence in the analysis, from 0 to 1.",
|
||||
)
|
||||
potential_threat_level: int = Field(
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
"""Base runner implementation for ONNX models."""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import platform
|
||||
@ -10,6 +11,11 @@ from typing import Any
|
||||
import numpy as np
|
||||
import onnxruntime as ort
|
||||
|
||||
try:
|
||||
import zmq as _zmq
|
||||
except ImportError:
|
||||
_zmq = None
|
||||
|
||||
from frigate.util.model import get_ort_providers
|
||||
from frigate.util.rknn_converter import auto_convert_model, is_rknn_compatible
|
||||
|
||||
@ -546,12 +552,213 @@ class RKNNModelRunner(BaseModelRunner):
|
||||
pass
|
||||
|
||||
|
||||
class ZmqEmbeddingRunner(BaseModelRunner):
    """Offload embedding inference to an external service over ZMQ.

    Preprocessed tensors (e.g. ArcFace face crops, Jina image inputs) are
    shipped to a native host process — typically one with hardware access the
    container lacks, such as CoreML/ANE on Apple Silicon — and the resulting
    embedding vectors are returned.

    Wire protocol (REQ/REP):
      - Request: multipart [ header_json_bytes, tensor_bytes ] where the header
        carries "shape" (e.g. [1, 3, 112, 112]), "dtype" (a numpy dtype name
        such as "float32") and "model_type" (e.g. "arcface"); tensor_bytes is
        the raw C-order buffer of the input tensor.
      - Reply: either multipart [ header_json_bytes, embedding_bytes ] (header
        gives the embedding's shape/dtype) or a single frame of raw float32
        bytes interpreted as a batch-first embedding vector.

    On timeout or error a zero embedding is returned so callers can degrade
    gracefully (the face simply goes unrecognized for that frame).

    Example configuration (face_recognition.device):
        face_recognition:
          enabled: true
          model_size: large
          device: "zmq://host.docker.internal:5556"
    """

    # Model type -> primary input name (answers get_input_names())
    _INPUT_NAMES: dict[str, list[str]] = {}

    # Model type -> spatial input width of the model
    _INPUT_WIDTHS: dict[str, int] = {}

    # Model type -> embedding dimensionality (shape of the zero fallback)
    _OUTPUT_DIMS: dict[str, int] = {}

    @classmethod
    def _init_model_maps(cls) -> None:
        """Fill the class-level model maps on first use (avoids circular imports)."""
        if cls._INPUT_NAMES:
            return
        from frigate.embeddings.types import EnrichmentModelTypeEnum

        arcface = EnrichmentModelTypeEnum.arcface.value
        facenet = EnrichmentModelTypeEnum.facenet.value
        jina_v1 = EnrichmentModelTypeEnum.jina_v1.value
        jina_v2 = EnrichmentModelTypeEnum.jina_v2.value

        cls._INPUT_NAMES = {
            arcface: ["data"],
            facenet: ["data"],
            jina_v1: ["pixel_values"],
            jina_v2: ["pixel_values"],
        }
        cls._INPUT_WIDTHS = {
            arcface: 112,
            facenet: 160,
            jina_v1: 224,
            jina_v2: 224,
        }
        cls._OUTPUT_DIMS = {
            arcface: 512,
            facenet: 128,
            jina_v1: 768,
            jina_v2: 768,
        }

    def __init__(
        self,
        endpoint: str,
        model_type: str,
        request_timeout_ms: int = 60000,
        linger_ms: int = 0,
    ):
        if _zmq is None:
            raise ImportError(
                "pyzmq is required for ZmqEmbeddingRunner. Install it with: pip install pyzmq"
            )
        self._init_model_maps()
        # Frigate config uses the "zmq://host:port" sentinel; libzmq wants "tcp://host:port".
        self._endpoint = endpoint.replace("zmq://", "tcp://", 1)
        self._model_type = model_type
        self._request_timeout_ms = request_timeout_ms
        self._linger_ms = linger_ms
        self._context = _zmq.Context()
        self._socket = None
        self._needs_reset = False
        self._lock = threading.Lock()
        self._create_socket()
        logger.info(
            f"ZmqEmbeddingRunner({model_type}): connected to {endpoint}"
        )

    def _create_socket(self) -> None:
        """(Re)connect the REQ socket, discarding any previous one."""
        if self._socket is not None:
            try:
                self._socket.close(linger=self._linger_ms)
            except Exception:
                pass
        self._socket = self._context.socket(_zmq.REQ)
        for option, value in (
            (_zmq.RCVTIMEO, self._request_timeout_ms),
            (_zmq.SNDTIMEO, self._request_timeout_ms),
            (_zmq.LINGER, self._linger_ms),
        ):
            self._socket.setsockopt(option, value)
        self._socket.connect(self._endpoint)

    def get_input_names(self) -> list[str]:
        """Primary input name(s) for this model type ("data" if unknown)."""
        return self._INPUT_NAMES.get(self._model_type, ["data"])

    def get_input_width(self) -> int:
        """Model input spatial width, or -1 when the model type is unknown."""
        return self._INPUT_WIDTHS.get(self._model_type, -1)

    def run(self, inputs: dict[str, Any]) -> list[np.ndarray]:
        """Ship the primary input tensor over ZMQ and return the embedding.

        Single-input models (ArcFace, FaceNet) map the whole inputs dict to
        one tensor. For multi-input models only the first tensor is sent;
        such models are not yet supported for ZMQ offload.
        """
        tensor = np.ascontiguousarray(next(iter(inputs.values())))
        batch = tensor.shape[0]

        def fallback() -> list[np.ndarray]:
            # Zero embedding so callers degrade gracefully on failure.
            return [np.zeros((batch, self._get_output_dim()), dtype=np.float32)]

        with self._lock:
            # Lazy reset: a previous call that errored flags the socket for
            # replacement, and we do it here — before any ZMQ operations — so
            # no socket manipulation happens inside an error handler while
            # Frigate's own ZMQ threads may be polling (which could trip a
            # libzmq assertion). The lock serializes access because ZMQ REQ
            # sockets are not thread-safe: the reindex thread and the normal
            # embedding maintainer thread must not interleave on this socket.
            if self._needs_reset:
                self._reset_socket()
                self._needs_reset = False

            try:
                request_header = json.dumps(
                    {
                        "shape": list(tensor.shape),
                        "dtype": str(tensor.dtype.name),
                        "model_type": self._model_type,
                    }
                ).encode("utf-8")
                body = memoryview(tensor.tobytes(order="C"))

                self._socket.send_multipart([request_header, body])
                return self._decode_response(self._socket.recv_multipart())

            except _zmq.Again:
                logger.warning(
                    f"ZmqEmbeddingRunner({self._model_type}): request timed out, will reset socket before next call"
                )
                self._needs_reset = True
                return fallback()
            except _zmq.ZMQError as exc:
                logger.error(f"ZmqEmbeddingRunner({self._model_type}) ZMQError: {exc}, will reset socket before next call")
                self._needs_reset = True
                return fallback()
            except Exception as exc:
                logger.error(f"ZmqEmbeddingRunner({self._model_type}) unexpected error: {exc}")
                return fallback()

    def _reset_socket(self) -> None:
        """Best-effort socket replacement; failures are deliberately swallowed."""
        try:
            self._create_socket()
        except Exception:
            pass

    def _decode_response(self, frames: list[bytes]) -> list[np.ndarray]:
        """Turn a reply (header+payload, raw bytes, or empty) into an ndarray list."""
        try:
            if not frames:
                logger.warning(f"ZmqEmbeddingRunner({self._model_type}): empty reply")
                return [np.zeros((1, self._get_output_dim()), dtype=np.float32)]
            if len(frames) == 1:
                # Bare float32 bytes — interpret as (1, embedding_dim).
                return [np.frombuffer(frames[0], dtype=np.float32).reshape((1, -1))]
            reply_header = json.loads(frames[0].decode("utf-8"))
            dtype = np.dtype(reply_header.get("dtype", "float32"))
            shape = tuple(reply_header.get("shape", []))
            return [np.frombuffer(frames[1], dtype=dtype).reshape(shape)]
        except Exception as exc:
            logger.error(
                f"ZmqEmbeddingRunner({self._model_type}): failed to decode response: {exc}"
            )
            return [np.zeros((1, self._get_output_dim()), dtype=np.float32)]

    def _get_output_dim(self) -> int:
        """Embedding dimensionality for the zero fallback (512 if unknown)."""
        return self._OUTPUT_DIMS.get(self._model_type, 512)

    def __del__(self) -> None:
        try:
            if self._socket is not None:
                self._socket.close(linger=self._linger_ms)
        except Exception:
            pass
|
||||
|
||||
|
||||
def get_optimized_runner(
|
||||
model_path: str, device: str | None, model_type: str, **kwargs
|
||||
) -> BaseModelRunner:
|
||||
"""Get an optimized runner for the hardware."""
|
||||
device = device or "AUTO"
|
||||
|
||||
# ZMQ embedding runner — offloads ONNX inference to a native host process.
|
||||
# Triggered when device is a ZMQ endpoint, e.g. "zmq://host.docker.internal:5556".
|
||||
if device.startswith("zmq://"):
|
||||
return ZmqEmbeddingRunner(endpoint=device, model_type=model_type)
|
||||
|
||||
if device != "CPU" and is_rknn_compatible(model_path):
|
||||
rknn_path = auto_convert_model(model_path)
|
||||
|
||||
|
||||
86
frigate/detectors/plugins/axengine.py
Normal file
86
frigate/detectors/plugins/axengine.py
Normal file
@ -0,0 +1,86 @@
|
||||
import logging
|
||||
import os.path
|
||||
import re
|
||||
import urllib.request
|
||||
from typing import Literal
|
||||
|
||||
import axengine as axe
|
||||
|
||||
from frigate.const import MODEL_CACHE_DIR
|
||||
from frigate.detectors.detection_api import DetectionApi
|
||||
from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
|
||||
from frigate.util.model import post_process_yolo
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
DETECTOR_KEY = "axengine"
|
||||
|
||||
# Maps supported model types to the regex a configured model path must match.
supported_models = {
    ModelTypeEnum.yologeneric: "frigate-yolov9-.*$",
}

# Local cache directory for downloaded .axmodel files.
model_cache_dir = os.path.join(MODEL_CACHE_DIR, "axengine_cache/")


class AxengineDetectorConfig(BaseDetectorConfig):
    # Detector-specific config: only the discriminator is needed; model
    # settings come from BaseDetectorConfig.
    type: Literal[DETECTOR_KEY]


class Axengine(DetectionApi):
    """Object detector running .axmodel files on AXERA NPUs via AXEngine."""

    type_key = DETECTOR_KEY

    def __init__(self, config: AxengineDetectorConfig):
        logger.info("__init__ axengine")
        super().__init__(config)
        self.height = config.model.height
        self.width = config.model.width
        model_path = config.model.path or "frigate-yolov9-tiny"
        model_props = self.parse_model_input(model_path)
        self.session = axe.InferenceSession(model_props["path"])

    def __del__(self):
        pass

    def parse_model_input(self, model_path):
        """Resolve a configured model name to a cached .axmodel file.

        Downloads the model into the cache on first use. Raises Exception when
        the name matches no supported model pattern.
        """
        model_props = {}
        model_props["preset"] = True

        model_matched = False

        for model_type, pattern in supported_models.items():
            if re.match(pattern, model_path):
                model_matched = True
                model_props["model_type"] = model_type

        if model_matched:
            model_props["filename"] = model_path + ".axmodel"
            model_props["path"] = model_cache_dir + model_props["filename"]

            if not os.path.isfile(model_props["path"]):
                self.download_model(model_props["filename"])
        else:
            # Iterate .values() for the regex patterns (iterating the dict
            # directly yields ModelTypeEnum keys, which cannot be sliced) and
            # strip the anchors to present human-readable names.
            supported_models_str = ", ".join(
                pattern.strip("^$") for pattern in supported_models.values()
            )
            raise Exception(
                f"Model {model_path} is unsupported. Provide your own model or choose one of the following: {supported_models_str}"
            )
        return model_props

    def download_model(self, filename):
        """Fetch the named .axmodel from Hugging Face into the local cache."""
        # exist_ok avoids a race when multiple processes start simultaneously.
        os.makedirs(model_cache_dir, exist_ok=True)

        # HF_ENDPOINT lets users point at a Hugging Face mirror.
        HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
        urllib.request.urlretrieve(
            f"{HF_ENDPOINT}/AXERA-TECH/frigate-resource/resolve/axmodel/{filename}",
            model_cache_dir + filename,
        )

    def detect_raw(self, tensor_input):
        """Run inference on a preprocessed tensor and post-process detections."""
        results = self.session.run(None, {"images": tensor_input})
        if self.detector_config.model.model_type == ModelTypeEnum.yologeneric:
            return post_process_yolo(results, self.width, self.height)
        else:
            raise ValueError(
                f'Model type "{self.detector_config.model.model_type}" is currently not supported.'
            )
|
||||
@ -181,6 +181,10 @@ Each line represents a detection state, not necessarily unique individuals. Pare
|
||||
try:
|
||||
metadata = ReviewMetadata.model_validate_json(clean_json)
|
||||
|
||||
# Normalize confidence if model returned a percentage (e.g. 85 instead of 0.85)
|
||||
if metadata.confidence > 1.0:
|
||||
metadata.confidence = min(metadata.confidence / 100.0, 1.0)
|
||||
|
||||
# If any verified objects (contain parentheses with name), set to 0
|
||||
if any("(" in obj for obj in review_data["unified_objects"]):
|
||||
metadata.potential_threat_level = 0
|
||||
|
||||
275
tools/zmq_embedding_server.py
Normal file
275
tools/zmq_embedding_server.py
Normal file
@ -0,0 +1,275 @@
|
||||
"""ZMQ Embedding Server — native Mac (Apple Silicon) inference service.
|
||||
|
||||
Runs ONNX models using hardware acceleration unavailable inside Docker on macOS,
|
||||
specifically CoreML and the Apple Neural Engine. Frigate's Docker container
|
||||
connects to this server over ZMQ TCP, sends preprocessed tensors, and receives
|
||||
embedding vectors back.
|
||||
|
||||
Supported models:
|
||||
- ArcFace (face recognition, 512-dim output)
|
||||
- FaceNet (face recognition, 128-dim output)
|
||||
- Jina V1/V2 vision (semantic search, 768-dim output)
|
||||
|
||||
Requirements (install outside Docker, on the Mac host):
|
||||
pip install onnxruntime pyzmq numpy
|
||||
|
||||
Usage:
|
||||
# ArcFace face recognition (port 5556):
|
||||
python tools/zmq_embedding_server.py \\
|
||||
--model /config/model_cache/facedet/arcface.onnx \\
|
||||
--model-type arcface \\
|
||||
--port 5556
|
||||
|
||||
# Jina V1 vision semantic search (port 5557):
|
||||
python tools/zmq_embedding_server.py \\
|
||||
--model /config/model_cache/jinaai/jina-clip-v1/vision_model_quantized.onnx \\
|
||||
--model-type jina_v1 \\
|
||||
--port 5557
|
||||
|
||||
Frigate config (docker-compose / config.yaml):
|
||||
face_recognition:
|
||||
enabled: true
|
||||
model_size: large
|
||||
device: "zmq://host.docker.internal:5556"
|
||||
|
||||
semantic_search:
|
||||
enabled: true
|
||||
model_size: small
|
||||
device: "zmq://host.docker.internal:5557"
|
||||
|
||||
Protocol (REQ/REP):
|
||||
Request: multipart [ header_json_bytes, tensor_bytes ]
|
||||
header = {
|
||||
"shape": [batch, channels, height, width], # e.g. [1, 3, 112, 112]
|
||||
"dtype": "float32",
|
||||
"model_type": "arcface",
|
||||
}
|
||||
tensor_bytes = raw C-order numpy bytes
|
||||
|
||||
Response: multipart [ header_json_bytes, embedding_bytes ]
|
||||
header = {
|
||||
"shape": [batch, embedding_dim], # e.g. [1, 512]
|
||||
"dtype": "float32",
|
||||
}
|
||||
embedding_bytes = raw C-order numpy bytes
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
|
||||
import numpy as np
|
||||
import zmq
|
||||
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format="%(asctime)s %(levelname)s %(name)s: %(message)s",
|
||||
)
|
||||
logger = logging.getLogger("zmq_embedding_server")
|
||||
|
||||
|
||||
# Models that require ORT_ENABLE_BASIC optimization to avoid graph fusion issues
|
||||
# (e.g. SimplifiedLayerNormFusion creates nodes that some providers can't handle).
|
||||
_COMPLEX_MODELS = {"jina_v1", "jina_v2"}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# ONNX Runtime session (CoreML preferred on Apple Silicon)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def build_ort_session(model_path: str, model_type: str = ""):
    """Create an ONNX Runtime InferenceSession, preferring CoreML on macOS.

    Jina V1/V2 models are loaded with ORT_ENABLE_BASIC graph optimization so
    fusion passes (e.g. SimplifiedLayerNormFusion) don't emit nodes some
    providers cannot handle; every other model keeps the default
    ORT_ENABLE_ALL.
    """
    import onnxruntime as ort

    available = ort.get_available_providers()
    logger.info(f"Available ORT providers: {available}")

    # CoreMLExecutionProvider gives ANE/GPU acceleration on Apple Silicon;
    # ORT falls back to CPUExecutionProvider automatically when it's absent.
    has_coreml = "CoreMLExecutionProvider" in available
    if has_coreml:
        logger.info("Using CoreMLExecutionProvider (Apple Neural Engine / GPU)")
    else:
        logger.warning(
            "CoreMLExecutionProvider not available — falling back to CPU. "
            "Install onnxruntime-silicon or a CoreML-enabled onnxruntime build."
        )
    providers = (["CoreMLExecutionProvider"] if has_coreml else []) + [
        "CPUExecutionProvider"
    ]

    options = None
    if model_type in _COMPLEX_MODELS:
        options = ort.SessionOptions()
        options.graph_optimization_level = (
            ort.GraphOptimizationLevel.ORT_ENABLE_BASIC
        )
        logger.info(f"Using ORT_ENABLE_BASIC optimization for {model_type}")

    session = ort.InferenceSession(model_path, sess_options=options, providers=providers)

    input_names = [entry.name for entry in session.get_inputs()]
    output_names = [entry.name for entry in session.get_outputs()]
    logger.info(f"Model loaded: inputs={input_names}, outputs={output_names}")
    return session
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Inference helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def run_arcface(session, tensor: np.ndarray) -> np.ndarray:
    """ArcFace inference: (1, 3, 112, 112) float32 in, (1, 512) float32 out."""
    (embedding, *_extra) = session.run(None, {"data": tensor})
    return embedding
|
||||
|
||||
|
||||
def run_generic(session, tensor: np.ndarray) -> np.ndarray:
    """Run any single-input ONNX model, feeding its first declared input."""
    feed = {session.get_inputs()[0].name: tensor}
    return session.run(None, feed)[0]
|
||||
|
||||
|
||||
# Dispatch table: model type key -> inference function.
_RUNNERS = {
    "arcface": run_arcface,
    "facenet": run_generic,
    "jina_v1": run_generic,
    "jina_v2": run_generic,
}

# Model type -> input shape used for the warmup pass (forces CoreML JIT
# compilation before the first real request, avoiding a cold-start ZMQ
# timeout).
_WARMUP_SHAPES = {
    "arcface": (1, 3, 112, 112),
    "facenet": (1, 3, 160, 160),
    "jina_v1": (1, 3, 224, 224),
    "jina_v2": (1, 3, 224, 224),
}


def warmup(session, model_type: str) -> None:
    """Issue one dummy inference so CoreML compiles the model up front."""
    shape = _WARMUP_SHAPES.get(model_type)
    if shape is None:
        return
    logger.info(f"Warming up CoreML model ({model_type})…")
    dummy = np.zeros(shape, dtype=np.float32)
    try:
        run_fn = _RUNNERS.get(model_type, run_generic)
        run_fn(session, dummy)
        logger.info("Warmup complete")
    except Exception as exc:
        logger.warning(f"Warmup failed (non-fatal): {exc}")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# ZMQ server loop
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def serve(session, port: int, model_type: str) -> None:
    """Run the blocking REP loop answering embedding requests on `port`.

    Each request is multipart [header_json, tensor_bytes]; malformed requests
    get an empty-JSON reply, inference failures get a zero embedding, and the
    loop only exits via SIGINT/SIGTERM.
    """
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind(f"tcp://0.0.0.0:{port}")
    logger.info(f"Listening on tcp://0.0.0.0:{port} (model_type={model_type})")

    runner = _RUNNERS.get(model_type, run_generic)

    def _shutdown(sig, frame):
        logger.info("Shutting down…")
        socket.close(linger=0)
        context.term()
        sys.exit(0)

    for signum in (signal.SIGINT, signal.SIGTERM):
        signal.signal(signum, _shutdown)

    while True:
        try:
            frames = socket.recv_multipart()
        except zmq.ZMQError as exc:
            logger.error(f"recv error: {exc}")
            continue

        # REP sockets must send exactly one reply per request — even for junk.
        if len(frames) < 2:
            logger.warning(f"Received unexpected frame count: {len(frames)}, ignoring")
            socket.send_multipart([b"{}"])
            continue

        try:
            request = json.loads(frames[0].decode("utf-8"))
            shape = tuple(request["shape"])
            tensor = np.frombuffer(
                frames[1], dtype=np.dtype(request.get("dtype", "float32"))
            ).reshape(shape)
        except Exception as exc:
            logger.error(f"Failed to decode request: {exc}")
            socket.send_multipart([b"{}"])
            continue

        try:
            started = time.monotonic()
            embedding = runner(session, tensor)
            took_ms = (time.monotonic() - started) * 1000
            if took_ms > 2000:
                logger.warning(f"slow inference {took_ms:.1f}ms shape={shape}")
            reply_header = json.dumps(
                {"shape": list(embedding.shape), "dtype": str(embedding.dtype.name)}
            ).encode("utf-8")
            reply_body = memoryview(np.ascontiguousarray(embedding).tobytes())
            socket.send_multipart([reply_header, reply_body])
        except Exception as exc:
            logger.error(f"Inference error: {exc}")
            # Zero embedding lets the client degrade gracefully.
            fallback = np.zeros((1, 512), dtype=np.float32)
            reply_header = json.dumps(
                {"shape": list(fallback.shape), "dtype": "float32"}
            ).encode("utf-8")
            socket.send_multipart([reply_header, memoryview(fallback.tobytes())])
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Entry point
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def main():
    """CLI entry point: parse arguments, load the ONNX model, warm it up,
    and serve embedding requests over ZMQ until terminated.

    Exits with status 1 when the model file does not exist.
    """
    arg_parser = argparse.ArgumentParser(description="ZMQ Embedding Server for Frigate")
    arg_parser.add_argument(
        "--model",
        required=True,
        help="Path to the ONNX model file (e.g. /config/model_cache/facedet/arcface.onnx)",
    )
    arg_parser.add_argument(
        "--model-type",
        default="arcface",
        choices=list(_RUNNERS.keys()),
        help="Model type key (default: arcface)",
    )
    arg_parser.add_argument(
        "--port",
        type=int,
        default=5556,
        help="TCP port to listen on (default: 5556)",
    )
    opts = arg_parser.parse_args()

    # Fail fast with a clear message rather than letting the ORT session
    # constructor raise a less obvious error later.
    if not os.path.exists(opts.model):
        logger.error(f"Model file not found: {opts.model}")
        sys.exit(1)

    logger.info(f"Loading model: {opts.model}")
    session = build_ort_session(opts.model, model_type=opts.model_type)
    warmup(session, model_type=opts.model_type)
    serve(session, port=opts.port, model_type=opts.model_type)


if __name__ == "__main__":
    main()
|
||||
72
web/package-lock.json
generated
72
web/package-lock.json
generated
@ -72,8 +72,6 @@
|
||||
"react-markdown": "^9.0.1",
|
||||
"react-router-dom": "^6.30.3",
|
||||
"react-swipeable": "^7.0.2",
|
||||
"react-tracked": "^2.0.1",
|
||||
"react-use-websocket": "^4.8.1",
|
||||
"react-zoom-pan-pinch": "^3.7.0",
|
||||
"remark-gfm": "^4.0.0",
|
||||
"scroll-into-view-if-needed": "^3.1.0",
|
||||
@ -4400,7 +4398,6 @@
|
||||
"resolved": "https://registry.npmjs.org/@rjsf/core/-/core-6.3.1.tgz",
|
||||
"integrity": "sha512-LTjFz5Fk3FlbgFPJ+OJi1JdWJyiap9dSpx8W6u7JHNB7K5VbwzJe8gIU45XWLHzWFGDHKPm89VrUzjOs07TPtg==",
|
||||
"license": "Apache-2.0",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"lodash": "^4.17.23",
|
||||
"lodash-es": "^4.17.23",
|
||||
@ -4475,7 +4472,6 @@
|
||||
"resolved": "https://registry.npmjs.org/@rjsf/utils/-/utils-6.3.1.tgz",
|
||||
"integrity": "sha512-ve2KHl1ITYG8QIonnuK83/T1k/5NuxP4D1egVqP9Hz2ub28kgl0rNMwmRSxXs3WIbCcMW9g3ox+daVrbSNc4Mw==",
|
||||
"license": "Apache-2.0",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@x0k/json-schema-merge": "^1.0.2",
|
||||
"fast-uri": "^3.1.0",
|
||||
@ -5149,7 +5145,6 @@
|
||||
"integrity": "sha512-MdiXf+nDuMvY0gJKxyfZ7/6UFsETO7mGKF54MVD/ekJS6HdFtpZFBgrh6Pseu64XTb2MLyFPlbW6hj8HYRQNOQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"undici-types": "~5.26.4"
|
||||
}
|
||||
@ -5159,7 +5154,6 @@
|
||||
"resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.14.tgz",
|
||||
"integrity": "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"csstype": "^3.2.2"
|
||||
}
|
||||
@ -5170,7 +5164,6 @@
|
||||
"integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==",
|
||||
"devOptional": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"peerDependencies": {
|
||||
"@types/react": "^19.2.0"
|
||||
}
|
||||
@ -5300,7 +5293,6 @@
|
||||
"integrity": "sha512-dm/J2UDY3oV3TKius2OUZIFHsomQmpHtsV0FTh1WO8EKgHLQ1QCADUqscPgTpU+ih1e21FQSRjXckHn3txn6kQ==",
|
||||
"dev": true,
|
||||
"license": "BSD-2-Clause",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@typescript-eslint/scope-manager": "7.12.0",
|
||||
"@typescript-eslint/types": "7.12.0",
|
||||
@ -5593,7 +5585,6 @@
|
||||
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz",
|
||||
"integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==",
|
||||
"dev": true,
|
||||
"peer": true,
|
||||
"bin": {
|
||||
"acorn": "bin/acorn"
|
||||
},
|
||||
@ -5755,7 +5746,6 @@
|
||||
"resolved": "https://registry.npmjs.org/apexcharts/-/apexcharts-3.52.0.tgz",
|
||||
"integrity": "sha512-7dg0ADKs8AA89iYMZMe2sFDG0XK5PfqllKV9N+i3hKHm3vEtdhwz8AlXGm+/b0nJ6jKiaXsqci5LfVxNhtB+dA==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@yr/monotone-cubic-spline": "^1.0.3",
|
||||
"svg.draggable.js": "^2.2.2",
|
||||
@ -5966,7 +5956,6 @@
|
||||
}
|
||||
],
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"caniuse-lite": "^1.0.30001646",
|
||||
"electron-to-chromium": "^1.5.4",
|
||||
@ -6467,7 +6456,6 @@
|
||||
"resolved": "https://registry.npmjs.org/date-fns/-/date-fns-3.6.0.tgz",
|
||||
"integrity": "sha512-fRHTG8g/Gif+kSh50gaGEdToemgfj74aRX3swtiouboip5JDLAyDE9F11nHMIcvOaXeOC6D7SpNhi7uFyB7Uww==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"funding": {
|
||||
"type": "github",
|
||||
"url": "https://github.com/sponsors/kossnocorp"
|
||||
@ -6907,7 +6895,6 @@
|
||||
"integrity": "sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@eslint-community/eslint-utils": "^4.2.0",
|
||||
"@eslint-community/regexpp": "^4.6.1",
|
||||
@ -6963,7 +6950,6 @@
|
||||
"resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-9.1.0.tgz",
|
||||
"integrity": "sha512-NSWl5BFQWEPi1j4TjVNItzYV7dZXZ+wP6I6ZhrBGpChQhZRUaElihE9uRRkcbRnNb76UMKDF3r+WTmNcGPKsqw==",
|
||||
"dev": true,
|
||||
"peer": true,
|
||||
"bin": {
|
||||
"eslint-config-prettier": "bin/cli.js"
|
||||
},
|
||||
@ -7887,7 +7873,6 @@
|
||||
}
|
||||
],
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@babel/runtime": "^7.23.2"
|
||||
},
|
||||
@ -8533,8 +8518,7 @@
|
||||
"url": "https://github.com/sponsors/lavrton"
|
||||
}
|
||||
],
|
||||
"license": "MIT",
|
||||
"peer": true
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/levn": {
|
||||
"version": "0.4.1",
|
||||
@ -9683,8 +9667,7 @@
|
||||
"version": "0.52.2",
|
||||
"resolved": "https://registry.npmjs.org/monaco-editor/-/monaco-editor-0.52.2.tgz",
|
||||
"integrity": "sha512-GEQWEZmfkOGLdd3XK8ryrfWz3AIP8YymVXiPHEdewrUq7mh0qrKrfHLNCXcbB6sTnMLnOZ3ztSiKcciFUkIJwQ==",
|
||||
"license": "MIT",
|
||||
"peer": true
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/monaco-languageserver-types": {
|
||||
"version": "0.4.0",
|
||||
@ -10392,7 +10375,6 @@
|
||||
}
|
||||
],
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"nanoid": "^3.3.8",
|
||||
"picocolors": "^1.1.1",
|
||||
@ -10527,7 +10509,6 @@
|
||||
"resolved": "https://registry.npmjs.org/prettier/-/prettier-3.3.3.tgz",
|
||||
"integrity": "sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"bin": {
|
||||
"prettier": "bin/prettier.cjs"
|
||||
},
|
||||
@ -10676,11 +10657,6 @@
|
||||
"url": "https://github.com/sponsors/wooorm"
|
||||
}
|
||||
},
|
||||
"node_modules/proxy-compare": {
|
||||
"version": "3.0.0",
|
||||
"resolved": "https://registry.npmjs.org/proxy-compare/-/proxy-compare-3.0.0.tgz",
|
||||
"integrity": "sha512-y44MCkgtZUCT9tZGuE278fB7PWVf7fRYy0vbRXAts2o5F0EfC4fIQrvQQGBJo1WJbFcVLXzApOscyJuZqHQc1w=="
|
||||
},
|
||||
"node_modules/proxy-from-env": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
|
||||
@ -10733,7 +10709,6 @@
|
||||
"resolved": "https://registry.npmjs.org/react/-/react-19.2.4.tgz",
|
||||
"integrity": "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"engines": {
|
||||
"node": ">=0.10.0"
|
||||
}
|
||||
@ -10798,7 +10773,6 @@
|
||||
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.4.tgz",
|
||||
"integrity": "sha512-AXJdLo8kgMbimY95O2aKQqsz2iWi9jMgKJhRBAxECE4IFxfcazB2LmzloIoibJI3C12IlY20+KFaLv+71bUJeQ==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"scheduler": "^0.27.0"
|
||||
},
|
||||
@ -10860,7 +10834,6 @@
|
||||
"resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.52.1.tgz",
|
||||
"integrity": "sha512-uNKIhaoICJ5KQALYZ4TOaOLElyM+xipord+Ha3crEFhTntdLvWZqVY49Wqd/0GiVCA/f9NjemLeiNPjG7Hpurg==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"engines": {
|
||||
"node": ">=12.22.0"
|
||||
},
|
||||
@ -11115,29 +11088,6 @@
|
||||
"react": "^16.8.3 || ^17 || ^18 || ^19.0.0 || ^19.0.0-rc"
|
||||
}
|
||||
},
|
||||
"node_modules/react-tracked": {
|
||||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/react-tracked/-/react-tracked-2.0.1.tgz",
|
||||
"integrity": "sha512-qjbmtkO2IcW+rB2cFskRWDTjKs/w9poxvNnduacjQA04LWxOoLy9J8WfIEq1ahifQ/tVJQECrQPBm+UEzKRDtg==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"proxy-compare": "^3.0.0",
|
||||
"use-context-selector": "^2.0.0"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"react": ">=18.0.0",
|
||||
"scheduler": ">=0.19.0"
|
||||
}
|
||||
},
|
||||
"node_modules/react-use-websocket": {
|
||||
"version": "4.8.1",
|
||||
"resolved": "https://registry.npmjs.org/react-use-websocket/-/react-use-websocket-4.8.1.tgz",
|
||||
"integrity": "sha512-FTXuG5O+LFozmu1BRfrzl7UIQngECvGJmL7BHsK4TYXuVt+mCizVA8lT0hGSIF0Z0TedF7bOo1nRzOUdginhDw==",
|
||||
"peerDependencies": {
|
||||
"react": ">= 18.0.0",
|
||||
"react-dom": ">= 18.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/react-zoom-pan-pinch": {
|
||||
"version": "3.7.0",
|
||||
"resolved": "https://registry.npmjs.org/react-zoom-pan-pinch/-/react-zoom-pan-pinch-3.7.0.tgz",
|
||||
@ -11549,8 +11499,7 @@
|
||||
"version": "0.27.0",
|
||||
"resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz",
|
||||
"integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==",
|
||||
"license": "MIT",
|
||||
"peer": true
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/scroll-into-view-if-needed": {
|
||||
"version": "3.1.0",
|
||||
@ -12049,7 +11998,6 @@
|
||||
"resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.9.tgz",
|
||||
"integrity": "sha512-1SEOvRr6sSdV5IDf9iC+NU4dhwdqzF4zKKq3sAbasUWHEM6lsMhX+eNN5gkPx1BvLFEnZQEUFbXnGj8Qlp83Pg==",
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@alloc/quick-lru": "^5.2.0",
|
||||
"arg": "^5.0.2",
|
||||
@ -12232,7 +12180,6 @@
|
||||
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
},
|
||||
@ -12411,7 +12358,6 @@
|
||||
"integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==",
|
||||
"devOptional": true,
|
||||
"license": "Apache-2.0",
|
||||
"peer": true,
|
||||
"bin": {
|
||||
"tsc": "bin/tsc",
|
||||
"tsserver": "bin/tsserver"
|
||||
@ -12627,15 +12573,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/use-context-selector": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/use-context-selector/-/use-context-selector-2.0.0.tgz",
|
||||
"integrity": "sha512-owfuSmUNd3eNp3J9CdDl0kMgfidV+MkDvHPpvthN5ThqM+ibMccNE0k+Iq7TWC6JPFvGZqanqiGCuQx6DyV24g==",
|
||||
"peerDependencies": {
|
||||
"react": ">=18.0.0",
|
||||
"scheduler": ">=0.19.0"
|
||||
}
|
||||
},
|
||||
"node_modules/use-long-press": {
|
||||
"version": "3.2.0",
|
||||
"resolved": "https://registry.npmjs.org/use-long-press/-/use-long-press-3.2.0.tgz",
|
||||
@ -12771,7 +12708,6 @@
|
||||
"integrity": "sha512-+Oxm7q9hDoLMyJOYfUYBuHQo+dkAloi33apOPP56pzj+vsdJDzr+j1NISE5pyaAuKL4A3UD34qd0lx5+kfKp2g==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"esbuild": "^0.25.0",
|
||||
"fdir": "^6.4.4",
|
||||
@ -12896,7 +12832,6 @@
|
||||
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
},
|
||||
@ -12910,7 +12845,6 @@
|
||||
"integrity": "sha512-IP7gPK3LS3Fvn44x30X1dM9vtawm0aesAa2yBIZ9vQf+qB69NXC5776+Qmcr7ohUXIQuLhk7xQR0aSUIDPqavg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"@vitest/expect": "3.0.7",
|
||||
"@vitest/mocker": "3.0.7",
|
||||
|
||||
@ -78,8 +78,6 @@
|
||||
"react-markdown": "^9.0.1",
|
||||
"react-router-dom": "^6.30.3",
|
||||
"react-swipeable": "^7.0.2",
|
||||
"react-tracked": "^2.0.1",
|
||||
"react-use-websocket": "^4.8.1",
|
||||
"react-zoom-pan-pinch": "^3.7.0",
|
||||
"remark-gfm": "^4.0.0",
|
||||
"scroll-into-view-if-needed": "^3.1.0",
|
||||
|
||||
@ -1,23 +0,0 @@
|
||||
diff --git a/node_modules/react-use-websocket/dist/lib/use-websocket.js b/node_modules/react-use-websocket/dist/lib/use-websocket.js
|
||||
index f01db48..b30aff2 100644
|
||||
--- a/node_modules/react-use-websocket/dist/lib/use-websocket.js
|
||||
+++ b/node_modules/react-use-websocket/dist/lib/use-websocket.js
|
||||
@@ -139,15 +139,15 @@ var useWebSocket = function (url, options, connect) {
|
||||
}
|
||||
protectedSetLastMessage = function (message) {
|
||||
if (!expectClose_1) {
|
||||
- (0, react_dom_1.flushSync)(function () { return setLastMessage(message); });
|
||||
+ setLastMessage(message);
|
||||
}
|
||||
};
|
||||
protectedSetReadyState = function (state) {
|
||||
if (!expectClose_1) {
|
||||
- (0, react_dom_1.flushSync)(function () { return setReadyState(function (prev) {
|
||||
+ setReadyState(function (prev) {
|
||||
var _a;
|
||||
return (__assign(__assign({}, prev), (convertedUrl.current && (_a = {}, _a[convertedUrl.current] = state, _a))));
|
||||
- }); });
|
||||
+ });
|
||||
}
|
||||
};
|
||||
if (createOrJoin_1) {
|
||||
78
web/src/api/WsProvider.tsx
Normal file
78
web/src/api/WsProvider.tsx
Normal file
@ -0,0 +1,78 @@
|
||||
import { baseUrl } from "./baseUrl";
|
||||
import { ReactNode, useCallback, useEffect, useRef } from "react";
|
||||
import { WsSendContext } from "./wsContext";
|
||||
import type { Update } from "./wsContext";
|
||||
import { processWsMessage, resetWsStore } from "./ws";
|
||||
|
||||
export function WsProvider({ children }: { children: ReactNode }) {
|
||||
const wsUrl = `${baseUrl.replace(/^http/, "ws")}ws`;
|
||||
const wsRef = useRef<WebSocket | null>(null);
|
||||
const reconnectTimer = useRef<ReturnType<typeof setTimeout> | null>(null);
|
||||
const reconnectAttempt = useRef(0);
|
||||
const unmounted = useRef(false);
|
||||
|
||||
const sendJsonMessage = useCallback((msg: unknown) => {
|
||||
if (wsRef.current?.readyState === WebSocket.OPEN) {
|
||||
wsRef.current.send(JSON.stringify(msg));
|
||||
}
|
||||
}, []);
|
||||
|
||||
useEffect(() => {
|
||||
unmounted.current = false;
|
||||
|
||||
function connect() {
|
||||
if (unmounted.current) return;
|
||||
|
||||
const ws = new WebSocket(wsUrl);
|
||||
wsRef.current = ws;
|
||||
|
||||
ws.onopen = () => {
|
||||
reconnectAttempt.current = 0;
|
||||
ws.send(
|
||||
JSON.stringify({ topic: "onConnect", message: "", retain: false }),
|
||||
);
|
||||
};
|
||||
|
||||
ws.onmessage = (event: MessageEvent) => {
|
||||
processWsMessage(event.data as string);
|
||||
};
|
||||
|
||||
ws.onclose = () => {
|
||||
if (unmounted.current) return;
|
||||
const delay = Math.min(1000 * 2 ** reconnectAttempt.current, 30000);
|
||||
reconnectAttempt.current++;
|
||||
reconnectTimer.current = setTimeout(connect, delay);
|
||||
};
|
||||
|
||||
ws.onerror = () => {
|
||||
ws.close();
|
||||
};
|
||||
}
|
||||
|
||||
connect();
|
||||
|
||||
return () => {
|
||||
unmounted.current = true;
|
||||
if (reconnectTimer.current) {
|
||||
clearTimeout(reconnectTimer.current);
|
||||
}
|
||||
wsRef.current?.close();
|
||||
resetWsStore();
|
||||
};
|
||||
}, [wsUrl]);
|
||||
|
||||
const send = useCallback(
|
||||
(message: Update) => {
|
||||
sendJsonMessage({
|
||||
topic: message.topic,
|
||||
payload: message.payload,
|
||||
retain: message.retain,
|
||||
});
|
||||
},
|
||||
[sendJsonMessage],
|
||||
);
|
||||
|
||||
return (
|
||||
<WsSendContext.Provider value={send}>{children}</WsSendContext.Provider>
|
||||
);
|
||||
}
|
||||
@ -1,6 +1,6 @@
|
||||
import { baseUrl } from "./baseUrl";
|
||||
import { SWRConfig } from "swr";
|
||||
import { WsProvider } from "./ws";
|
||||
import { WsProvider } from "./WsProvider";
|
||||
import axios from "axios";
|
||||
import { ReactNode } from "react";
|
||||
import { isRedirectingToLogin, setRedirectingToLogin } from "./auth-redirect";
|
||||
|
||||
@ -1,6 +1,11 @@
|
||||
import { baseUrl } from "./baseUrl";
|
||||
import { useCallback, useEffect, useRef, useState } from "react";
|
||||
import useWebSocket, { ReadyState } from "react-use-websocket";
|
||||
import {
|
||||
useCallback,
|
||||
useContext,
|
||||
useEffect,
|
||||
useMemo,
|
||||
useRef,
|
||||
useSyncExternalStore,
|
||||
} from "react";
|
||||
import {
|
||||
EmbeddingsReindexProgressType,
|
||||
FrigateCameraState,
|
||||
@ -14,8 +19,11 @@ import {
|
||||
Job,
|
||||
} from "@/types/ws";
|
||||
import { FrigateStats } from "@/types/stats";
|
||||
import { createContainer } from "react-tracked";
|
||||
import useDeepMemo from "@/hooks/use-deep-memo";
|
||||
import { isEqual } from "lodash";
|
||||
import { WsSendContext } from "./wsContext";
|
||||
import type { Update, WsSend } from "./wsContext";
|
||||
|
||||
export type { Update };
|
||||
|
||||
export type WsFeedMessage = {
|
||||
topic: string;
|
||||
@ -24,170 +32,204 @@ export type WsFeedMessage = {
|
||||
id: string;
|
||||
};
|
||||
|
||||
type Update = {
|
||||
topic: string;
|
||||
payload: unknown;
|
||||
retain: boolean;
|
||||
};
|
||||
|
||||
type WsState = {
|
||||
[topic: string]: unknown;
|
||||
};
|
||||
|
||||
type useValueReturn = [WsState, (update: Update) => void];
|
||||
// External store for WebSocket state using useSyncExternalStore.
// wsState maps topic -> latest payload; wsTopicListeners maps
// topic -> the set of store subscribers to notify when that topic changes.
type Listener = () => void;

const wsState: WsState = {};
const wsTopicListeners = new Map<string, Set<Listener>>();
|
||||
|
||||
// Reset all module-level state. Called on WsProvider unmount to prevent
|
||||
// stale data from leaking across mount/unmount cycles (e.g. HMR, logout)
|
||||
export function resetWsStore() {
|
||||
for (const key of Object.keys(wsState)) {
|
||||
delete wsState[key];
|
||||
}
|
||||
wsTopicListeners.clear();
|
||||
lastCameraActivityPayload = null;
|
||||
wsMessageSubscribers.clear();
|
||||
wsMessageIdCounter = 0;
|
||||
}
|
||||
|
||||
// Parse and apply a raw WS message synchronously.
|
||||
// Called directly from WsProvider's onmessage handler.
|
||||
export function processWsMessage(raw: string) {
|
||||
const data: Update = JSON.parse(raw);
|
||||
if (!data) return;
|
||||
|
||||
const { topic, payload } = data;
|
||||
|
||||
if (topic === "camera_activity") {
|
||||
applyCameraActivity(payload as string);
|
||||
} else {
|
||||
applyTopicUpdate(topic, payload);
|
||||
}
|
||||
|
||||
if (wsMessageSubscribers.size > 0) {
|
||||
wsMessageSubscribers.forEach((cb) =>
|
||||
cb({
|
||||
topic,
|
||||
payload,
|
||||
timestamp: Date.now(),
|
||||
id: String(wsMessageIdCounter++),
|
||||
}),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
function applyTopicUpdate(topic: string, newVal: unknown) {
|
||||
const oldVal = wsState[topic];
|
||||
// Fast path: === for primitives ("ON"/"OFF", numbers).
|
||||
// Fall back to isEqual for objects/arrays.
|
||||
const unchanged =
|
||||
oldVal === newVal ||
|
||||
(typeof newVal === "object" && newVal !== null && isEqual(oldVal, newVal));
|
||||
if (unchanged) return;
|
||||
|
||||
wsState[topic] = newVal;
|
||||
// Snapshot the Set — a listener may trigger unmount that modifies it.
|
||||
const listeners = wsTopicListeners.get(topic);
|
||||
if (listeners) {
|
||||
for (const l of Array.from(listeners)) l();
|
||||
}
|
||||
}
|
||||
|
||||
// Subscriptions
|
||||
|
||||
export function subscribeWsTopic(
|
||||
topic: string,
|
||||
listener: Listener,
|
||||
): () => void {
|
||||
let set = wsTopicListeners.get(topic);
|
||||
if (!set) {
|
||||
set = new Set();
|
||||
wsTopicListeners.set(topic, set);
|
||||
}
|
||||
set.add(listener);
|
||||
return () => {
|
||||
set!.delete(listener);
|
||||
if (set!.size === 0) wsTopicListeners.delete(topic);
|
||||
};
|
||||
}
|
||||
|
||||
/**
 * Read the current cached value for a topic (undefined when nothing has
 * been received yet). Serves as the getSnapshot side of the
 * useSyncExternalStore pairing with subscribeWsTopic.
 */
export function getWsTopicValue(topic: string): unknown {
  return wsState[topic];
}
|
||||
|
||||
// Feed message subscribers: callbacks that want every raw frame
// (topic + payload) rather than one topic's latest value.
const wsMessageSubscribers = new Set<(msg: WsFeedMessage) => void>();
// Monotonic id stamped onto feed messages.
let wsMessageIdCounter = 0;
|
||||
|
||||
function useValue(): useValueReturn {
|
||||
const wsUrl = `${baseUrl.replace(/^http/, "ws")}ws`;
|
||||
// Camera activity expansion
|
||||
//
|
||||
// Cache the last raw camera_activity JSON string so we can skip JSON.parse
|
||||
// and the entire expansion when nothing has changed. This avoids creating
|
||||
// fresh objects (which defeat Object.is and force expensive isEqual deep
|
||||
// traversals) on every flush — critical with many cameras.
|
||||
let lastCameraActivityPayload: string | null = null;
|
||||
|
||||
// main state
|
||||
function applyCameraActivity(payload: string) {
|
||||
// Fast path: if the raw JSON string is identical, nothing changed.
|
||||
if (payload === lastCameraActivityPayload) return;
|
||||
lastCameraActivityPayload = payload;
|
||||
|
||||
const [wsState, setWsState] = useState<WsState>({});
|
||||
let activity: { [key: string]: Partial<FrigateCameraState> };
|
||||
|
||||
useEffect(() => {
|
||||
const activityValue: string = wsState["camera_activity"] as string;
|
||||
try {
|
||||
activity = JSON.parse(payload);
|
||||
} catch {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!activityValue) {
|
||||
return;
|
||||
}
|
||||
if (Object.keys(activity).length === 0) return;
|
||||
|
||||
let cameraActivity: { [key: string]: Partial<FrigateCameraState> };
|
||||
for (const [name, state] of Object.entries(activity)) {
|
||||
applyTopicUpdate(`camera_activity/${name}`, state);
|
||||
|
||||
try {
|
||||
cameraActivity = JSON.parse(activityValue);
|
||||
} catch {
|
||||
return;
|
||||
}
|
||||
const cameraConfig = state?.config;
|
||||
if (!cameraConfig) continue;
|
||||
|
||||
if (Object.keys(cameraActivity).length === 0) {
|
||||
return;
|
||||
}
|
||||
const {
|
||||
record,
|
||||
detect,
|
||||
enabled,
|
||||
snapshots,
|
||||
audio,
|
||||
audio_transcription,
|
||||
notifications,
|
||||
notifications_suspended,
|
||||
autotracking,
|
||||
alerts,
|
||||
detections,
|
||||
object_descriptions,
|
||||
review_descriptions,
|
||||
} = cameraConfig;
|
||||
|
||||
const cameraStates: WsState = {};
|
||||
|
||||
Object.entries(cameraActivity).forEach(([name, state]) => {
|
||||
const cameraConfig = state?.config;
|
||||
|
||||
if (!cameraConfig) {
|
||||
return;
|
||||
}
|
||||
|
||||
const {
|
||||
record,
|
||||
detect,
|
||||
enabled,
|
||||
snapshots,
|
||||
audio,
|
||||
audio_transcription,
|
||||
notifications,
|
||||
notifications_suspended,
|
||||
autotracking,
|
||||
alerts,
|
||||
detections,
|
||||
object_descriptions,
|
||||
review_descriptions,
|
||||
} = cameraConfig;
|
||||
cameraStates[`${name}/recordings/state`] = record ? "ON" : "OFF";
|
||||
cameraStates[`${name}/enabled/state`] = enabled ? "ON" : "OFF";
|
||||
cameraStates[`${name}/detect/state`] = detect ? "ON" : "OFF";
|
||||
cameraStates[`${name}/snapshots/state`] = snapshots ? "ON" : "OFF";
|
||||
cameraStates[`${name}/audio/state`] = audio ? "ON" : "OFF";
|
||||
cameraStates[`${name}/audio_transcription/state`] = audio_transcription
|
||||
? "ON"
|
||||
: "OFF";
|
||||
cameraStates[`${name}/notifications/state`] = notifications
|
||||
? "ON"
|
||||
: "OFF";
|
||||
cameraStates[`${name}/notifications/suspended`] =
|
||||
notifications_suspended || 0;
|
||||
cameraStates[`${name}/ptz_autotracker/state`] = autotracking
|
||||
? "ON"
|
||||
: "OFF";
|
||||
cameraStates[`${name}/review_alerts/state`] = alerts ? "ON" : "OFF";
|
||||
cameraStates[`${name}/review_detections/state`] = detections
|
||||
? "ON"
|
||||
: "OFF";
|
||||
cameraStates[`${name}/object_descriptions/state`] = object_descriptions
|
||||
? "ON"
|
||||
: "OFF";
|
||||
cameraStates[`${name}/review_descriptions/state`] = review_descriptions
|
||||
? "ON"
|
||||
: "OFF";
|
||||
});
|
||||
|
||||
setWsState((prevState) => ({
|
||||
...prevState,
|
||||
...cameraStates,
|
||||
}));
|
||||
|
||||
// we only want this to run initially when the config is loaded
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, [wsState["camera_activity"]]);
|
||||
|
||||
// ws handler
|
||||
const { sendJsonMessage, readyState } = useWebSocket(wsUrl, {
|
||||
onMessage: (event) => {
|
||||
const data: Update = JSON.parse(event.data);
|
||||
|
||||
if (data) {
|
||||
setWsState((prevState) => ({
|
||||
...prevState,
|
||||
[data.topic]: data.payload,
|
||||
}));
|
||||
|
||||
// Notify feed subscribers
|
||||
if (wsMessageSubscribers.size > 0) {
|
||||
const feedMsg: WsFeedMessage = {
|
||||
topic: data.topic,
|
||||
payload: data.payload,
|
||||
timestamp: Date.now(),
|
||||
id: String(wsMessageIdCounter++),
|
||||
};
|
||||
wsMessageSubscribers.forEach((cb) => cb(feedMsg));
|
||||
}
|
||||
}
|
||||
},
|
||||
onOpen: () => {
|
||||
sendJsonMessage({
|
||||
topic: "onConnect",
|
||||
message: "",
|
||||
retain: false,
|
||||
});
|
||||
},
|
||||
onClose: () => {},
|
||||
shouldReconnect: () => true,
|
||||
retryOnError: true,
|
||||
});
|
||||
|
||||
const setState = useCallback(
|
||||
(message: Update) => {
|
||||
if (readyState === ReadyState.OPEN) {
|
||||
sendJsonMessage({
|
||||
topic: message.topic,
|
||||
payload: message.payload,
|
||||
retain: message.retain,
|
||||
});
|
||||
}
|
||||
},
|
||||
[readyState, sendJsonMessage],
|
||||
);
|
||||
|
||||
return [wsState, setState];
|
||||
applyTopicUpdate(`${name}/recordings/state`, record ? "ON" : "OFF");
|
||||
applyTopicUpdate(`${name}/enabled/state`, enabled ? "ON" : "OFF");
|
||||
applyTopicUpdate(`${name}/detect/state`, detect ? "ON" : "OFF");
|
||||
applyTopicUpdate(`${name}/snapshots/state`, snapshots ? "ON" : "OFF");
|
||||
applyTopicUpdate(`${name}/audio/state`, audio ? "ON" : "OFF");
|
||||
applyTopicUpdate(
|
||||
`${name}/audio_transcription/state`,
|
||||
audio_transcription ? "ON" : "OFF",
|
||||
);
|
||||
applyTopicUpdate(
|
||||
`${name}/notifications/state`,
|
||||
notifications ? "ON" : "OFF",
|
||||
);
|
||||
applyTopicUpdate(
|
||||
`${name}/notifications/suspended`,
|
||||
notifications_suspended || 0,
|
||||
);
|
||||
applyTopicUpdate(
|
||||
`${name}/ptz_autotracker/state`,
|
||||
autotracking ? "ON" : "OFF",
|
||||
);
|
||||
applyTopicUpdate(`${name}/review_alerts/state`, alerts ? "ON" : "OFF");
|
||||
applyTopicUpdate(
|
||||
`${name}/review_detections/state`,
|
||||
detections ? "ON" : "OFF",
|
||||
);
|
||||
applyTopicUpdate(
|
||||
`${name}/object_descriptions/state`,
|
||||
object_descriptions ? "ON" : "OFF",
|
||||
);
|
||||
applyTopicUpdate(
|
||||
`${name}/review_descriptions/state`,
|
||||
review_descriptions ? "ON" : "OFF",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
export const {
|
||||
Provider: WsProvider,
|
||||
useTrackedState: useWsState,
|
||||
useUpdate: useWsUpdate,
|
||||
} = createContainer(useValue, { defaultState: {}, concurrentMode: true });
|
||||
// Hooks
|
||||
export function useWsUpdate(): WsSend {
|
||||
const send = useContext(WsSendContext);
|
||||
if (!send) {
|
||||
throw new Error("useWsUpdate must be used within WsProvider");
|
||||
}
|
||||
return send;
|
||||
}
|
||||
|
||||
// Subscribe to a single WS topic with proper bail-out.
|
||||
// Only re-renders when the topic's value changes (Object.is comparison).
|
||||
// Uses useSyncExternalStore — zero useEffect, so no PassiveMask flags
|
||||
// propagate through the fiber tree.
|
||||
export function useWs(watchTopic: string, publishTopic: string) {
|
||||
const state = useWsState();
|
||||
const payload = useSyncExternalStore(
|
||||
useCallback(
|
||||
(listener: Listener) => subscribeWsTopic(watchTopic, listener),
|
||||
[watchTopic],
|
||||
),
|
||||
useCallback(() => wsState[watchTopic], [watchTopic]),
|
||||
);
|
||||
|
||||
const sendJsonMessage = useWsUpdate();
|
||||
|
||||
const value = { payload: state[watchTopic] || null };
|
||||
const value = { payload: payload ?? null };
|
||||
|
||||
const send = useCallback(
|
||||
(payload: unknown, retain = false) => {
|
||||
@ -203,6 +245,8 @@ export function useWs(watchTopic: string, publishTopic: string) {
|
||||
return { value, send };
|
||||
}
|
||||
|
||||
// Convenience hooks
|
||||
|
||||
export function useEnabledState(camera: string): {
|
||||
payload: ToggleableSetting;
|
||||
send: (payload: ToggleableSetting, retain?: boolean) => void;
|
||||
@ -413,28 +457,42 @@ export function useFrigateEvents(): { payload: FrigateEvent } {
|
||||
const {
|
||||
value: { payload },
|
||||
} = useWs("events", "");
|
||||
return { payload: JSON.parse(payload as string) };
|
||||
const parsed = useMemo(
|
||||
() => (payload ? JSON.parse(payload as string) : undefined),
|
||||
[payload],
|
||||
);
|
||||
return { payload: parsed };
|
||||
}
|
||||
|
||||
export function useAudioDetections(): { payload: FrigateAudioDetections } {
|
||||
const {
|
||||
value: { payload },
|
||||
} = useWs("audio_detections", "");
|
||||
return { payload: JSON.parse(payload as string) };
|
||||
const parsed = useMemo(
|
||||
() => (payload ? JSON.parse(payload as string) : undefined),
|
||||
[payload],
|
||||
);
|
||||
return { payload: parsed };
|
||||
}
|
||||
|
||||
export function useFrigateReviews(): FrigateReview {
|
||||
const {
|
||||
value: { payload },
|
||||
} = useWs("reviews", "");
|
||||
return useDeepMemo(JSON.parse(payload as string));
|
||||
return useMemo(
|
||||
() => (payload ? JSON.parse(payload as string) : undefined),
|
||||
[payload],
|
||||
);
|
||||
}
|
||||
|
||||
export function useFrigateStats(): FrigateStats {
|
||||
const {
|
||||
value: { payload },
|
||||
} = useWs("stats", "");
|
||||
return useDeepMemo(JSON.parse(payload as string));
|
||||
return useMemo(
|
||||
() => (payload ? JSON.parse(payload as string) : undefined),
|
||||
[payload],
|
||||
);
|
||||
}
|
||||
|
||||
export function useInitialCameraState(
|
||||
@ -446,32 +504,31 @@ export function useInitialCameraState(
|
||||
const {
|
||||
value: { payload },
|
||||
send: sendCommand,
|
||||
} = useWs("camera_activity", "onConnect");
|
||||
} = useWs(`camera_activity/${camera}`, "onConnect");
|
||||
|
||||
const data = useDeepMemo(JSON.parse(payload as string));
|
||||
// camera_activity sub-topic payload is already parsed by expandCameraActivity
|
||||
const data = payload as FrigateCameraState | undefined;
|
||||
|
||||
// onConnect is sent once in WsProvider.onopen — no need to re-request on
|
||||
// every component mount. Components read cached wsState immediately via
|
||||
// useSyncExternalStore. Only re-request when the user tabs back in.
|
||||
useEffect(() => {
|
||||
let listener = undefined;
|
||||
if (revalidateOnFocus) {
|
||||
sendCommand("onConnect");
|
||||
listener = () => {
|
||||
if (document.visibilityState == "visible") {
|
||||
sendCommand("onConnect");
|
||||
}
|
||||
};
|
||||
addEventListener("visibilitychange", listener);
|
||||
}
|
||||
if (!revalidateOnFocus) return;
|
||||
|
||||
return () => {
|
||||
if (listener) {
|
||||
removeEventListener("visibilitychange", listener);
|
||||
const listener = () => {
|
||||
if (document.visibilityState === "visible") {
|
||||
sendCommand("onConnect");
|
||||
}
|
||||
};
|
||||
// only refresh when onRefresh value changes
|
||||
addEventListener("visibilitychange", listener);
|
||||
|
||||
return () => {
|
||||
removeEventListener("visibilitychange", listener);
|
||||
};
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, [revalidateOnFocus]);
|
||||
|
||||
return { payload: data ? data[camera] : undefined };
|
||||
return { payload: data as FrigateCameraState };
|
||||
}
|
||||
|
||||
export function useModelState(
|
||||
@ -483,7 +540,10 @@ export function useModelState(
|
||||
send: sendCommand,
|
||||
} = useWs("model_state", "modelState");
|
||||
|
||||
const data = useDeepMemo(JSON.parse(payload as string));
|
||||
const data = useMemo(
|
||||
() => (payload ? JSON.parse(payload as string) : undefined),
|
||||
[payload],
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
let listener = undefined;
|
||||
@ -519,7 +579,10 @@ export function useEmbeddingsReindexProgress(
|
||||
send: sendCommand,
|
||||
} = useWs("embeddings_reindex_progress", "embeddingsReindexProgress");
|
||||
|
||||
const data = useDeepMemo(JSON.parse(payload as string));
|
||||
const data = useMemo(
|
||||
() => (payload ? JSON.parse(payload as string) : undefined),
|
||||
[payload],
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
let listener = undefined;
|
||||
@ -553,8 +616,9 @@ export function useAudioTranscriptionProcessState(
|
||||
send: sendCommand,
|
||||
} = useWs("audio_transcription_state", "audioTranscriptionState");
|
||||
|
||||
const data = useDeepMemo(
|
||||
payload ? (JSON.parse(payload as string) as string) : "idle",
|
||||
const data = useMemo(
|
||||
() => (payload ? (JSON.parse(payload as string) as string) : "idle"),
|
||||
[payload],
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
@ -587,7 +651,10 @@ export function useBirdseyeLayout(revalidateOnFocus: boolean = true): {
|
||||
send: sendCommand,
|
||||
} = useWs("birdseye_layout", "birdseyeLayout");
|
||||
|
||||
const data = useDeepMemo(JSON.parse(payload as string));
|
||||
const data = useMemo(
|
||||
() => (payload ? JSON.parse(payload as string) : undefined),
|
||||
[payload],
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
let listener = undefined;
|
||||
@ -684,10 +751,14 @@ export function useTrackedObjectUpdate(): {
|
||||
const {
|
||||
value: { payload },
|
||||
} = useWs("tracked_object_update", "");
|
||||
const parsed = payload
|
||||
? JSON.parse(payload as string)
|
||||
: { type: "", id: "", camera: "" };
|
||||
return { payload: useDeepMemo(parsed) };
|
||||
const parsed = useMemo(
|
||||
() =>
|
||||
payload
|
||||
? JSON.parse(payload as string)
|
||||
: { type: "", id: "", camera: "" },
|
||||
[payload],
|
||||
);
|
||||
return { payload: parsed };
|
||||
}
|
||||
|
||||
export function useNotifications(camera: string): {
|
||||
@ -730,10 +801,14 @@ export function useTriggers(): { payload: TriggerStatus } {
|
||||
const {
|
||||
value: { payload },
|
||||
} = useWs("triggers", "");
|
||||
const parsed = payload
|
||||
? JSON.parse(payload as string)
|
||||
: { name: "", camera: "", event_id: "", type: "", score: 0 };
|
||||
return { payload: useDeepMemo(parsed) };
|
||||
const parsed = useMemo(
|
||||
() =>
|
||||
payload
|
||||
? JSON.parse(payload as string)
|
||||
: { name: "", camera: "", event_id: "", type: "", score: 0 },
|
||||
[payload],
|
||||
);
|
||||
return { payload: parsed };
|
||||
}
|
||||
|
||||
export function useJobStatus(
|
||||
@ -745,8 +820,9 @@ export function useJobStatus(
|
||||
send: sendCommand,
|
||||
} = useWs("job_state", "jobState");
|
||||
|
||||
const jobData = useDeepMemo(
|
||||
payload && typeof payload === "string" ? JSON.parse(payload) : {},
|
||||
const jobData = useMemo(
|
||||
() => (payload && typeof payload === "string" ? JSON.parse(payload) : {}),
|
||||
[payload],
|
||||
);
|
||||
const currentJob = jobData[jobType] || null;
|
||||
|
||||
11
web/src/api/wsContext.ts
Normal file
11
web/src/api/wsContext.ts
Normal file
@ -0,0 +1,11 @@
|
||||
import { createContext } from "react";
|
||||
|
||||
export type Update = {
|
||||
topic: string;
|
||||
payload: unknown;
|
||||
retain: boolean;
|
||||
};
|
||||
|
||||
export type WsSend = (update: Update) => void;
|
||||
|
||||
export const WsSendContext = createContext<WsSend | null>(null);
|
||||
@ -98,10 +98,10 @@ const TimeAgo: FunctionComponent<IProp> = ({
|
||||
return manualRefreshInterval;
|
||||
}
|
||||
|
||||
const currentTs = currentTime.getTime() / 1000;
|
||||
if (currentTs - time < 60) {
|
||||
const elapsedMs = currentTime.getTime() - time;
|
||||
if (elapsedMs < 60000) {
|
||||
return 1000; // refresh every second
|
||||
} else if (currentTs - time < 3600) {
|
||||
} else if (elapsedMs < 3600000) {
|
||||
return 60000; // refresh every minute
|
||||
} else {
|
||||
return 3600000; // refresh every hour
|
||||
|
||||
@ -1,18 +1,70 @@
|
||||
import { useMemo } from "react";
|
||||
import { useCallback, useMemo, useSyncExternalStore } from "react";
|
||||
import { Polygon } from "@/types/canvas";
|
||||
import { useWsState } from "@/api/ws";
|
||||
import { subscribeWsTopic, getWsTopicValue } from "@/api/ws";
|
||||
|
||||
/**
|
||||
* Hook to get enabled state for a polygon from websocket state.
|
||||
* Memoizes the lookup function to avoid unnecessary re-renders.
|
||||
* Subscribes to all relevant per-polygon topics so it only re-renders
|
||||
* when one of those specific topics changes — not on every WS update.
|
||||
*/
|
||||
export function usePolygonStates(polygons: Polygon[]) {
|
||||
const wsState = useWsState();
|
||||
// Build a stable sorted list of topics we need to watch
|
||||
const topics = useMemo(() => {
|
||||
const set = new Set<string>();
|
||||
polygons.forEach((polygon) => {
|
||||
const topic =
|
||||
polygon.type === "zone"
|
||||
? `${polygon.camera}/zone/${polygon.name}/state`
|
||||
: polygon.type === "motion_mask"
|
||||
? `${polygon.camera}/motion_mask/${polygon.name}/state`
|
||||
: `${polygon.camera}/object_mask/${polygon.name}/state`;
|
||||
set.add(topic);
|
||||
});
|
||||
return Array.from(set).sort();
|
||||
}, [polygons]);
|
||||
|
||||
// Create a memoized lookup map that only updates when relevant ws values change
|
||||
// Stable key for the topic list so subscribe/getSnapshot stay in sync
|
||||
const topicsKey = topics.join("\0");
|
||||
|
||||
// Subscribe to all topics at once — re-subscribe only when the set changes
|
||||
const subscribe = useCallback(
|
||||
(listener: () => void) => {
|
||||
const unsubscribes = topicsKey
|
||||
.split("\0")
|
||||
.filter(Boolean)
|
||||
.map((topic) => subscribeWsTopic(topic, listener));
|
||||
return () => unsubscribes.forEach((unsub) => unsub());
|
||||
},
|
||||
[topicsKey],
|
||||
);
|
||||
|
||||
// Build a snapshot string from the current values of all topics.
|
||||
// useSyncExternalStore uses Object.is, so we return a primitive that
|
||||
// changes only when an observed topic's value changes.
|
||||
const getSnapshot = useCallback(() => {
|
||||
return topicsKey
|
||||
.split("\0")
|
||||
.filter(Boolean)
|
||||
.map((topic) => `${topic}=${getWsTopicValue(topic) ?? ""}`)
|
||||
.join("\0");
|
||||
}, [topicsKey]);
|
||||
|
||||
const snapshot = useSyncExternalStore(subscribe, getSnapshot);
|
||||
|
||||
// Parse the snapshot into a lookup map
|
||||
return useMemo(() => {
|
||||
const stateMap = new Map<string, boolean>();
|
||||
// Build value map from snapshot
|
||||
const valueMap = new Map<string, unknown>();
|
||||
snapshot.split("\0").forEach((entry) => {
|
||||
const eqIdx = entry.indexOf("=");
|
||||
if (eqIdx > 0) {
|
||||
const topic = entry.slice(0, eqIdx);
|
||||
const val = entry.slice(eqIdx + 1) || undefined;
|
||||
valueMap.set(topic, val);
|
||||
}
|
||||
});
|
||||
|
||||
const stateMap = new Map<string, boolean>();
|
||||
polygons.forEach((polygon) => {
|
||||
const topic =
|
||||
polygon.type === "zone"
|
||||
@ -21,7 +73,7 @@ export function usePolygonStates(polygons: Polygon[]) {
|
||||
? `${polygon.camera}/motion_mask/${polygon.name}/state`
|
||||
: `${polygon.camera}/object_mask/${polygon.name}/state`;
|
||||
|
||||
const wsValue = wsState[topic];
|
||||
const wsValue = valueMap.get(topic);
|
||||
const enabled =
|
||||
wsValue === "ON"
|
||||
? true
|
||||
@ -40,5 +92,5 @@ export function usePolygonStates(polygons: Polygon[]) {
|
||||
true
|
||||
);
|
||||
};
|
||||
}, [polygons, wsState]);
|
||||
}, [polygons, snapshot]);
|
||||
}
|
||||
|
||||
Loading…
Reference in New Issue
Block a user