Mirror of https://github.com/blakeblackshear/frigate.git
Synced 2026-05-09 15:05:26 +03:00
Compare commits
9 commits: 89a7087e83 ... 7d7df9eb57

| SHA1 |
|---|
| 7d7df9eb57 |
| 77831304a7 |
| 1a6d04fde7 |
| 4a1b7a1629 |
| 0f3dd097ec |
| 2a4d7e4766 |
| 46415ffeb5 |
| e35ab0b8a1 |
| 837373547d |
Makefile (7 changes)
@@ -21,6 +21,13 @@ local: version
		--tag frigate:latest \
		--load

+localh10: version
+	docker buildx build --target=frigate --file docker/main/Dockerfile . \
+		--build-arg HAILORT_VERSION=5.1.1 \
+		--build-arg HAILORT_GIT_REPO=mathieu-d/hailort \
+		--tag frigate:latest \
+		--load
+
debug: version
	docker buildx build --target=frigate --file docker/main/Dockerfile . \
		--build-arg DEBUG=true \
@@ -12,6 +12,11 @@ services:
    build:
      context: .
      dockerfile: docker/main/Dockerfile
+      # Use args to specify hailort version and location
+      # args:
+      #   HAILORT_VERSION: "5.1.1"
+      #   HAILORT_GIT_REPO: "mathieu-d/hailort"
+
      # Use target devcontainer-trt for TensorRT dev
      target: devcontainer
      cache_from:
@@ -29,6 +34,7 @@ services:
    # devices:
    #   - /dev/bus/usb:/dev/bus/usb # Uncomment for Google Coral USB
    #   - /dev/dri:/dev/dri # for intel hwaccel, needs to be updated for your hardware

    volumes:
      - .:/workspace/frigate:cached
      - ./web/dist:/opt/frigate/web:cached
docker/hailo10h/user_installation.sh (new file, 7 lines)
@@ -0,0 +1,7 @@
#!/bin/bash

# Update package list and install hailo driver version 5.1.1 for Hailo-10H
sudo apt update
sudo apt install -y hailo-h10-all=5.1.1
@@ -157,6 +157,8 @@ FROM base AS wheels
ARG DEBIAN_FRONTEND
ARG TARGETARCH
ARG DEBUG=false
+ARG HAILORT_VERSION=4.21.0
+ARG HAILORT_GIT_REPO=frigate-nvr/hailort

# Use a separate container to build wheels to prevent build dependencies in final image
RUN apt-get -qq update \
@@ -2,13 +2,11 @@

set -euxo pipefail

-hailo_version="4.21.0"
-
if [[ "${TARGETARCH}" == "amd64" ]]; then
    arch="x86_64"
elif [[ "${TARGETARCH}" == "arm64" ]]; then
    arch="aarch64"
fi

-wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-debian12-${TARGETARCH}.tar.gz" | tar -C / -xzf -
-wget -P /wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp311-cp311-linux_${arch}.whl"
+wget -qO- "https://github.com/${HAILORT_GIT_REPO}/releases/download/v${HAILORT_VERSION}/hailort-debian12-${TARGETARCH}.tar.gz" | tar -C / -xzf -
+wget -P /wheels/ "https://github.com/${HAILORT_GIT_REPO}/releases/download/v${HAILORT_VERSION}/hailort-${HAILORT_VERSION}-cp311-cp311-linux_${arch}.whl"
@@ -754,6 +754,15 @@ def events_search(
                status_code=404,
            )

+        if search_event.camera not in allowed_cameras:
+            return JSONResponse(
+                content={
+                    "success": False,
+                    "message": "Event not found",
+                },
+                status_code=404,
+            )
+
        thumb_result = context.search_thumbnail(search_event)
        thumb_ids = {result[0]: result[1] for result in thumb_result}
        search_results = {
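The guard above answers 404 rather than 403 for events on cameras outside `allowed_cameras`, so callers cannot distinguish a hidden event from a missing one. A minimal sketch of the same pattern, assuming a bare FastAPI app with a hardcoded event store and allow list (stand-ins, not Frigate's actual wiring):

```python
from fastapi import FastAPI
from fastapi.responses import JSONResponse

app = FastAPI()

# Hypothetical stand-ins for Frigate's event store and per-user camera scope.
EVENTS = {"abc123": {"camera": "front_door"}}
ALLOWED_CAMERAS = {"back_yard"}

@app.get("/events/{event_id}")
def get_event(event_id: str):
    event = EVENTS.get(event_id)
    if event is None or event["camera"] not in ALLOWED_CAMERAS:
        # Identical response body for "does not exist" and "not authorized",
        # so event ids cannot be probed by unauthorized users.
        return JSONResponse(
            content={"success": False, "message": "Event not found"},
            status_code=404,
        )
    return {"success": True, "event": event}
```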
@@ -35,7 +35,7 @@ logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.recordings])


-@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
+@router.get("/recordings/storage", dependencies=[Depends(require_role(["admin"]))])
def get_recordings_storage_usage(request: Request):
    recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
        "storage"
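`require_role` is Frigate's dependency factory for role-gated routes; its internals are not part of this diff. A hedged sketch of how such a factory can work in FastAPI (an illustration, not the committed implementation), assuming the proxy-supplied role arrives in a `remote-role` header as the tests further down do:

```python
from fastapi import HTTPException, Request

def require_role(roles: list[str]):
    # Returns a dependency that rejects requests whose proxy-supplied
    # role header is not in the allowed list.
    def check(request: Request) -> None:
        role = request.headers.get("remote-role", "")
        if role not in roles:
            raise HTTPException(status_code=403, detail="Forbidden")

    return check

# Usage mirrors the hunk above:
# @router.get("/recordings/storage", dependencies=[Depends(require_role(["admin"]))])
```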
@@ -549,6 +549,14 @@ class WebPushClient(Communicator):
        logger.debug(f"Sending camera monitoring push notification for {camera_name}")

        for user in self.web_pushers:
+            if not self._user_has_camera_access(user, camera):
+                logger.debug(
+                    "Skipping notification for user %s - no access to camera %s",
+                    user,
+                    camera,
+                )
+                continue
+
            self.send_push_notification(
                user=user,
                payload=payload,
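The helper `_user_has_camera_access` is referenced here but not shown in this diff. Judging from the webpush test further down, which populates `client.user_cameras` with a per-user set of camera names, a plausible sketch is a simple set-membership check (an assumption, not the committed implementation):

```python
def _user_has_camera_access(self, user: str, camera: str) -> bool:
    # Assumed shape: self.user_cameras maps a username to the set of
    # camera names that user's role may view (see the webpush test below).
    return camera in self.user_cameras.get(user, set())
```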
frigate/detectors/plugins/hailo10h.py (new executable file, 415 lines)
@@ -0,0 +1,415 @@
import logging
import os
import subprocess
import threading
import urllib.request
from functools import partial
from typing import Dict, List, Optional, Tuple

import cv2
import numpy as np
from pydantic import ConfigDict, Field
from typing_extensions import Literal

from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import (
    BaseDetectorConfig,
)
from frigate.object_detection.util import RequestStore, ResponseStore

logger = logging.getLogger(__name__)


# ----------------- Utility Functions ----------------- #


def preprocess_tensor(image: np.ndarray, model_w: int, model_h: int) -> np.ndarray:
    """
    Resize an image with unchanged aspect ratio using padding.
    Assumes input image shape is (H, W, 3).
    """
    if image.ndim == 4 and image.shape[0] == 1:
        image = image[0]

    h, w = image.shape[:2]
    scale = min(model_w / w, model_h / h)
    new_w, new_h = int(w * scale), int(h * scale)
    resized_image = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_CUBIC)
    padded_image = np.full((model_h, model_w, 3), 114, dtype=image.dtype)
    x_offset = (model_w - new_w) // 2
    y_offset = (model_h - new_h) // 2
    padded_image[y_offset : y_offset + new_h, x_offset : x_offset + new_w] = (
        resized_image
    )
    return padded_image


# ----------------- Global Constants ----------------- #
DETECTOR_KEY = "hailo10h"
ARCH = None
H10H_DEFAULT_MODEL = "yolov6n.hef"
H10H_DEFAULT_URL = "https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/ModelZoo/Compiled/v5.2.0/hailo10h/yolov6n.hef"


def detect_hailo_arch():
    try:
        result = subprocess.run(
            ["hailortcli", "fw-control", "identify"], capture_output=True, text=True
        )
        if result.returncode != 0:
            logger.error(f"Inference error: {result.stderr}")
            return None
        for line in result.stdout.split("\n"):
            if "Device Architecture" in line:
                if "HAILO10H" in line:
                    return "hailo10h"
        logger.error("Inference error: Could not determine Hailo architecture.")
        return None
    except Exception as e:
        logger.error(f"Inference error: {e}")
        return None

# ----------------- HailoAsyncInference Class ----------------- #
class HailoAsyncInference:
    def __init__(
        self,
        hef_path: str,
        input_store: RequestStore,
        output_store: ResponseStore,
        batch_size: int = 1,
        input_type: Optional[str] = None,
        output_type: Optional[Dict[str, str]] = None,
        send_original_frame: bool = False,
    ) -> None:
        # when importing hailo it activates the driver
        # which leaves processes running even though it may not be used.
        try:
            from hailo_platform import (
                HEF,
                FormatType,
                HailoSchedulingAlgorithm,
                VDevice,
            )
        except ModuleNotFoundError:
            pass

        self.input_store = input_store
        self.output_store = output_store

        params = VDevice.create_params()
        params.scheduling_algorithm = HailoSchedulingAlgorithm.ROUND_ROBIN

        self.hef = HEF(hef_path)
        self.target = VDevice(params)
        self.infer_model = self.target.create_infer_model(hef_path)
        self.infer_model.set_batch_size(batch_size)

        if input_type is not None:
            self.infer_model.input().set_format_type(getattr(FormatType, input_type))

        if output_type is not None:
            for output_name, output_type in output_type.items():
                self.infer_model.output(output_name).set_format_type(
                    getattr(FormatType, output_type)
                )

        self.output_type = output_type
        self.send_original_frame = send_original_frame

    def callback(
        self,
        completion_info,
        bindings_list: List,
        input_batch: List,
        request_ids: List[int],
    ):
        if completion_info.exception:
            logger.error(f"Inference error: {completion_info.exception}")
        else:
            for i, bindings in enumerate(bindings_list):
                if len(bindings._output_names) == 1:
                    result = bindings.output().get_buffer()
                else:
                    result = {
                        name: np.expand_dims(bindings.output(name).get_buffer(), axis=0)
                        for name in bindings._output_names
                    }
                self.output_store.put(request_ids[i], (input_batch[i], result))

    def _create_bindings(self, configured_infer_model) -> object:
        if self.output_type is None:
            output_buffers = {
                output_info.name: np.empty(
                    self.infer_model.output(output_info.name).shape,
                    dtype=getattr(
                        np, str(output_info.format.type).split(".")[1].lower()
                    ),
                )
                for output_info in self.hef.get_output_vstream_infos()
            }
        else:
            output_buffers = {
                name: np.empty(
                    self.infer_model.output(name).shape,
                    dtype=getattr(np, self.output_type[name].lower()),
                )
                for name in self.output_type
            }
        return configured_infer_model.create_bindings(output_buffers=output_buffers)

    def get_input_shape(self) -> Tuple[int, ...]:
        return self.hef.get_input_vstream_infos()[0].shape

    def run(self) -> None:
        job = None
        with self.infer_model.configure() as configured_infer_model:
            while True:
                batch_data = self.input_store.get()

                if batch_data is None:
                    break

                request_id, frame_data = batch_data
                preprocessed_batch = [frame_data]
                request_ids = [request_id]
                input_batch = preprocessed_batch  # non-send_original_frame mode

                bindings_list = []
                for frame in preprocessed_batch:
                    bindings = self._create_bindings(configured_infer_model)
                    bindings.input().set_buffer(np.array(frame))
                    bindings_list.append(bindings)
                configured_infer_model.wait_for_async_ready(timeout_ms=10000)
                job = configured_infer_model.run_async(
                    bindings_list,
                    partial(
                        self.callback,
                        input_batch=input_batch,
                        request_ids=request_ids,
                        bindings_list=bindings_list,
                    ),
                )

        if job is not None:
            job.wait(100)

# ----------------- HailoDetector Class ----------------- #
class HailoDetector(DetectionApi):
    type_key = DETECTOR_KEY

    def __init__(self, detector_config: "HailoDetectorConfig"):
        global ARCH
        ARCH = detect_hailo_arch()
        self.cache_dir = MODEL_CACHE_DIR
        self.device_type = detector_config.device
        self.model_height = (
            detector_config.model.height
            if hasattr(detector_config.model, "height")
            else None
        )
        self.model_width = (
            detector_config.model.width
            if hasattr(detector_config.model, "width")
            else None
        )
        self.model_type = (
            detector_config.model.model_type
            if hasattr(detector_config.model, "model_type")
            else None
        )
        self.tensor_format = (
            detector_config.model.input_tensor
            if hasattr(detector_config.model, "input_tensor")
            else None
        )
        self.pixel_format = (
            detector_config.model.input_pixel_format
            if hasattr(detector_config.model, "input_pixel_format")
            else None
        )
        self.input_dtype = (
            detector_config.model.input_dtype
            if hasattr(detector_config.model, "input_dtype")
            else None
        )
        self.output_type = "FLOAT32"
        self.set_path_and_url(detector_config.model.path)
        self.working_model_path = self.check_and_prepare()

        self.batch_size = 1
        self.input_store = RequestStore()
        self.response_store = ResponseStore()

        try:
            logger.debug(f"[INIT] Loading HEF model from {self.working_model_path}")
            self.inference_engine = HailoAsyncInference(
                self.working_model_path,
                self.input_store,
                self.response_store,
                self.batch_size,
            )
            self.input_shape = self.inference_engine.get_input_shape()
            logger.debug(f"[INIT] Model input shape: {self.input_shape}")
            self.inference_thread = threading.Thread(
                target=self.inference_engine.run, daemon=True
            )
            self.inference_thread.start()
        except Exception as e:
            logger.error(f"[INIT] Failed to initialize HailoAsyncInference: {e}")
            raise

    def set_path_and_url(self, path: str = None):
        if not path:
            self.model_path = None
            self.url = None
            return
        if self.is_url(path):
            self.url = path
            self.model_path = None
        else:
            self.model_path = path
            self.url = None

    def is_url(self, url: str) -> bool:
        return (
            url.startswith("http://")
            or url.startswith("https://")
            or url.startswith("www.")
        )

    @staticmethod
    def extract_model_name(path: str = None, url: str = None) -> str:
        if path and path.endswith(".hef"):
            return os.path.basename(path)
        elif url and url.endswith(".hef"):
            return os.path.basename(url)
        else:
            return H10H_DEFAULT_MODEL

    @staticmethod
    def download_model(url: str, destination: str):
        if not url.endswith(".hef"):
            raise ValueError("Invalid model URL. Only .hef files are supported.")
        try:
            urllib.request.urlretrieve(url, destination)
            logger.debug(f"Downloaded model to {destination}")
        except Exception as e:
            raise RuntimeError(f"Failed to download model from {url}: {str(e)}")

    def check_and_prepare(self) -> str:
        if not os.path.exists(self.cache_dir):
            os.makedirs(self.cache_dir)
        model_name = self.extract_model_name(self.model_path, self.url)
        cached_model_path = os.path.join(self.cache_dir, model_name)
        if not self.model_path and not self.url:
            if os.path.exists(cached_model_path):
                logger.debug(f"Model found in cache: {cached_model_path}")
                return cached_model_path
            else:
                logger.debug(f"Downloading default model: {model_name}")
                self.download_model(H10H_DEFAULT_URL, cached_model_path)

        elif self.url:
            logger.debug(f"Downloading model from URL: {self.url}")
            self.download_model(self.url, cached_model_path)
        elif self.model_path:
            if os.path.exists(self.model_path):
                logger.debug(f"Using existing model at: {self.model_path}")
                return self.model_path
            else:
                raise FileNotFoundError(f"Model file not found at: {self.model_path}")
        return cached_model_path

    def detect_raw(self, tensor_input):
        tensor_input = self.preprocess(tensor_input)

        if isinstance(tensor_input, np.ndarray) and len(tensor_input.shape) == 3:
            tensor_input = np.expand_dims(tensor_input, axis=0)

        request_id = self.input_store.put(tensor_input)

        try:
            _, infer_results = self.response_store.get(request_id, timeout=1.0)
        except TimeoutError:
            logger.error(
                f"Timeout waiting for inference results for request {request_id}"
            )

            if not self.inference_thread.is_alive():
                raise RuntimeError(
                    "HailoRT inference thread has stopped, restart required."
                )

            return np.zeros((20, 6), dtype=np.float32)

        if isinstance(infer_results, list) and len(infer_results) == 1:
            infer_results = infer_results[0]

        threshold = 0.4
        all_detections = []
        for class_id, detection_set in enumerate(infer_results):
            if not isinstance(detection_set, np.ndarray) or detection_set.size == 0:
                continue
            for det in detection_set:
                if det.shape[0] < 5:
                    continue
                score = float(det[4])
                if score < threshold:
                    continue
                all_detections.append([class_id, score, det[0], det[1], det[2], det[3]])

        if len(all_detections) == 0:
            detections_array = np.zeros((20, 6), dtype=np.float32)
        else:
            detections_array = np.array(all_detections, dtype=np.float32)
            if detections_array.shape[0] > 20:
                detections_array = detections_array[:20, :]
            elif detections_array.shape[0] < 20:
                pad = np.zeros((20 - detections_array.shape[0], 6), dtype=np.float32)
                detections_array = np.vstack((detections_array, pad))

        return detections_array

    def preprocess(self, image):
        if isinstance(image, np.ndarray):
            processed = preprocess_tensor(
                image, self.input_shape[1], self.input_shape[0]
            )
            return np.expand_dims(processed, axis=0)
        else:
            raise ValueError("Unsupported image format for preprocessing")

    def close(self):
        """Properly shuts down the inference engine and releases the VDevice."""
        logger.debug("[CLOSE] Closing HailoDetector")
        try:
            if hasattr(self, "inference_engine"):
                if hasattr(self.inference_engine, "target"):
                    self.inference_engine.target.release()
                    logger.debug("Hailo VDevice released successfully")
        except Exception as e:
            logger.error(f"Failed to close Hailo device: {e}")
            raise

    def __del__(self):
        """Destructor to ensure cleanup when the object is deleted."""
        self.close()


# ----------------- HailoDetectorConfig Class ----------------- #
class HailoDetectorConfig(BaseDetectorConfig):
    """Hailo10H detector using HEF models and the HailoRT SDK for inference on Hailo hardware."""

    model_config = ConfigDict(
        title="Hailo-10H",
    )

    type: Literal[DETECTOR_KEY]
    device: str = Field(
        default="PCIe",
        title="Device Type",
        description="The device to use for Hailo inference (e.g. 'PCIe', 'M.2').",
    )
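To make the letterbox arithmetic in `preprocess_tensor` concrete, here is a small worked example for a 1920x1080 frame and a 640x640 model input; the values follow directly from the code above:

```python
# Worked example of the letterbox math in preprocess_tensor.
h, w = 1080, 1920            # input frame
model_w, model_h = 640, 640  # model input size

scale = min(model_w / w, model_h / h)          # min(0.3333, 0.5926) = 1/3
new_w, new_h = int(w * scale), int(h * scale)  # 640 x 360
x_offset = (model_w - new_w) // 2              # 0
y_offset = (model_h - new_h) // 2              # 140

# The resized frame occupies rows 140..499 of the 640x640 canvas;
# the remaining rows are filled with the pad value 114.
print(scale, (new_w, new_h), (x_offset, y_offset))
```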
@@ -19,6 +19,7 @@ import numpy as np
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import BirdseyeModeEnum, FfmpegConfig, FrigateConfig
from frigate.const import BASE_DIR, BIRDSEYE_PIPE, INSTALL_DIR, UPDATE_BIRDSEYE_LAYOUT
+from frigate.output.ws_auth import ws_has_camera_access
from frigate.util.image import (
    SharedMemoryFrameManager,
    copy_yuv_to_position,
@@ -236,12 +237,14 @@ class BroadcastThread(threading.Thread):
        converter: FFMpegConverter,
        websocket_server: Any,
        stop_event: MpEvent,
+        config: FrigateConfig,
    ):
        super().__init__()
        self.camera = camera
        self.converter = converter
        self.websocket_server = websocket_server
        self.stop_event = stop_event
+        self.config = config

    def run(self) -> None:
        while not self.stop_event.is_set():
@@ -256,6 +259,7 @@ class BroadcastThread(threading.Thread):
                    if (
                        not ws.terminated
                        and ws.environ["PATH_INFO"] == f"/{self.camera}"
+                        and ws_has_camera_access(ws, self.camera, self.config)
                    ):
                        try:
                            ws.send(buf, binary=True)
@@ -806,7 +810,11 @@ class Birdseye:
            config.birdseye.restream,
        )
        self.broadcaster = BroadcastThread(
-            "birdseye", self.converter, websocket_server, stop_event
+            "birdseye",
+            self.converter,
+            websocket_server,
+            stop_event,
+            config,
        )
        self.birdseye_manager = BirdsEyeFrameManager(self.config, stop_event)
        self.frame_manager = SharedMemoryFrameManager()
@@ -7,7 +7,8 @@ import threading
from multiprocessing.synchronize import Event as MpEvent
from typing import Any

-from frigate.config import CameraConfig, FfmpegConfig
+from frigate.config import CameraConfig, FfmpegConfig, FrigateConfig
+from frigate.output.ws_auth import ws_has_camera_access

logger = logging.getLogger(__name__)

@@ -102,12 +103,14 @@ class BroadcastThread(threading.Thread):
        converter: FFMpegConverter,
        websocket_server: Any,
        stop_event: MpEvent,
+        config: FrigateConfig,
    ):
        super().__init__()
        self.camera = camera
        self.converter = converter
        self.websocket_server = websocket_server
        self.stop_event = stop_event
+        self.config = config

    def run(self) -> None:
        while not self.stop_event.is_set():
@@ -122,6 +125,7 @@ class BroadcastThread(threading.Thread):
                    if (
                        not ws.terminated
                        and ws.environ["PATH_INFO"] == f"/{self.camera}"
+                        and ws_has_camera_access(ws, self.camera, self.config)
                    ):
                        try:
                            ws.send(buf, binary=True)
@@ -135,7 +139,11 @@ class BroadcastThread(threading.Thread):

class JsmpegCamera:
    def __init__(
-        self, config: CameraConfig, stop_event: MpEvent, websocket_server: Any
+        self,
+        config: CameraConfig,
+        frigate_config: FrigateConfig,
+        stop_event: MpEvent,
+        websocket_server: Any,
    ) -> None:
        self.config = config
        self.input: queue.Queue[bytes] = queue.Queue(maxsize=config.detect.fps)
@@ -154,7 +162,11 @@ class JsmpegCamera:
            config.live.quality,
        )
        self.broadcaster = BroadcastThread(
-            config.name or "", self.converter, websocket_server, stop_event
+            config.name or "",
+            self.converter,
+            websocket_server,
+            stop_event,
+            frigate_config,
        )

        self.converter.start()
@@ -32,6 +32,7 @@ from frigate.const import (
from frigate.output.birdseye import Birdseye
from frigate.output.camera import JsmpegCamera
from frigate.output.preview import PreviewRecorder
+from frigate.output.ws_auth import ws_has_camera_access
from frigate.util.image import SharedMemoryFrameManager, get_blank_yuv_frame
from frigate.util.process import FrigateProcess

@@ -102,7 +103,7 @@ class OutputProcess(FrigateProcess):
    ) -> None:
        camera_config = self.config.cameras[camera]
        jsmpeg_cameras[camera] = JsmpegCamera(
-            camera_config, self.stop_event, websocket_server
+            camera_config, self.config, self.stop_event, websocket_server
        )
        preview_recorders[camera] = PreviewRecorder(camera_config)
        preview_write_times[camera] = 0
@@ -262,6 +263,7 @@ class OutputProcess(FrigateProcess):
            # send camera frame to ffmpeg process if websockets are connected
            if any(
                ws.environ["PATH_INFO"].endswith(camera)
+                and ws_has_camera_access(ws, camera, self.config)
                for ws in websocket_server.manager
            ):
                # write to the converter for the camera if clients are listening to the specific camera
@@ -275,6 +277,7 @@ class OutputProcess(FrigateProcess):
                self.config.birdseye.restream
                or any(
                    ws.environ["PATH_INFO"].endswith("birdseye")
+                    and ws_has_camera_access(ws, "birdseye", self.config)
                    for ws in websocket_server.manager
                )
            )
frigate/output/ws_auth.py (new file, 43 lines)
@@ -0,0 +1,43 @@
"""Authorization helpers for JSMPEG websocket clients."""

from typing import Any

from frigate.config import FrigateConfig
from frigate.models import User


def _get_valid_ws_roles(ws: Any, config: FrigateConfig) -> list[str]:
    role_header = ws.environ.get("HTTP_REMOTE_ROLE", "")
    roles = [
        role.strip()
        for role in role_header.split(config.proxy.separator)
        if role.strip()
    ]
    return [role for role in roles if role in config.auth.roles]


def ws_has_camera_access(ws: Any, camera_name: str, config: FrigateConfig) -> bool:
    """Return True when a websocket client is authorized for the camera path."""
    roles = _get_valid_ws_roles(ws, config)

    if not roles:
        return False

    roles_dict = config.auth.roles

    # Birdseye is a composite stream, so only users with unrestricted access
    # should receive it.
    if camera_name == "birdseye":
        return any(role == "admin" or not roles_dict.get(role) for role in roles)

    all_camera_names = set(config.cameras.keys())

    for role in roles:
        if role == "admin" or not roles_dict.get(role):
            return True

        allowed_cameras = User.get_allowed_cameras(role, roles_dict, all_camera_names)
        if camera_name in allowed_cameras:
            return True

    return False
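One subtlety above: `not roles_dict.get(role)` treats a role mapped to an empty (or missing) camera list as unrestricted, which is why a plain "viewer" passes the birdseye check in the tests below. A quick illustration of that truth table, assuming Frigate's default admin/viewer roles map to empty camera lists, consistent with the test expectations:

```python
# Assumed role-to-cameras mapping, mirroring the test setup further down.
roles_dict = {"admin": [], "viewer": [], "limited_user": ["front_door"]}

for role in ("admin", "viewer", "limited_user"):
    # Empty list is falsy, so a role with no camera list is unrestricted.
    unrestricted = role == "admin" or not roles_dict.get(role)
    print(role, unrestricted)
# admin True, viewer True, limited_user False
```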
@@ -123,6 +123,15 @@ def get_detector_temperature(
            if index < len(hailo_device_names):
                device_name = hailo_device_names[index]
                return hailo_temps[device_name]
+    elif detector_type == "hailo10h":
+        # Get temperatures for Hailo devices
+        hailo_temps = get_hailo_temps()
+        if hailo_temps:
+            hailo_device_names = sorted(hailo_temps.keys())
+            index = detector_index_by_type.get("hailo10h", 0)
+            if index < len(hailo_device_names):
+                device_name = hailo_device_names[index]
+                return hailo_temps[device_name]
    elif detector_type == "rknn":
        # Rockchip temperatures are handled by the GPU / NPU stats
        # as there are not detector specific temperatures
@@ -23,6 +23,26 @@ class TestHttpApp(BaseTestHttp):
        response_json = response.json()
        assert response_json == self.test_stats

+    def test_recordings_storage_requires_admin(self):
+        stats = Mock(spec=StatsEmitter)
+        stats.get_latest_stats.return_value = self.test_stats
+        app = super().create_app(stats)
+        app.storage_maintainer = Mock()
+        app.storage_maintainer.calculate_camera_usages.return_value = {
+            "front_door": {"usage": 2.0},
+        }
+
+        with AuthTestClient(app) as client:
+            response = client.get(
+                "/recordings/storage",
+                headers={"remote-user": "viewer", "remote-role": "viewer"},
+            )
+            assert response.status_code == 403
+
+            response = client.get("/recordings/storage")
+            assert response.status_code == 200
+            assert response.json()["front_door"]["usage_percent"] == 25.0
+
    def test_config_set_in_memory_replaces_objects_track_list(self):
        self.minimal_config["cameras"]["front_door"]["objects"] = {
            "track": ["person", "car"],
@@ -219,6 +219,25 @@ class TestHttpApp(BaseTestHttp):
        assert len(events) == 1
        assert events[0]["id"] == event_id

+    def test_similarity_search_hides_unauthorized_anchor_event(self):
+        mock_embeddings = Mock()
+        self.app.frigate_config.semantic_search.enabled = True
+        self.app.embeddings = mock_embeddings
+
+        with AuthTestClient(self.app) as client:
+            super().insert_mock_event("hidden.anchor", camera="back_door")
+            response = client.get(
+                "/events/search",
+                params={
+                    "search_type": "similarity",
+                    "event_id": "hidden.anchor",
+                },
+            )
+
+            assert response.status_code == 404
+            assert response.json()["message"] == "Event not found"
+            mock_embeddings.search_thumbnail.assert_not_called()
+
    def test_get_good_event(self):
        id = "123456.random"
@@ -145,9 +145,12 @@ class TestExecuteFindSimilarObjects(unittest.TestCase):
            embeddings=embeddings,
            frigate_config=SimpleNamespace(
                semantic_search=SimpleNamespace(enabled=semantic_enabled),
+                cameras={"driveway": object()},
+                auth=SimpleNamespace(roles={"admin": [], "viewer": ["driveway"]}),
+                proxy=SimpleNamespace(separator=","),
            ),
        )
-        return SimpleNamespace(app=app)
+        return SimpleNamespace(app=app, headers={})

    def test_semantic_search_disabled_returns_error(self):
        req = self._make_request(semantic_enabled=False)
@@ -180,7 +183,7 @@ class TestExecuteFindSimilarObjects(unittest.TestCase):
            _execute_find_similar_objects(
                req,
                {"event_id": "anchor", "cameras": ["nonexistent_cam"]},
-                allowed_cameras=["nonexistent_cam"],
+                allowed_cameras=["driveway"],
            )
        )
        self.assertEqual(result["results"], [])
frigate/test/test_output_ws_auth.py (new file, 57 lines)
@@ -0,0 +1,57 @@
"""Tests for JSMPEG websocket authorization."""

import unittest
from types import SimpleNamespace

from frigate.config import FrigateConfig
from frigate.output.ws_auth import ws_has_camera_access


class TestWsHasCameraAccess(unittest.TestCase):
    def setUp(self):
        self.config = FrigateConfig(
            mqtt={"host": "mqtt"},
            auth={"roles": {"limited_user": ["front_door"]}},
            cameras={
                "front_door": {
                    "ffmpeg": {
                        "inputs": [
                            {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                        ]
                    },
                    "detect": {"height": 1080, "width": 1920, "fps": 5},
                },
                "back_door": {
                    "ffmpeg": {
                        "inputs": [
                            {"path": "rtsp://10.0.0.2:554/video", "roles": ["detect"]}
                        ]
                    },
                    "detect": {"height": 1080, "width": 1920, "fps": 5},
                },
            },
        )

    def _make_ws(self, role: str):
        return SimpleNamespace(environ={"HTTP_REMOTE_ROLE": role})

    def test_restricted_role_only_gets_allowed_camera(self):
        ws = self._make_ws("limited_user")
        self.assertTrue(ws_has_camera_access(ws, "front_door", self.config))
        self.assertFalse(ws_has_camera_access(ws, "back_door", self.config))

    def test_unrestricted_role_can_access_any_camera(self):
        ws = self._make_ws("viewer")
        self.assertTrue(ws_has_camera_access(ws, "front_door", self.config))
        self.assertTrue(ws_has_camera_access(ws, "back_door", self.config))

    def test_birdseye_requires_unrestricted_access(self):
        self.assertTrue(
            ws_has_camera_access(self._make_ws("admin"), "birdseye", self.config)
        )
        self.assertTrue(
            ws_has_camera_access(self._make_ws("viewer"), "birdseye", self.config)
        )
        self.assertFalse(
            ws_has_camera_access(self._make_ws("limited_user"), "birdseye", self.config)
        )
frigate/test/test_webpush_camera_monitoring.py (new file, 29 lines)
@@ -0,0 +1,29 @@
"""Tests for camera monitoring notification authorization."""

import unittest
from types import SimpleNamespace
from unittest.mock import MagicMock

from frigate.comms.webpush import WebPushClient


class TestCameraMonitoringNotifications(unittest.TestCase):
    def test_send_camera_monitoring_filters_by_camera_access(self):
        client = WebPushClient.__new__(WebPushClient)
        client.config = SimpleNamespace(
            cameras={"front_door": SimpleNamespace(friendly_name=None)}
        )
        client.web_pushers = {"allowed": [], "denied": []}
        client.user_cameras = {"allowed": {"front_door"}, "denied": set()}
        client.check_registrations = MagicMock()
        client.cleanup_registrations = MagicMock()
        client.send_push_notification = MagicMock()

        client.send_camera_monitoring(
            {"camera": "front_door", "message": "Monitoring condition met"}
        )

        self.assertEqual(client.send_push_notification.call_count, 1)
        self.assertEqual(
            client.send_push_notification.call_args.kwargs["user"], "allowed"
        )
@@ -24,8 +24,12 @@ from frigate.log import redirect_output_to_logger, suppress_stderr_during
from frigate.models import Event, Recordings, ReviewSegment
from frigate.types import ModelStatusTypesEnum
from frigate.util.downloader import ModelDownloader
-from frigate.util.file import get_event_thumbnail_bytes
-from frigate.util.image import get_image_from_recording
+from frigate.util.file import get_event_thumbnail_bytes, load_event_snapshot_image
+from frigate.util.image import (
+    calculate_region,
+    get_image_from_recording,
+    relative_box_to_absolute,
+)
from frigate.util.process import FrigateProcess

BATCH_SIZE = 16
@@ -713,7 +717,7 @@ def collect_object_classification_examples(
    This function:
    1. Queries events for the specified label
    2. Selects 100 balanced events across different cameras and times
-    3. Retrieves thumbnails for selected events (with 33% center crop applied)
+    3. Crops each event's clean snapshot around the object bounding box
    4. Selects 24 most visually distinct thumbnails
    5. Saves to dataset directory
@@ -832,66 +836,106 @@ def _select_balanced_events(

def _extract_event_thumbnails(events: list[Event], output_dir: str) -> list[str]:
    """
-    Extract thumbnails from events and save to disk.
+    Extract a training image for each event.
+
+    Preferred path: load the full-frame clean snapshot and crop around the
+    stored bounding box with the same calculate_region(..., max(w, h), 1.0)
+    call the live ObjectClassificationProcessor uses, so wizard examples
+    are framed like inference-time inputs.
+
+    Fallback: if no clean snapshot exists (snapshots disabled, or only a
+    legacy annotated JPG is on disk), center-crop the stored thumbnail
+    using a step ladder sized from the box/region area ratio.

    Args:
        events: List of Event objects
-        output_dir: Directory to save thumbnails
+        output_dir: Directory to save crops

    Returns:
-        List of paths to successfully extracted thumbnail images
+        List of paths to successfully extracted images
    """
-    thumbnail_paths = []
+    image_paths = []

    for idx, event in enumerate(events):
        try:
-            thumbnail_bytes = get_event_thumbnail_bytes(event)
+            img = _load_event_classification_crop(event)
+            if img is None:
+                continue

-            if thumbnail_bytes:
-                nparr = np.frombuffer(thumbnail_bytes, np.uint8)
-                img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
-
-                if img is not None:
-                    height, width = img.shape[:2]
-
-                    crop_size = 1.0
-                    if event.data and "box" in event.data and "region" in event.data:
-                        box = event.data["box"]
-                        region = event.data["region"]
-
-                        if len(box) == 4 and len(region) == 4:
-                            box_w, box_h = box[2], box[3]
-                            region_w, region_h = region[2], region[3]
-
-                            box_area = (box_w * box_h) / (region_w * region_h)
-
-                            if box_area < 0.05:
-                                crop_size = 0.4
-                            elif box_area < 0.10:
-                                crop_size = 0.5
-                            elif box_area < 0.20:
-                                crop_size = 0.65
-                            elif box_area < 0.35:
-                                crop_size = 0.80
-                            else:
-                                crop_size = 0.95
-
-                    crop_width = int(width * crop_size)
-                    crop_height = int(height * crop_size)
-
-                    x1 = (width - crop_width) // 2
-                    y1 = (height - crop_height) // 2
-                    x2 = x1 + crop_width
-                    y2 = y1 + crop_height
-
-                    cropped = img[y1:y2, x1:x2]
-                    resized = cv2.resize(cropped, (224, 224))
-                    output_path = os.path.join(output_dir, f"thumbnail_{idx:04d}.jpg")
-                    cv2.imwrite(output_path, resized)
-                    thumbnail_paths.append(output_path)
+            resized = cv2.resize(img, (224, 224))
+            output_path = os.path.join(output_dir, f"thumbnail_{idx:04d}.jpg")
+            cv2.imwrite(output_path, resized)
+            image_paths.append(output_path)

        except Exception as e:
-            logger.debug(f"Failed to extract thumbnail for event {event.id}: {e}")
+            logger.debug(f"Failed to extract image for event {event.id}: {e}")
            continue

-    return thumbnail_paths
+    return image_paths
+
+
+def _load_event_classification_crop(event: Event) -> np.ndarray | None:
+    """Prefer a snapshot-based object crop; fall back to a center-cropped thumbnail."""
+    if event.data and "box" in event.data:
+        snapshot, _ = load_event_snapshot_image(event, clean_only=True)
+        if snapshot is not None:
+            abs_box = relative_box_to_absolute(snapshot.shape, event.data["box"])
+            if abs_box is not None:
+                xmin, ymin, xmax, ymax = abs_box
+                box_w = xmax - xmin
+                box_h = ymax - ymin
+                if box_w > 0 and box_h > 0:
+                    x1, y1, x2, y2 = calculate_region(
+                        snapshot.shape,
+                        xmin,
+                        ymin,
+                        xmax,
+                        ymax,
+                        max(box_w, box_h),
+                        1.0,
+                    )
+                    cropped = snapshot[y1:y2, x1:x2]
+                    if cropped.size > 0:
+                        return cropped
+
+    thumbnail_bytes = get_event_thumbnail_bytes(event)
+    if not thumbnail_bytes:
+        return None
+
+    nparr = np.frombuffer(thumbnail_bytes, np.uint8)
+    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+    if img is None or img.size == 0:
+        return None
+
+    height, width = img.shape[:2]
+    crop_size = 1.0
+
+    if event.data and "box" in event.data and "region" in event.data:
+        box = event.data["box"]
+        region = event.data["region"]
+
+        if len(box) == 4 and len(region) == 4:
+            box_w, box_h = box[2], box[3]
+            region_w, region_h = region[2], region[3]
+            box_area = (box_w * box_h) / (region_w * region_h)
+
+            if box_area < 0.05:
+                crop_size = 0.4
+            elif box_area < 0.10:
+                crop_size = 0.5
+            elif box_area < 0.20:
+                crop_size = 0.65
+            elif box_area < 0.35:
+                crop_size = 0.80
+            else:
+                crop_size = 0.95
+
+    crop_width = int(width * crop_size)
+    crop_height = int(height * crop_size)
+    x1 = (width - crop_width) // 2
+    y1 = (height - crop_height) // 2
+    cropped = img[y1 : y1 + crop_height, x1 : x1 + crop_width]
+    if cropped.size == 0:
+        return None
+
+    return cropped
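As a concrete check of the fallback step ladder above: an object box of 100x80 px inside a 320x320 region gives box_area = 8000/102400 = 0.078, which falls in the `< 0.10` bucket, so the thumbnail is center-cropped to half its size per axis before the 224x224 resize. In code, mirroring the branch above:

```python
def fallback_crop_size(box_area: float) -> float:
    # Step ladder from _load_event_classification_crop: the smaller the
    # object is relative to its region, the tighter the center crop.
    if box_area < 0.05:
        return 0.4
    elif box_area < 0.10:
        return 0.5
    elif box_area < 0.20:
        return 0.65
    elif box_area < 0.35:
        return 0.80
    return 0.95

box_w, box_h = 100, 80        # object box inside the detection region
region_w, region_h = 320, 320
box_area = (box_w * box_h) / (region_w * region_h)  # 0.078125
print(fallback_crop_size(box_area))  # 0.5 -> keep the middle 50% per axis
```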
@@ -726,7 +726,20 @@ def ffprobe_stream(ffmpeg, path: str, detailed: bool = False) -> sp.CompletedProcess:
        if detailed and format_entries:
            cmd.extend(["-show_entries", f"format={format_entries}"])
        cmd.extend(["-loglevel", "error", clean_path])
-        return sp.run(cmd, capture_output=True)
+        try:
+            return sp.run(cmd, capture_output=True, timeout=6)
+        except sp.TimeoutExpired as e:
+            logger.info(
+                "ffprobe timed out while probing %s (transport=%s)",
+                clean_camera_user_pass(path),
+                rtsp_transport or "default",
+            )
+            return sp.CompletedProcess(
+                args=cmd,
+                returncode=1,
+                stdout=e.stdout or b"",
+                stderr=(e.stderr or b"") + b"\nffprobe timed out",
+            )

    result = run()
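Because the timeout path above synthesizes a `CompletedProcess` instead of raising, existing callers keep working unchanged; a caller that wants to report the timeout specifically can key off the sentinel appended to stderr. A sketch of that pattern (the caller shown is hypothetical, not from this diff):

```python
import subprocess as sp

def probe_or_report(cmd: list[str]) -> None:
    # Mirrors ffprobe_stream's fallback: a timeout surfaces as a normal
    # failed CompletedProcess carrying a marker string in stderr.
    try:
        result = sp.run(cmd, capture_output=True, timeout=6)
    except sp.TimeoutExpired as e:
        result = sp.CompletedProcess(
            args=cmd,
            returncode=1,
            stdout=e.stdout or b"",
            stderr=(e.stderr or b"") + b"\nffprobe timed out",
        )

    if result.returncode != 0:
        if b"ffprobe timed out" in result.stderr:
            print("probe timed out")
        else:
            print("probe failed:", result.stderr.decode(errors="replace"))
```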
@@ -832,11 +845,23 @@ async def get_video_properties(
        "-show_streams",
        url,
    ]
+    proc = None
    try:
        proc = await asyncio.create_subprocess_exec(
            *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
        )
-        stdout, _ = await proc.communicate()
+        try:
+            stdout, _ = await asyncio.wait_for(proc.communicate(), timeout=6)
+        except asyncio.TimeoutError:
+            logger.info(
+                "ffprobe timed out while probing %s (transport=%s)",
+                clean_camera_user_pass(url),
+                rtsp_transport or "default",
+            )
+            proc.kill()
+            await proc.wait()
+            return False, 0, 0, None, -1

        if proc.returncode != 0:
            return False, 0, 0, None, -1
@@ -397,6 +397,14 @@
      "description": "The device to use for Hailo inference (e.g. 'PCIe', 'M.2')."
    }
  },
+  "hailo10h": {
+    "label": "Hailo-10H",
+    "description": "Hailo-10H detector using HEF models and the HailoRT SDK for inference on Hailo hardware.",
+    "device": {
+      "label": "Device Type",
+      "description": "The device to use for Hailo inference (e.g. 'PCIe', 'M.2')."
+    }
+  },
  "memryx": {
    "label": "MemryX",
    "description": "MemryX MX3 detector that runs compiled DFP models on MemryX accelerators.",