Mirror of https://github.com/blakeblackshear/frigate.git (synced 2026-05-09 15:05:26 +03:00)

Compare commits: 16 commits, 6471e4a893...89a7087e83
Commits (newest first):
89a7087e83, 8eace9c3e7, 8fc1e97df5, 0a332cada9, ba499201e6, c244e6582a,
fff3594553, 25bfb2c481, b7261c8e70, ad9092d0da, 20705a3e97, 0f3dd097ec,
2a4d7e4766, 46415ffeb5, e35ab0b8a1, 837373547d
Makefile (7 changes)

@@ -21,6 +21,13 @@ local: version
		--tag frigate:latest \
		--load

localh10: version
	docker buildx build --target=frigate --file docker/main/Dockerfile . \
		--build-arg HAILORT_VERSION=5.1.1 \
		--build-arg HAILORT_GIT_REPO=mathieu-d/hailort \
		--tag frigate:latest \
		--load

debug: version
	docker buildx build --target=frigate --file docker/main/Dockerfile . \
		--build-arg DEBUG=true \
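Usage note: with the new target in place, a Hailo-10H image build reduces to a single make invocation; the direct buildx command below simply inlines the same build args (a sketch, assuming docker buildx is available and the repo root is the working directory):

```bash
# Build a Frigate image against HailoRT 5.1.1 from the mathieu-d/hailort fork
make localh10

# Direct equivalent of what the target runs:
docker buildx build --target=frigate --file docker/main/Dockerfile . \
    --build-arg HAILORT_VERSION=5.1.1 \
    --build-arg HAILORT_GIT_REPO=mathieu-d/hailort \
    --tag frigate:latest \
    --load
```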
@@ -12,6 +12,11 @@ services:
    build:
      context: .
      dockerfile: docker/main/Dockerfile
      # Use args to specify hailort version and location
      # args:
      #   HAILORT_VERSION: "5.1.1"
      #   HAILORT_GIT_REPO: "mathieu-d/hailort"

      # Use target devcontainer-trt for TensorRT dev
      target: devcontainer
      cache_from:

@@ -29,6 +34,7 @@ services:
    # devices:
    #   - /dev/bus/usb:/dev/bus/usb # Uncomment for Google Coral USB
    #   - /dev/dri:/dev/dri # for intel hwaccel, needs to be updated for your hardware

    volumes:
      - .:/workspace/frigate:cached
      - ./web/dist:/opt/frigate/web:cached
docker/hailo10h/user_installation.sh (new file, 7 lines)

@@ -0,0 +1,7 @@
#!/bin/bash

# Update package list and install hailo driver version 5.1.1 for Hailo-10H
sudo apt update
sudo apt install -y hailo-h10-all=5.1.1
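After the script runs, the install can be sanity-checked with hailortcli, the same probe the new hailo10h detector plugin (below) uses to identify the device architecture; this check is a suggested follow-up, not part of the committed script:

```bash
# A healthy Hailo-10H install reports "Device Architecture: HAILO10H"
hailortcli fw-control identify
```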
@@ -157,6 +157,8 @@ FROM base AS wheels
ARG DEBIAN_FRONTEND
ARG TARGETARCH
ARG DEBUG=false
ARG HAILORT_VERSION=4.21.0
ARG HAILORT_GIT_REPO=frigate-nvr/hailort

# Use a separate container to build wheels to prevent build dependencies in final image
RUN apt-get -qq update \
@@ -87,43 +87,43 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
    # intel packages use zst compression so we need to update dpkg
    apt-get install -y dpkg

    # use intel apt intel packages
    # use intel apt repo for libmfx1 (legacy QSV, pre-Gen12)
    wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
    echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list
    apt-get -qq update

    # intel-media-va-driver-non-free is built from source in the
    # intel-media-driver Dockerfile stage for Battlemage (Xe2) support
    apt-get -qq install --no-install-recommends --no-install-suggests -y \
        libmfx1 libmfxgen1 libvpl2
        libmfx1
    rm -f /usr/share/keyrings/intel-graphics.gpg
    rm -f /etc/apt/sources.list.d/intel-gpu-jammy.list

    # upgrade libva2, oneVPL runtime, and libvpl2 from trixie for Battlemage support
    echo "deb http://deb.debian.org/debian trixie main" > /etc/apt/sources.list.d/trixie.list
    apt-get -qq update
    apt-get -qq install -y -t trixie libva2 libva-drm2 libzstd1
    apt-get -qq install -y -t trixie libmfx-gen1.2 libvpl2
    rm -f /etc/apt/sources.list.d/trixie.list
    apt-get -qq update
    apt-get -qq install -y ocl-icd-libopencl1

    # install libtbb12 for NPU support
    apt-get -qq install -y libtbb12

    rm -f /usr/share/keyrings/intel-graphics.gpg
    rm -f /etc/apt/sources.list.d/intel-gpu-jammy.list

    # install legacy and standard intel icd and level-zero-gpu
    # install legacy and standard intel compute packages
    # see https://github.com/intel/compute-runtime/blob/master/LEGACY_PLATFORMS.md for more info
    # newer intel packages (gmmlib 22.9+, igc 2.32+) require libstdc++ >= 13.1 and libzstd >= 1.5.5
    echo "deb http://deb.debian.org/debian trixie main" > /etc/apt/sources.list.d/trixie.list
    apt-get -qq update
    apt-get -qq install -y -t trixie libstdc++6 libzstd1
    rm -f /etc/apt/sources.list.d/trixie.list
    apt-get -qq update

    # needed core package
    wget https://github.com/intel/compute-runtime/releases/download/26.14.37833.4/libigdgmm12_22.9.0_amd64.deb
    dpkg -i libigdgmm12_22.9.0_amd64.deb
    rm libigdgmm12_22.9.0_amd64.deb

    # legacy packages
    # legacy compute-runtime packages
    wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.36/intel-opencl-icd-legacy1_24.35.30872.36_amd64.deb
    wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.36/intel-level-zero-gpu-legacy1_1.5.30872.36_amd64.deb
    wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.24/intel-igc-opencl_1.0.17537.24_amd64.deb
    wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.24/intel-igc-core_1.0.17537.24_amd64.deb
    # standard packages
    # standard compute-runtime packages
    wget https://github.com/intel/compute-runtime/releases/download/26.14.37833.4/intel-opencl-icd_26.14.37833.4-0_amd64.deb
    wget https://github.com/intel/compute-runtime/releases/download/26.14.37833.4/libze-intel-gpu1_26.14.37833.4-0_amd64.deb
    wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.32.7/intel-igc-opencl-2_2.32.7+21184_amd64.deb

@@ -137,6 +137,10 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
    dpkg -i *.deb
    rm *.deb
    apt-get -qq install -f -y

    # Battlemage uses the xe kernel driver, but the VA-API driver is still iHD.
    # The oneVPL runtime may look for a driver named after the kernel module.
    ln -sf /usr/lib/x86_64-linux-gnu/dri/iHD_drv_video.so /usr/lib/x86_64-linux-gnu/dri/xe_drv_video.so
fi

if [[ "${TARGETARCH}" == "arm64" ]]; then
@@ -2,13 +2,11 @@

set -euxo pipefail

hailo_version="4.21.0"

if [[ "${TARGETARCH}" == "amd64" ]]; then
    arch="x86_64"
elif [[ "${TARGETARCH}" == "arm64" ]]; then
    arch="aarch64"
fi

wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-debian12-${TARGETARCH}.tar.gz" | tar -C / -xzf -
wget -P /wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp311-cp311-linux_${arch}.whl"
wget -qO- "https://github.com/${HAILORT_GIT_REPO}/releases/download/v${HAILORT_VERSION}/hailort-debian12-${TARGETARCH}.tar.gz" | tar -C / -xzf -
wget -P /wheels/ "https://github.com/${HAILORT_GIT_REPO}/releases/download/v${HAILORT_VERSION}/hailort-${HAILORT_VERSION}-cp311-cp311-linux_${arch}.whl"
@@ -11,7 +11,7 @@ joserfc == 1.2.*
cryptography == 44.0.*
pathvalidate == 3.3.*
markupsafe == 3.0.*
python-multipart == 0.0.20
python-multipart == 0.0.26
# Classification Model Training
tensorflow == 2.19.* ; platform_machine == 'aarch64'
tensorflow-cpu == 2.19.* ; platform_machine == 'x86_64'
@@ -39,6 +39,10 @@ This is a fork (with fixed errors and new features) of [original Double Take](ht

[Frigate telegram](https://github.com/OldTyT/frigate-telegram) makes it possible to send events from Frigate to Telegram. Events are sent as a message with a text description, video, and thumbnail.

## [kiosk-monitor](https://github.com/extremeshok/kiosk-monitor)

[kiosk-monitor](https://github.com/extremeshok/kiosk-monitor) is a Raspberry Pi watchdog that runs Chromium fullscreen on a Frigate dashboard (optionally with VLC on a second monitor for an RTSP camera stream), auto-restarts on frozen screens or unreachable URLs, and ships a Birdseye-aware Chromium helper that auto-sizes the grid to the display.

## [Periscope](https://github.com/maksz42/periscope)

[Periscope](https://github.com/maksz42/periscope) is a lightweight Android app that turns old devices into live viewers for Frigate. It works on Android 2.2 and above, including Android TV. It supports authentication and HTTPS.
@@ -111,26 +111,16 @@ TCP ensures that all data packets arrive in the correct order. This is crucial f

You can still configure Frigate to use UDP by using ffmpeg input args or the preset `preset-rtsp-udp`. See the [ffmpeg presets](/configuration/ffmpeg_presets) documentation.

### Frigate hangs on startup with a "probing detect stream" message in the logs
### Frigate is slow to start up with a "probing detect stream" message in the logs

On startup, Frigate probes each camera's detect stream with OpenCV to auto-detect its resolution. OpenCV's FFmpeg backend may attempt RTSP over UDP during this probe regardless of the `-rtsp_transport tcp` in your `input_args` or preset. For cameras that do not respond to UDP (common on some Reolink models and others behind firewalls that block UDP), the probe can hang indefinitely and block Frigate from finishing startup, or it can return zeroed-out dimensions that show up as width `0` and height `0` in Camera Probe Info under System Metrics.
When `detect.width` and `detect.height` are not set, Frigate probes each camera's detect stream on startup (and when saving the config) to auto-detect its resolution. For RTSP streams Frigate probes with ffprobe and automatically retries over TCP if UDP doesn't respond, with a 5 second timeout per attempt. A camera that cannot be reached over either transport will add up to ~10 seconds to startup before Frigate falls through with default dimensions, which may show up as width `0` and height `0` in Camera Probe Info under System Metrics.

There are two ways to avoid this:
To skip the probe entirely and make startup instant, set `detect.width` and `detect.height` explicitly in your camera config:

1. Set `detect.width` and `detect.height` explicitly in your camera config. When both are set, Frigate skips the auto-detect probe entirely:

```yaml
cameras:
  my_camera:
    detect:
      width: 1280
      height: 720
```

2. Force OpenCV's FFmpeg backend to use TCP for RTSP by setting the environment variable on your Frigate container:

```
OPENCV_FFMPEG_CAPTURE_OPTIONS=rtsp_transport;tcp
```

This is a process-wide setting and applies to all cameras. If you have any cameras that require `preset-rtsp-udp`, use option 1 instead.
```yaml
cameras:
  my_camera:
    detect:
      width: 1280
      height: 720
```
docs/package-lock.json (6 changes, generated)

@@ -10897,9 +10897,9 @@
      "license": "MIT"
    },
    "node_modules/express/node_modules/path-to-regexp": {
      "version": "0.1.12",
      "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz",
      "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==",
      "version": "0.1.13",
      "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.13.tgz",
      "integrity": "sha512-A/AGNMFN3c8bOlvV9RreMdrv7jsmF9XIfDeCd87+I8RNg6s78BhJxMu69NEMHBSJFxKidViTEdruRwEk/WIKqA==",
      "license": "MIT"
    },
    "node_modules/express/node_modules/range-parser": {
frigate/detectors/plugins/hailo10h.py (new file, 415 lines, executable)

@@ -0,0 +1,415 @@
import logging
import os
import subprocess
import threading
import urllib.request
from functools import partial
from typing import Dict, List, Optional, Tuple

import cv2
import numpy as np
from pydantic import ConfigDict, Field
from typing_extensions import Literal

from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import (
    BaseDetectorConfig,
)
from frigate.object_detection.util import RequestStore, ResponseStore

logger = logging.getLogger(__name__)


# ----------------- Utility Functions ----------------- #


def preprocess_tensor(image: np.ndarray, model_w: int, model_h: int) -> np.ndarray:
    """
    Resize an image with unchanged aspect ratio using padding.
    Assumes input image shape is (H, W, 3).
    """
    if image.ndim == 4 and image.shape[0] == 1:
        image = image[0]

    h, w = image.shape[:2]
    scale = min(model_w / w, model_h / h)
    new_w, new_h = int(w * scale), int(h * scale)
    resized_image = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_CUBIC)
    padded_image = np.full((model_h, model_w, 3), 114, dtype=image.dtype)
    x_offset = (model_w - new_w) // 2
    y_offset = (model_h - new_h) // 2
    padded_image[y_offset : y_offset + new_h, x_offset : x_offset + new_w] = (
        resized_image
    )
    return padded_image


# ----------------- Global Constants ----------------- #
DETECTOR_KEY = "hailo10h"
ARCH = None
H10H_DEFAULT_MODEL = "yolov6n.hef"
H10H_DEFAULT_URL = "https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/ModelZoo/Compiled/v5.2.0/hailo10h/yolov6n.hef"


def detect_hailo_arch():
    try:
        result = subprocess.run(
            ["hailortcli", "fw-control", "identify"], capture_output=True, text=True
        )
        if result.returncode != 0:
            logger.error(f"Inference error: {result.stderr}")
            return None
        for line in result.stdout.split("\n"):
            if "Device Architecture" in line:
                if "HAILO10H" in line:
                    return "hailo10h"
        logger.error("Inference error: Could not determine Hailo architecture.")
        return None
    except Exception as e:
        logger.error(f"Inference error: {e}")
        return None


# ----------------- HailoAsyncInference Class ----------------- #
class HailoAsyncInference:
    def __init__(
        self,
        hef_path: str,
        input_store: RequestStore,
        output_store: ResponseStore,
        batch_size: int = 1,
        input_type: Optional[str] = None,
        output_type: Optional[Dict[str, str]] = None,
        send_original_frame: bool = False,
    ) -> None:
        # when importing hailo it activates the driver
        # which leaves processes running even though it may not be used.
        try:
            from hailo_platform import (
                HEF,
                FormatType,
                HailoSchedulingAlgorithm,
                VDevice,
            )
        except ModuleNotFoundError:
            pass

        self.input_store = input_store
        self.output_store = output_store

        params = VDevice.create_params()
        params.scheduling_algorithm = HailoSchedulingAlgorithm.ROUND_ROBIN

        self.hef = HEF(hef_path)
        self.target = VDevice(params)
        self.infer_model = self.target.create_infer_model(hef_path)
        self.infer_model.set_batch_size(batch_size)

        if input_type is not None:
            self.infer_model.input().set_format_type(getattr(FormatType, input_type))

        if output_type is not None:
            for output_name, output_type in output_type.items():
                self.infer_model.output(output_name).set_format_type(
                    getattr(FormatType, output_type)
                )

        self.output_type = output_type
        self.send_original_frame = send_original_frame

    def callback(
        self,
        completion_info,
        bindings_list: List,
        input_batch: List,
        request_ids: List[int],
    ):
        if completion_info.exception:
            logger.error(f"Inference error: {completion_info.exception}")
        else:
            for i, bindings in enumerate(bindings_list):
                if len(bindings._output_names) == 1:
                    result = bindings.output().get_buffer()
                else:
                    result = {
                        name: np.expand_dims(bindings.output(name).get_buffer(), axis=0)
                        for name in bindings._output_names
                    }
                self.output_store.put(request_ids[i], (input_batch[i], result))

    def _create_bindings(self, configured_infer_model) -> object:
        if self.output_type is None:
            output_buffers = {
                output_info.name: np.empty(
                    self.infer_model.output(output_info.name).shape,
                    dtype=getattr(
                        np, str(output_info.format.type).split(".")[1].lower()
                    ),
                )
                for output_info in self.hef.get_output_vstream_infos()
            }
        else:
            output_buffers = {
                name: np.empty(
                    self.infer_model.output(name).shape,
                    dtype=getattr(np, self.output_type[name].lower()),
                )
                for name in self.output_type
            }
        return configured_infer_model.create_bindings(output_buffers=output_buffers)

    def get_input_shape(self) -> Tuple[int, ...]:
        return self.hef.get_input_vstream_infos()[0].shape

    def run(self) -> None:
        job = None
        with self.infer_model.configure() as configured_infer_model:
            while True:
                batch_data = self.input_store.get()

                if batch_data is None:
                    break

                request_id, frame_data = batch_data
                preprocessed_batch = [frame_data]
                request_ids = [request_id]
                input_batch = preprocessed_batch  # non-send_original_frame mode

                bindings_list = []
                for frame in preprocessed_batch:
                    bindings = self._create_bindings(configured_infer_model)
                    bindings.input().set_buffer(np.array(frame))
                    bindings_list.append(bindings)
                configured_infer_model.wait_for_async_ready(timeout_ms=10000)
                job = configured_infer_model.run_async(
                    bindings_list,
                    partial(
                        self.callback,
                        input_batch=input_batch,
                        request_ids=request_ids,
                        bindings_list=bindings_list,
                    ),
                )

            if job is not None:
                job.wait(100)


# ----------------- HailoDetector Class ----------------- #
class HailoDetector(DetectionApi):
    type_key = DETECTOR_KEY

    def __init__(self, detector_config: "HailoDetectorConfig"):
        global ARCH
        ARCH = detect_hailo_arch()
        self.cache_dir = MODEL_CACHE_DIR
        self.device_type = detector_config.device
        self.model_height = (
            detector_config.model.height
            if hasattr(detector_config.model, "height")
            else None
        )
        self.model_width = (
            detector_config.model.width
            if hasattr(detector_config.model, "width")
            else None
        )
        self.model_type = (
            detector_config.model.model_type
            if hasattr(detector_config.model, "model_type")
            else None
        )
        self.tensor_format = (
            detector_config.model.input_tensor
            if hasattr(detector_config.model, "input_tensor")
            else None
        )
        self.pixel_format = (
            detector_config.model.input_pixel_format
            if hasattr(detector_config.model, "input_pixel_format")
            else None
        )
        self.input_dtype = (
            detector_config.model.input_dtype
            if hasattr(detector_config.model, "input_dtype")
            else None
        )
        self.output_type = "FLOAT32"
        self.set_path_and_url(detector_config.model.path)
        self.working_model_path = self.check_and_prepare()

        self.batch_size = 1
        self.input_store = RequestStore()
        self.response_store = ResponseStore()

        try:
            logger.debug(f"[INIT] Loading HEF model from {self.working_model_path}")
            self.inference_engine = HailoAsyncInference(
                self.working_model_path,
                self.input_store,
                self.response_store,
                self.batch_size,
            )
            self.input_shape = self.inference_engine.get_input_shape()
            logger.debug(f"[INIT] Model input shape: {self.input_shape}")
            self.inference_thread = threading.Thread(
                target=self.inference_engine.run, daemon=True
            )
            self.inference_thread.start()
        except Exception as e:
            logger.error(f"[INIT] Failed to initialize HailoAsyncInference: {e}")
            raise

    def set_path_and_url(self, path: str = None):
        if not path:
            self.model_path = None
            self.url = None
            return
        if self.is_url(path):
            self.url = path
            self.model_path = None
        else:
            self.model_path = path
            self.url = None

    def is_url(self, url: str) -> bool:
        return (
            url.startswith("http://")
            or url.startswith("https://")
            or url.startswith("www.")
        )

    @staticmethod
    def extract_model_name(path: str = None, url: str = None) -> str:
        if path and path.endswith(".hef"):
            return os.path.basename(path)
        elif url and url.endswith(".hef"):
            return os.path.basename(url)
        else:
            return H10H_DEFAULT_MODEL

    @staticmethod
    def download_model(url: str, destination: str):
        if not url.endswith(".hef"):
            raise ValueError("Invalid model URL. Only .hef files are supported.")
        try:
            urllib.request.urlretrieve(url, destination)
            logger.debug(f"Downloaded model to {destination}")
        except Exception as e:
            raise RuntimeError(f"Failed to download model from {url}: {str(e)}")

    def check_and_prepare(self) -> str:
        if not os.path.exists(self.cache_dir):
            os.makedirs(self.cache_dir)
        model_name = self.extract_model_name(self.model_path, self.url)
        cached_model_path = os.path.join(self.cache_dir, model_name)
        if not self.model_path and not self.url:
            if os.path.exists(cached_model_path):
                logger.debug(f"Model found in cache: {cached_model_path}")
                return cached_model_path
            else:
                logger.debug(f"Downloading default model: {model_name}")
                self.download_model(H10H_DEFAULT_URL, cached_model_path)
        elif self.url:
            logger.debug(f"Downloading model from URL: {self.url}")
            self.download_model(self.url, cached_model_path)
        elif self.model_path:
            if os.path.exists(self.model_path):
                logger.debug(f"Using existing model at: {self.model_path}")
                return self.model_path
            else:
                raise FileNotFoundError(f"Model file not found at: {self.model_path}")
        return cached_model_path

    def detect_raw(self, tensor_input):
        tensor_input = self.preprocess(tensor_input)

        if isinstance(tensor_input, np.ndarray) and len(tensor_input.shape) == 3:
            tensor_input = np.expand_dims(tensor_input, axis=0)

        request_id = self.input_store.put(tensor_input)

        try:
            _, infer_results = self.response_store.get(request_id, timeout=1.0)
        except TimeoutError:
            logger.error(
                f"Timeout waiting for inference results for request {request_id}"
            )

            if not self.inference_thread.is_alive():
                raise RuntimeError(
                    "HailoRT inference thread has stopped, restart required."
                )

            return np.zeros((20, 6), dtype=np.float32)

        if isinstance(infer_results, list) and len(infer_results) == 1:
            infer_results = infer_results[0]

        threshold = 0.4
        all_detections = []
        for class_id, detection_set in enumerate(infer_results):
            if not isinstance(detection_set, np.ndarray) or detection_set.size == 0:
                continue
            for det in detection_set:
                if det.shape[0] < 5:
                    continue
                score = float(det[4])
                if score < threshold:
                    continue
                all_detections.append([class_id, score, det[0], det[1], det[2], det[3]])

        if len(all_detections) == 0:
            detections_array = np.zeros((20, 6), dtype=np.float32)
        else:
            detections_array = np.array(all_detections, dtype=np.float32)
            if detections_array.shape[0] > 20:
                detections_array = detections_array[:20, :]
            elif detections_array.shape[0] < 20:
                pad = np.zeros((20 - detections_array.shape[0], 6), dtype=np.float32)
                detections_array = np.vstack((detections_array, pad))

        return detections_array

    def preprocess(self, image):
        if isinstance(image, np.ndarray):
            processed = preprocess_tensor(
                image, self.input_shape[1], self.input_shape[0]
            )
            return np.expand_dims(processed, axis=0)
        else:
            raise ValueError("Unsupported image format for preprocessing")

    def close(self):
        """Properly shuts down the inference engine and releases the VDevice."""
        logger.debug("[CLOSE] Closing HailoDetector")
        try:
            if hasattr(self, "inference_engine"):
                if hasattr(self.inference_engine, "target"):
                    self.inference_engine.target.release()
                    logger.debug("Hailo VDevice released successfully")
        except Exception as e:
            logger.error(f"Failed to close Hailo device: {e}")
            raise

    def __del__(self):
        """Destructor to ensure cleanup when the object is deleted."""
        self.close()


# ----------------- HailoDetectorConfig Class ----------------- #
class HailoDetectorConfig(BaseDetectorConfig):
    """Hailo10H detector using HEF models and the HailoRT SDK for inference on Hailo hardware."""

    model_config = ConfigDict(
        title="Hailo-10H",
    )

    type: Literal[DETECTOR_KEY]
    device: str = Field(
        default="PCIe",
        title="Device Type",
        description="The device to use for Hailo inference (e.g. 'PCIe', 'M.2').",
    )
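For context, a minimal sketch of how this plugin might be enabled in a Frigate config, based only on what the file above defines: `type: hailo10h` (DETECTOR_KEY), the `device` field with its `PCIe` default, and the model-path handling (a local `.hef` path, a URL, or nothing, in which case the default yolov6n.hef is fetched from H10H_DEFAULT_URL into the model cache). The detector name and path below are placeholder assumptions:

```yaml
detectors:
  hailo:
    type: hailo10h
    device: PCIe  # default from HailoDetectorConfig

model:
  # optional: omit to auto-download the default yolov6n.hef;
  # a URL to a .hef or a local path (placeholder below) also works
  path: /config/model_cache/yolov6n.hef
```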
@@ -310,6 +310,10 @@ class EmbeddingMaintainer(threading.Thread):
            self._handle_custom_classification_update(topic, payload)
            return

        if topic == "config/genai":
            self.config.genai = payload
            self.genai_manager.update_config(self.config)

        # Broadcast to all processors — each decides if the topic is relevant
        for processor in self.realtime_processors:
            processor.update_config(topic, payload)
@@ -113,6 +113,15 @@ class OllamaClient(GenAIClient):
            schema = response_format.get("json_schema", {}).get("schema")
            if schema:
                ollama_options["format"] = self._clean_schema_for_ollama(schema)
            logger.debug(
                "Ollama generate request: model=%s, prompt_len=%s, image_count=%s, "
                "has_format=%s, options=%s",
                self.genai_config.model,
                len(prompt),
                len(images) if images else 0,
                "format" in ollama_options,
                {k: v for k, v in ollama_options.items() if k != "format"},
            )
            result = self.provider.generate(
                self.genai_config.model,
                prompt,

@@ -120,9 +129,24 @@ class OllamaClient(GenAIClient):
                **ollama_options,
            )
            logger.debug(
                f"Ollama tokens used: eval_count={result.get('eval_count')}, prompt_eval_count={result.get('prompt_eval_count')}"
                "Ollama generate response: done=%s, done_reason=%s, eval_count=%s, "
                "prompt_eval_count=%s, response_len=%s",
                result.get("done"),
                result.get("done_reason"),
                result.get("eval_count"),
                result.get("prompt_eval_count"),
                len(result.get("response", "") or ""),
            )
            return str(result["response"]).strip()
            response_text = str(result["response"]).strip()
            if not response_text:
                logger.warning(
                    "Ollama returned a blank response for model %s (done_reason=%s, "
                    "eval_count=%s). Check model output, ensure thinking is disabled.",
                    self.genai_config.model,
                    result.get("done_reason"),
                    result.get("eval_count"),
                )
            return response_text
        except (
            TimeoutException,
            ResponseError,
@@ -80,7 +80,23 @@ class OpenAIClient(GenAIClient):
                and hasattr(result, "choices")
                and len(result.choices) > 0
            ):
                return str(result.choices[0].message.content.strip())
                message = result.choices[0].message
                content = message.content

                if not content:
                    # When reasoning is enabled for some OpenAI backends the actual response
                    # is incorrectly placed in reasoning_content instead of content.
                    # This is buggy/incorrect behavior — reasoning should not be
                    # enabled for these models.
                    reasoning_content = getattr(message, "reasoning_content", None)
                    if reasoning_content:
                        logger.warning(
                            "Response content was empty but reasoning_content was provided; "
                            "reasoning appears to be enabled and should be disabled for this model."
                        )
                        content = reasoning_content

                return str(content.strip()) if content else None
            return None
        except (TimeoutException, Exception) as e:
            logger.warning("OpenAI returned an error: %s", str(e))
@@ -123,6 +123,15 @@ def get_detector_temperature(
        if index < len(hailo_device_names):
            device_name = hailo_device_names[index]
            return hailo_temps[device_name]
    elif detector_type == "hailo10h":
        # Get temperatures for Hailo devices
        hailo_temps = get_hailo_temps()
        if hailo_temps:
            hailo_device_names = sorted(hailo_temps.keys())
            index = detector_index_by_type.get("hailo10h", 0)
            if index < len(hailo_device_names):
                device_name = hailo_device_names[index]
                return hailo_temps[device_name]
    elif detector_type == "rknn":
        # Rockchip temperatures are handled by the GPU / NPU stats
        # as there are not detector specific temperatures
@@ -711,23 +711,31 @@ def ffprobe_stream(ffmpeg, path: str, detailed: bool = False) -> sp.CompletedPro
    else:
        format_entries = None

    ffprobe_cmd = [
        ffmpeg.ffprobe_path,
        "-timeout",
        "1000000",
        "-print_format",
        "json",
        "-show_entries",
        f"stream={stream_entries}",
    ]
    def run(rtsp_transport: Optional[str] = None) -> sp.CompletedProcess:
        cmd = [ffmpeg.ffprobe_path]
        if rtsp_transport:
            cmd += ["-rtsp_transport", rtsp_transport]
        cmd += [
            "-timeout",
            "1000000",
            "-print_format",
            "json",
            "-show_entries",
            f"stream={stream_entries}",
        ]
        if detailed and format_entries:
            cmd.extend(["-show_entries", f"format={format_entries}"])
        cmd.extend(["-loglevel", "error", clean_path])
        return sp.run(cmd, capture_output=True)

    # Add format entries for detailed mode
    if detailed and format_entries:
        ffprobe_cmd.extend(["-show_entries", f"format={format_entries}"])
    result = run()

    ffprobe_cmd.extend(["-loglevel", "error", clean_path])
    # For RTSP: retry with explicit TCP transport if the first attempt failed
    # (default UDP may be blocked)
    if result.returncode != 0 and clean_path.startswith("rtsp://"):
        result = run(rtsp_transport="tcp")

    return sp.run(ffprobe_cmd, capture_output=True)
    return result


def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess:
@@ -877,15 +885,23 @@ async def get_video_properties(
        cap.release()
        return valid, width, height, fourcc, duration

    # try cv2 first
    has_video, width, height, fourcc, duration = probe_with_cv2(url)
    is_rtsp = url.startswith("rtsp://")

    # fallback to ffprobe if needed
    if not has_video or (get_duration and duration < 0):
    if is_rtsp:
        # skip cv2 for RTSP: its FFmpeg backend has a hardcoded ~30s internal
        # timeout that cannot be shortened per-call, and ffprobe bounded by
        # -rw_timeout handles RTSP probing reliably
        has_video, width, height, fourcc, duration = await probe_with_ffprobe(url)
    else:
        # try cv2 first for local files, HTTP, RTMP
        has_video, width, height, fourcc, duration = probe_with_cv2(url)

        # fallback to ffprobe if needed
        if not has_video or (get_duration and duration < 0):
            has_video, width, height, fourcc, duration = await probe_with_ffprobe(url)

    # last resort for RTSP: try TCP transport, since default UDP may be blocked
    if (not has_video or (get_duration and duration < 0)) and url.startswith("rtsp://"):
    if (not has_video or (get_duration and duration < 0)) and is_rtsp:
        has_video, width, height, fourcc, duration = await probe_with_ffprobe(
            url, rtsp_transport="tcp"
        )
web/package-lock.json (14 changes, generated)

@@ -54,7 +54,7 @@
        "immer": "^10.1.1",
        "js-yaml": "^4.1.1",
        "konva": "^10.2.3",
        "lodash": "^4.17.23",
        "lodash": "^4.18.1",
        "lucide-react": "^0.577.0",
        "monaco-yaml": "^5.4.1",
        "next-themes": "^0.4.6",

@@ -9636,15 +9636,15 @@
      }
    },
    "node_modules/lodash": {
      "version": "4.17.23",
      "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz",
      "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==",
      "version": "4.18.1",
      "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.18.1.tgz",
      "integrity": "sha512-dMInicTPVE8d1e5otfwmmjlxkZoUpiVLwyeTdUsi/Caj/gfzzblBcCE5sRHV/AsjuCmxWrte2TNGSYuCeCq+0Q==",
      "license": "MIT"
    },
    "node_modules/lodash-es": {
      "version": "4.17.23",
      "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz",
      "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==",
      "version": "4.18.1",
      "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.18.1.tgz",
      "integrity": "sha512-J8xewKD/Gk22OZbhpOVSwcs60zhd95ESDwezOFuA3/099925PdHJ7OFHNTGtajL3AlZkykD32HykiMo+BIBI8A==",
      "license": "MIT"
    },
    "node_modules/lodash.merge": {
@@ -68,7 +68,7 @@
    "immer": "^10.1.1",
    "js-yaml": "^4.1.1",
    "konva": "^10.2.3",
    "lodash": "^4.17.23",
    "lodash": "^4.18.1",
    "lucide-react": "^0.577.0",
    "monaco-yaml": "^5.4.1",
    "next-themes": "^0.4.6",
@@ -397,6 +397,14 @@
      "description": "The device to use for Hailo inference (e.g. 'PCIe', 'M.2')."
    }
  },
  "hailo10h": {
    "label": "Hailo-10H",
    "description": "Hailo-10H detector using HEF models and the HailoRT SDK for inference on Hailo hardware.",
    "device": {
      "label": "Device Type",
      "description": "The device to use for Hailo inference (e.g. 'PCIe', 'M.2')."
    }
  },
  "memryx": {
    "label": "MemryX",
    "description": "MemryX MX3 detector that runs compiled DFP models on MemryX accelerators.",
@@ -415,7 +415,7 @@
    "audioCodecGood": "Audio codec is {{codec}}.",
    "resolutionHigh": "A resolution of {{resolution}} may cause increased resource usage.",
    "resolutionLow": "A resolution of {{resolution}} may be too low for reliable detection of small objects.",
    "resolutionUnknown": "The resolution of this stream could not be probed. This will cause issues on startup. You should manually set the detect resolution in Settings or your config.",
    "resolutionUnknown": "The resolution of this stream could not be probed. You should manually set the detect resolution in Settings or your config.",
    "noAudioWarning": "No audio detected for this stream, recordings will not have audio.",
    "audioCodecRecordError": "The AAC audio codec is required to support audio in recordings.",
    "audioCodecRequired": "An audio stream is required to support audio detection.",
@@ -17,6 +17,9 @@ import { useUserPersistence } from "@/hooks/use-user-persistence";
import { Skeleton } from "../ui/skeleton";
import { Button } from "../ui/button";
import { FaCircleCheck } from "react-icons/fa6";
import { FaExclamationTriangle } from "react-icons/fa";
import { MdOutlinePersonSearch } from "react-icons/md";
import { ThreatLevel } from "@/types/review";
import { cn } from "@/lib/utils";
import { useTranslation } from "react-i18next";
import { getTranslatedLabel } from "@/utils/i18n";

@@ -127,6 +130,11 @@ export function AnimatedEventCard({
    true,
  );

  const threatLevel = useMemo<ThreatLevel>(
    () => (event.data.metadata?.potential_threat_level ?? 0) as ThreatLevel,
    [event],
  );

  const aspectRatio = useMemo(() => {
    if (
      !config ||

@@ -152,7 +160,15 @@ export function AnimatedEventCard({
          <Tooltip>
            <TooltipTrigger asChild>
              <Button
                className="pointer-events-none absolute left-2 top-1 z-40 bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500 opacity-0 transition-opacity group-hover:pointer-events-auto group-hover:opacity-100"
                className={cn(
                  "absolute left-2 top-1 z-40 transition-opacity",
                  threatLevel === ThreatLevel.SECURITY_CONCERN &&
                    "pointer-events-auto bg-severity_alert opacity-100 hover:bg-severity_alert",
                  threatLevel === ThreatLevel.NEEDS_REVIEW &&
                    "pointer-events-auto bg-severity_detection opacity-100 hover:bg-severity_detection",
                  threatLevel === ThreatLevel.NORMAL &&
                    "pointer-events-none bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500 opacity-0 group-hover:pointer-events-auto group-hover:opacity-100",
                )}
                size="xs"
                aria-label={t("markAsReviewed")}
                onClick={async () => {

@@ -160,7 +176,13 @@ export function AnimatedEventCard({
                  updateEvents();
                }}
              >
                <FaCircleCheck className="size-3 text-white" />
                {threatLevel === ThreatLevel.SECURITY_CONCERN ? (
                  <FaExclamationTriangle className="size-3 text-white" />
                ) : threatLevel === ThreatLevel.NEEDS_REVIEW ? (
                  <MdOutlinePersonSearch className="size-3 text-white" />
                ) : (
                  <FaCircleCheck className="size-3 text-white" />
                )}
              </Button>
            </TooltipTrigger>
            <TooltipContent>{t("markAsReviewed")}</TooltipContent>
@@ -218,7 +218,7 @@ export default function CameraReviewClassification({
          <Label
            className={cn(
              "flex flex-row items-center text-base",
              alertsZonesModified && "text-danger",
              alertsZonesModified && "text-unsaved",
            )}
          >
            <Trans ns="views/settings">cameraReview.review.alerts</Trans>

@@ -286,7 +286,7 @@ export default function CameraReviewClassification({
          <Label
            className={cn(
              "flex flex-row items-center text-base",
              detectionsZonesModified && "text-danger",
              detectionsZonesModified && "text-unsaved",
            )}
          >
            <Trans ns="views/settings">
@@ -1012,7 +1012,7 @@ export function ConfigSection({
        >
          {hasChanges && (
            <div className="flex items-center gap-2">
              <span className="text-sm text-danger">
              <span className="text-sm text-unsaved">
                {t("unsavedChanges", {
                  ns: "views/settings",
                  defaultValue: "You have unsaved changes",

@@ -1299,7 +1299,7 @@ export function ConfigSection({
          {hasChanges && (
            <Badge
              variant="secondary"
              className="cursor-default bg-danger text-xs text-white hover:bg-danger"
              className="cursor-default bg-unsaved text-xs text-black hover:bg-unsaved"
            >
              {t("button.modified", {
                ns: "common",
@@ -154,7 +154,7 @@ export function KnownPlatesField(props: FieldProps) {
      <div className="flex items-center justify-between">
        <div>
          <CardTitle
            className={cn("text-sm", isModified && "text-danger")}
            className={cn("text-sm", isModified && "text-unsaved")}
          >
            {title}
          </CardTitle>

@@ -142,7 +142,7 @@ export function ReplaceRulesField(props: FieldProps) {
      <div className="flex items-center justify-between">
        <div>
          <CardTitle
            className={cn("text-sm", isModified && "text-danger")}
            className={cn("text-sm", isModified && "text-unsaved")}
          >
            {title}
          </CardTitle>
@@ -497,7 +497,7 @@ export function FieldTemplate(props: FieldTemplateProps) {
        htmlFor={id}
        className={cn(
          "text-sm font-medium",
          isModified && "text-danger",
          isModified && "text-unsaved",
          hasFieldErrors && "text-destructive",
        )}
      >

@@ -516,7 +516,7 @@ export function FieldTemplate(props: FieldTemplateProps) {
    return (
      <Label
        htmlFor={id}
        className={cn("text-sm font-medium", isModified && "text-danger")}
        className={cn("text-sm font-medium", isModified && "text-unsaved")}
      >
        {finalLabel}
        {required && <span className="ml-1 text-destructive">*</span>}

@@ -535,7 +535,7 @@ export function FieldTemplate(props: FieldTemplateProps) {
        htmlFor={id}
        className={cn(
          "text-sm font-medium",
          isModified && "text-danger",
          isModified && "text-unsaved",
          hasFieldErrors && "text-destructive",
        )}
      >
@@ -467,7 +467,7 @@ export function ObjectFieldTemplate(props: ObjectFieldTemplateProps) {
      <CardTitle
        className={cn(
          "flex items-center text-sm",
          hasModifiedDescendants && "text-danger",
          hasModifiedDescendants && "text-unsaved",
        )}
      >
        {inferredLabel}
@@ -1435,7 +1435,7 @@ export default function Settings() {
            />
          )}
          {showUnsavedDot && (
            <span className="inline-block size-2 rounded-full bg-danger" />
            <span className="inline-block size-2 rounded-full bg-unsaved" />
          )}
        </div>
      )}

@@ -1516,7 +1516,7 @@ export default function Settings() {
      <div className="sticky bottom-0 z-50 mt-2 bg-background p-4">
        <div className="flex flex-col items-center gap-2">
          <div className="flex items-center gap-2">
            <span className="text-sm text-danger">
            <span className="text-sm text-unsaved">
              {t("unsavedChanges", {
                ns: "views/settings",
                defaultValue: "You have unsaved changes",
@@ -79,11 +79,11 @@ const PROFILE_COLORS: ProfileColor[] = [
    bgMuted: "bg-green-400/20",
  },
  {
    bg: "bg-amber-400",
    text: "text-amber-400",
    dot: "bg-amber-400",
    border: "border-amber-400",
    bgMuted: "bg-amber-400/20",
    bg: "bg-fuchsia-500",
    text: "text-fuchsia-500",
    dot: "bg-fuchsia-500",
    border: "border-fuchsia-500",
    bgMuted: "bg-fuchsia-500/20",
  },
  {
    bg: "bg-slate-400",

@@ -93,11 +93,11 @@ const PROFILE_COLORS: ProfileColor[] = [
    bgMuted: "bg-slate-400/20",
  },
  {
    bg: "bg-orange-300",
    text: "text-orange-300",
    dot: "bg-orange-300",
    border: "border-orange-300",
    bgMuted: "bg-orange-300/20",
    bg: "bg-stone-500",
    text: "text-stone-500",
    dot: "bg-stone-500",
    border: "border-stone-500",
    bgMuted: "bg-stone-500/20",
  },
  {
    bg: "bg-blue-300",
@@ -389,7 +389,7 @@ export default function LiveCameraView({
    return "mse";
  }, [lowBandwidth, mic, webRTC, isRestreamed]);

  useKeyboardListener(["m"], (key, modifiers) => {
  useKeyboardListener(["m", "Escape"], (key, modifiers) => {
    if (!modifiers.down) {
      return true;
    }

@@ -407,6 +407,12 @@ export default function LiveCameraView({
          return true;
        }
        break;
      case "Escape":
        if (!fullscreen) {
          navigate(-1);
          return true;
        }
        break;
    }

    return false;
@@ -380,7 +380,9 @@ export default function Go2RtcStreamsSettingsView({
      >
        {hasChanges && (
          <div className="flex items-center gap-2">
            <span className="text-sm text-danger">{t("unsavedChanges")}</span>
            <span className="text-sm text-unsaved">
              {t("unsavedChanges")}
            </span>
          </div>
        )}
        <div className="flex w-full items-center gap-2 md:w-auto">
@@ -212,7 +212,7 @@ export function SingleSectionPage({
        {sectionStatus.hasChanges && (
          <Badge
            variant="secondary"
            className="cursor-default bg-danger text-xs text-white hover:bg-danger"
            className="cursor-default bg-unsaved text-xs text-black hover:bg-unsaved"
          >
            {t("button.modified", {
              ns: "common",

@@ -250,7 +250,7 @@ export function SingleSectionPage({
        {sectionStatus.hasChanges && (
          <Badge
            variant="secondary"
            className="cursor-default bg-danger text-xs text-white hover:bg-danger"
            className="cursor-default bg-unsaved text-xs text-black hover:bg-unsaved"
          >
            {t("button.modified", { ns: "common", defaultValue: "Modified" })}
          </Badge>
@@ -65,6 +65,7 @@ module.exports = {
      ring: "hsl(var(--ring))",
      danger: "#ef4444",
      success: "#22c55e",
      unsaved: "#f59e0b",
      background: "hsl(var(--background))",
      background_alt: "hsl(var(--background-alt))",
      foreground: "hsl(var(--foreground))",