Mirror of https://github.com/blakeblackshear/frigate.git (synced 2026-05-08 06:25:27 +03:00)

Compare commits: 15 commits, ffb5182f8a ... 6e48fde930
Commits in this range:

- 6e48fde930
- 1a6d04fde7
- 4a1b7a1629
- 8eace9c3e7
- 8fc1e97df5
- 0a332cada9
- ba499201e6
- c244e6582a
- fff3594553
- 25bfb2c481
- b7261c8e70
- ad9092d0da
- 20705a3e97
- f4ac063b37
- 900f9f4ee8
@@ -87,43 +87,43 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
     # intel packages use zst compression so we need to update dpkg
     apt-get install -y dpkg

-    # use intel apt intel packages
+    # use intel apt repo for libmfx1 (legacy QSV, pre-Gen12)
     wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
     echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list
     apt-get -qq update

+    # intel-media-va-driver-non-free is built from source in the
+    # intel-media-driver Dockerfile stage for Battlemage (Xe2) support
     apt-get -qq install --no-install-recommends --no-install-suggests -y \
-        libmfx1 libmfxgen1 libvpl2
-    rm -f /usr/share/keyrings/intel-graphics.gpg
-    rm -f /etc/apt/sources.list.d/intel-gpu-jammy.list
+        libmfx1

+    # upgrade libva2, oneVPL runtime, and libvpl2 from trixie for Battlemage support
+    echo "deb http://deb.debian.org/debian trixie main" > /etc/apt/sources.list.d/trixie.list
+    apt-get -qq update
+    apt-get -qq install -y -t trixie libva2 libva-drm2 libzstd1
+    apt-get -qq install -y -t trixie libmfx-gen1.2 libvpl2
+    rm -f /etc/apt/sources.list.d/trixie.list
+    apt-get -qq update
     apt-get -qq install -y ocl-icd-libopencl1

     # install libtbb12 for NPU support
     apt-get -qq install -y libtbb12

+    rm -f /usr/share/keyrings/intel-graphics.gpg
+    rm -f /etc/apt/sources.list.d/intel-gpu-jammy.list

-    # install legacy and standard intel icd and level-zero-gpu
+    # install legacy and standard intel compute packages
     # see https://github.com/intel/compute-runtime/blob/master/LEGACY_PLATFORMS.md for more info
+    # newer intel packages (gmmlib 22.9+, igc 2.32+) require libstdc++ >= 13.1 and libzstd >= 1.5.5
+    echo "deb http://deb.debian.org/debian trixie main" > /etc/apt/sources.list.d/trixie.list
+    apt-get -qq update
+    apt-get -qq install -y -t trixie libstdc++6 libzstd1
+    rm -f /etc/apt/sources.list.d/trixie.list
+    apt-get -qq update

     # needed core package
     wget https://github.com/intel/compute-runtime/releases/download/26.14.37833.4/libigdgmm12_22.9.0_amd64.deb
     dpkg -i libigdgmm12_22.9.0_amd64.deb
     rm libigdgmm12_22.9.0_amd64.deb

-    # legacy packages
+    # legacy compute-runtime packages
     wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.36/intel-opencl-icd-legacy1_24.35.30872.36_amd64.deb
     wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.36/intel-level-zero-gpu-legacy1_1.5.30872.36_amd64.deb
     wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.24/intel-igc-opencl_1.0.17537.24_amd64.deb
     wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.24/intel-igc-core_1.0.17537.24_amd64.deb
-    # standard packages
+    # standard compute-runtime packages
     wget https://github.com/intel/compute-runtime/releases/download/26.14.37833.4/intel-opencl-icd_26.14.37833.4-0_amd64.deb
     wget https://github.com/intel/compute-runtime/releases/download/26.14.37833.4/libze-intel-gpu1_26.14.37833.4-0_amd64.deb
     wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.32.7/intel-igc-opencl-2_2.32.7+21184_amd64.deb

@@ -137,6 +137,10 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
     dpkg -i *.deb
     rm *.deb
     apt-get -qq install -f -y

+    # Battlemage uses the xe kernel driver, but the VA-API driver is still iHD.
+    # The oneVPL runtime may look for a driver named after the kernel module.
+    ln -sf /usr/lib/x86_64-linux-gnu/dri/iHD_drv_video.so /usr/lib/x86_64-linux-gnu/dri/xe_drv_video.so
 fi

 if [[ "${TARGETARCH}" == "arm64" ]]; then
@@ -11,7 +11,7 @@ joserfc == 1.2.*
 cryptography == 44.0.*
 pathvalidate == 3.3.*
 markupsafe == 3.0.*
-python-multipart == 0.0.20
+python-multipart == 0.0.26
 # Classification Model Training
 tensorflow == 2.19.* ; platform_machine == 'aarch64'
 tensorflow-cpu == 2.19.* ; platform_machine == 'x86_64'
@@ -63,7 +63,7 @@ argcomplete==2.0.*
 contextlib2==0.6.*
 distlib==0.3.*
 filelock==3.8.*
-future==0.18.*
+future==1.0.*
 importlib-metadata==5.1.*
 importlib-resources==5.1.*
 netaddr==0.8.*
@@ -39,6 +39,10 @@ This is a fork (with fixed errors and new features) of [original Double Take](ht

 [Frigate telegram](https://github.com/OldTyT/frigate-telegram) makes it possible to send events from Frigate to Telegram. Events are sent as a message with a text description, video, and thumbnail.

+## [kiosk-monitor](https://github.com/extremeshok/kiosk-monitor)
+
+[kiosk-monitor](https://github.com/extremeshok/kiosk-monitor) is a Raspberry Pi watchdog that runs Chromium fullscreen on a Frigate dashboard (optionally with VLC on a second monitor for an RTSP camera stream), auto-restarts on frozen screens or unreachable URLs, and ships a Birdseye-aware Chromium helper that auto-sizes the grid to the display.
+
 ## [Periscope](https://github.com/maksz42/periscope)

 [Periscope](https://github.com/maksz42/periscope) is a lightweight Android app that turns old devices into live viewers for Frigate. It works on Android 2.2 and above, including Android TV. It supports authentication and HTTPS.
@@ -111,26 +111,16 @@ TCP ensures that all data packets arrive in the correct order. This is crucial f

 You can still configure Frigate to use UDP by using ffmpeg input args or the preset `preset-rtsp-udp`. See the [ffmpeg presets](/configuration/ffmpeg_presets) documentation.

-### Frigate hangs on startup with a "probing detect stream" message in the logs
+### Frigate is slow to start up with a "probing detect stream" message in the logs

-On startup, Frigate probes each camera's detect stream with OpenCV to auto-detect its resolution. OpenCV's FFmpeg backend may attempt RTSP over UDP during this probe regardless of the `-rtsp_transport tcp` in your `input_args` or preset. For cameras that do not respond to UDP (common on some Reolink models and others behind firewalls that block UDP), the probe can hang indefinitely and block Frigate from finishing startup, or it can return zeroed-out dimensions that show up as width `0` and height `0` in Camera Probe Info under System Metrics.
+When `detect.width` and `detect.height` are not set, Frigate probes each camera's detect stream on startup (and when saving the config) to auto-detect its resolution. For RTSP streams Frigate probes with ffprobe and automatically retries over TCP if UDP doesn't respond, with a 5 second timeout per attempt. A camera that cannot be reached over either transport will add up to ~10 seconds to startup before Frigate falls through with default dimensions, which may show up as width `0` and height `0` in Camera Probe Info under System Metrics.

-There are two ways to avoid this:
+To skip the probe entirely and make startup instant, set `detect.width` and `detect.height` explicitly in your camera config:

-1. Set `detect.width` and `detect.height` explicitly in your camera config. When both are set, Frigate skips the auto-detect probe entirely:
-
-   ```yaml
-   cameras:
-     my_camera:
-       detect:
-         width: 1280
-         height: 720
-   ```
-
-2. Force OpenCV's FFmpeg backend to use TCP for RTSP by setting the environment variable on your Frigate container:
-
-   ```
-   OPENCV_FFMPEG_CAPTURE_OPTIONS=rtsp_transport;tcp
-   ```
-
-This is a process-wide setting and applies to all cameras. If you have any cameras that require `preset-rtsp-udp`, use option 1 instead.
+```yaml
+cameras:
+  my_camera:
+    detect:
+      width: 1280
+      height: 720
+```
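A quick way to find the right values for `detect.width` and `detect.height` is to probe the camera by hand with ffprobe. This is a standalone sketch, not Frigate code; the RTSP URL is a placeholder, and TCP is forced to sidestep the UDP issue described above:

```python
import json
import subprocess


def probe_resolution(rtsp_url: str) -> tuple[int, int] | None:
    """Return (width, height) of the first video stream, or None on failure."""
    cmd = [
        "ffprobe",
        "-rtsp_transport", "tcp",  # avoid hanging on cameras that ignore UDP
        "-v", "quiet",
        "-print_format", "json",
        "-show_entries", "stream=width,height",
        rtsp_url,
    ]
    try:
        result = subprocess.run(cmd, capture_output=True, timeout=10)
    except subprocess.TimeoutExpired:
        return None
    if result.returncode != 0:
        return None
    for stream in json.loads(result.stdout).get("streams", []):
        if stream.get("width") and stream.get("height"):
            return stream["width"], stream["height"]
    return None


# Placeholder URL; substitute your camera's actual stream.
print(probe_resolution("rtsp://user:pass@192.168.1.10:554/stream"))
```

The returned pair maps directly onto `detect.width` and `detect.height`.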
docs/package-lock.json (generated, 6 lines changed)
@@ -10897,9 +10897,9 @@
       "license": "MIT"
     },
     "node_modules/express/node_modules/path-to-regexp": {
-      "version": "0.1.12",
-      "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz",
-      "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==",
+      "version": "0.1.13",
+      "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.13.tgz",
+      "integrity": "sha512-A/AGNMFN3c8bOlvV9RreMdrv7jsmF9XIfDeCd87+I8RNg6s78BhJxMu69NEMHBSJFxKidViTEdruRwEk/WIKqA==",
       "license": "MIT"
     },
     "node_modules/express/node_modules/range-parser": {
@@ -310,6 +310,10 @@ class EmbeddingMaintainer(threading.Thread):
             self._handle_custom_classification_update(topic, payload)
             return

+        if topic == "config/genai":
+            self.config.genai = payload
+            self.genai_manager.update_config(self.config)
+
         # Broadcast to all processors — each decides if the topic is relevant
         for processor in self.realtime_processors:
            processor.update_config(topic, payload)
@@ -113,6 +113,15 @@ class OllamaClient(GenAIClient):
             schema = response_format.get("json_schema", {}).get("schema")
             if schema:
                 ollama_options["format"] = self._clean_schema_for_ollama(schema)
+            logger.debug(
+                "Ollama generate request: model=%s, prompt_len=%s, image_count=%s, "
+                "has_format=%s, options=%s",
+                self.genai_config.model,
+                len(prompt),
+                len(images) if images else 0,
+                "format" in ollama_options,
+                {k: v for k, v in ollama_options.items() if k != "format"},
+            )
             result = self.provider.generate(
                 self.genai_config.model,
                 prompt,
@@ -120,9 +129,24 @@ class OllamaClient(GenAIClient):
                 **ollama_options,
             )
             logger.debug(
-                f"Ollama tokens used: eval_count={result.get('eval_count')}, prompt_eval_count={result.get('prompt_eval_count')}"
+                "Ollama generate response: done=%s, done_reason=%s, eval_count=%s, "
+                "prompt_eval_count=%s, response_len=%s",
+                result.get("done"),
+                result.get("done_reason"),
+                result.get("eval_count"),
+                result.get("prompt_eval_count"),
+                len(result.get("response", "") or ""),
             )
-            return str(result["response"]).strip()
+            response_text = str(result["response"]).strip()
+            if not response_text:
+                logger.warning(
+                    "Ollama returned a blank response for model %s (done_reason=%s, "
+                    "eval_count=%s). Check model output, ensure thinking is disabled.",
+                    self.genai_config.model,
+                    result.get("done_reason"),
+                    result.get("eval_count"),
+                )
+            return response_text
         except (
             TimeoutException,
             ResponseError,
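The blank-response warning above usually means the model spent its output on "thinking". As a hedged, standalone illustration (not Frigate's code path), newer Ollama servers accept a `think` flag on `/api/generate`; the host and model below are placeholders, and the flag is only honored by server versions and models that support reasoning control:

```python
import requests

# Placeholder host/model; "think": False is an assumption about your
# Ollama version — older servers silently ignore or reject it.
resp = requests.post(
    "http://localhost:11434/api/generate",
    json={
        "model": "qwen3:8b",
        "prompt": "Describe the person at the door in one sentence.",
        "stream": False,
        "think": False,  # keep reasoning out of the response body
    },
    timeout=60,
)
print(resp.json().get("response", "").strip())
```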
@@ -80,7 +80,23 @@ class OpenAIClient(GenAIClient):
                 and hasattr(result, "choices")
                 and len(result.choices) > 0
             ):
-                return str(result.choices[0].message.content.strip())
+                message = result.choices[0].message
+                content = message.content
+
+                if not content:
+                    # When reasoning is enabled for some OpenAI backends the actual response
+                    # is incorrectly placed in reasoning_content instead of content.
+                    # This is buggy/incorrect behavior — reasoning should not be
+                    # enabled for these models.
+                    reasoning_content = getattr(message, "reasoning_content", None)
+                    if reasoning_content:
+                        logger.warning(
+                            "Response content was empty but reasoning_content was provided; "
+                            "reasoning appears to be enabled and should be disabled for this model."
+                        )
+                        content = reasoning_content
+
+                return str(content.strip()) if content else None
             return None
         except (TimeoutException, Exception) as e:
             logger.warning("OpenAI returned an error: %s", str(e))
@@ -24,8 +24,12 @@ from frigate.log import redirect_output_to_logger, suppress_stderr_during
 from frigate.models import Event, Recordings, ReviewSegment
 from frigate.types import ModelStatusTypesEnum
 from frigate.util.downloader import ModelDownloader
-from frigate.util.file import get_event_thumbnail_bytes
-from frigate.util.image import get_image_from_recording
+from frigate.util.file import get_event_thumbnail_bytes, load_event_snapshot_image
+from frigate.util.image import (
+    calculate_region,
+    get_image_from_recording,
+    relative_box_to_absolute,
+)
 from frigate.util.process import FrigateProcess

 BATCH_SIZE = 16
@@ -713,7 +717,7 @@ def collect_object_classification_examples(
     This function:
     1. Queries events for the specified label
     2. Selects 100 balanced events across different cameras and times
-    3. Retrieves thumbnails for selected events (with 33% center crop applied)
+    3. Crops each event's clean snapshot around the object bounding box
     4. Selects 24 most visually distinct thumbnails
     5. Saves to dataset directory
@@ -832,66 +836,106 @@ def _select_balanced_events(

 def _extract_event_thumbnails(events: list[Event], output_dir: str) -> list[str]:
     """
-    Extract thumbnails from events and save to disk.
+    Extract a training image for each event.
+
+    Preferred path: load the full-frame clean snapshot and crop around the
+    stored bounding box with the same calculate_region(..., max(w, h), 1.0)
+    call the live ObjectClassificationProcessor uses, so wizard examples
+    are framed like inference-time inputs.
+
+    Fallback: if no clean snapshot exists (snapshots disabled, or only a
+    legacy annotated JPG is on disk), center-crop the stored thumbnail
+    using a step ladder sized from the box/region area ratio.

     Args:
         events: List of Event objects
-        output_dir: Directory to save thumbnails
+        output_dir: Directory to save crops

     Returns:
-        List of paths to successfully extracted thumbnail images
+        List of paths to successfully extracted images
     """
-    thumbnail_paths = []
+    image_paths = []

     for idx, event in enumerate(events):
         try:
-            thumbnail_bytes = get_event_thumbnail_bytes(event)
+            img = _load_event_classification_crop(event)
+            if img is None:
+                continue

-            if thumbnail_bytes:
-                nparr = np.frombuffer(thumbnail_bytes, np.uint8)
-                img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
-
-                if img is not None:
-                    height, width = img.shape[:2]
-
-                    crop_size = 1.0
-                    if event.data and "box" in event.data and "region" in event.data:
-                        box = event.data["box"]
-                        region = event.data["region"]
-
-                        if len(box) == 4 and len(region) == 4:
-                            box_w, box_h = box[2], box[3]
-                            region_w, region_h = region[2], region[3]
-
-                            box_area = (box_w * box_h) / (region_w * region_h)
-
-                            if box_area < 0.05:
-                                crop_size = 0.4
-                            elif box_area < 0.10:
-                                crop_size = 0.5
-                            elif box_area < 0.20:
-                                crop_size = 0.65
-                            elif box_area < 0.35:
-                                crop_size = 0.80
-                            else:
-                                crop_size = 0.95
-
-                    crop_width = int(width * crop_size)
-                    crop_height = int(height * crop_size)
-
-                    x1 = (width - crop_width) // 2
-                    y1 = (height - crop_height) // 2
-                    x2 = x1 + crop_width
-                    y2 = y1 + crop_height
-
-                    cropped = img[y1:y2, x1:x2]
-                    resized = cv2.resize(cropped, (224, 224))
-                    output_path = os.path.join(output_dir, f"thumbnail_{idx:04d}.jpg")
-                    cv2.imwrite(output_path, resized)
-                    thumbnail_paths.append(output_path)
+            resized = cv2.resize(img, (224, 224))
+            output_path = os.path.join(output_dir, f"thumbnail_{idx:04d}.jpg")
+            cv2.imwrite(output_path, resized)
+            image_paths.append(output_path)

         except Exception as e:
-            logger.debug(f"Failed to extract thumbnail for event {event.id}: {e}")
+            logger.debug(f"Failed to extract image for event {event.id}: {e}")
             continue

-    return thumbnail_paths
+    return image_paths
+
+
+def _load_event_classification_crop(event: Event) -> np.ndarray | None:
+    """Prefer a snapshot-based object crop; fall back to a center-cropped thumbnail."""
+    if event.data and "box" in event.data:
+        snapshot, _ = load_event_snapshot_image(event, clean_only=True)
+        if snapshot is not None:
+            abs_box = relative_box_to_absolute(snapshot.shape, event.data["box"])
+            if abs_box is not None:
+                xmin, ymin, xmax, ymax = abs_box
+                box_w = xmax - xmin
+                box_h = ymax - ymin
+                if box_w > 0 and box_h > 0:
+                    x1, y1, x2, y2 = calculate_region(
+                        snapshot.shape,
+                        xmin,
+                        ymin,
+                        xmax,
+                        ymax,
+                        max(box_w, box_h),
+                        1.0,
+                    )
+                    cropped = snapshot[y1:y2, x1:x2]
+                    if cropped.size > 0:
+                        return cropped
+
+    thumbnail_bytes = get_event_thumbnail_bytes(event)
+    if not thumbnail_bytes:
+        return None
+
+    nparr = np.frombuffer(thumbnail_bytes, np.uint8)
+    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+    if img is None or img.size == 0:
+        return None
+
+    height, width = img.shape[:2]
+    crop_size = 1.0
+
+    if event.data and "box" in event.data and "region" in event.data:
+        box = event.data["box"]
+        region = event.data["region"]
+
+        if len(box) == 4 and len(region) == 4:
+            box_w, box_h = box[2], box[3]
+            region_w, region_h = region[2], region[3]
+            box_area = (box_w * box_h) / (region_w * region_h)
+
+            if box_area < 0.05:
+                crop_size = 0.4
+            elif box_area < 0.10:
+                crop_size = 0.5
+            elif box_area < 0.20:
+                crop_size = 0.65
+            elif box_area < 0.35:
+                crop_size = 0.80
+            else:
+                crop_size = 0.95
+
+    crop_width = int(width * crop_size)
+    crop_height = int(height * crop_size)
+    x1 = (width - crop_width) // 2
+    y1 = (height - crop_height) // 2
+    cropped = img[y1 : y1 + crop_height, x1 : x1 + crop_width]
+    if cropped.size == 0:
+        return None
+
+    return cropped
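To make the fallback ladder concrete, here is the crop-fraction rule as a standalone function with a worked example (a rework of the thresholds above for illustration, not the code Frigate ships):

```python
def fallback_crop_size(box: list[int], region: list[int]) -> float:
    """Center-crop fraction for a thumbnail, from the box/region area ratio.

    box and region are [x, y, w, h]; indices 2 and 3 are width and height.
    """
    box_area = (box[2] * box[3]) / (region[2] * region[3])
    if box_area < 0.05:
        return 0.4
    elif box_area < 0.10:
        return 0.5
    elif box_area < 0.20:
        return 0.65
    elif box_area < 0.35:
        return 0.80
    return 0.95


# A 96x64 box inside a 320x320 region covers ~6% of it, so the thumbnail
# keeps its central 50% before the 224x224 resize.
print(fallback_crop_size([0, 0, 96, 64], [0, 0, 320, 320]))  # 0.5
```

Smaller objects get tighter crops, so the subject still fills a useful share of the 224x224 training input.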
@@ -711,23 +711,44 @@ def ffprobe_stream(ffmpeg, path: str, detailed: bool = False) -> sp.CompletedPro
     else:
         format_entries = None

-    ffprobe_cmd = [
-        ffmpeg.ffprobe_path,
-        "-timeout",
-        "1000000",
-        "-print_format",
-        "json",
-        "-show_entries",
-        f"stream={stream_entries}",
-    ]
+    def run(rtsp_transport: Optional[str] = None) -> sp.CompletedProcess:
+        cmd = [ffmpeg.ffprobe_path]
+        if rtsp_transport:
+            cmd += ["-rtsp_transport", rtsp_transport]
+        cmd += [
+            "-timeout",
+            "1000000",
+            "-print_format",
+            "json",
+            "-show_entries",
+            f"stream={stream_entries}",
+        ]
+        if detailed and format_entries:
+            cmd.extend(["-show_entries", f"format={format_entries}"])
+        cmd.extend(["-loglevel", "error", clean_path])
+        try:
+            return sp.run(cmd, capture_output=True, timeout=6)
+        except sp.TimeoutExpired as e:
+            logger.info(
+                "ffprobe timed out while probing %s (transport=%s)",
+                clean_camera_user_pass(path),
+                rtsp_transport or "default",
+            )
+            return sp.CompletedProcess(
+                args=cmd,
+                returncode=1,
+                stdout=e.stdout or b"",
+                stderr=(e.stderr or b"") + b"\nffprobe timed out",
+            )

-    # Add format entries for detailed mode
-    if detailed and format_entries:
-        ffprobe_cmd.extend(["-show_entries", f"format={format_entries}"])
+    result = run()

-    ffprobe_cmd.extend(["-loglevel", "error", clean_path])
+    # For RTSP: retry with explicit TCP transport if the first attempt failed
+    # (default UDP may be blocked)
+    if result.returncode != 0 and clean_path.startswith("rtsp://"):
+        result = run(rtsp_transport="tcp")

-    return sp.run(ffprobe_cmd, capture_output=True)
+    return result


 def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess:
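Returning a synthesized `CompletedProcess` on timeout, rather than raising, keeps every caller on one code path: the result is parsed the same way whether the probe succeeded, failed, or timed out. A hedged sketch of that consuming side (illustrative, not the exact Frigate call sites):

```python
import json
import subprocess as sp


def parse_probe_streams(result: sp.CompletedProcess) -> list[dict]:
    """Extract the streams array from an `ffprobe -print_format json` run."""
    if result.returncode != 0 or not result.stdout:
        return []  # covers real failures and the synthesized timeout result
    try:
        return json.loads(result.stdout).get("streams", [])
    except json.JSONDecodeError:
        return []
```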
@@ -807,10 +828,15 @@ async def get_video_properties(
 ) -> dict[str, Any]:
     async def probe_with_ffprobe(
         url: str,
+        rtsp_transport: Optional[str] = None,
     ) -> tuple[bool, int, int, Optional[str], float]:
         """Fallback using ffprobe: returns (valid, width, height, codec, duration)."""
-        cmd = [
-            ffmpeg.ffprobe_path,
+        cmd = [ffmpeg.ffprobe_path]
+        if rtsp_transport:
+            cmd += ["-rtsp_transport", rtsp_transport]
+        cmd += [
             "-rw_timeout",
             "5000000",
             "-v",
             "quiet",
             "-print_format",
@@ -819,11 +845,23 @@ async def get_video_properties(
             "-show_streams",
             url,
         ]
+        proc = None
         try:
             proc = await asyncio.create_subprocess_exec(
                 *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
             )
-            stdout, _ = await proc.communicate()
+            try:
+                stdout, _ = await asyncio.wait_for(proc.communicate(), timeout=6)
+            except asyncio.TimeoutError:
+                logger.info(
+                    "ffprobe timed out while probing %s (transport=%s)",
+                    clean_camera_user_pass(url),
+                    rtsp_transport or "default",
+                )
+                proc.kill()
+                await proc.wait()
+                return False, 0, 0, None, -1

             if proc.returncode != 0:
                 return False, 0, 0, None, -1
@@ -872,12 +910,26 @@ async def get_video_properties(
         cap.release()
         return valid, width, height, fourcc, duration

-    # try cv2 first
-    has_video, width, height, fourcc, duration = probe_with_cv2(url)
-
-    # fallback to ffprobe if needed
-    if not has_video or (get_duration and duration < 0):
-        has_video, width, height, fourcc, duration = await probe_with_ffprobe(url)
+    is_rtsp = url.startswith("rtsp://")
+
+    if is_rtsp:
+        # skip cv2 for RTSP: its FFmpeg backend has a hardcoded ~30s internal
+        # timeout that cannot be shortened per-call, and ffprobe bounded by
+        # -rw_timeout handles RTSP probing reliably
+        has_video, width, height, fourcc, duration = await probe_with_ffprobe(url)
+    else:
+        # try cv2 first for local files, HTTP, RTMP
+        has_video, width, height, fourcc, duration = probe_with_cv2(url)
+
+        # fallback to ffprobe if needed
+        if not has_video or (get_duration and duration < 0):
+            has_video, width, height, fourcc, duration = await probe_with_ffprobe(url)
+
+    # last resort for RTSP: try TCP transport, since default UDP may be blocked
+    if (not has_video or (get_duration and duration < 0)) and is_rtsp:
+        has_video, width, height, fourcc, duration = await probe_with_ffprobe(
+            url, rtsp_transport="tcp"
+        )

     result: dict[str, Any] = {"has_valid_video": has_video}
     if has_video:
web/package-lock.json (generated, 14 lines changed)
@@ -54,7 +54,7 @@
         "immer": "^10.1.1",
         "js-yaml": "^4.1.1",
         "konva": "^10.2.3",
-        "lodash": "^4.17.23",
+        "lodash": "^4.18.1",
         "lucide-react": "^0.577.0",
         "monaco-yaml": "^5.4.1",
         "next-themes": "^0.4.6",
@@ -9636,15 +9636,15 @@
       }
     },
     "node_modules/lodash": {
-      "version": "4.17.23",
-      "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz",
-      "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==",
+      "version": "4.18.1",
+      "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.18.1.tgz",
+      "integrity": "sha512-dMInicTPVE8d1e5otfwmmjlxkZoUpiVLwyeTdUsi/Caj/gfzzblBcCE5sRHV/AsjuCmxWrte2TNGSYuCeCq+0Q==",
       "license": "MIT"
     },
     "node_modules/lodash-es": {
-      "version": "4.17.23",
-      "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz",
-      "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==",
+      "version": "4.18.1",
+      "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.18.1.tgz",
+      "integrity": "sha512-J8xewKD/Gk22OZbhpOVSwcs60zhd95ESDwezOFuA3/099925PdHJ7OFHNTGtajL3AlZkykD32HykiMo+BIBI8A==",
       "license": "MIT"
     },
     "node_modules/lodash.merge": {
@@ -68,7 +68,7 @@
     "immer": "^10.1.1",
     "js-yaml": "^4.1.1",
     "konva": "^10.2.3",
-    "lodash": "^4.17.23",
+    "lodash": "^4.18.1",
     "lucide-react": "^0.577.0",
     "monaco-yaml": "^5.4.1",
     "next-themes": "^0.4.6",
@@ -415,6 +415,7 @@
     "audioCodecGood": "Audio codec is {{codec}}.",
     "resolutionHigh": "A resolution of {{resolution}} may cause increased resource usage.",
     "resolutionLow": "A resolution of {{resolution}} may be too low for reliable detection of small objects.",
+    "resolutionUnknown": "The resolution of this stream could not be probed. You should manually set the detect resolution in Settings or your config.",
     "noAudioWarning": "No audio detected for this stream, recordings will not have audio.",
     "audioCodecRecordError": "The AAC audio codec is required to support audio in recordings.",
     "audioCodecRequired": "An audio stream is required to support audio detection.",
@@ -17,6 +17,9 @@ import { useUserPersistence } from "@/hooks/use-user-persistence";
 import { Skeleton } from "../ui/skeleton";
 import { Button } from "../ui/button";
 import { FaCircleCheck } from "react-icons/fa6";
+import { FaExclamationTriangle } from "react-icons/fa";
+import { MdOutlinePersonSearch } from "react-icons/md";
+import { ThreatLevel } from "@/types/review";
 import { cn } from "@/lib/utils";
 import { useTranslation } from "react-i18next";
 import { getTranslatedLabel } from "@/utils/i18n";
@@ -127,6 +130,11 @@ export function AnimatedEventCard({
     true,
   );

+  const threatLevel = useMemo<ThreatLevel>(
+    () => (event.data.metadata?.potential_threat_level ?? 0) as ThreatLevel,
+    [event],
+  );
+
   const aspectRatio = useMemo(() => {
     if (
       !config ||
@@ -152,7 +160,15 @@ export function AnimatedEventCard({
         <Tooltip>
           <TooltipTrigger asChild>
             <Button
-              className="pointer-events-none absolute left-2 top-1 z-40 bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500 opacity-0 transition-opacity group-hover:pointer-events-auto group-hover:opacity-100"
+              className={cn(
+                "absolute left-2 top-1 z-40 transition-opacity",
+                threatLevel === ThreatLevel.SECURITY_CONCERN &&
+                  "pointer-events-auto bg-severity_alert opacity-100 hover:bg-severity_alert",
+                threatLevel === ThreatLevel.NEEDS_REVIEW &&
+                  "pointer-events-auto bg-severity_detection opacity-100 hover:bg-severity_detection",
+                threatLevel === ThreatLevel.NORMAL &&
+                  "pointer-events-none bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500 opacity-0 group-hover:pointer-events-auto group-hover:opacity-100",
+              )}
               size="xs"
               aria-label={t("markAsReviewed")}
               onClick={async () => {
@@ -160,7 +176,13 @@ export function AnimatedEventCard({
                 updateEvents();
               }}
             >
-              <FaCircleCheck className="size-3 text-white" />
+              {threatLevel === ThreatLevel.SECURITY_CONCERN ? (
+                <FaExclamationTriangle className="size-3 text-white" />
+              ) : threatLevel === ThreatLevel.NEEDS_REVIEW ? (
+                <MdOutlinePersonSearch className="size-3 text-white" />
+              ) : (
+                <FaCircleCheck className="size-3 text-white" />
+              )}
             </Button>
           </TooltipTrigger>
           <TooltipContent>{t("markAsReviewed")}</TooltipContent>
@@ -218,7 +218,7 @@ export default function CameraReviewClassification({
             <Label
               className={cn(
                 "flex flex-row items-center text-base",
-                alertsZonesModified && "text-danger",
+                alertsZonesModified && "text-unsaved",
               )}
             >
               <Trans ns="views/settings">cameraReview.review.alerts</Trans>
@@ -286,7 +286,7 @@ export default function CameraReviewClassification({
             <Label
               className={cn(
                 "flex flex-row items-center text-base",
-                detectionsZonesModified && "text-danger",
+                detectionsZonesModified && "text-unsaved",
               )}
             >
               <Trans ns="views/settings">
@@ -1012,7 +1012,7 @@ export function ConfigSection({
       >
         {hasChanges && (
           <div className="flex items-center gap-2">
-            <span className="text-sm text-danger">
+            <span className="text-sm text-unsaved">
               {t("unsavedChanges", {
                 ns: "views/settings",
                 defaultValue: "You have unsaved changes",
@@ -1299,7 +1299,7 @@ export function ConfigSection({
         {hasChanges && (
           <Badge
             variant="secondary"
-            className="cursor-default bg-danger text-xs text-white hover:bg-danger"
+            className="cursor-default bg-unsaved text-xs text-black hover:bg-unsaved"
           >
             {t("button.modified", {
               ns: "common",
@@ -154,7 +154,7 @@ export function KnownPlatesField(props: FieldProps) {
       <div className="flex items-center justify-between">
         <div>
           <CardTitle
-            className={cn("text-sm", isModified && "text-danger")}
+            className={cn("text-sm", isModified && "text-unsaved")}
           >
             {title}
           </CardTitle>
@@ -142,7 +142,7 @@ export function ReplaceRulesField(props: FieldProps) {
       <div className="flex items-center justify-between">
         <div>
           <CardTitle
-            className={cn("text-sm", isModified && "text-danger")}
+            className={cn("text-sm", isModified && "text-unsaved")}
           >
             {title}
           </CardTitle>
@@ -497,7 +497,7 @@ export function FieldTemplate(props: FieldTemplateProps) {
           htmlFor={id}
           className={cn(
             "text-sm font-medium",
-            isModified && "text-danger",
+            isModified && "text-unsaved",
             hasFieldErrors && "text-destructive",
           )}
         >
@@ -516,7 +516,7 @@ export function FieldTemplate(props: FieldTemplateProps) {
     return (
       <Label
         htmlFor={id}
-        className={cn("text-sm font-medium", isModified && "text-danger")}
+        className={cn("text-sm font-medium", isModified && "text-unsaved")}
       >
         {finalLabel}
         {required && <span className="ml-1 text-destructive">*</span>}
@@ -535,7 +535,7 @@ export function FieldTemplate(props: FieldTemplateProps) {
         htmlFor={id}
         className={cn(
           "text-sm font-medium",
-          isModified && "text-danger",
+          isModified && "text-unsaved",
           hasFieldErrors && "text-destructive",
         )}
       >
@@ -467,7 +467,7 @@ export function ObjectFieldTemplate(props: ObjectFieldTemplateProps) {
         <CardTitle
           className={cn(
             "flex items-center text-sm",
-            hasModifiedDescendants && "text-danger",
+            hasModifiedDescendants && "text-unsaved",
           )}
         >
           {inferredLabel}
@@ -607,23 +607,38 @@ function StreamIssues({
       }
     }

-    if (stream.roles.includes("detect") && stream.resolution) {
-      const [width, height] = stream.resolution.split("x").map(Number);
-      if (!isNaN(width) && !isNaN(height) && width > 0 && height > 0) {
-        const minDimension = Math.min(width, height);
-        const maxDimension = Math.max(width, height);
+    if (stream.roles.includes("detect") && stream.testResult) {
+      const probedResolution = stream.testResult.resolution;
+      let probedWidth = 0;
+      let probedHeight = 0;
+      if (probedResolution) {
+        const [w, h] = probedResolution.split("x").map(Number);
+        if (!isNaN(w) && !isNaN(h)) {
+          probedWidth = w;
+          probedHeight = h;
+        }
+      }
+
+      if (probedWidth <= 0 || probedHeight <= 0) {
+        result.push({
+          type: "error",
+          message: t("cameraWizard.step4.issues.resolutionUnknown"),
+        });
+      } else {
+        const minDimension = Math.min(probedWidth, probedHeight);
+        const maxDimension = Math.max(probedWidth, probedHeight);
         if (minDimension > 1080) {
           result.push({
             type: "warning",
             message: t("cameraWizard.step4.issues.resolutionHigh", {
-              resolution: stream.resolution,
+              resolution: probedResolution,
             }),
           });
         } else if (maxDimension < 640) {
           result.push({
             type: "error",
             message: t("cameraWizard.step4.issues.resolutionLow", {
-              resolution: stream.resolution,
+              resolution: probedResolution,
             }),
           });
         }
@@ -1435,7 +1435,7 @@ export default function Settings() {
                 />
               )}
               {showUnsavedDot && (
-                <span className="inline-block size-2 rounded-full bg-danger" />
+                <span className="inline-block size-2 rounded-full bg-unsaved" />
               )}
             </div>
           )}
@@ -1516,7 +1516,7 @@ export default function Settings() {
         <div className="sticky bottom-0 z-50 mt-2 bg-background p-4">
           <div className="flex flex-col items-center gap-2">
             <div className="flex items-center gap-2">
-              <span className="text-sm text-danger">
+              <span className="text-sm text-unsaved">
                 {t("unsavedChanges", {
                   ns: "views/settings",
                   defaultValue: "You have unsaved changes",
@@ -79,11 +79,11 @@ const PROFILE_COLORS: ProfileColor[] = [
     bgMuted: "bg-green-400/20",
   },
   {
-    bg: "bg-amber-400",
-    text: "text-amber-400",
-    dot: "bg-amber-400",
-    border: "border-amber-400",
-    bgMuted: "bg-amber-400/20",
+    bg: "bg-fuchsia-500",
+    text: "text-fuchsia-500",
+    dot: "bg-fuchsia-500",
+    border: "border-fuchsia-500",
+    bgMuted: "bg-fuchsia-500/20",
   },
   {
     bg: "bg-slate-400",
@@ -93,11 +93,11 @@ const PROFILE_COLORS: ProfileColor[] = [
     bgMuted: "bg-slate-400/20",
   },
   {
-    bg: "bg-orange-300",
-    text: "text-orange-300",
-    dot: "bg-orange-300",
-    border: "border-orange-300",
-    bgMuted: "bg-orange-300/20",
+    bg: "bg-stone-500",
+    text: "text-stone-500",
+    dot: "bg-stone-500",
+    border: "border-stone-500",
+    bgMuted: "bg-stone-500/20",
   },
   {
     bg: "bg-blue-300",
@@ -389,7 +389,7 @@ export default function LiveCameraView({
     return "mse";
   }, [lowBandwidth, mic, webRTC, isRestreamed]);

-  useKeyboardListener(["m"], (key, modifiers) => {
+  useKeyboardListener(["m", "Escape"], (key, modifiers) => {
     if (!modifiers.down) {
       return true;
     }
@@ -407,6 +407,12 @@ export default function LiveCameraView({
           return true;
         }
         break;
+      case "Escape":
+        if (!fullscreen) {
+          navigate(-1);
+          return true;
+        }
+        break;
     }

     return false;
@@ -380,7 +380,9 @@ export default function Go2RtcStreamsSettingsView({
       >
         {hasChanges && (
           <div className="flex items-center gap-2">
-            <span className="text-sm text-danger">{t("unsavedChanges")}</span>
+            <span className="text-sm text-unsaved">
+              {t("unsavedChanges")}
+            </span>
           </div>
         )}
         <div className="flex w-full items-center gap-2 md:w-auto">
@@ -212,7 +212,7 @@ export function SingleSectionPage({
           {sectionStatus.hasChanges && (
             <Badge
               variant="secondary"
-              className="cursor-default bg-danger text-xs text-white hover:bg-danger"
+              className="cursor-default bg-unsaved text-xs text-black hover:bg-unsaved"
             >
               {t("button.modified", {
                 ns: "common",
@@ -250,7 +250,7 @@ export function SingleSectionPage({
           {sectionStatus.hasChanges && (
             <Badge
               variant="secondary"
-              className="cursor-default bg-danger text-xs text-white hover:bg-danger"
+              className="cursor-default bg-unsaved text-xs text-black hover:bg-unsaved"
             >
               {t("button.modified", { ns: "common", defaultValue: "Modified" })}
             </Badge>
@@ -65,6 +65,7 @@ module.exports = {
         ring: "hsl(var(--ring))",
         danger: "#ef4444",
         success: "#22c55e",
+        unsaved: "#f59e0b",
         background: "hsl(var(--background))",
         background_alt: "hsl(var(--background-alt))",
         foreground: "hsl(var(--foreground))",