Mirror of https://github.com/blakeblackshear/frigate.git (synced 2025-12-07 22:05:44 +03:00)

Commit 90b7086b1e — Merge branch 'dev' into dev-zones-friendly-name
@@ -5,21 +5,27 @@ set -euxo pipefail

SQLITE3_VERSION="3.46.1"
PYSQLITE3_VERSION="0.5.3"

# Install libsqlite3-dev if not present (needed for some base images like NVIDIA TensorRT)
if ! dpkg -l | grep -q libsqlite3-dev; then
    echo "Installing libsqlite3-dev for compilation..."
    apt-get update && apt-get install -y libsqlite3-dev && rm -rf /var/lib/apt/lists/*
fi

# Fetch the pre-built sqlite amalgamation instead of building from source
if [[ ! -d "sqlite" ]]; then
    mkdir sqlite
    cd sqlite

    # Download the pre-built amalgamation from sqlite.org
    # For SQLite 3.46.1, the amalgamation version is 3460100
    SQLITE_AMALGAMATION_VERSION="3460100"

    wget https://www.sqlite.org/2024/sqlite-amalgamation-${SQLITE_AMALGAMATION_VERSION}.zip -O sqlite-amalgamation.zip
    unzip sqlite-amalgamation.zip
    mv sqlite-amalgamation-${SQLITE_AMALGAMATION_VERSION}/* .
    rmdir sqlite-amalgamation-${SQLITE_AMALGAMATION_VERSION}
    rm sqlite-amalgamation.zip

    cd ../
fi
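Because pysqlite3 is compiled against whatever amalgamation sources are present, a quick sanity check after the build can confirm the bundled library matches SQLITE3_VERSION. The snippet below is an illustrative sketch, not part of the build script; it assumes the wheel built above is installed under the name pysqlite3.

# Hedged sanity check: verify the compiled pysqlite3 wheel bundles the expected
# SQLite version (3.46.1 here). Assumes the wheel produced above is installed.
from pysqlite3 import dbapi2 as sqlite3

expected = "3.46.1"
print("bundled SQLite:", sqlite3.sqlite_version)
assert sqlite3.sqlite_version == expected, (
    f"expected SQLite {expected}, got {sqlite3.sqlite_version}"
)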
@@ -2,9 +2,9 @@
set -e

# Download the MxAccl for Frigate github release
-wget https://github.com/memryx/mx_accl_frigate/archive/refs/heads/main.zip -O /tmp/mxaccl.zip
+wget https://github.com/memryx/mx_accl_frigate/archive/refs/tags/v2.1.0.zip -O /tmp/mxaccl.zip
unzip /tmp/mxaccl.zip -d /tmp
-mv /tmp/mx_accl_frigate-main /opt/mx_accl_frigate
+mv /tmp/mx_accl_frigate-2.1.0 /opt/mx_accl_frigate
rm /tmp/mxaccl.zip

# Install Python dependencies
@@ -56,7 +56,7 @@ pywebpush == 2.0.*
# alpr
pyclipper == 1.3.*
shapely == 2.0.*
-Levenshtein==0.26.*
+rapidfuzz==3.12.*
# HailoRT Wheels
appdirs==1.4.*
argcomplete==2.0.*
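This swap from Levenshtein to rapidfuzz shows up again in the ALPR code later in this diff, where jaro_winkler() and distance() become JaroWinkler.similarity() and Levenshtein.distance(). A minimal sketch of the equivalent calls, with illustrative plate strings only:

# Equivalent calls after the Levenshtein -> rapidfuzz swap seen in this diff.
from rapidfuzz.distance import JaroWinkler, Levenshtein

print(JaroWinkler.similarity("ABC1234", "ABC1Z34"))  # similarity in [0, 1]
print(Levenshtein.distance("ABC1234", "ABC1Z34"))    # edit distance, here 1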
@@ -24,10 +24,13 @@ echo "Adding MemryX GPG key and repository..."
wget -qO- https://developer.memryx.com/deb/memryx.asc | sudo tee /etc/apt/trusted.gpg.d/memryx.asc >/dev/null
echo 'deb https://developer.memryx.com/deb stable main' | sudo tee /etc/apt/sources.list.d/memryx.list >/dev/null

-# Update and install memx-drivers
-echo "Installing memx-drivers..."
+# Update and install specific SDK 2.1 packages
+echo "Installing MemryX SDK 2.1 packages..."
sudo apt update
-sudo apt install -y memx-drivers
+sudo apt install -y memx-drivers=2.1.* memx-accl=2.1.* mxa-manager=2.1.*
+
+# Hold packages to prevent automatic upgrades
+sudo apt-mark hold memx-drivers memx-accl mxa-manager

# ARM-specific board setup
if [[ "$arch" == "aarch64" || "$arch" == "arm64" ]]; then

@@ -37,11 +40,5 @@ fi

echo -e "\n\n\033[1;31mYOU MUST RESTART YOUR COMPUTER NOW\033[0m\n\n"

-# Install other runtime packages
-packages=("memx-accl" "mxa-manager")
-for pkg in "${packages[@]}"; do
-    echo "Installing $pkg..."
-    sudo apt install -y "$pkg"
-done
-echo "MemryX installation complete!"
+echo "MemryX SDK 2.1 installation complete!"
@@ -112,7 +112,7 @@ RUN apt-get update \
    && apt-get install -y protobuf-compiler libprotobuf-dev \
    && rm -rf /var/lib/apt/lists/*
RUN --mount=type=bind,source=docker/tensorrt/requirements-models-arm64.txt,target=/requirements-tensorrt-models.txt \
-    pip3 wheel --wheel-dir=/trt-model-wheels -r /requirements-tensorrt-models.txt
+    pip3 wheel --wheel-dir=/trt-model-wheels --no-deps -r /requirements-tensorrt-models.txt

FROM wget AS jetson-ffmpeg
ARG DEBIAN_FRONTEND

@@ -145,7 +145,8 @@ COPY --from=trt-wheels /etc/TENSORRT_VER /etc/TENSORRT_VER
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
    --mount=type=bind,from=trt-model-wheels,source=/trt-model-wheels,target=/deps/trt-model-wheels \
    pip3 uninstall -y onnxruntime \
-    && pip3 install -U /deps/trt-wheels/*.whl /deps/trt-model-wheels/*.whl \
+    && pip3 install -U /deps/trt-wheels/*.whl \
+    && pip3 install -U /deps/trt-model-wheels/*.whl \
    && ldconfig

WORKDIR /opt/frigate/
@@ -1 +1,2 @@
cuda-python == 12.6.*; platform_machine == 'aarch64'
+numpy == 1.26.*; platform_machine == 'aarch64'
@@ -37,7 +37,6 @@ from frigate.stats.prometheus import get_metrics, update_metrics
from frigate.util.builtin import (
    clean_camera_user_pass,
    flatten_config_data,
-    get_tz_modifiers,
    process_config_query_string,
    update_yaml_file_bulk,
)
@@ -48,6 +47,7 @@ from frigate.util.services import (
    restart_frigate,
    vainfo_hwaccel,
)
+from frigate.util.time import get_tz_modifiers
from frigate.version import VERSION

logger = logging.getLogger(__name__)
@@ -403,12 +403,13 @@ def config_set(request: Request, body: AppConfigSetBody):
                settings,
            )
        else:
-            # Handle nested config updates (e.g., config/classification/custom/{name})
-            settings = config.get_nested_object(body.update_topic)
-            if settings:
-                request.app.config_publisher.publisher.publish(
-                    body.update_topic, settings
-                )
+            # Generic handling for global config updates
+            settings = config.get_nested_object(body.update_topic)
+
+            # Publish None for removal, actual config for add/update
+            request.app.config_publisher.publisher.publish(
+                body.update_topic, settings
+            )

    return JSONResponse(
        content=(
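The new behavior publishes the looked-up settings object even when it is None, so subscribers can treat None as a removal signal (the embeddings maintainer later in this diff does exactly that for classification processors). A minimal sketch of that contract from the consumer side follows; the handler and helper names are illustrative assumptions, not Frigate's actual API.

# Illustrative sketch only: how a subscriber might interpret the
# "None means removal, object means add/update" contract described above.
def handle_config_update(topic: str, settings: object | None) -> None:
    if settings is None:
        # None signals that the nested config (e.g. a custom classification
        # model) was removed and any matching processor should be torn down.
        remove_processor_for(topic)  # hypothetical helper
    else:
        upsert_processor_for(topic, settings)  # hypothetical helper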
@@ -31,14 +31,14 @@ from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig
from frigate.config.camera import DetectConfig
-from frigate.const import CLIPS_DIR, FACE_DIR
+from frigate.const import CLIPS_DIR, FACE_DIR, MODEL_CACHE_DIR
from frigate.embeddings import EmbeddingsContext
from frigate.models import Event
from frigate.util.classification import (
    collect_object_classification_examples,
    collect_state_classification_examples,
)
-from frigate.util.path import get_event_snapshot
+from frigate.util.file import get_event_snapshot

logger = logging.getLogger(__name__)
@@ -828,9 +828,13 @@ def delete_classification_model(request: Request, name: str):
            status_code=404,
        )

-    # Delete the classification model's data directory
-    model_dir = os.path.join(CLIPS_DIR, sanitize_filename(name))
+    # Delete the classification model's data directory in clips
+    data_dir = os.path.join(CLIPS_DIR, sanitize_filename(name))
+    if os.path.exists(data_dir):
+        shutil.rmtree(data_dir)
+
+    # Delete the classification model's files in model_cache
+    model_dir = os.path.join(MODEL_CACHE_DIR, sanitize_filename(name))
    if os.path.exists(model_dir):
        shutil.rmtree(model_dir)
@ -2,6 +2,7 @@
|
||||
|
||||
import base64
|
||||
import datetime
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
@ -57,8 +58,8 @@ from frigate.const import CLIPS_DIR, TRIGGER_DIR
|
||||
from frigate.embeddings import EmbeddingsContext
|
||||
from frigate.models import Event, ReviewSegment, Timeline, Trigger
|
||||
from frigate.track.object_processing import TrackedObject
|
||||
from frigate.util.builtin import get_tz_modifiers
|
||||
from frigate.util.path import get_event_thumbnail_bytes
|
||||
from frigate.util.file import get_event_thumbnail_bytes
|
||||
from frigate.util.time import get_dst_transitions, get_tz_modifiers
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@ -813,7 +814,6 @@ def events_summary(
|
||||
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
||||
):
|
||||
tz_name = params.timezone
|
||||
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(tz_name)
|
||||
has_clip = params.has_clip
|
||||
has_snapshot = params.has_snapshot
|
||||
|
||||
@ -828,33 +828,91 @@ def events_summary(
|
||||
if len(clauses) == 0:
|
||||
clauses.append((True))
|
||||
|
||||
groups = (
|
||||
time_range_query = (
|
||||
Event.select(
|
||||
Event.camera,
|
||||
Event.label,
|
||||
Event.sub_label,
|
||||
Event.data,
|
||||
fn.strftime(
|
||||
"%Y-%m-%d",
|
||||
fn.datetime(
|
||||
Event.start_time, "unixepoch", hour_modifier, minute_modifier
|
||||
),
|
||||
).alias("day"),
|
||||
Event.zones,
|
||||
fn.COUNT(Event.id).alias("count"),
|
||||
fn.MIN(Event.start_time).alias("min_time"),
|
||||
fn.MAX(Event.start_time).alias("max_time"),
|
||||
)
|
||||
.where(reduce(operator.and_, clauses) & (Event.camera << allowed_cameras))
|
||||
.group_by(
|
||||
Event.camera,
|
||||
Event.label,
|
||||
Event.sub_label,
|
||||
Event.data,
|
||||
(Event.start_time + seconds_offset).cast("int") / (3600 * 24),
|
||||
Event.zones,
|
||||
)
|
||||
.dicts()
|
||||
.get()
|
||||
)
|
||||
|
||||
return JSONResponse(content=[e for e in groups.dicts()])
|
||||
min_time = time_range_query.get("min_time")
|
||||
max_time = time_range_query.get("max_time")
|
||||
|
||||
if min_time is None or max_time is None:
|
||||
return JSONResponse(content=[])
|
||||
|
||||
dst_periods = get_dst_transitions(tz_name, min_time, max_time)
|
||||
|
||||
grouped: dict[tuple, dict] = {}
|
||||
|
||||
for period_start, period_end, period_offset in dst_periods:
|
||||
hours_offset = int(period_offset / 60 / 60)
|
||||
minutes_offset = int(period_offset / 60 - hours_offset * 60)
|
||||
period_hour_modifier = f"{hours_offset} hour"
|
||||
period_minute_modifier = f"{minutes_offset} minute"
|
||||
|
||||
period_groups = (
|
||||
Event.select(
|
||||
Event.camera,
|
||||
Event.label,
|
||||
Event.sub_label,
|
||||
Event.data,
|
||||
fn.strftime(
|
||||
"%Y-%m-%d",
|
||||
fn.datetime(
|
||||
Event.start_time,
|
||||
"unixepoch",
|
||||
period_hour_modifier,
|
||||
period_minute_modifier,
|
||||
),
|
||||
).alias("day"),
|
||||
Event.zones,
|
||||
fn.COUNT(Event.id).alias("count"),
|
||||
)
|
||||
.where(
|
||||
reduce(operator.and_, clauses)
|
||||
& (Event.camera << allowed_cameras)
|
||||
& (Event.start_time >= period_start)
|
||||
& (Event.start_time <= period_end)
|
||||
)
|
||||
.group_by(
|
||||
Event.camera,
|
||||
Event.label,
|
||||
Event.sub_label,
|
||||
Event.data,
|
||||
(Event.start_time + period_offset).cast("int") / (3600 * 24),
|
||||
Event.zones,
|
||||
)
|
||||
.namedtuples()
|
||||
)
|
||||
|
||||
for g in period_groups:
|
||||
key = (
|
||||
g.camera,
|
||||
g.label,
|
||||
g.sub_label,
|
||||
json.dumps(g.data, sort_keys=True) if g.data is not None else None,
|
||||
g.day,
|
||||
json.dumps(g.zones, sort_keys=True) if g.zones is not None else None,
|
||||
)
|
||||
|
||||
if key in grouped:
|
||||
grouped[key]["count"] += int(g.count or 0)
|
||||
else:
|
||||
grouped[key] = {
|
||||
"camera": g.camera,
|
||||
"label": g.label,
|
||||
"sub_label": g.sub_label,
|
||||
"data": g.data,
|
||||
"day": g.day,
|
||||
"zones": g.zones,
|
||||
"count": int(g.count or 0),
|
||||
}
|
||||
|
||||
return JSONResponse(content=list(grouped.values()))
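The grouping above — and the same loop repeated below for the recordings and review summaries — relies on get_dst_transitions() splitting [min_time, max_time] into periods that each have a constant UTC offset, so the SQLite hour/minute modifiers stay correct across DST changes. The sketch below illustrates the expected return shape (a list of (start, end, offset_seconds) tuples) under that assumption; it is not Frigate's actual frigate.util.time implementation and snaps transitions to day granularity rather than locating the exact changeover.

# Illustrative sketch of the DST-splitting idea used by the callers above.
from datetime import datetime
from zoneinfo import ZoneInfo


def sketch_dst_periods(tz_name: str, min_time: float, max_time: float):
    tz = ZoneInfo(tz_name)
    periods = []
    start = min_time
    current_offset = datetime.fromtimestamp(start, tz).utcoffset().total_seconds()

    # Walk the range day by day; whenever the UTC offset changes, close the
    # previous period and open a new one with the new offset.
    t = start
    while t < max_time:
        t = min(t + 24 * 3600, max_time)
        offset = datetime.fromtimestamp(t, tz).utcoffset().total_seconds()
        if offset != current_offset or t >= max_time:
            periods.append((start, t, current_offset))
            start = t
            current_offset = offset
    return periods


# Example: a US timezone across the March 2024 spring-forward boundary.
for p_start, p_end, p_offset in sketch_dst_periods("America/Chicago", 1709968400, 1710577200):
    hours = int(p_offset / 60 / 60)
    minutes = int(p_offset / 60 - hours * 60)
    print(f"{hours} hour", f"{minutes} minute", p_start, p_end)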
|
||||
|
||||
|
||||
@router.get(
|
||||
|
||||
@ -34,7 +34,7 @@ from frigate.record.export import (
|
||||
PlaybackSourceEnum,
|
||||
RecordingExporter,
|
||||
)
|
||||
from frigate.util.builtin import is_current_hour
|
||||
from frigate.util.time import is_current_hour
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@ -44,9 +44,9 @@ from frigate.const import (
|
||||
)
|
||||
from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
|
||||
from frigate.track.object_processing import TrackedObjectProcessor
|
||||
from frigate.util.builtin import get_tz_modifiers
|
||||
from frigate.util.file import get_event_thumbnail_bytes
|
||||
from frigate.util.image import get_image_from_recording
|
||||
from frigate.util.path import get_event_thumbnail_bytes
|
||||
from frigate.util.time import get_dst_transitions
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@ -424,7 +424,6 @@ def all_recordings_summary(
|
||||
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
||||
):
|
||||
"""Returns true/false by day indicating if recordings exist"""
|
||||
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)
|
||||
|
||||
cameras = params.cameras
|
||||
if cameras != "all":
|
||||
@ -432,41 +431,70 @@ def all_recordings_summary(
|
||||
filtered = requested.intersection(allowed_cameras)
|
||||
if not filtered:
|
||||
return JSONResponse(content={})
|
||||
cameras = ",".join(filtered)
|
||||
camera_list = list(filtered)
|
||||
else:
|
||||
cameras = allowed_cameras
|
||||
camera_list = allowed_cameras
|
||||
|
||||
query = (
|
||||
time_range_query = (
|
||||
Recordings.select(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d",
|
||||
fn.datetime(
|
||||
Recordings.start_time + seconds_offset,
|
||||
"unixepoch",
|
||||
hour_modifier,
|
||||
minute_modifier,
|
||||
),
|
||||
).alias("day")
|
||||
fn.MIN(Recordings.start_time).alias("min_time"),
|
||||
fn.MAX(Recordings.start_time).alias("max_time"),
|
||||
)
|
||||
.group_by(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d",
|
||||
fn.datetime(
|
||||
Recordings.start_time + seconds_offset,
|
||||
"unixepoch",
|
||||
hour_modifier,
|
||||
minute_modifier,
|
||||
),
|
||||
)
|
||||
)
|
||||
.order_by(Recordings.start_time.desc())
|
||||
.where(Recordings.camera << camera_list)
|
||||
.dicts()
|
||||
.get()
|
||||
)
|
||||
|
||||
if params.cameras != "all":
|
||||
query = query.where(Recordings.camera << cameras.split(","))
|
||||
min_time = time_range_query.get("min_time")
|
||||
max_time = time_range_query.get("max_time")
|
||||
|
||||
recording_days = query.namedtuples()
|
||||
days = {day.day: True for day in recording_days}
|
||||
if min_time is None or max_time is None:
|
||||
return JSONResponse(content={})
|
||||
|
||||
dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
|
||||
|
||||
days: dict[str, bool] = {}
|
||||
|
||||
for period_start, period_end, period_offset in dst_periods:
|
||||
hours_offset = int(period_offset / 60 / 60)
|
||||
minutes_offset = int(period_offset / 60 - hours_offset * 60)
|
||||
period_hour_modifier = f"{hours_offset} hour"
|
||||
period_minute_modifier = f"{minutes_offset} minute"
|
||||
|
||||
period_query = (
|
||||
Recordings.select(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d",
|
||||
fn.datetime(
|
||||
Recordings.start_time,
|
||||
"unixepoch",
|
||||
period_hour_modifier,
|
||||
period_minute_modifier,
|
||||
),
|
||||
).alias("day")
|
||||
)
|
||||
.where(
|
||||
(Recordings.camera << camera_list)
|
||||
& (Recordings.end_time >= period_start)
|
||||
& (Recordings.start_time <= period_end)
|
||||
)
|
||||
.group_by(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d",
|
||||
fn.datetime(
|
||||
Recordings.start_time,
|
||||
"unixepoch",
|
||||
period_hour_modifier,
|
||||
period_minute_modifier,
|
||||
),
|
||||
)
|
||||
)
|
||||
.order_by(Recordings.start_time.desc())
|
||||
.namedtuples()
|
||||
)
|
||||
|
||||
for g in period_query:
|
||||
days[g.day] = True
|
||||
|
||||
return JSONResponse(content=days)
|
||||
|
||||
@ -476,61 +504,103 @@ def all_recordings_summary(
|
||||
)
|
||||
async def recordings_summary(camera_name: str, timezone: str = "utc"):
|
||||
"""Returns hourly summary for recordings of given camera"""
|
||||
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(timezone)
|
||||
recording_groups = (
|
||||
|
||||
time_range_query = (
|
||||
Recordings.select(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d %H",
|
||||
fn.datetime(
|
||||
Recordings.start_time, "unixepoch", hour_modifier, minute_modifier
|
||||
),
|
||||
).alias("hour"),
|
||||
fn.SUM(Recordings.duration).alias("duration"),
|
||||
fn.SUM(Recordings.motion).alias("motion"),
|
||||
fn.SUM(Recordings.objects).alias("objects"),
|
||||
fn.MIN(Recordings.start_time).alias("min_time"),
|
||||
fn.MAX(Recordings.start_time).alias("max_time"),
|
||||
)
|
||||
.where(Recordings.camera == camera_name)
|
||||
.group_by((Recordings.start_time + seconds_offset).cast("int") / 3600)
|
||||
.order_by(Recordings.start_time.desc())
|
||||
.namedtuples()
|
||||
.dicts()
|
||||
.get()
|
||||
)
|
||||
|
||||
event_groups = (
|
||||
Event.select(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d %H",
|
||||
fn.datetime(
|
||||
Event.start_time, "unixepoch", hour_modifier, minute_modifier
|
||||
),
|
||||
).alias("hour"),
|
||||
fn.COUNT(Event.id).alias("count"),
|
||||
min_time = time_range_query.get("min_time")
|
||||
max_time = time_range_query.get("max_time")
|
||||
|
||||
days: dict[str, dict] = {}
|
||||
|
||||
if min_time is None or max_time is None:
|
||||
return JSONResponse(content=list(days.values()))
|
||||
|
||||
dst_periods = get_dst_transitions(timezone, min_time, max_time)
|
||||
|
||||
for period_start, period_end, period_offset in dst_periods:
|
||||
hours_offset = int(period_offset / 60 / 60)
|
||||
minutes_offset = int(period_offset / 60 - hours_offset * 60)
|
||||
period_hour_modifier = f"{hours_offset} hour"
|
||||
period_minute_modifier = f"{minutes_offset} minute"
|
||||
|
||||
recording_groups = (
|
||||
Recordings.select(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d %H",
|
||||
fn.datetime(
|
||||
Recordings.start_time,
|
||||
"unixepoch",
|
||||
period_hour_modifier,
|
||||
period_minute_modifier,
|
||||
),
|
||||
).alias("hour"),
|
||||
fn.SUM(Recordings.duration).alias("duration"),
|
||||
fn.SUM(Recordings.motion).alias("motion"),
|
||||
fn.SUM(Recordings.objects).alias("objects"),
|
||||
)
|
||||
.where(
|
||||
(Recordings.camera == camera_name)
|
||||
& (Recordings.end_time >= period_start)
|
||||
& (Recordings.start_time <= period_end)
|
||||
)
|
||||
.group_by((Recordings.start_time + period_offset).cast("int") / 3600)
|
||||
.order_by(Recordings.start_time.desc())
|
||||
.namedtuples()
|
||||
)
|
||||
.where(Event.camera == camera_name, Event.has_clip)
|
||||
.group_by((Event.start_time + seconds_offset).cast("int") / 3600)
|
||||
.namedtuples()
|
||||
)
|
||||
|
||||
event_map = {g.hour: g.count for g in event_groups}
|
||||
event_groups = (
|
||||
Event.select(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d %H",
|
||||
fn.datetime(
|
||||
Event.start_time,
|
||||
"unixepoch",
|
||||
period_hour_modifier,
|
||||
period_minute_modifier,
|
||||
),
|
||||
).alias("hour"),
|
||||
fn.COUNT(Event.id).alias("count"),
|
||||
)
|
||||
.where(Event.camera == camera_name, Event.has_clip)
|
||||
.where(
|
||||
(Event.start_time >= period_start) & (Event.start_time <= period_end)
|
||||
)
|
||||
.group_by((Event.start_time + period_offset).cast("int") / 3600)
|
||||
.namedtuples()
|
||||
)
|
||||
|
||||
days = {}
|
||||
event_map = {g.hour: g.count for g in event_groups}
|
||||
|
||||
for recording_group in recording_groups:
|
||||
parts = recording_group.hour.split()
|
||||
hour = parts[1]
|
||||
day = parts[0]
|
||||
events_count = event_map.get(recording_group.hour, 0)
|
||||
hour_data = {
|
||||
"hour": hour,
|
||||
"events": events_count,
|
||||
"motion": recording_group.motion,
|
||||
"objects": recording_group.objects,
|
||||
"duration": round(recording_group.duration),
|
||||
}
|
||||
if day not in days:
|
||||
days[day] = {"events": events_count, "hours": [hour_data], "day": day}
|
||||
else:
|
||||
days[day]["events"] += events_count
|
||||
days[day]["hours"].append(hour_data)
|
||||
for recording_group in recording_groups:
|
||||
parts = recording_group.hour.split()
|
||||
hour = parts[1]
|
||||
day = parts[0]
|
||||
events_count = event_map.get(recording_group.hour, 0)
|
||||
hour_data = {
|
||||
"hour": hour,
|
||||
"events": events_count,
|
||||
"motion": recording_group.motion,
|
||||
"objects": recording_group.objects,
|
||||
"duration": round(recording_group.duration),
|
||||
}
|
||||
if day in days:
|
||||
# merge counts if already present (edge-case at DST boundary)
|
||||
days[day]["events"] += events_count or 0
|
||||
days[day]["hours"].append(hour_data)
|
||||
else:
|
||||
days[day] = {
|
||||
"events": events_count or 0,
|
||||
"hours": [hour_data],
|
||||
"day": day,
|
||||
}
|
||||
|
||||
return JSONResponse(content=list(days.values()))
|
||||
|
||||
|
||||
@ -36,7 +36,7 @@ from frigate.config import FrigateConfig
|
||||
from frigate.embeddings import EmbeddingsContext
|
||||
from frigate.models import Recordings, ReviewSegment, UserReviewStatus
|
||||
from frigate.review.types import SeverityEnum
|
||||
from frigate.util.builtin import get_tz_modifiers
|
||||
from frigate.util.time import get_dst_transitions
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@ -197,7 +197,6 @@ async def review_summary(
|
||||
|
||||
user_id = current_user["username"]
|
||||
|
||||
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)
|
||||
day_ago = (datetime.datetime.now() - datetime.timedelta(hours=24)).timestamp()
|
||||
|
||||
cameras = params.cameras
|
||||
@ -329,89 +328,135 @@ async def review_summary(
|
||||
)
|
||||
clauses.append(reduce(operator.or_, label_clauses))
|
||||
|
||||
day_in_seconds = 60 * 60 * 24
|
||||
last_month_query = (
|
||||
# Find the time range of available data
|
||||
time_range_query = (
|
||||
ReviewSegment.select(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d",
|
||||
fn.datetime(
|
||||
ReviewSegment.start_time,
|
||||
"unixepoch",
|
||||
hour_modifier,
|
||||
minute_modifier,
|
||||
),
|
||||
).alias("day"),
|
||||
fn.SUM(
|
||||
Case(
|
||||
None,
|
||||
[
|
||||
(
|
||||
(ReviewSegment.severity == SeverityEnum.alert)
|
||||
& (UserReviewStatus.has_been_reviewed == True),
|
||||
1,
|
||||
)
|
||||
],
|
||||
0,
|
||||
)
|
||||
).alias("reviewed_alert"),
|
||||
fn.SUM(
|
||||
Case(
|
||||
None,
|
||||
[
|
||||
(
|
||||
(ReviewSegment.severity == SeverityEnum.detection)
|
||||
& (UserReviewStatus.has_been_reviewed == True),
|
||||
1,
|
||||
)
|
||||
],
|
||||
0,
|
||||
)
|
||||
).alias("reviewed_detection"),
|
||||
fn.SUM(
|
||||
Case(
|
||||
None,
|
||||
[
|
||||
(
|
||||
(ReviewSegment.severity == SeverityEnum.alert),
|
||||
1,
|
||||
)
|
||||
],
|
||||
0,
|
||||
)
|
||||
).alias("total_alert"),
|
||||
fn.SUM(
|
||||
Case(
|
||||
None,
|
||||
[
|
||||
(
|
||||
(ReviewSegment.severity == SeverityEnum.detection),
|
||||
1,
|
||||
)
|
||||
],
|
||||
0,
|
||||
)
|
||||
).alias("total_detection"),
|
||||
)
|
||||
.left_outer_join(
|
||||
UserReviewStatus,
|
||||
on=(
|
||||
(ReviewSegment.id == UserReviewStatus.review_segment)
|
||||
& (UserReviewStatus.user_id == user_id)
|
||||
),
|
||||
fn.MIN(ReviewSegment.start_time).alias("min_time"),
|
||||
fn.MAX(ReviewSegment.start_time).alias("max_time"),
|
||||
)
|
||||
.where(reduce(operator.and_, clauses) if clauses else True)
|
||||
.group_by(
|
||||
(ReviewSegment.start_time + seconds_offset).cast("int") / day_in_seconds
|
||||
)
|
||||
.order_by(ReviewSegment.start_time.desc())
|
||||
.dicts()
|
||||
.get()
|
||||
)
|
||||
|
||||
min_time = time_range_query.get("min_time")
|
||||
max_time = time_range_query.get("max_time")
|
||||
|
||||
data = {
|
||||
"last24Hours": last_24_query,
|
||||
}
|
||||
|
||||
for e in last_month_query.dicts().iterator():
|
||||
data[e["day"]] = e
|
||||
# If no data, return early
|
||||
if min_time is None or max_time is None:
|
||||
return JSONResponse(content=data)
|
||||
|
||||
# Get DST transition periods
|
||||
dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
|
||||
|
||||
day_in_seconds = 60 * 60 * 24
|
||||
|
||||
# Query each DST period separately with the correct offset
|
||||
for period_start, period_end, period_offset in dst_periods:
|
||||
# Calculate hour/minute modifiers for this period
|
||||
hours_offset = int(period_offset / 60 / 60)
|
||||
minutes_offset = int(period_offset / 60 - hours_offset * 60)
|
||||
period_hour_modifier = f"{hours_offset} hour"
|
||||
period_minute_modifier = f"{minutes_offset} minute"
|
||||
|
||||
# Build clauses including time range for this period
|
||||
period_clauses = clauses.copy()
|
||||
period_clauses.append(
|
||||
(ReviewSegment.start_time >= period_start)
|
||||
& (ReviewSegment.start_time <= period_end)
|
||||
)
|
||||
|
||||
period_query = (
|
||||
ReviewSegment.select(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d",
|
||||
fn.datetime(
|
||||
ReviewSegment.start_time,
|
||||
"unixepoch",
|
||||
period_hour_modifier,
|
||||
period_minute_modifier,
|
||||
),
|
||||
).alias("day"),
|
||||
fn.SUM(
|
||||
Case(
|
||||
None,
|
||||
[
|
||||
(
|
||||
(ReviewSegment.severity == SeverityEnum.alert)
|
||||
& (UserReviewStatus.has_been_reviewed == True),
|
||||
1,
|
||||
)
|
||||
],
|
||||
0,
|
||||
)
|
||||
).alias("reviewed_alert"),
|
||||
fn.SUM(
|
||||
Case(
|
||||
None,
|
||||
[
|
||||
(
|
||||
(ReviewSegment.severity == SeverityEnum.detection)
|
||||
& (UserReviewStatus.has_been_reviewed == True),
|
||||
1,
|
||||
)
|
||||
],
|
||||
0,
|
||||
)
|
||||
).alias("reviewed_detection"),
|
||||
fn.SUM(
|
||||
Case(
|
||||
None,
|
||||
[
|
||||
(
|
||||
(ReviewSegment.severity == SeverityEnum.alert),
|
||||
1,
|
||||
)
|
||||
],
|
||||
0,
|
||||
)
|
||||
).alias("total_alert"),
|
||||
fn.SUM(
|
||||
Case(
|
||||
None,
|
||||
[
|
||||
(
|
||||
(ReviewSegment.severity == SeverityEnum.detection),
|
||||
1,
|
||||
)
|
||||
],
|
||||
0,
|
||||
)
|
||||
).alias("total_detection"),
|
||||
)
|
||||
.left_outer_join(
|
||||
UserReviewStatus,
|
||||
on=(
|
||||
(ReviewSegment.id == UserReviewStatus.review_segment)
|
||||
& (UserReviewStatus.user_id == user_id)
|
||||
),
|
||||
)
|
||||
.where(reduce(operator.and_, period_clauses))
|
||||
.group_by(
|
||||
(ReviewSegment.start_time + period_offset).cast("int") / day_in_seconds
|
||||
)
|
||||
.order_by(ReviewSegment.start_time.desc())
|
||||
)
|
||||
|
||||
# Merge results from this period
|
||||
for e in period_query.dicts().iterator():
|
||||
day_key = e["day"]
|
||||
if day_key in data:
|
||||
# Merge counts if day already exists (edge case at DST boundary)
|
||||
data[day_key]["reviewed_alert"] += e["reviewed_alert"] or 0
|
||||
data[day_key]["reviewed_detection"] += e["reviewed_detection"] or 0
|
||||
data[day_key]["total_alert"] += e["total_alert"] or 0
|
||||
data[day_key]["total_detection"] += e["total_detection"] or 0
|
||||
else:
|
||||
data[day_key] = e
|
||||
|
||||
return JSONResponse(content=data)
|
||||
|
||||
|
||||
@ -14,8 +14,8 @@ from typing import Any, List, Optional, Tuple
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
from Levenshtein import distance, jaro_winkler
|
||||
from pyclipper import ET_CLOSEDPOLYGON, JT_ROUND, PyclipperOffset
|
||||
from rapidfuzz.distance import JaroWinkler, Levenshtein
|
||||
from shapely.geometry import Polygon
|
||||
|
||||
from frigate.comms.event_metadata_updater import (
|
||||
@ -1123,7 +1123,9 @@ class LicensePlateProcessingMixin:
|
||||
for i, plate in enumerate(plates):
|
||||
merged = False
|
||||
for j, cluster in enumerate(clusters):
|
||||
sims = [jaro_winkler(plate["plate"], v["plate"]) for v in cluster]
|
||||
sims = [
|
||||
JaroWinkler.similarity(plate["plate"], v["plate"]) for v in cluster
|
||||
]
|
||||
if len(sims) > 0:
|
||||
avg_sim = sum(sims) / len(sims)
|
||||
if avg_sim >= self.cluster_threshold:
|
||||
@ -1500,7 +1502,7 @@ class LicensePlateProcessingMixin:
|
||||
and current_time - data["last_seen"]
|
||||
<= self.config.cameras[camera].lpr.expire_time
|
||||
):
|
||||
similarity = jaro_winkler(data["plate"], top_plate)
|
||||
similarity = JaroWinkler.similarity(data["plate"], top_plate)
|
||||
if similarity >= self.similarity_threshold:
|
||||
plate_id = existing_id
|
||||
logger.debug(
|
||||
@ -1580,7 +1582,8 @@ class LicensePlateProcessingMixin:
|
||||
for label, plates_list in self.lpr_config.known_plates.items()
|
||||
if any(
|
||||
re.match(f"^{plate}$", rep_plate)
|
||||
or distance(plate, rep_plate) <= self.lpr_config.match_distance
|
||||
or Levenshtein.distance(plate, rep_plate)
|
||||
<= self.lpr_config.match_distance
|
||||
for plate in plates_list
|
||||
)
|
||||
),
|
||||
|
||||
@ -20,8 +20,8 @@ from frigate.genai import GenAIClient
|
||||
from frigate.models import Event
|
||||
from frigate.types import TrackedObjectUpdateTypesEnum
|
||||
from frigate.util.builtin import EventsPerSecond, InferenceSpeed
|
||||
from frigate.util.file import get_event_thumbnail_bytes
|
||||
from frigate.util.image import create_thumbnail, ensure_jpeg_bytes
|
||||
from frigate.util.path import get_event_thumbnail_bytes
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from frigate.embeddings import Embeddings
|
||||
|
||||
@ -22,7 +22,7 @@ from frigate.db.sqlitevecq import SqliteVecQueueDatabase
|
||||
from frigate.embeddings.util import ZScoreNormalization
|
||||
from frigate.models import Event, Trigger
|
||||
from frigate.util.builtin import cosine_distance
|
||||
from frigate.util.path import get_event_thumbnail_bytes
|
||||
from frigate.util.file import get_event_thumbnail_bytes
|
||||
|
||||
from ..post.api import PostProcessorApi
|
||||
from ..types import DataProcessorMetrics
|
||||
|
||||
@ -466,6 +466,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
|
||||
now,
|
||||
self.labelmap[best_id],
|
||||
score,
|
||||
max_files=200,
|
||||
)
|
||||
|
||||
if score < self.model_config.threshold:
|
||||
@ -529,6 +530,7 @@ def write_classification_attempt(
|
||||
timestamp: float,
|
||||
label: str,
|
||||
score: float,
|
||||
max_files: int = 100,
|
||||
) -> None:
|
||||
if "-" in label:
|
||||
label = label.replace("-", "_")
|
||||
@ -544,5 +546,5 @@ def write_classification_attempt(
|
||||
)
|
||||
|
||||
# delete oldest face image if maximum is reached
|
||||
if len(files) > 100:
|
||||
if len(files) > max_files:
|
||||
os.unlink(os.path.join(folder, files[-1]))
|
||||
|
||||
@ -166,6 +166,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
|
||||
camera = obj_data["camera"]
|
||||
|
||||
if not self.config.cameras[camera].face_recognition.enabled:
|
||||
logger.debug(f"Face recognition disabled for camera {camera}, skipping")
|
||||
return
|
||||
|
||||
start = datetime.datetime.now().timestamp()
|
||||
@ -208,6 +209,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
|
||||
person_box = obj_data.get("box")
|
||||
|
||||
if not person_box:
|
||||
logger.debug(f"No person box available for {id}")
|
||||
return
|
||||
|
||||
rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
|
||||
@ -233,7 +235,8 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
|
||||
|
||||
try:
|
||||
face_frame = cv2.cvtColor(face_frame, cv2.COLOR_RGB2BGR)
|
||||
except Exception:
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to convert face frame color for {id}: {e}")
|
||||
return
|
||||
else:
|
||||
# don't run for object without attributes
|
||||
@ -251,6 +254,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
|
||||
|
||||
# no faces detected in this frame
|
||||
if not face:
|
||||
logger.debug(f"No face attributes found for {id}")
|
||||
return
|
||||
|
||||
face_box = face.get("box")
|
||||
@ -274,6 +278,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
|
||||
res = self.recognizer.classify(face_frame)
|
||||
|
||||
if not res:
|
||||
logger.debug(f"Face recognizer returned no result for {id}")
|
||||
self.__update_metrics(datetime.datetime.now().timestamp() - start)
|
||||
return
|
||||
|
||||
@ -330,6 +335,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
|
||||
def handle_request(self, topic, request_data) -> dict[str, Any] | None:
|
||||
if topic == EmbeddingsRequestEnum.clear_face_classifier.value:
|
||||
self.recognizer.clear()
|
||||
return {"success": True, "message": "Face classifier cleared."}
|
||||
elif topic == EmbeddingsRequestEnum.recognize_face.value:
|
||||
img = cv2.imdecode(
|
||||
np.frombuffer(base64.b64decode(request_data["image"]), dtype=np.uint8),
|
||||
|
||||
@ -17,6 +17,7 @@ from frigate.detectors.detector_config import (
|
||||
BaseDetectorConfig,
|
||||
ModelTypeEnum,
|
||||
)
|
||||
from frigate.util.file import FileLock
|
||||
from frigate.util.model import post_process_yolo
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@ -177,29 +178,6 @@ class MemryXDetector(DetectionApi):
|
||||
logger.error(f"Failed to initialize MemryX model: {e}")
|
||||
raise
|
||||
|
||||
def _acquire_file_lock(self, lock_path: str, timeout: int = 60, poll: float = 0.2):
|
||||
"""
|
||||
Create an exclusive lock file. Blocks (with polling) until it can acquire,
|
||||
or raises TimeoutError. Uses only stdlib (os.O_EXCL).
|
||||
"""
|
||||
start = time.time()
|
||||
while True:
|
||||
try:
|
||||
fd = os.open(lock_path, os.O_CREAT | os.O_EXCL | os.O_RDWR)
|
||||
os.close(fd)
|
||||
return
|
||||
except FileExistsError:
|
||||
if time.time() - start > timeout:
|
||||
raise TimeoutError(f"Timeout waiting for lock: {lock_path}")
|
||||
time.sleep(poll)
|
||||
|
||||
def _release_file_lock(self, lock_path: str):
|
||||
"""Best-effort removal of the lock file."""
|
||||
try:
|
||||
os.remove(lock_path)
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
def load_yolo_constants(self):
|
||||
base = f"{self.cache_dir}/{self.model_folder}"
|
||||
# constants for yolov9 post-processing
|
||||
@ -212,9 +190,9 @@ class MemryXDetector(DetectionApi):
|
||||
os.makedirs(self.cache_dir, exist_ok=True)
|
||||
|
||||
lock_path = os.path.join(self.cache_dir, f".{self.model_folder}.lock")
|
||||
self._acquire_file_lock(lock_path)
|
||||
lock = FileLock(lock_path, timeout=60)
|
||||
|
||||
try:
|
||||
with lock:
|
||||
# ---------- CASE 1: user provided a custom model path ----------
|
||||
if self.memx_model_path:
|
||||
if not self.memx_model_path.endswith(".zip"):
|
||||
@ -338,9 +316,6 @@ class MemryXDetector(DetectionApi):
|
||||
f"Failed to remove downloaded zip {zip_path}: {e}"
|
||||
)
|
||||
|
||||
finally:
|
||||
self._release_file_lock(lock_path)
|
||||
|
||||
def send_input(self, connection_id, tensor_input: np.ndarray):
|
||||
"""Pre-process (if needed) and send frame to MemryX input queue"""
|
||||
if tensor_input is None:
|
||||
|
||||
@ -29,7 +29,7 @@ from frigate.db.sqlitevecq import SqliteVecQueueDatabase
|
||||
from frigate.models import Event, Trigger
|
||||
from frigate.types import ModelStatusTypesEnum
|
||||
from frigate.util.builtin import EventsPerSecond, InferenceSpeed, serialize
|
||||
from frigate.util.path import get_event_thumbnail_bytes
|
||||
from frigate.util.file import get_event_thumbnail_bytes
|
||||
|
||||
from .onnx.jina_v1_embedding import JinaV1ImageEmbedding, JinaV1TextEmbedding
|
||||
from .onnx.jina_v2_embedding import JinaV2Embedding
|
||||
|
||||
@ -62,8 +62,8 @@ from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum
|
||||
from frigate.genai import get_genai_client
|
||||
from frigate.models import Event, Recordings, ReviewSegment, Trigger
|
||||
from frigate.util.builtin import serialize
|
||||
from frigate.util.file import get_event_thumbnail_bytes
|
||||
from frigate.util.image import SharedMemoryFrameManager
|
||||
from frigate.util.path import get_event_thumbnail_bytes
|
||||
|
||||
from .embeddings import Embeddings
|
||||
|
||||
@ -158,11 +158,13 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
self.realtime_processors: list[RealTimeProcessorApi] = []
|
||||
|
||||
if self.config.face_recognition.enabled:
|
||||
logger.debug("Face recognition enabled, initializing FaceRealTimeProcessor")
|
||||
self.realtime_processors.append(
|
||||
FaceRealTimeProcessor(
|
||||
self.config, self.requestor, self.event_metadata_publisher, metrics
|
||||
)
|
||||
)
|
||||
logger.debug("FaceRealTimeProcessor initialized successfully")
|
||||
|
||||
if self.config.classification.bird.enabled:
|
||||
self.realtime_processors.append(
|
||||
@ -283,44 +285,65 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
logger.info("Exiting embeddings maintenance...")
|
||||
|
||||
def _check_classification_config_updates(self) -> None:
|
||||
"""Check for classification config updates and add new processors."""
|
||||
"""Check for classification config updates and add/remove processors."""
|
||||
topic, model_config = self.classification_config_subscriber.check_for_update()
|
||||
|
||||
if topic and model_config:
|
||||
if topic:
|
||||
model_name = topic.split("/")[-1]
|
||||
self.config.classification.custom[model_name] = model_config
|
||||
|
||||
# Check if processor already exists
|
||||
for processor in self.realtime_processors:
|
||||
if isinstance(
|
||||
processor,
|
||||
(
|
||||
CustomStateClassificationProcessor,
|
||||
CustomObjectClassificationProcessor,
|
||||
),
|
||||
):
|
||||
if processor.model_config.name == model_name:
|
||||
logger.debug(
|
||||
f"Classification processor for model {model_name} already exists, skipping"
|
||||
if model_config is None:
|
||||
self.realtime_processors = [
|
||||
processor
|
||||
for processor in self.realtime_processors
|
||||
if not (
|
||||
isinstance(
|
||||
processor,
|
||||
(
|
||||
CustomStateClassificationProcessor,
|
||||
CustomObjectClassificationProcessor,
|
||||
),
|
||||
)
|
||||
return
|
||||
and processor.model_config.name == model_name
|
||||
)
|
||||
]
|
||||
|
||||
if model_config.state_config is not None:
|
||||
processor = CustomStateClassificationProcessor(
|
||||
self.config, model_config, self.requestor, self.metrics
|
||||
logger.info(
|
||||
f"Successfully removed classification processor for model: {model_name}"
|
||||
)
|
||||
else:
|
||||
processor = CustomObjectClassificationProcessor(
|
||||
self.config,
|
||||
model_config,
|
||||
self.event_metadata_publisher,
|
||||
self.metrics,
|
||||
)
|
||||
self.config.classification.custom[model_name] = model_config
|
||||
|
||||
self.realtime_processors.append(processor)
|
||||
logger.info(
|
||||
f"Added classification processor for model: {model_name} (type: {type(processor).__name__})"
|
||||
)
|
||||
# Check if processor already exists
|
||||
for processor in self.realtime_processors:
|
||||
if isinstance(
|
||||
processor,
|
||||
(
|
||||
CustomStateClassificationProcessor,
|
||||
CustomObjectClassificationProcessor,
|
||||
),
|
||||
):
|
||||
if processor.model_config.name == model_name:
|
||||
logger.debug(
|
||||
f"Classification processor for model {model_name} already exists, skipping"
|
||||
)
|
||||
return
|
||||
|
||||
if model_config.state_config is not None:
|
||||
processor = CustomStateClassificationProcessor(
|
||||
self.config, model_config, self.requestor, self.metrics
|
||||
)
|
||||
else:
|
||||
processor = CustomObjectClassificationProcessor(
|
||||
self.config,
|
||||
model_config,
|
||||
self.event_metadata_publisher,
|
||||
self.metrics,
|
||||
)
|
||||
|
||||
self.realtime_processors.append(processor)
|
||||
logger.info(
|
||||
f"Added classification processor for model: {model_name} (type: {type(processor).__name__})"
|
||||
)
|
||||
|
||||
def _process_requests(self) -> None:
|
||||
"""Process embeddings requests"""
|
||||
@ -374,7 +397,14 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
|
||||
source_type, _, camera, frame_name, data = update
|
||||
|
||||
logger.debug(
|
||||
f"Received update - source_type: {source_type}, camera: {camera}, data label: {data.get('label') if data else 'None'}"
|
||||
)
|
||||
|
||||
if not camera or source_type != EventTypeEnum.tracked_object:
|
||||
logger.debug(
|
||||
f"Skipping update - camera: {camera}, source_type: {source_type}"
|
||||
)
|
||||
return
|
||||
|
||||
if self.config.semantic_search.enabled:
|
||||
@ -384,6 +414,9 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
|
||||
# no need to process updated objects if no processors are active
|
||||
if len(self.realtime_processors) == 0 and len(self.post_processors) == 0:
|
||||
logger.debug(
|
||||
f"No processors active - realtime: {len(self.realtime_processors)}, post: {len(self.post_processors)}"
|
||||
)
|
||||
return
|
||||
|
||||
# Create our own thumbnail based on the bounding box and the frame time
|
||||
@ -392,6 +425,7 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
frame_name, camera_config.frame_shape_yuv
|
||||
)
|
||||
except FileNotFoundError:
|
||||
logger.debug(f"Frame {frame_name} not found for camera {camera}")
|
||||
pass
|
||||
|
||||
if yuv_frame is None:
|
||||
@ -400,7 +434,11 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
)
|
||||
return
|
||||
|
||||
logger.debug(
|
||||
f"Processing {len(self.realtime_processors)} realtime processors for object {data.get('id')} (label: {data.get('label')})"
|
||||
)
|
||||
for processor in self.realtime_processors:
|
||||
logger.debug(f"Calling process_frame on {processor.__class__.__name__}")
|
||||
processor.process_frame(data, yuv_frame)
|
||||
|
||||
for processor in self.post_processors:
|
||||
|
||||
@ -12,7 +12,7 @@ from frigate.config import FrigateConfig
|
||||
from frigate.const import CLIPS_DIR
|
||||
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
|
||||
from frigate.models import Event, Timeline
|
||||
from frigate.util.path import delete_event_snapshot, delete_event_thumbnail
|
||||
from frigate.util.file import delete_event_snapshot, delete_event_thumbnail
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@ -9,6 +9,7 @@ from multiprocessing import Queue, Value
|
||||
from multiprocessing.synchronize import Event as MpEvent
|
||||
|
||||
import numpy as np
|
||||
import zmq
|
||||
|
||||
from frigate.comms.object_detector_signaler import (
|
||||
ObjectDetectorPublisher,
|
||||
@ -377,6 +378,15 @@ class RemoteObjectDetector:
|
||||
if self.stop_event.is_set():
|
||||
return detections
|
||||
|
||||
# Drain any stale detection results from the ZMQ buffer before making a new request
|
||||
# This prevents reading detection results from a previous request
|
||||
# NOTE: This should never happen, but can in some rare cases
|
||||
while True:
|
||||
try:
|
||||
self.detector_subscriber.socket.recv_string(flags=zmq.NOBLOCK)
|
||||
except zmq.Again:
|
||||
break
|
||||
|
||||
# copy input to shared memory
|
||||
self.np_shm[:] = tensor_input[:]
|
||||
self.detection_queue.put(self.name)
|
||||
|
||||
@ -14,7 +14,8 @@ from frigate.config import CameraConfig, FrigateConfig, RetainModeEnum
|
||||
from frigate.const import CACHE_DIR, CLIPS_DIR, MAX_WAL_SIZE, RECORD_DIR
|
||||
from frigate.models import Previews, Recordings, ReviewSegment, UserReviewStatus
|
||||
from frigate.record.util import remove_empty_directories, sync_recordings
|
||||
from frigate.util.builtin import clear_and_unlink, get_tomorrow_at_time
|
||||
from frigate.util.builtin import clear_and_unlink
|
||||
from frigate.util.time import get_tomorrow_at_time
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@ -28,7 +28,7 @@ from frigate.ffmpeg_presets import (
|
||||
parse_preset_hardware_acceleration_encode,
|
||||
)
|
||||
from frigate.models import Export, Previews, Recordings
|
||||
from frigate.util.builtin import is_current_hour
|
||||
from frigate.util.time import is_current_hour
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@ -15,12 +15,9 @@ from collections.abc import Mapping
|
||||
from multiprocessing.sharedctypes import Synchronized
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Optional, Tuple, Union
|
||||
from zoneinfo import ZoneInfoNotFoundError
|
||||
|
||||
import numpy as np
|
||||
import pytz
|
||||
from ruamel.yaml import YAML
|
||||
from tzlocal import get_localzone
|
||||
|
||||
from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS
|
||||
|
||||
@ -157,17 +154,6 @@ def load_labels(path: Optional[str], encoding="utf-8", prefill=91):
|
||||
return labels
|
||||
|
||||
|
||||
def get_tz_modifiers(tz_name: str) -> Tuple[str, str, float]:
|
||||
seconds_offset = (
|
||||
datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds()
|
||||
)
|
||||
hours_offset = int(seconds_offset / 60 / 60)
|
||||
minutes_offset = int(seconds_offset / 60 - hours_offset * 60)
|
||||
hour_modifier = f"{hours_offset} hour"
|
||||
minute_modifier = f"{minutes_offset} minute"
|
||||
return hour_modifier, minute_modifier, seconds_offset
|
||||
|
||||
|
||||
def to_relative_box(
|
||||
width: int, height: int, box: Tuple[int, int, int, int]
|
||||
) -> Tuple[int | float, int | float, int | float, int | float]:
|
||||
@ -298,34 +284,6 @@ def find_by_key(dictionary, target_key):
|
||||
return None
|
||||
|
||||
|
||||
def get_tomorrow_at_time(hour: int) -> datetime.datetime:
|
||||
"""Returns the datetime of the following day at 2am."""
|
||||
try:
|
||||
tomorrow = datetime.datetime.now(get_localzone()) + datetime.timedelta(days=1)
|
||||
except ZoneInfoNotFoundError:
|
||||
tomorrow = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
|
||||
days=1
|
||||
)
|
||||
logger.warning(
|
||||
"Using utc for maintenance due to missing or incorrect timezone set"
|
||||
)
|
||||
|
||||
return tomorrow.replace(hour=hour, minute=0, second=0).astimezone(
|
||||
datetime.timezone.utc
|
||||
)
|
||||
|
||||
|
||||
def is_current_hour(timestamp: int) -> bool:
|
||||
"""Returns if timestamp is in the current UTC hour."""
|
||||
start_of_next_hour = (
|
||||
datetime.datetime.now(datetime.timezone.utc).replace(
|
||||
minute=0, second=0, microsecond=0
|
||||
)
|
||||
+ datetime.timedelta(hours=1)
|
||||
).timestamp()
|
||||
return timestamp < start_of_next_hour
|
||||
|
||||
|
||||
def clear_and_unlink(file: Path, missing_ok: bool = True) -> None:
|
||||
"""clear file then unlink to avoid space retained by file descriptors."""
|
||||
if not missing_ok and not file.exists():
|
||||
|
||||
@ -20,8 +20,8 @@ from frigate.const import (
|
||||
from frigate.log import redirect_output_to_logger
|
||||
from frigate.models import Event, Recordings, ReviewSegment
|
||||
from frigate.types import ModelStatusTypesEnum
|
||||
from frigate.util.file import get_event_thumbnail_bytes
|
||||
from frigate.util.image import get_image_from_recording
|
||||
from frigate.util.path import get_event_thumbnail_bytes
|
||||
from frigate.util.process import FrigateProcess
|
||||
|
||||
BATCH_SIZE = 16
|
||||
|
||||
@ -1,7 +1,6 @@
|
||||
import logging
|
||||
import os
|
||||
import threading
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Callable, List
|
||||
|
||||
@ -10,40 +9,11 @@ import requests
|
||||
from frigate.comms.inter_process import InterProcessRequestor
|
||||
from frigate.const import UPDATE_MODEL_STATE
|
||||
from frigate.types import ModelStatusTypesEnum
|
||||
from frigate.util.file import FileLock
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class FileLock:
|
||||
def __init__(self, path):
|
||||
self.path = path
|
||||
self.lock_file = f"{path}.lock"
|
||||
|
||||
# we have not acquired the lock yet so it should not exist
|
||||
if os.path.exists(self.lock_file):
|
||||
try:
|
||||
os.remove(self.lock_file)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def acquire(self):
|
||||
parent_dir = os.path.dirname(self.lock_file)
|
||||
os.makedirs(parent_dir, exist_ok=True)
|
||||
|
||||
while True:
|
||||
try:
|
||||
with open(self.lock_file, "x"):
|
||||
return
|
||||
except FileExistsError:
|
||||
time.sleep(0.1)
|
||||
|
||||
def release(self):
|
||||
try:
|
||||
os.remove(self.lock_file)
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
|
||||
class ModelDownloader:
|
||||
def __init__(
|
||||
self,
|
||||
@ -81,15 +51,13 @@ class ModelDownloader:
|
||||
def _download_models(self):
|
||||
for file_name in self.file_names:
|
||||
path = os.path.join(self.download_path, file_name)
|
||||
lock = FileLock(path)
|
||||
lock_path = f"{path}.lock"
|
||||
lock = FileLock(lock_path, cleanup_stale_on_init=True)
|
||||
|
||||
if not os.path.exists(path):
|
||||
lock.acquire()
|
||||
try:
|
||||
with lock:
|
||||
if not os.path.exists(path):
|
||||
self.download_func(path)
|
||||
finally:
|
||||
lock.release()
|
||||
|
||||
self.requestor.send_data(
|
||||
UPDATE_MODEL_STATE,
|
||||
|
||||
276
frigate/util/file.py
Normal file
276
frigate/util/file.py
Normal file
@ -0,0 +1,276 @@
|
||||
"""Path and file utilities."""
|
||||
|
||||
import base64
|
||||
import fcntl
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
import cv2
|
||||
from numpy import ndarray
|
||||
|
||||
from frigate.const import CLIPS_DIR, THUMB_DIR
|
||||
from frigate.models import Event
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_event_thumbnail_bytes(event: Event) -> bytes | None:
|
||||
if event.thumbnail:
|
||||
return base64.b64decode(event.thumbnail)
|
||||
else:
|
||||
try:
|
||||
with open(
|
||||
os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp"), "rb"
|
||||
) as f:
|
||||
return f.read()
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def get_event_snapshot(event: Event) -> ndarray:
|
||||
media_name = f"{event.camera}-{event.id}"
|
||||
return cv2.imread(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
|
||||
|
||||
|
||||
### Deletion
|
||||
|
||||
|
||||
def delete_event_images(event: Event) -> bool:
|
||||
return delete_event_snapshot(event) and delete_event_thumbnail(event)
|
||||
|
||||
|
||||
def delete_event_snapshot(event: Event) -> bool:
|
||||
media_name = f"{event.camera}-{event.id}"
|
||||
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
|
||||
|
||||
try:
|
||||
media_path.unlink(missing_ok=True)
|
||||
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.webp")
|
||||
media_path.unlink(missing_ok=True)
|
||||
# also delete clean.png (legacy) for backward compatibility
|
||||
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png")
|
||||
media_path.unlink(missing_ok=True)
|
||||
return True
|
||||
except OSError:
|
||||
return False
|
||||
|
||||
|
||||
def delete_event_thumbnail(event: Event) -> bool:
|
||||
if event.thumbnail:
|
||||
return True
|
||||
else:
|
||||
Path(os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp")).unlink(
|
||||
missing_ok=True
|
||||
)
|
||||
return True
|
||||
|
||||
|
||||
### File Locking
|
||||
|
||||
|
||||
class FileLock:
|
||||
"""
|
||||
A file-based lock for coordinating access to resources across processes.
|
||||
|
||||
Uses fcntl.flock() for proper POSIX file locking on Linux. Supports timeouts,
|
||||
stale lock detection, and can be used as a context manager.
|
||||
|
||||
Example:
|
||||
```python
|
||||
# Using as a context manager (recommended)
|
||||
with FileLock("/path/to/resource.lock", timeout=60):
|
||||
# Critical section
|
||||
do_something()
|
||||
|
||||
# Manual acquisition and release
|
||||
lock = FileLock("/path/to/resource.lock")
|
||||
if lock.acquire(timeout=60):
|
||||
try:
|
||||
do_something()
|
||||
finally:
|
||||
lock.release()
|
||||
```
|
||||
|
||||
Attributes:
|
||||
lock_path: Path to the lock file
|
||||
timeout: Maximum time to wait for lock acquisition (seconds)
|
||||
poll_interval: Time to wait between lock acquisition attempts (seconds)
|
||||
stale_timeout: Time after which a lock is considered stale (seconds)
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
lock_path: str | Path,
|
||||
timeout: int = 300,
|
||||
poll_interval: float = 1.0,
|
||||
stale_timeout: int = 600,
|
||||
cleanup_stale_on_init: bool = False,
|
||||
):
|
||||
"""
|
||||
Initialize a FileLock.
|
||||
|
||||
Args:
|
||||
lock_path: Path to the lock file
|
||||
timeout: Maximum time to wait for lock acquisition in seconds (default: 300)
|
||||
poll_interval: Time to wait between lock attempts in seconds (default: 1.0)
|
||||
stale_timeout: Time after which a lock is considered stale in seconds (default: 600)
|
||||
cleanup_stale_on_init: Whether to clean up stale locks on initialization (default: False)
|
||||
"""
|
||||
self.lock_path = Path(lock_path)
|
||||
self.timeout = timeout
|
||||
self.poll_interval = poll_interval
|
||||
self.stale_timeout = stale_timeout
|
||||
self._fd: Optional[int] = None
|
||||
self._acquired = False
|
||||
|
||||
if cleanup_stale_on_init:
|
||||
self._cleanup_stale_lock()
|
||||
|
||||
def _cleanup_stale_lock(self) -> bool:
|
||||
"""
|
||||
Clean up a stale lock file if it exists and is old.
|
||||
|
||||
Returns:
|
||||
True if lock was cleaned up, False otherwise
|
||||
"""
|
||||
try:
|
||||
if self.lock_path.exists():
|
||||
# Check if lock file is older than stale_timeout
|
||||
lock_age = time.time() - self.lock_path.stat().st_mtime
|
||||
if lock_age > self.stale_timeout:
|
||||
logger.warning(
|
||||
f"Removing stale lock file: {self.lock_path} (age: {lock_age:.1f}s)"
|
||||
)
|
||||
self.lock_path.unlink()
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Error cleaning up stale lock: {e}")
|
||||
|
||||
return False
|
||||
|
||||
def is_stale(self) -> bool:
|
||||
"""
|
||||
Check if the lock file is stale (older than stale_timeout).
|
||||
|
||||
Returns:
|
||||
True if lock is stale, False otherwise
|
||||
"""
|
||||
try:
|
||||
if self.lock_path.exists():
|
||||
lock_age = time.time() - self.lock_path.stat().st_mtime
|
||||
return lock_age > self.stale_timeout
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return False
|
||||
|
||||
def acquire(self, timeout: Optional[int] = None) -> bool:
|
||||
"""
|
||||
Acquire the file lock using fcntl.flock().
|
||||
|
||||
Args:
|
||||
timeout: Maximum time to wait for lock in seconds (uses instance timeout if None)
|
||||
|
||||
Returns:
|
||||
True if lock acquired, False if timeout or error
|
||||
"""
|
||||
if self._acquired:
|
||||
logger.warning(f"Lock already acquired: {self.lock_path}")
|
||||
return True
|
||||
|
||||
if timeout is None:
|
||||
timeout = self.timeout
|
||||
|
||||
# Ensure parent directory exists
|
||||
self.lock_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Clean up stale lock before attempting to acquire
|
||||
self._cleanup_stale_lock()
|
||||
|
||||
try:
|
||||
self._fd = os.open(self.lock_path, os.O_CREAT | os.O_RDWR)
|
||||
|
||||
start_time = time.time()
|
||||
while time.time() - start_time < timeout:
|
||||
try:
|
||||
fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
|
||||
self._acquired = True
|
||||
logger.debug(f"Acquired lock: {self.lock_path}")
|
||||
return True
|
||||
except (OSError, IOError):
|
||||
# Lock is held by another process
|
||||
if time.time() - start_time >= timeout:
|
||||
logger.warning(f"Timeout waiting for lock: {self.lock_path}")
|
||||
os.close(self._fd)
|
||||
self._fd = None
|
||||
return False
|
||||
|
||||
time.sleep(self.poll_interval)
|
||||
|
||||
# Timeout reached
|
||||
if self._fd is not None:
|
||||
os.close(self._fd)
|
||||
self._fd = None
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error acquiring lock: {e}")
|
||||
if self._fd is not None:
|
||||
try:
|
||||
os.close(self._fd)
|
||||
except Exception:
|
||||
pass
|
||||
self._fd = None
|
||||
return False
|
||||
|
||||
def release(self) -> None:
|
||||
"""
|
||||
Release the file lock.
|
||||
|
||||
This closes the file descriptor and removes the lock file.
|
||||
"""
|
||||
if not self._acquired:
|
||||
return
|
||||
|
||||
try:
|
||||
# Close file descriptor and release fcntl lock
|
||||
if self._fd is not None:
|
||||
try:
|
||||
fcntl.flock(self._fd, fcntl.LOCK_UN)
|
||||
os.close(self._fd)
|
||||
except Exception as e:
|
||||
logger.warning(f"Error closing lock file descriptor: {e}")
|
||||
finally:
|
||||
self._fd = None
|
||||
|
||||
# Remove lock file
|
||||
if self.lock_path.exists():
|
||||
self.lock_path.unlink()
|
||||
logger.debug(f"Released lock: {self.lock_path}")
|
||||
|
||||
except FileNotFoundError:
|
||||
# Lock file already removed, that's fine
|
||||
pass
|
||||
except Exception as e:
|
||||
logger.error(f"Error releasing lock: {e}")
|
||||
finally:
|
||||
self._acquired = False
|
||||
|
||||
def __enter__(self):
|
||||
"""Context manager entry - acquire the lock."""
|
||||
if not self.acquire():
|
||||
raise TimeoutError(f"Failed to acquire lock: {self.lock_path}")
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
"""Context manager exit - release the lock."""
|
||||
self.release()
|
||||
return False
|
||||
|
||||
def __del__(self):
|
||||
"""Destructor - ensure lock is released."""
|
||||
if self._acquired:
|
||||
self.release()
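Taken together with the ModelDownloader and MemryX detector changes earlier in this diff, the typical call pattern for the new FileLock is the context-manager form with stale-lock cleanup enabled. The snippet below restates that usage outside the diff context; the lock path and download helper are chosen only for illustration.

# Usage sketch mirroring how this diff wires FileLock into ModelDownloader:
# the lock file lives next to the protected resource, stale locks are cleaned
# on init, and the context manager raises TimeoutError if acquisition fails.
from frigate.util.file import FileLock

model_path = "/config/model_cache/example/model.tflite"  # illustrative path
lock = FileLock(f"{model_path}.lock", timeout=60, cleanup_stale_on_init=True)

try:
    with lock:
        # critical section: download or convert the model only while holding the lock
        download_model(model_path)  # hypothetical helper
except TimeoutError:
    # another process held the lock for longer than the timeout
    pass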
|
||||
@ -1,62 +0,0 @@
|
||||
"""Path utilities."""
|
||||
|
||||
import base64
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import cv2
|
||||
from numpy import ndarray
|
||||
|
||||
from frigate.const import CLIPS_DIR, THUMB_DIR
|
||||
from frigate.models import Event
|
||||
|
||||
|
||||
def get_event_thumbnail_bytes(event: Event) -> bytes | None:
|
||||
if event.thumbnail:
|
||||
return base64.b64decode(event.thumbnail)
|
||||
else:
|
||||
try:
|
||||
with open(
|
||||
os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp"), "rb"
|
||||
) as f:
|
||||
return f.read()
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def get_event_snapshot(event: Event) -> ndarray:
|
||||
media_name = f"{event.camera}-{event.id}"
|
||||
return cv2.imread(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
|
||||
|
||||
|
||||
### Deletion
|
||||
|
||||
|
||||
def delete_event_images(event: Event) -> bool:
|
||||
return delete_event_snapshot(event) and delete_event_thumbnail(event)
|
||||
|
||||
|
||||
def delete_event_snapshot(event: Event) -> bool:
|
||||
media_name = f"{event.camera}-{event.id}"
|
||||
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
|
||||
|
||||
try:
|
||||
media_path.unlink(missing_ok=True)
|
||||
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.webp")
|
||||
media_path.unlink(missing_ok=True)
|
||||
# also delete clean.png (legacy) for backward compatibility
|
||||
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png")
|
||||
media_path.unlink(missing_ok=True)
|
||||
return True
|
||||
except OSError:
|
||||
return False
|
||||
|
||||
|
||||
def delete_event_thumbnail(event: Event) -> bool:
|
||||
if event.thumbnail:
|
||||
return True
|
||||
else:
|
||||
Path(os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp")).unlink(
|
||||
missing_ok=True
|
||||
)
|
||||
return True
|
||||
@ -1,6 +1,5 @@
|
||||
"""RKNN model conversion utility for Frigate."""
|
||||
|
||||
import fcntl
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
@ -9,6 +8,8 @@ import time
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from frigate.util.file import FileLock
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
MODEL_TYPE_CONFIGS = {
|
||||
@ -245,112 +246,6 @@ def convert_onnx_to_rknn(
|
||||
logger.warning(f"Failed to remove temporary ONNX file: {e}")
|
||||
|
||||
|
||||
def cleanup_stale_lock(lock_file_path: Path) -> bool:
|
||||
"""
|
||||
Clean up a stale lock file if it exists and is old.
|
||||
|
||||
Args:
|
||||
lock_file_path: Path to the lock file
|
||||
|
||||
Returns:
|
||||
True if lock was cleaned up, False otherwise
|
||||
"""
|
||||
try:
|
||||
if lock_file_path.exists():
|
||||
# Check if lock file is older than 10 minutes (stale)
|
||||
lock_age = time.time() - lock_file_path.stat().st_mtime
|
||||
if lock_age > 600: # 10 minutes
|
||||
logger.warning(
|
||||
f"Removing stale lock file: {lock_file_path} (age: {lock_age:.1f}s)"
|
||||
)
|
||||
lock_file_path.unlink()
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Error cleaning up stale lock: {e}")
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def acquire_conversion_lock(lock_file_path: Path, timeout: int = 300) -> bool:
|
||||
"""
|
||||
Acquire a file-based lock for model conversion.
|
||||
|
||||
Args:
|
||||
lock_file_path: Path to the lock file
|
||||
timeout: Maximum time to wait for lock in seconds
|
||||
|
||||
Returns:
|
||||
True if lock acquired, False if timeout or error
|
||||
"""
|
||||
try:
|
||||
lock_file_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
cleanup_stale_lock(lock_file_path)
|
||||
lock_fd = os.open(lock_file_path, os.O_CREAT | os.O_RDWR)
|
||||
|
||||
# Try to acquire exclusive lock
|
||||
start_time = time.time()
|
||||
while time.time() - start_time < timeout:
|
||||
try:
|
||||
fcntl.flock(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
|
||||
# Lock acquired successfully
|
||||
logger.debug(f"Acquired conversion lock: {lock_file_path}")
|
||||
return True
|
||||
except (OSError, IOError):
|
||||
# Lock is held by another process, wait and retry
|
||||
if time.time() - start_time >= timeout:
|
||||
logger.warning(
|
||||
f"Timeout waiting for conversion lock: {lock_file_path}"
|
||||
)
|
||||
os.close(lock_fd)
|
||||
return False
|
||||
|
||||
logger.debug("Waiting for conversion lock to be released...")
|
||||
time.sleep(1)
|
||||
|
||||
os.close(lock_fd)
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error acquiring conversion lock: {e}")
|
||||
return False
|
||||
|
||||
|
||||
def release_conversion_lock(lock_file_path: Path) -> None:
|
||||
"""
|
||||
Release the conversion lock.
|
||||
|
||||
Args:
|
||||
lock_file_path: Path to the lock file
|
||||
"""
|
||||
try:
|
||||
if lock_file_path.exists():
|
||||
lock_file_path.unlink()
|
||||
logger.debug(f"Released conversion lock: {lock_file_path}")
|
||||
except Exception as e:
|
||||
logger.error(f"Error releasing conversion lock: {e}")
|
||||
|
||||
|
||||
def is_lock_stale(lock_file_path: Path, max_age: int = 600) -> bool:
|
||||
"""
|
||||
Check if a lock file is stale (older than max_age seconds).
|
||||
|
||||
Args:
|
||||
lock_file_path: Path to the lock file
|
||||
max_age: Maximum age in seconds before considering lock stale
|
||||
|
||||
Returns:
|
||||
True if lock is stale, False otherwise
|
||||
"""
|
||||
try:
|
||||
if lock_file_path.exists():
|
||||
lock_age = time.time() - lock_file_path.stat().st_mtime
|
||||
return lock_age > max_age
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def wait_for_conversion_completion(
|
||||
model_type: str, rknn_path: Path, lock_file_path: Path, timeout: int = 300
|
||||
) -> bool:
|
||||
@ -358,6 +253,7 @@ def wait_for_conversion_completion(
|
||||
Wait for another process to complete the conversion.
|
||||
|
||||
Args:
|
||||
model_type: Type of model being converted
|
||||
rknn_path: Path to the expected RKNN model
|
||||
lock_file_path: Path to the lock file to monitor
|
||||
timeout: Maximum time to wait in seconds
|
||||
@ -366,6 +262,8 @@ def wait_for_conversion_completion(
|
||||
True if RKNN model appears, False if timeout
|
||||
"""
|
||||
start_time = time.time()
|
||||
lock = FileLock(lock_file_path, stale_timeout=600)
|
||||
|
||||
while time.time() - start_time < timeout:
|
||||
# Check if RKNN model appeared
|
||||
if rknn_path.exists():
|
||||
@ -385,11 +283,14 @@ def wait_for_conversion_completion(
|
||||
return False
|
||||
|
||||
# Check if lock is stale
|
||||
if is_lock_stale(lock_file_path):
|
||||
if lock.is_stale():
|
||||
logger.warning("Lock file is stale, attempting to clean up and retry...")
|
||||
cleanup_stale_lock(lock_file_path)
|
||||
lock._cleanup_stale_lock()
|
||||
# Try to acquire lock again
|
||||
if acquire_conversion_lock(lock_file_path, timeout=60):
|
||||
retry_lock = FileLock(
|
||||
lock_file_path, timeout=60, cleanup_stale_on_init=True
|
||||
)
|
||||
if retry_lock.acquire():
|
||||
try:
|
||||
# Check if RKNN file appeared while waiting
|
||||
if rknn_path.exists():
|
||||
@ -415,7 +316,7 @@ def wait_for_conversion_completion(
|
||||
return False
|
||||
|
||||
finally:
|
||||
release_conversion_lock(lock_file_path)
|
||||
retry_lock.release()
|
||||
|
||||
logger.debug("Waiting for RKNN model to appear...")
|
||||
time.sleep(1)
|
||||
@ -452,8 +353,9 @@ def auto_convert_model(
|
||||
return str(rknn_path)
|
||||
|
||||
lock_file_path = base_path.parent / f"{base_name}.conversion.lock"
|
||||
lock = FileLock(lock_file_path, timeout=300, cleanup_stale_on_init=True)
|
||||
|
||||
if acquire_conversion_lock(lock_file_path):
|
||||
if lock.acquire():
|
||||
try:
|
||||
if rknn_path.exists():
|
||||
logger.info(
|
||||
@ -476,7 +378,7 @@ def auto_convert_model(
|
||||
return None
|
||||
|
||||
finally:
|
||||
release_conversion_lock(lock_file_path)
|
||||
lock.release()
|
||||
else:
|
||||
logger.info(
|
||||
f"Another process is converting {model_path}, waiting for completion..."
|
||||
|
||||
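In short, auto_convert_model now coordinates through the FileLock class rather than the removed module-level helpers. A rough sketch of the flow, under stated assumptions: ensure_rknn and the lock-path construction are illustrative names only, while convert_onnx_to_rknn and wait_for_conversion_completion are the functions shown in this diff.

from pathlib import Path

from frigate.util.file import FileLock

def ensure_rknn(onnx_path: Path, rknn_path: Path, timeout: int = 300) -> Path | None:
    # One lock file per target model, cleaned up automatically if stale.
    lock_path = rknn_path.parent / f"{rknn_path.stem}.conversion.lock"
    lock = FileLock(lock_path, timeout=timeout, cleanup_stale_on_init=True)

    if lock.acquire():
        try:
            if rknn_path.exists():
                return rknn_path  # another process finished while we waited
            # convert_onnx_to_rknn(onnx_path, rknn_path, ...) would run here
            return rknn_path if rknn_path.exists() else None
        finally:
            lock.release()

    # Lock not acquired: another process is converting, so poll for the output
    # instead (this is what wait_for_conversion_completion does above).
    return None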
frigate/util/time.py (new file, 100 lines added)
@ -0,0 +1,100 @@
|
||||
"""Time utilities."""
|
||||
|
||||
import datetime
|
||||
import logging
|
||||
from typing import Tuple
|
||||
from zoneinfo import ZoneInfoNotFoundError
|
||||
|
||||
import pytz
|
||||
from tzlocal import get_localzone
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_tz_modifiers(tz_name: str) -> Tuple[str, str, float]:
|
||||
seconds_offset = (
|
||||
datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds()
|
||||
)
|
||||
hours_offset = int(seconds_offset / 60 / 60)
|
||||
minutes_offset = int(seconds_offset / 60 - hours_offset * 60)
|
||||
hour_modifier = f"{hours_offset} hour"
|
||||
minute_modifier = f"{minutes_offset} minute"
|
||||
return hour_modifier, minute_modifier, seconds_offset
|
||||
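For a concrete sense of the modifiers this returns (using a zone without DST so the values are stable):

# Asia/Kolkata is UTC+5:30 year-round, so:
hour_mod, minute_mod, offset = get_tz_modifiers("Asia/Kolkata")
# hour_mod == "5 hour", minute_mod == "30 minute", offset == 19800.0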
|
||||
|
||||
def get_tomorrow_at_time(hour: int) -> datetime.datetime:
|
||||
"""Returns the datetime of the following day at 2am."""
|
||||
try:
|
||||
tomorrow = datetime.datetime.now(get_localzone()) + datetime.timedelta(days=1)
|
||||
except ZoneInfoNotFoundError:
|
||||
tomorrow = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
|
||||
days=1
|
||||
)
|
||||
logger.warning(
|
||||
"Using utc for maintenance due to missing or incorrect timezone set"
|
||||
)
|
||||
|
||||
return tomorrow.replace(hour=hour, minute=0, second=0).astimezone(
|
||||
datetime.timezone.utc
|
||||
)
|
||||
|
||||
|
||||
def is_current_hour(timestamp: int) -> bool:
|
||||
"""Returns if timestamp is in the current UTC hour."""
|
||||
start_of_next_hour = (
|
||||
datetime.datetime.now(datetime.timezone.utc).replace(
|
||||
minute=0, second=0, microsecond=0
|
||||
)
|
||||
+ datetime.timedelta(hours=1)
|
||||
).timestamp()
|
||||
return timestamp < start_of_next_hour
|
||||
|
||||
|
||||
def get_dst_transitions(
|
||||
tz_name: str, start_time: float, end_time: float
|
||||
) -> list[tuple[float, float, float]]:
|
||||
"""
|
||||
Find DST transition points and return time periods with consistent offsets.
|
||||
|
||||
Args:
|
||||
tz_name: Timezone name (e.g., 'America/New_York')
|
||||
start_time: Start timestamp (UTC)
|
||||
end_time: End timestamp (UTC)
|
||||
|
||||
Returns:
|
||||
List of (period_start, period_end, seconds_offset) tuples representing
|
||||
continuous periods with the same UTC offset
|
||||
"""
|
||||
try:
|
||||
tz = pytz.timezone(tz_name)
|
||||
except pytz.UnknownTimeZoneError:
|
||||
# If timezone is invalid, return single period with no offset
|
||||
return [(start_time, end_time, 0)]
|
||||
|
||||
periods = []
|
||||
current = start_time
|
||||
|
||||
# Get initial offset
|
||||
dt = datetime.datetime.utcfromtimestamp(current).replace(tzinfo=pytz.UTC)
|
||||
local_dt = dt.astimezone(tz)
|
||||
prev_offset = local_dt.utcoffset().total_seconds()
|
||||
period_start = start_time
|
||||
|
||||
# Check each day for offset changes
|
||||
while current <= end_time:
|
||||
dt = datetime.datetime.utcfromtimestamp(current).replace(tzinfo=pytz.UTC)
|
||||
local_dt = dt.astimezone(tz)
|
||||
current_offset = local_dt.utcoffset().total_seconds()
|
||||
|
||||
if current_offset != prev_offset:
|
||||
# Found a transition - close previous period
|
||||
periods.append((period_start, current, prev_offset))
|
||||
period_start = current
|
||||
prev_offset = current_offset
|
||||
|
||||
current += 86400 # Check daily
|
||||
|
||||
# Add final period
|
||||
periods.append((period_start, end_time, prev_offset))
|
||||
|
||||
return periods
|
||||
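A short usage sketch for the helper above; the date range is arbitrary and, as the daily sampling loop implies, period boundaries are only accurate to about one day.

import datetime

# Span the US spring-forward transition in March 2024.
start = datetime.datetime(2024, 3, 8, tzinfo=datetime.timezone.utc).timestamp()
end = datetime.datetime(2024, 3, 12, tzinfo=datetime.timezone.utc).timestamp()

for period_start, period_end, offset in get_dst_transitions("America/New_York", start, end):
    print(period_start, period_end, offset / 3600)
# Prints two periods: one with a -5.0 hour offset (EST) and one with -4.0 (EDT).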
@ -34,7 +34,7 @@ from frigate.ptz.autotrack import ptz_moving_at_frame_time
|
||||
from frigate.track import ObjectTracker
|
||||
from frigate.track.norfair_tracker import NorfairTracker
|
||||
from frigate.track.tracked_object import TrackedObjectAttribute
|
||||
from frigate.util.builtin import EventsPerSecond, get_tomorrow_at_time
|
||||
from frigate.util.builtin import EventsPerSecond
|
||||
from frigate.util.image import (
|
||||
FrameManager,
|
||||
SharedMemoryFrameManager,
|
||||
@ -53,6 +53,7 @@ from frigate.util.object import (
|
||||
reduce_detections,
|
||||
)
|
||||
from frigate.util.process import FrigateProcess
|
||||
from frigate.util.time import get_tomorrow_at_time
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@ -1,5 +1,8 @@
|
||||
{
|
||||
"documentTitle": "Classification Models",
|
||||
"details": {
|
||||
"scoreInfo": "Score represents the average classification confidence across all detections of this object."
|
||||
},
|
||||
"button": {
|
||||
"deleteClassificationAttempts": "Delete Classification Images",
|
||||
"renameCategory": "Rename Class",
|
||||
@ -7,23 +10,27 @@
|
||||
"deleteImages": "Delete Images",
|
||||
"trainModel": "Train Model",
|
||||
"addClassification": "Add Classification",
|
||||
"deleteModels": "Delete Models"
|
||||
"deleteModels": "Delete Models",
|
||||
"editModel": "Edit Model"
|
||||
},
|
||||
"toast": {
|
||||
"success": {
|
||||
"deletedCategory": "Deleted Class",
|
||||
"deletedImage": "Deleted Images",
|
||||
"deletedModel": "Successfully deleted {{count}} model(s)",
|
||||
"deletedModel_one": "Successfully deleted {{count}} model",
|
||||
"deletedModel_other": "Successfully deleted {{count}} models",
|
||||
"categorizedImage": "Successfully Classified Image",
|
||||
"trainedModel": "Successfully trained model.",
|
||||
"trainingModel": "Successfully started model training."
|
||||
"trainingModel": "Successfully started model training.",
|
||||
"updatedModel": "Successfully updated model configuration"
|
||||
},
|
||||
"error": {
|
||||
"deleteImageFailed": "Failed to delete: {{errorMessage}}",
|
||||
"deleteCategoryFailed": "Failed to delete class: {{errorMessage}}",
|
||||
"deleteModelFailed": "Failed to delete model: {{errorMessage}}",
|
||||
"categorizeFailed": "Failed to categorize image: {{errorMessage}}",
|
||||
"trainingFailed": "Failed to start model training: {{errorMessage}}"
|
||||
"trainingFailed": "Failed to start model training: {{errorMessage}}",
|
||||
"updateModelFailed": "Failed to update model: {{errorMessage}}"
|
||||
}
|
||||
},
|
||||
"deleteCategory": {
|
||||
@ -35,6 +42,12 @@
|
||||
"single": "Are you sure you want to delete {{name}}? This will permanently delete all associated data including images and training data. This action cannot be undone.",
|
||||
"desc": "Are you sure you want to delete {{count}} model(s)? This will permanently delete all associated data including images and training data. This action cannot be undone."
|
||||
},
|
||||
"edit": {
|
||||
"title": "Edit Classification Model",
|
||||
"descriptionState": "Edit the classes for this state classification model. Changes will require retraining the model.",
|
||||
"descriptionObject": "Edit the object type and classification type for this object classification model.",
|
||||
"stateClassesInfo": "Note: Changing state classes requires retraining the model with the updated classes."
|
||||
},
|
||||
"deleteDatasetImages": {
|
||||
"title": "Delete Dataset Images",
|
||||
"desc": "Are you sure you want to delete {{count}} images from {{dataset}}? This action cannot be undone and will require re-training the model."
|
||||
|
||||
@ -6,7 +6,8 @@
|
||||
},
|
||||
"details": {
|
||||
"timestamp": "Timestamp",
|
||||
"unknown": "Unknown"
|
||||
"unknown": "Unknown",
|
||||
"scoreInfo": "Score is a weighted average of all face scores, weighted by the size of the face in each image."
|
||||
},
|
||||
"documentTitle": "Face Library - Frigate",
|
||||
"uploadFaceImage": {
|
||||
|
||||
@ -271,6 +271,8 @@
|
||||
"disconnectStream": "Disconnect",
|
||||
"estimatedBandwidth": "Estimated Bandwidth",
|
||||
"roles": "Roles",
|
||||
"ffmpegModule": "Use stream compatibility mode",
|
||||
"ffmpegModuleDescription": "If the stream does not load after several attempts, try enabling this. When enabled, Frigate will use the ffmpeg module with go2rtc. This may provide better compatibility with some camera streams.",
|
||||
"none": "None",
|
||||
"error": "Error",
|
||||
"streamValidated": "Stream {{number}} validated successfully",
|
||||
|
||||
@ -7,11 +7,12 @@ import {
|
||||
} from "@/types/classification";
|
||||
import { Event } from "@/types/event";
|
||||
import { forwardRef, useMemo, useRef, useState } from "react";
|
||||
import { isDesktop, isMobile } from "react-device-detect";
|
||||
import { isDesktop, isMobile, isMobileOnly } from "react-device-detect";
|
||||
import { useTranslation } from "react-i18next";
|
||||
import TimeAgo from "../dynamic/TimeAgo";
|
||||
import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip";
|
||||
import { LuSearch } from "react-icons/lu";
|
||||
import { Popover, PopoverContent, PopoverTrigger } from "../ui/popover";
|
||||
import { LuSearch, LuInfo } from "react-icons/lu";
|
||||
import { TooltipPortal } from "@radix-ui/react-tooltip";
|
||||
import { useNavigate } from "react-router-dom";
|
||||
import { HiSquare2Stack } from "react-icons/hi2";
|
||||
@ -181,6 +182,7 @@ type GroupedClassificationCardProps = {
|
||||
selectedItems: string[];
|
||||
i18nLibrary: string;
|
||||
objectType: string;
|
||||
noClassificationLabel?: string;
|
||||
onClick: (data: ClassificationItemData | undefined) => void;
|
||||
children?: (data: ClassificationItemData) => React.ReactNode;
|
||||
};
|
||||
@ -190,6 +192,7 @@ export function GroupedClassificationCard({
|
||||
threshold,
|
||||
selectedItems,
|
||||
i18nLibrary,
|
||||
noClassificationLabel = "details.none",
|
||||
onClick,
|
||||
children,
|
||||
}: GroupedClassificationCardProps) {
|
||||
@ -222,10 +225,14 @@ export function GroupedClassificationCard({
|
||||
const bestTyped: ClassificationItemData = best;
|
||||
return {
|
||||
...bestTyped,
|
||||
name: event ? (event.sub_label ?? t("details.unknown")) : bestTyped.name,
|
||||
name: event
|
||||
? event.sub_label && event.sub_label !== "none"
|
||||
? event.sub_label
|
||||
: t(noClassificationLabel)
|
||||
: bestTyped.name,
|
||||
score: event?.data?.sub_label_score || bestTyped.score,
|
||||
};
|
||||
}, [group, event, t]);
|
||||
}, [group, event, noClassificationLabel, t]);
|
||||
|
||||
const bestScoreStatus = useMemo(() => {
|
||||
if (!bestItem?.score || !threshold) {
|
||||
@ -257,8 +264,8 @@ export function GroupedClassificationCard({
|
||||
|
||||
const Overlay = isDesktop ? Dialog : MobilePage;
|
||||
const Trigger = isDesktop ? DialogTrigger : MobilePageTrigger;
|
||||
const Header = isDesktop ? DialogHeader : MobilePageHeader;
|
||||
const Content = isDesktop ? DialogContent : MobilePageContent;
|
||||
const Header = isDesktop ? DialogHeader : MobilePageHeader;
|
||||
const ContentTitle = isDesktop ? DialogTitle : MobilePageTitle;
|
||||
const ContentDescription = isDesktop
|
||||
? DialogDescription
|
||||
@ -291,9 +298,9 @@ export function GroupedClassificationCard({
|
||||
<Trigger asChild></Trigger>
|
||||
<Content
|
||||
className={cn(
|
||||
"",
|
||||
"scrollbar-container",
|
||||
isDesktop && "min-w-[50%] max-w-[65%]",
|
||||
isMobile && "flex flex-col",
|
||||
isMobile && "overflow-y-auto",
|
||||
)}
|
||||
onOpenAutoFocus={(e) => e.preventDefault()}
|
||||
>
|
||||
@ -301,26 +308,45 @@ export function GroupedClassificationCard({
|
||||
<Header
|
||||
className={cn(
|
||||
"mx-2 flex flex-row items-center gap-4",
|
||||
isMobile && "flex-shrink-0",
|
||||
isMobileOnly && "top-0 mx-4",
|
||||
)}
|
||||
>
|
||||
<div>
|
||||
<ContentTitle
|
||||
className={cn(
|
||||
"flex items-center gap-2 font-normal capitalize",
|
||||
isMobile && "px-2",
|
||||
)}
|
||||
>
|
||||
{event?.sub_label ? event.sub_label : t("details.unknown")}
|
||||
{event?.sub_label && (
|
||||
<div
|
||||
className={cn(
|
||||
"",
|
||||
bestScoreStatus == "match" && "text-success",
|
||||
bestScoreStatus == "potential" && "text-orange-400",
|
||||
bestScoreStatus == "unknown" && "text-danger",
|
||||
)}
|
||||
>{`${Math.round((event.data.sub_label_score || 0) * 100)}%`}</div>
|
||||
<div
|
||||
className={cn(
|
||||
"",
|
||||
isMobile && "flex flex-col items-center justify-center",
|
||||
)}
|
||||
>
|
||||
<ContentTitle className="flex items-center gap-2 font-normal capitalize">
|
||||
{event?.sub_label && event.sub_label !== "none"
|
||||
? event.sub_label
|
||||
: t(noClassificationLabel)}
|
||||
{event?.sub_label && event.sub_label !== "none" && (
|
||||
<div className="flex items-center gap-1">
|
||||
<div
|
||||
className={cn(
|
||||
"",
|
||||
bestScoreStatus == "match" && "text-success",
|
||||
bestScoreStatus == "potential" && "text-orange-400",
|
||||
bestScoreStatus == "unknown" && "text-danger",
|
||||
)}
|
||||
>{`${Math.round((event.data.sub_label_score || 0) * 100)}%`}</div>
|
||||
<Popover>
|
||||
<PopoverTrigger asChild>
|
||||
<button
|
||||
className="focus:outline-none"
|
||||
aria-label={t("details.scoreInfo", {
|
||||
ns: i18nLibrary,
|
||||
})}
|
||||
>
|
||||
<LuInfo className="size-3" />
|
||||
</button>
|
||||
</PopoverTrigger>
|
||||
<PopoverContent className="w-80 text-sm">
|
||||
{t("details.scoreInfo", { ns: i18nLibrary })}
|
||||
</PopoverContent>
|
||||
</Popover>
|
||||
</div>
|
||||
)}
|
||||
</ContentTitle>
|
||||
<ContentDescription className={cn("", isMobile && "px-2")}>
|
||||
@ -364,7 +390,7 @@ export function GroupedClassificationCard({
|
||||
className={cn(
|
||||
"grid w-full auto-rows-min grid-cols-2 gap-2 sm:grid-cols-3 md:grid-cols-4 lg:grid-cols-6 xl:grid-cols-6 2xl:grid-cols-8",
|
||||
isDesktop && "p-2",
|
||||
isMobile && "scrollbar-container flex-1 overflow-y-auto",
|
||||
isMobile && "px-4 pb-4",
|
||||
)}
|
||||
>
|
||||
{group.map((data: ClassificationItemData) => (
|
||||
|
||||
@ -37,6 +37,7 @@ import { capitalizeFirstLetter } from "@/utils/stringUtil";
|
||||
import { Button, buttonVariants } from "../ui/button";
|
||||
import { Trans, useTranslation } from "react-i18next";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { LuCircle } from "react-icons/lu";
|
||||
|
||||
type ReviewCardProps = {
|
||||
event: ReviewSegment;
|
||||
@ -142,7 +143,7 @@ export default function ReviewCard({
|
||||
className={cn(
|
||||
"size-full rounded-lg",
|
||||
activeReviewItem?.id == event.id &&
|
||||
"outline outline-[3px] outline-offset-1 outline-selected",
|
||||
"outline outline-[3px] -outline-offset-[2.8px] outline-selected duration-200",
|
||||
imgLoaded ? "visible" : "invisible",
|
||||
)}
|
||||
src={`${baseUrl}${event.thumb_path.replace("/media/frigate/", "")}`}
|
||||
@ -165,6 +166,14 @@ export default function ReviewCard({
|
||||
<TooltipTrigger asChild>
|
||||
<div className="flex items-center justify-evenly gap-1">
|
||||
<>
|
||||
<LuCircle
|
||||
className={cn(
|
||||
"size-2",
|
||||
event.severity == "alert"
|
||||
? "fill-severity_alert text-severity_alert"
|
||||
: "fill-severity_detection text-severity_detection",
|
||||
)}
|
||||
/>
|
||||
{event.data.objects.map((object) => {
|
||||
return getIconForLabel(
|
||||
object,
|
||||
|
||||
@ -0,0 +1,477 @@
|
||||
import { Button } from "@/components/ui/button";
|
||||
import {
|
||||
Dialog,
|
||||
DialogContent,
|
||||
DialogDescription,
|
||||
DialogHeader,
|
||||
DialogTitle,
|
||||
} from "@/components/ui/dialog";
|
||||
import {
|
||||
Form,
|
||||
FormControl,
|
||||
FormField,
|
||||
FormItem,
|
||||
FormLabel,
|
||||
FormMessage,
|
||||
} from "@/components/ui/form";
|
||||
import { Input } from "@/components/ui/input";
|
||||
import { Label } from "@/components/ui/label";
|
||||
import { RadioGroup, RadioGroupItem } from "@/components/ui/radio-group";
|
||||
import {
|
||||
Select,
|
||||
SelectContent,
|
||||
SelectItem,
|
||||
SelectTrigger,
|
||||
SelectValue,
|
||||
} from "@/components/ui/select";
|
||||
import {
|
||||
CustomClassificationModelConfig,
|
||||
FrigateConfig,
|
||||
} from "@/types/frigateConfig";
|
||||
import { getTranslatedLabel } from "@/utils/i18n";
|
||||
import { zodResolver } from "@hookform/resolvers/zod";
|
||||
import axios from "axios";
|
||||
import { useCallback, useEffect, useMemo, useState } from "react";
|
||||
import { useForm } from "react-hook-form";
|
||||
import { useTranslation } from "react-i18next";
|
||||
import { LuPlus, LuX } from "react-icons/lu";
|
||||
import { toast } from "sonner";
|
||||
import useSWR from "swr";
|
||||
import { z } from "zod";
|
||||
|
||||
type ClassificationModelEditDialogProps = {
|
||||
open: boolean;
|
||||
model: CustomClassificationModelConfig;
|
||||
onClose: () => void;
|
||||
onSuccess: () => void;
|
||||
};
|
||||
|
||||
type ObjectClassificationType = "sub_label" | "attribute";
|
||||
|
||||
type ObjectFormData = {
|
||||
objectLabel: string;
|
||||
objectType: ObjectClassificationType;
|
||||
};
|
||||
|
||||
type StateFormData = {
|
||||
classes: string[];
|
||||
};
|
||||
|
||||
export default function ClassificationModelEditDialog({
|
||||
open,
|
||||
model,
|
||||
onClose,
|
||||
onSuccess,
|
||||
}: ClassificationModelEditDialogProps) {
|
||||
const { t } = useTranslation(["views/classificationModel"]);
|
||||
const { data: config } = useSWR<FrigateConfig>("config");
|
||||
const [isSaving, setIsSaving] = useState(false);
|
||||
|
||||
const isStateModel = model.state_config !== undefined;
|
||||
const isObjectModel = model.object_config !== undefined;
|
||||
|
||||
const objectLabels = useMemo(() => {
|
||||
if (!config) return [];
|
||||
|
||||
const labels = new Set<string>();
|
||||
|
||||
Object.values(config.cameras).forEach((cameraConfig) => {
|
||||
if (!cameraConfig.enabled || !cameraConfig.enabled_in_config) {
|
||||
return;
|
||||
}
|
||||
|
||||
cameraConfig.objects.track.forEach((label) => {
|
||||
if (!config.model.all_attributes.includes(label)) {
|
||||
labels.add(label);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
return [...labels].sort();
|
||||
}, [config]);
|
||||
|
||||
// Define form schema based on model type
|
||||
const formSchema = useMemo(() => {
|
||||
if (isObjectModel) {
|
||||
return z.object({
|
||||
objectLabel: z
|
||||
.string()
|
||||
.min(1, t("wizard.step1.errors.objectLabelRequired")),
|
||||
objectType: z.enum(["sub_label", "attribute"]),
|
||||
});
|
||||
} else {
|
||||
// State model
|
||||
return z.object({
|
||||
classes: z
|
||||
.array(z.string())
|
||||
.min(1, t("wizard.step1.errors.classRequired"))
|
||||
.refine(
|
||||
(classes) => {
|
||||
const nonEmpty = classes.filter((c) => c.trim().length > 0);
|
||||
return nonEmpty.length >= 2;
|
||||
},
|
||||
{ message: t("wizard.step1.errors.stateRequiresTwoClasses") },
|
||||
)
|
||||
.refine(
|
||||
(classes) => {
|
||||
const nonEmpty = classes.filter((c) => c.trim().length > 0);
|
||||
const unique = new Set(nonEmpty.map((c) => c.toLowerCase()));
|
||||
return unique.size === nonEmpty.length;
|
||||
},
|
||||
{ message: t("wizard.step1.errors.classesUnique") },
|
||||
),
|
||||
});
|
||||
}
|
||||
}, [isObjectModel, t]);
|
||||
|
||||
const form = useForm<ObjectFormData | StateFormData>({
|
||||
resolver: zodResolver(formSchema),
|
||||
defaultValues: isObjectModel
|
||||
? ({
|
||||
objectLabel: model.object_config?.objects?.[0] || "",
|
||||
objectType:
|
||||
(model.object_config
|
||||
?.classification_type as ObjectClassificationType) || "sub_label",
|
||||
} as ObjectFormData)
|
||||
: ({
|
||||
classes: [""], // Will be populated from dataset
|
||||
} as StateFormData),
|
||||
mode: "onChange",
|
||||
});
|
||||
|
||||
// Fetch dataset to get current classes for state models
|
||||
const { data: dataset } = useSWR<{
|
||||
[id: string]: string[];
|
||||
}>(isStateModel ? `classification/${model.name}/dataset` : null, {
|
||||
revalidateOnFocus: false,
|
||||
});
|
||||
|
||||
// Update form with classes from dataset when loaded
|
||||
useEffect(() => {
|
||||
if (isStateModel && dataset) {
|
||||
const classes = Object.keys(dataset).filter((key) => key !== "none");
|
||||
if (classes.length > 0) {
|
||||
(form as ReturnType<typeof useForm<StateFormData>>).setValue(
|
||||
"classes",
|
||||
classes,
|
||||
);
|
||||
}
|
||||
}
|
||||
}, [dataset, isStateModel, form]);
|
||||
|
||||
const watchedClasses = isStateModel
|
||||
? (form as ReturnType<typeof useForm<StateFormData>>).watch("classes")
|
||||
: undefined;
|
||||
const watchedObjectType = isObjectModel
|
||||
? (form as ReturnType<typeof useForm<ObjectFormData>>).watch("objectType")
|
||||
: undefined;
|
||||
|
||||
const handleAddClass = useCallback(() => {
|
||||
const currentClasses = (
|
||||
form as ReturnType<typeof useForm<StateFormData>>
|
||||
).getValues("classes");
|
||||
(form as ReturnType<typeof useForm<StateFormData>>).setValue(
|
||||
"classes",
|
||||
[...currentClasses, ""],
|
||||
{
|
||||
shouldValidate: true,
|
||||
},
|
||||
);
|
||||
}, [form]);
|
||||
|
||||
const handleRemoveClass = useCallback(
|
||||
(index: number) => {
|
||||
const currentClasses = (
|
||||
form as ReturnType<typeof useForm<StateFormData>>
|
||||
).getValues("classes");
|
||||
const newClasses = currentClasses.filter((_, i) => i !== index);
|
||||
|
||||
// Ensure at least one field remains (even if empty)
|
||||
if (newClasses.length === 0) {
|
||||
(form as ReturnType<typeof useForm<StateFormData>>).setValue(
|
||||
"classes",
|
||||
[""],
|
||||
{ shouldValidate: true },
|
||||
);
|
||||
} else {
|
||||
(form as ReturnType<typeof useForm<StateFormData>>).setValue(
|
||||
"classes",
|
||||
newClasses,
|
||||
{ shouldValidate: true },
|
||||
);
|
||||
}
|
||||
},
|
||||
[form],
|
||||
);
|
||||
|
||||
const onSubmit = useCallback(
|
||||
async (data: ObjectFormData | StateFormData) => {
|
||||
setIsSaving(true);
|
||||
try {
|
||||
if (isObjectModel) {
|
||||
const objectData = data as ObjectFormData;
|
||||
|
||||
// Update the config
|
||||
await axios.put("/config/set", {
|
||||
requires_restart: 0,
|
||||
update_topic: `config/classification/custom/${model.name}`,
|
||||
config_data: {
|
||||
classification: {
|
||||
custom: {
|
||||
[model.name]: {
|
||||
enabled: model.enabled,
|
||||
name: model.name,
|
||||
threshold: model.threshold,
|
||||
object_config: {
|
||||
objects: [objectData.objectLabel],
|
||||
classification_type: objectData.objectType,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
toast.success(t("toast.success.updatedModel"), {
|
||||
position: "top-center",
|
||||
});
|
||||
} else {
|
||||
// State model - update classes
|
||||
// Note: For state models, updating classes requires renaming categories
|
||||
// which is handled through the dataset API, not the config API
|
||||
// We'll need to implement this by calling the rename endpoint for each class
|
||||
// For now, we just show a message that this requires retraining
|
||||
|
||||
toast.info(t("edit.stateClassesInfo"), {
|
||||
position: "top-center",
|
||||
});
|
||||
}
|
||||
|
||||
onSuccess();
|
||||
onClose();
|
||||
} catch (err) {
|
||||
const error = err as {
|
||||
response?: { data?: { message?: string; detail?: string } };
|
||||
};
|
||||
const errorMessage =
|
||||
error.response?.data?.message ||
|
||||
error.response?.data?.detail ||
|
||||
"Unknown error";
|
||||
toast.error(t("toast.error.updateModelFailed", { errorMessage }), {
|
||||
position: "top-center",
|
||||
});
|
||||
} finally {
|
||||
setIsSaving(false);
|
||||
}
|
||||
},
|
||||
[isObjectModel, model, t, onSuccess, onClose],
|
||||
);
|
||||
|
||||
const handleCancel = useCallback(() => {
|
||||
form.reset();
|
||||
onClose();
|
||||
}, [form, onClose]);
|
||||
|
||||
return (
|
||||
<Dialog open={open} onOpenChange={(open) => !open && handleCancel()}>
|
||||
<DialogContent>
|
||||
<DialogHeader>
|
||||
<DialogTitle>{t("edit.title")}</DialogTitle>
|
||||
<DialogDescription>
|
||||
{isStateModel
|
||||
? t("edit.descriptionState")
|
||||
: t("edit.descriptionObject")}
|
||||
</DialogDescription>
|
||||
</DialogHeader>
|
||||
|
||||
<div className="space-y-6">
|
||||
<Form {...form}>
|
||||
<form onSubmit={form.handleSubmit(onSubmit)} className="space-y-4">
|
||||
{isObjectModel && (
|
||||
<>
|
||||
<FormField
|
||||
control={form.control}
|
||||
name="objectLabel"
|
||||
render={({ field }) => (
|
||||
<FormItem>
|
||||
<FormLabel className="text-primary-variant">
|
||||
{t("wizard.step1.objectLabel")}
|
||||
</FormLabel>
|
||||
<Select
|
||||
onValueChange={field.onChange}
|
||||
defaultValue={field.value}
|
||||
>
|
||||
<FormControl>
|
||||
<SelectTrigger className="h-8">
|
||||
<SelectValue
|
||||
placeholder={t(
|
||||
"wizard.step1.objectLabelPlaceholder",
|
||||
)}
|
||||
/>
|
||||
</SelectTrigger>
|
||||
</FormControl>
|
||||
<SelectContent>
|
||||
{objectLabels.map((label) => (
|
||||
<SelectItem
|
||||
key={label}
|
||||
value={label}
|
||||
className="cursor-pointer hover:bg-secondary-highlight"
|
||||
>
|
||||
{getTranslatedLabel(label)}
|
||||
</SelectItem>
|
||||
))}
|
||||
</SelectContent>
|
||||
</Select>
|
||||
<FormMessage />
|
||||
</FormItem>
|
||||
)}
|
||||
/>
|
||||
|
||||
<FormField
|
||||
control={form.control}
|
||||
name="objectType"
|
||||
render={({ field }) => (
|
||||
<FormItem>
|
||||
<FormLabel className="text-primary-variant">
|
||||
{t("wizard.step1.classificationType")}
|
||||
</FormLabel>
|
||||
<FormControl>
|
||||
<RadioGroup
|
||||
onValueChange={field.onChange}
|
||||
defaultValue={field.value}
|
||||
className="flex flex-col gap-4 pt-2"
|
||||
>
|
||||
<div className="flex items-center gap-2">
|
||||
<RadioGroupItem
|
||||
className={
|
||||
watchedObjectType === "sub_label"
|
||||
? "bg-selected from-selected/50 to-selected/90 text-selected"
|
||||
: "bg-secondary from-secondary/50 to-secondary/90 text-secondary"
|
||||
}
|
||||
id="sub_label"
|
||||
value="sub_label"
|
||||
/>
|
||||
<Label
|
||||
className="cursor-pointer"
|
||||
htmlFor="sub_label"
|
||||
>
|
||||
{t("wizard.step1.classificationSubLabel")}
|
||||
</Label>
|
||||
</div>
|
||||
<div className="flex items-center gap-2">
|
||||
<RadioGroupItem
|
||||
className={
|
||||
watchedObjectType === "attribute"
|
||||
? "bg-selected from-selected/50 to-selected/90 text-selected"
|
||||
: "bg-secondary from-secondary/50 to-secondary/90 text-secondary"
|
||||
}
|
||||
id="attribute"
|
||||
value="attribute"
|
||||
/>
|
||||
<Label
|
||||
className="cursor-pointer"
|
||||
htmlFor="attribute"
|
||||
>
|
||||
{t("wizard.step1.classificationAttribute")}
|
||||
</Label>
|
||||
</div>
|
||||
</RadioGroup>
|
||||
</FormControl>
|
||||
<FormMessage />
|
||||
</FormItem>
|
||||
)}
|
||||
/>
|
||||
</>
|
||||
)}
|
||||
|
||||
{isStateModel && (
|
||||
<div className="space-y-2">
|
||||
<div className="flex items-center justify-between">
|
||||
<FormLabel className="text-primary-variant">
|
||||
{t("wizard.step1.states")}
|
||||
</FormLabel>
|
||||
<Button
|
||||
type="button"
|
||||
variant="secondary"
|
||||
className="size-6 rounded-md bg-secondary-foreground p-1 text-background"
|
||||
onClick={handleAddClass}
|
||||
>
|
||||
<LuPlus />
|
||||
</Button>
|
||||
</div>
|
||||
<div className="space-y-2">
|
||||
{watchedClasses?.map((_: string, index: number) => (
|
||||
<FormField
|
||||
key={index}
|
||||
control={
|
||||
(form as ReturnType<typeof useForm<StateFormData>>)
|
||||
.control
|
||||
}
|
||||
name={`classes.${index}` as const}
|
||||
render={({ field }) => (
|
||||
<FormItem>
|
||||
<FormControl>
|
||||
<div className="flex items-center gap-2">
|
||||
<Input
|
||||
className="text-md h-8"
|
||||
placeholder={t(
|
||||
"wizard.step1.classPlaceholder",
|
||||
)}
|
||||
{...field}
|
||||
/>
|
||||
{watchedClasses &&
|
||||
watchedClasses.length > 1 && (
|
||||
<Button
|
||||
type="button"
|
||||
variant="ghost"
|
||||
size="sm"
|
||||
className="h-8 w-8 p-0"
|
||||
onClick={() => handleRemoveClass(index)}
|
||||
>
|
||||
<LuX className="size-4" />
|
||||
</Button>
|
||||
)}
|
||||
</div>
|
||||
</FormControl>
|
||||
</FormItem>
|
||||
)}
|
||||
/>
|
||||
))}
|
||||
</div>
|
||||
{isStateModel &&
|
||||
"classes" in form.formState.errors &&
|
||||
form.formState.errors.classes && (
|
||||
<p className="text-sm font-medium text-destructive">
|
||||
{form.formState.errors.classes.message}
|
||||
</p>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
|
||||
<div className="flex flex-col gap-3 pt-3 sm:flex-row sm:justify-end sm:gap-4">
|
||||
<Button
|
||||
type="button"
|
||||
onClick={handleCancel}
|
||||
className="sm:flex-1"
|
||||
disabled={isSaving}
|
||||
>
|
||||
{t("button.cancel", { ns: "common" })}
|
||||
</Button>
|
||||
<Button
|
||||
type="submit"
|
||||
variant="select"
|
||||
className="flex items-center justify-center gap-2 sm:flex-1"
|
||||
disabled={!form.formState.isValid || isSaving}
|
||||
>
|
||||
{isSaving
|
||||
? t("button.saving", { ns: "common" })
|
||||
: t("button.save", { ns: "common" })}
|
||||
</Button>
|
||||
</div>
|
||||
</form>
|
||||
</Form>
|
||||
</div>
|
||||
</DialogContent>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
@ -317,6 +317,21 @@ export default function Step3ChooseExamples({
|
||||
return unclassifiedImages.length === 0;
|
||||
}, [unclassifiedImages]);
|
||||
|
||||
const handleBack = useCallback(() => {
|
||||
if (currentClassIndex > 0) {
|
||||
const previousClass = allClasses[currentClassIndex - 1];
|
||||
setCurrentClassIndex((prev) => prev - 1);
|
||||
|
||||
// Restore selections for the previous class
|
||||
const previousSelections = Object.entries(imageClassifications)
|
||||
.filter(([_, className]) => className === previousClass)
|
||||
.map(([imageName, _]) => imageName);
|
||||
setSelectedImages(new Set(previousSelections));
|
||||
} else {
|
||||
onBack();
|
||||
}
|
||||
}, [currentClassIndex, allClasses, imageClassifications, onBack]);
|
||||
|
||||
return (
|
||||
<div className="flex flex-col gap-6">
|
||||
{isTraining ? (
|
||||
@ -420,7 +435,7 @@ export default function Step3ChooseExamples({
|
||||
|
||||
{!isTraining && (
|
||||
<div className="flex flex-col gap-3 pt-3 sm:flex-row sm:justify-end sm:gap-4">
|
||||
<Button type="button" onClick={onBack} className="sm:flex-1">
|
||||
<Button type="button" onClick={handleBack} className="sm:flex-1">
|
||||
{t("button.back", { ns: "common" })}
|
||||
</Button>
|
||||
<Button
|
||||
|
||||
@ -348,6 +348,26 @@ export function GeneralFilterContent({
|
||||
onClose,
|
||||
}: GeneralFilterContentProps) {
|
||||
const { t } = useTranslation(["components/filter"]);
|
||||
const { data: config } = useSWR<FrigateConfig>("config", {
|
||||
revalidateOnFocus: false,
|
||||
});
|
||||
|
||||
const allAudioListenLabels = useMemo<string[]>(() => {
|
||||
if (!config) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const labels = new Set<string>();
|
||||
Object.values(config.cameras).forEach((camera) => {
|
||||
if (camera?.audio?.enabled) {
|
||||
camera.audio.listen.forEach((label) => {
|
||||
labels.add(label);
|
||||
});
|
||||
}
|
||||
});
|
||||
return [...labels].sort();
|
||||
}, [config]);
|
||||
|
||||
return (
|
||||
<>
|
||||
<div className="overflow-x-hidden">
|
||||
@ -373,7 +393,10 @@ export function GeneralFilterContent({
|
||||
{allLabels.map((item) => (
|
||||
<FilterSwitch
|
||||
key={item}
|
||||
label={getTranslatedLabel(item)}
|
||||
label={getTranslatedLabel(
|
||||
item,
|
||||
allAudioListenLabels.includes(item) ? "audio" : "object",
|
||||
)}
|
||||
isChecked={currentLabels?.includes(item) ?? false}
|
||||
onCheckedChange={(isChecked) => {
|
||||
if (isChecked) {
|
||||
|
||||
@ -8,7 +8,7 @@ import {
|
||||
FormMessage,
|
||||
} from "@/components/ui/form";
|
||||
import { Input } from "@/components/ui/input";
|
||||
import { useState, useEffect } from "react";
|
||||
import { useState, useEffect, useRef } from "react";
|
||||
import { useFormContext } from "react-hook-form";
|
||||
import { generateFixedHash, isValidId } from "@/utils/stringUtil";
|
||||
import { useTranslation } from "react-i18next";
|
||||
@ -25,6 +25,7 @@ type NameAndIdFieldsProps<T extends FieldValues = FieldValues> = {
|
||||
processId?: (name: string) => string;
|
||||
placeholderName?: string;
|
||||
placeholderId?: string;
|
||||
idVisible?: boolean;
|
||||
};
|
||||
|
||||
export default function NameAndIdFields<T extends FieldValues = FieldValues>({
|
||||
@ -39,10 +40,12 @@ export default function NameAndIdFields<T extends FieldValues = FieldValues>({
|
||||
processId,
|
||||
placeholderName,
|
||||
placeholderId,
|
||||
idVisible,
|
||||
}: NameAndIdFieldsProps<T>) {
|
||||
const { t } = useTranslation(["common"]);
|
||||
const { watch, setValue, trigger } = useFormContext<T>();
|
||||
const [isIdVisible, setIsIdVisible] = useState(false);
|
||||
const { watch, setValue, trigger, formState } = useFormContext<T>();
|
||||
const [isIdVisible, setIsIdVisible] = useState(idVisible ?? false);
|
||||
const hasUserTypedRef = useRef(false);
|
||||
|
||||
const defaultProcessId = (name: string) => {
|
||||
const normalized = name.replace(/\s+/g, "_").toLowerCase();
|
||||
@ -58,6 +61,7 @@ export default function NameAndIdFields<T extends FieldValues = FieldValues>({
|
||||
useEffect(() => {
|
||||
const subscription = watch((value, { name }) => {
|
||||
if (name === nameField) {
|
||||
hasUserTypedRef.current = true;
|
||||
const processedId = effectiveProcessId(value[nameField] || "");
|
||||
setValue(idField, processedId as PathValue<T, Path<T>>);
|
||||
trigger(idField);
|
||||
@ -66,6 +70,14 @@ export default function NameAndIdFields<T extends FieldValues = FieldValues>({
|
||||
return () => subscription.unsubscribe();
|
||||
}, [watch, setValue, trigger, nameField, idField, effectiveProcessId]);
|
||||
|
||||
// Auto-expand if there's an error on the ID field after user has typed
|
||||
useEffect(() => {
|
||||
const idError = formState.errors[idField];
|
||||
if (idError && hasUserTypedRef.current && !isIdVisible) {
|
||||
setIsIdVisible(true);
|
||||
}
|
||||
}, [formState.errors, idField, isIdVisible]);
|
||||
|
||||
return (
|
||||
<>
|
||||
<FormField
|
||||
|
||||
@ -258,6 +258,7 @@ export default function CreateTriggerDialog({
|
||||
nameLabel={t("triggers.dialog.form.name.title")}
|
||||
nameDescription={t("triggers.dialog.form.name.description")}
|
||||
placeholderName={t("triggers.dialog.form.name.placeholder")}
|
||||
idVisible={!!trigger}
|
||||
/>
|
||||
|
||||
<FormField
|
||||
|
||||
@ -59,6 +59,47 @@ export default function ObjectTrackOverlay({
|
||||
|
||||
const effectiveCurrentTime = currentTime - annotationOffset / 1000;
|
||||
|
||||
const {
|
||||
pathStroke,
|
||||
pointRadius,
|
||||
pointStroke,
|
||||
zoneStroke,
|
||||
boxStroke,
|
||||
highlightRadius,
|
||||
} = useMemo(() => {
|
||||
const BASE_WIDTH = 1280;
|
||||
const BASE_HEIGHT = 720;
|
||||
const BASE_PATH_STROKE = 5;
|
||||
const BASE_POINT_RADIUS = 7;
|
||||
const BASE_POINT_STROKE = 3;
|
||||
const BASE_ZONE_STROKE = 5;
|
||||
const BASE_BOX_STROKE = 5;
|
||||
const BASE_HIGHLIGHT_RADIUS = 5;
|
||||
|
||||
const scale = Math.sqrt(
|
||||
(videoWidth * videoHeight) / (BASE_WIDTH * BASE_HEIGHT),
|
||||
);
|
||||
|
||||
const pathStroke = Math.max(1, Math.round(BASE_PATH_STROKE * scale));
|
||||
const pointRadius = Math.max(2, Math.round(BASE_POINT_RADIUS * scale));
|
||||
const pointStroke = Math.max(1, Math.round(BASE_POINT_STROKE * scale));
|
||||
const zoneStroke = Math.max(1, Math.round(BASE_ZONE_STROKE * scale));
|
||||
const boxStroke = Math.max(1, Math.round(BASE_BOX_STROKE * scale));
|
||||
const highlightRadius = Math.max(
|
||||
2,
|
||||
Math.round(BASE_HIGHLIGHT_RADIUS * scale),
|
||||
);
|
||||
|
||||
return {
|
||||
pathStroke,
|
||||
pointRadius,
|
||||
pointStroke,
|
||||
zoneStroke,
|
||||
boxStroke,
|
||||
highlightRadius,
|
||||
};
|
||||
}, [videoWidth, videoHeight]);
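The sizes above are scaled against a 1280×720 baseline so strokes and points keep the same visual weight at any resolution; each value is computed as

\text{scale} = \sqrt{\frac{w \cdot h}{1280 \times 720}}, \qquad \text{value} = \max\bigl(\text{min},\ \operatorname{round}(\text{base} \cdot \text{scale})\bigr)

where w and h are the video dimensions and min is 1 (2 for the point radius and highlight radius).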
|
||||
|
||||
// Fetch all event data in a single request (CSV ids)
|
||||
const { data: eventsData } = useSWR<Event[]>(
|
||||
selectedObjectIds.length > 0
|
||||
@ -214,16 +255,21 @@ export default function ObjectTrackOverlay({
|
||||
b.timestamp - a.timestamp,
|
||||
)[0]?.data?.zones || [];
|
||||
|
||||
// bounding box (with tolerance for browsers with seek precision by-design issues)
|
||||
const boxCandidates = timelineData?.filter(
|
||||
(event: TrackingDetailsSequence) =>
|
||||
event.timestamp <= effectiveCurrentTime + TOLERANCE &&
|
||||
event.data.box,
|
||||
);
|
||||
const currentBox = boxCandidates?.sort(
|
||||
(a: TrackingDetailsSequence, b: TrackingDetailsSequence) =>
|
||||
b.timestamp - a.timestamp,
|
||||
)[0]?.data?.box;
|
||||
// bounding box - only show if there's a timeline event at/near the current time with a box
|
||||
// Search all timeline events (not just those before current time) to find one matching the seek position
|
||||
const nearbyTimelineEvent = timelineData
|
||||
?.filter((event: TrackingDetailsSequence) => event.data.box)
|
||||
.sort(
|
||||
(a: TrackingDetailsSequence, b: TrackingDetailsSequence) =>
|
||||
Math.abs(a.timestamp - effectiveCurrentTime) -
|
||||
Math.abs(b.timestamp - effectiveCurrentTime),
|
||||
)
|
||||
.find(
|
||||
(event: TrackingDetailsSequence) =>
|
||||
Math.abs(event.timestamp - effectiveCurrentTime) <= TOLERANCE,
|
||||
);
|
||||
|
||||
const currentBox = nearbyTimelineEvent?.data?.box;
|
||||
|
||||
return {
|
||||
objectId,
|
||||
@ -349,7 +395,7 @@ export default function ObjectTrackOverlay({
|
||||
points={zone.points}
|
||||
fill={zone.fill}
|
||||
stroke={zone.stroke}
|
||||
strokeWidth="5"
|
||||
strokeWidth={zoneStroke}
|
||||
opacity="0.7"
|
||||
/>
|
||||
))}
|
||||
@ -369,7 +415,7 @@ export default function ObjectTrackOverlay({
|
||||
d={generateStraightPath(absolutePositions)}
|
||||
fill="none"
|
||||
stroke={objData.color}
|
||||
strokeWidth="5"
|
||||
strokeWidth={pathStroke}
|
||||
strokeLinecap="round"
|
||||
strokeLinejoin="round"
|
||||
/>
|
||||
@ -381,13 +427,13 @@ export default function ObjectTrackOverlay({
|
||||
<circle
|
||||
cx={pos.x}
|
||||
cy={pos.y}
|
||||
r="7"
|
||||
r={pointRadius}
|
||||
fill={getPointColor(
|
||||
objData.color,
|
||||
pos.lifecycle_item?.class_type,
|
||||
)}
|
||||
stroke="white"
|
||||
strokeWidth="3"
|
||||
strokeWidth={pointStroke}
|
||||
style={{ cursor: onSeekToTime ? "pointer" : "default" }}
|
||||
onClick={() => handlePointClick(pos.timestamp)}
|
||||
/>
|
||||
@ -416,7 +462,7 @@ export default function ObjectTrackOverlay({
|
||||
height={objData.currentBox[3] * videoHeight}
|
||||
fill="none"
|
||||
stroke={objData.color}
|
||||
strokeWidth="5"
|
||||
strokeWidth={boxStroke}
|
||||
opacity="0.9"
|
||||
/>
|
||||
<circle
|
||||
@ -428,10 +474,10 @@ export default function ObjectTrackOverlay({
|
||||
(objData.currentBox[1] + objData.currentBox[3]) *
|
||||
videoHeight
|
||||
}
|
||||
r="5"
|
||||
r={highlightRadius}
|
||||
fill="rgb(255, 255, 0)" // yellow highlight
|
||||
stroke={objData.color}
|
||||
strokeWidth="5"
|
||||
strokeWidth={boxStroke}
|
||||
opacity="1"
|
||||
/>
|
||||
</g>
|
||||
|
||||
@ -8,7 +8,7 @@ import Heading from "@/components/ui/heading";
|
||||
import { FrigateConfig } from "@/types/frigateConfig";
|
||||
import { formatUnixTimestampToDateTime } from "@/utils/dateUtil";
|
||||
import { getIconForLabel } from "@/utils/iconUtil";
|
||||
import { LuCircle, LuSettings } from "react-icons/lu";
|
||||
import { LuCircle, LuFolderX, LuSettings } from "react-icons/lu";
|
||||
import { cn } from "@/lib/utils";
|
||||
import {
|
||||
Tooltip,
|
||||
@ -38,9 +38,12 @@ import { HiDotsHorizontal } from "react-icons/hi";
|
||||
import axios from "axios";
|
||||
import { toast } from "sonner";
|
||||
import { useDetailStream } from "@/context/detail-stream-context";
|
||||
import { isDesktop, isIOS } from "react-device-detect";
|
||||
import { isDesktop, isIOS, isMobileOnly, isSafari } from "react-device-detect";
|
||||
import Chip from "@/components/indicators/Chip";
|
||||
import { FaDownload, FaHistory } from "react-icons/fa";
|
||||
import { useApiHost } from "@/api";
|
||||
import ImageLoadingIndicator from "@/components/indicators/ImageLoadingIndicator";
|
||||
import ObjectTrackOverlay from "../ObjectTrackOverlay";
|
||||
|
||||
type TrackingDetailsProps = {
|
||||
className?: string;
|
||||
@ -57,9 +60,19 @@ export function TrackingDetails({
|
||||
const videoRef = useRef<HTMLVideoElement | null>(null);
|
||||
const { t } = useTranslation(["views/explore"]);
|
||||
const navigate = useNavigate();
|
||||
const apiHost = useApiHost();
|
||||
const imgRef = useRef<HTMLImageElement | null>(null);
|
||||
const [imgLoaded, setImgLoaded] = useState(false);
|
||||
const [displaySource, _setDisplaySource] = useState<"video" | "image">(
|
||||
"video",
|
||||
);
|
||||
const { setSelectedObjectIds, annotationOffset, setAnnotationOffset } =
|
||||
useDetailStream();
|
||||
|
||||
// manualOverride holds a record-stream timestamp explicitly chosen by the
|
||||
  // user (e.g., clicking a lifecycle row). When null we display `currentTime`.
|
||||
const [manualOverride, setManualOverride] = useState<number | null>(null);
|
||||
|
||||
// event.start_time is detect time, convert to record, then subtract padding
|
||||
const [currentTime, setCurrentTime] = useState(
|
||||
(event.start_time ?? 0) + annotationOffset / 1000 - REVIEW_PADDING,
|
||||
@ -79,10 +92,14 @@ export function TrackingDetails({
|
||||
return resolveZoneName(config, zone);
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
// Use manualOverride (set when seeking in image mode) if present so
|
||||
// lifecycle rows and overlays follow image-mode seeks. Otherwise fall
|
||||
// back to currentTime used for video mode.
|
||||
const effectiveTime = useMemo(() => {
|
||||
return currentTime - annotationOffset / 1000;
|
||||
}, [currentTime, annotationOffset]);
|
||||
const displayedRecordTime = manualOverride ?? currentTime;
|
||||
return displayedRecordTime - annotationOffset / 1000;
|
||||
}, [manualOverride, currentTime, annotationOffset]);
|
||||
|
||||
const containerRef = useRef<HTMLDivElement | null>(null);
|
||||
const [_selectedZone, setSelectedZone] = useState("");
|
||||
@ -125,20 +142,30 @@ export function TrackingDetails({
|
||||
|
||||
const handleLifecycleClick = useCallback(
|
||||
(item: TrackingDetailsSequence) => {
|
||||
if (!videoRef.current) return;
|
||||
if (!videoRef.current && !imgRef.current) return;
|
||||
|
||||
// Convert lifecycle timestamp (detect stream) to record stream time
|
||||
const targetTimeRecord = item.timestamp + annotationOffset / 1000;
|
||||
|
||||
// Convert to video-relative time for seeking
|
||||
if (displaySource === "image") {
|
||||
// For image mode: set a manual override timestamp and update
|
||||
// currentTime so overlays render correctly.
|
||||
setManualOverride(targetTimeRecord);
|
||||
setCurrentTime(targetTimeRecord);
|
||||
return;
|
||||
}
|
||||
|
||||
// For video mode: convert to video-relative time and seek player
|
||||
const eventStartRecord =
|
||||
(event.start_time ?? 0) + annotationOffset / 1000;
|
||||
const videoStartTime = eventStartRecord - REVIEW_PADDING;
|
||||
const relativeTime = targetTimeRecord - videoStartTime;
|
||||
|
||||
videoRef.current.currentTime = relativeTime;
|
||||
if (videoRef.current) {
|
||||
videoRef.current.currentTime = relativeTime;
|
||||
}
|
||||
},
|
||||
[event.start_time, annotationOffset],
|
||||
[event.start_time, annotationOffset, displaySource],
|
||||
);
|
||||
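For reference, the detect-to-record and record-to-player conversions used in this handler (and again in the seek effect below) are

t_{\text{record}} = t_{\text{detect}} + \frac{\text{annotationOffset}}{1000}, \qquad t_{\text{player}} = t_{\text{record}} - \bigl(t^{\text{start}}_{\text{record}} - \text{REVIEW\_PADDING}\bigr)

where t^{start}_{record} is event.start_time converted the same way.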
|
||||
const formattedStart = config
|
||||
@ -179,11 +206,20 @@ export function TrackingDetails({
|
||||
}, [eventSequence]);
|
||||
|
||||
useEffect(() => {
|
||||
if (seekToTimestamp === null || !videoRef.current) return;
|
||||
if (seekToTimestamp === null) return;
|
||||
|
||||
if (displaySource === "image") {
|
||||
// For image mode, set the manual override so the snapshot updates to
|
||||
// the exact record timestamp.
|
||||
setManualOverride(seekToTimestamp);
|
||||
setSeekToTimestamp(null);
|
||||
return;
|
||||
}
|
||||
|
||||
// seekToTimestamp is a record stream timestamp
|
||||
// event.start_time is detect stream time, convert to record
|
||||
// The video clip starts at (eventStartRecord - REVIEW_PADDING)
|
||||
if (!videoRef.current) return;
|
||||
const eventStartRecord = event.start_time + annotationOffset / 1000;
|
||||
const videoStartTime = eventStartRecord - REVIEW_PADDING;
|
||||
const relativeTime = seekToTimestamp - videoStartTime;
|
||||
@ -191,7 +227,14 @@ export function TrackingDetails({
|
||||
videoRef.current.currentTime = relativeTime;
|
||||
}
|
||||
setSeekToTimestamp(null);
|
||||
}, [seekToTimestamp, event.start_time, annotationOffset]);
|
||||
}, [
|
||||
seekToTimestamp,
|
||||
event.start_time,
|
||||
annotationOffset,
|
||||
apiHost,
|
||||
event.camera,
|
||||
displaySource,
|
||||
]);
|
||||
|
||||
const isWithinEventRange =
|
||||
effectiveTime !== undefined &&
|
||||
@ -294,6 +337,27 @@ export function TrackingDetails({
|
||||
[event.start_time, annotationOffset],
|
||||
);
|
||||
|
||||
const [src, setSrc] = useState(
|
||||
`${apiHost}api/${event.camera}/recordings/${currentTime + REVIEW_PADDING}/snapshot.jpg?height=500`,
|
||||
);
|
||||
const [hasError, setHasError] = useState(false);
|
||||
|
||||
// Derive the record timestamp to display: manualOverride if present,
|
||||
// otherwise use currentTime.
|
||||
const displayedRecordTime = manualOverride ?? currentTime;
|
||||
|
||||
useEffect(() => {
|
||||
if (displayedRecordTime) {
|
||||
const newSrc = `${apiHost}api/${event.camera}/recordings/${displayedRecordTime}/snapshot.jpg?height=500`;
|
||||
setSrc(newSrc);
|
||||
}
|
||||
setImgLoaded(false);
|
||||
setHasError(false);
|
||||
|
||||
// we know that these deps are correct
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, [displayedRecordTime]);
|
||||
|
||||
if (!config) {
|
||||
return <ActivityIndicator />;
|
||||
}
|
||||
@ -311,9 +375,10 @@ export function TrackingDetails({
|
||||
|
||||
<div
|
||||
className={cn(
|
||||
"flex w-full items-center justify-center",
|
||||
"flex items-center justify-center",
|
||||
isDesktop && "overflow-hidden",
|
||||
cameraAspect === "tall" ? "max-h-[50dvh] lg:max-h-[70dvh]" : "w-full",
|
||||
cameraAspect === "tall" && isMobileOnly && "w-full",
|
||||
cameraAspect !== "tall" && isDesktop && "flex-[3]",
|
||||
)}
|
||||
style={{ aspectRatio: aspectRatio }}
|
||||
@ -325,21 +390,75 @@ export function TrackingDetails({
|
||||
cameraAspect === "tall" ? "h-full" : "w-full",
|
||||
)}
|
||||
>
|
||||
<HlsVideoPlayer
|
||||
videoRef={videoRef}
|
||||
containerRef={containerRef}
|
||||
visible={true}
|
||||
currentSource={videoSource}
|
||||
hotKeys={false}
|
||||
supportsFullscreen={false}
|
||||
fullscreen={false}
|
||||
frigateControls={true}
|
||||
onTimeUpdate={handleTimeUpdate}
|
||||
onSeekToTime={handleSeekToTime}
|
||||
isDetailMode={true}
|
||||
camera={event.camera}
|
||||
currentTimeOverride={currentTime}
|
||||
/>
|
||||
{displaySource == "video" && (
|
||||
<HlsVideoPlayer
|
||||
videoRef={videoRef}
|
||||
containerRef={containerRef}
|
||||
visible={true}
|
||||
currentSource={videoSource}
|
||||
hotKeys={false}
|
||||
supportsFullscreen={false}
|
||||
fullscreen={false}
|
||||
frigateControls={true}
|
||||
onTimeUpdate={handleTimeUpdate}
|
||||
onSeekToTime={handleSeekToTime}
|
||||
isDetailMode={true}
|
||||
camera={event.camera}
|
||||
currentTimeOverride={currentTime}
|
||||
/>
|
||||
)}
|
||||
{displaySource == "image" && (
|
||||
<>
|
||||
<ImageLoadingIndicator
|
||||
className="absolute inset-0"
|
||||
imgLoaded={imgLoaded}
|
||||
/>
|
||||
{hasError && (
|
||||
<div className="relative aspect-video">
|
||||
<div className="flex flex-col items-center justify-center p-20 text-center">
|
||||
<LuFolderX className="size-16" />
|
||||
{t("objectLifecycle.noImageFound")}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
<div
|
||||
className={cn("relative", imgLoaded ? "visible" : "invisible")}
|
||||
>
|
||||
<div className="absolute z-50 size-full">
|
||||
<ObjectTrackOverlay
|
||||
key={`overlay-${displayedRecordTime}`}
|
||||
camera={event.camera}
|
||||
showBoundingBoxes={true}
|
||||
currentTime={displayedRecordTime}
|
||||
videoWidth={imgRef?.current?.naturalWidth ?? 0}
|
||||
videoHeight={imgRef?.current?.naturalHeight ?? 0}
|
||||
className="absolute inset-0 z-10"
|
||||
onSeekToTime={handleSeekToTime}
|
||||
/>
|
||||
</div>
|
||||
<img
|
||||
key={event.id}
|
||||
ref={imgRef}
|
||||
className={cn(
|
||||
"max-h-[50dvh] max-w-full select-none rounded-lg object-contain",
|
||||
)}
|
||||
loading={isSafari ? "eager" : "lazy"}
|
||||
style={
|
||||
isIOS
|
||||
? {
|
||||
WebkitUserSelect: "none",
|
||||
WebkitTouchCallout: "none",
|
||||
}
|
||||
: undefined
|
||||
}
|
||||
draggable={false}
|
||||
src={src}
|
||||
onLoad={() => setImgLoaded(true)}
|
||||
onError={() => setHasError(true)}
|
||||
/>
|
||||
</div>
|
||||
</>
|
||||
)}
|
||||
<div
|
||||
className={cn(
|
||||
"absolute top-2 z-[5] flex items-center gap-2",
|
||||
|
||||

@ -289,6 +289,7 @@ export default function VideoControls({
}}
onUploadFrame={onUploadFrame}
containerRef={containerRef}
fullscreen={fullscreen}
/>
)}
{features.fullscreen && toggleFullscreen && (
@ -306,6 +307,7 @@ type FrigatePlusUploadButtonProps = {
onClose: () => void;
onUploadFrame: () => void;
containerRef?: React.MutableRefObject<HTMLDivElement | null>;
fullscreen?: boolean;
};
function FrigatePlusUploadButton({
video,
@ -313,6 +315,7 @@ function FrigatePlusUploadButton({
onClose,
onUploadFrame,
containerRef,
fullscreen,
}: FrigatePlusUploadButtonProps) {
const { t } = useTranslation(["components/player"]);

@ -349,7 +352,11 @@ function FrigatePlusUploadButton({
/>
</AlertDialogTrigger>
<AlertDialogContent
portalProps={{ container: containerRef?.current }}
portalProps={
fullscreen && containerRef?.current
? { container: containerRef.current }
: undefined
}
className="md:max-w-2xl lg:max-w-3xl xl:max-w-4xl"
>
<AlertDialogHeader>

@ -174,9 +174,7 @@ export default function CameraWizardDialog({
...(friendlyName && { friendly_name: friendlyName }),
ffmpeg: {
inputs: wizardData.streams.map((stream, index) => {
const isRestreamed =
wizardData.restreamIds?.includes(stream.id) ?? false;
if (isRestreamed) {
if (stream.restream) {
const go2rtcStreamName =
wizardData.streams!.length === 1
? finalCameraName
@ -234,7 +232,11 @@ export default function CameraWizardDialog({
wizardData.streams!.length === 1
? finalCameraName
: `${finalCameraName}_${index + 1}`;
go2rtcStreams[streamName] = [stream.url];

const streamUrl = stream.useFfmpeg
? `ffmpeg:${stream.url}`
: stream.url;
go2rtcStreams[streamName] = [streamUrl];
});

if (Object.keys(go2rtcStreams).length > 0) {

@ -385,7 +385,7 @@ export default function Step1NameCamera({
</FormLabel>
<FormControl>
<Input
className="h-8"
className="text-md h-8"
placeholder={t(
"cameraWizard.step1.cameraNamePlaceholder",
)}
@ -475,7 +475,7 @@ export default function Step1NameCamera({
</FormLabel>
<FormControl>
<Input
className="h-8"
className="text-md h-8"
placeholder="192.168.1.100"
{...field}
/>
@ -495,7 +495,7 @@ export default function Step1NameCamera({
</FormLabel>
<FormControl>
<Input
className="h-8"
className="text-md h-8"
placeholder={t(
"cameraWizard.step1.usernamePlaceholder",
)}
@ -518,7 +518,7 @@ export default function Step1NameCamera({
<FormControl>
<div className="relative">
<Input
className="h-8 pr-10"
className="text-md h-8 pr-10"
type={showPassword ? "text" : "password"}
placeholder={t(
"cameraWizard.step1.passwordPlaceholder",
@ -558,7 +558,7 @@ export default function Step1NameCamera({
</FormLabel>
<FormControl>
<Input
className="h-8"
className="text-md h-8"
placeholder="rtsp://username:password@host:port/path"
{...field}
/>
@ -608,6 +608,12 @@ export default function Step1NameCamera({
</div>
)}

{isTesting && (
<div className="flex items-center gap-2 text-sm text-muted-foreground">
<ActivityIndicator className="size-4" />
{testStatus}
</div>
)}
<div className="flex flex-col gap-3 pt-3 sm:flex-row sm:justify-end sm:gap-4">
<Button
type="button"
@ -635,10 +641,7 @@ export default function Step1NameCamera({
variant="select"
className="flex items-center justify-center gap-2 sm:flex-1"
>
{isTesting && <ActivityIndicator className="size-4" />}
{isTesting && testStatus
? testStatus
: t("cameraWizard.step1.testConnection")}
{t("cameraWizard.step1.testConnection")}
</Button>
)}
</div>

@ -201,16 +201,12 @@ export default function Step2StreamConfig({

const setRestream = useCallback(
(streamId: string) => {
const currentIds = wizardData.restreamIds || [];
const isSelected = currentIds.includes(streamId);
const newIds = isSelected
? currentIds.filter((id) => id !== streamId)
: [...currentIds, streamId];
onUpdate({
restreamIds: newIds,
});
const stream = streams.find((s) => s.id === streamId);
if (!stream) return;

updateStream(streamId, { restream: !stream.restream });
},
[wizardData.restreamIds, onUpdate],
[streams, updateStream],
);

const hasDetectRole = streams.some((s) => s.roles.includes("detect"));
@ -435,9 +431,7 @@ export default function Step2StreamConfig({
{t("cameraWizard.step2.go2rtc")}
</span>
<Switch
checked={(wizardData.restreamIds || []).includes(
stream.id,
)}
checked={stream.restream || false}
onCheckedChange={() => setRestream(stream.id)}
/>
</div>

@ -1,7 +1,13 @@
import { Button } from "@/components/ui/button";
import { Badge } from "@/components/ui/badge";
import { Switch } from "@/components/ui/switch";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/ui/popover";
import { useTranslation } from "react-i18next";
import { LuRotateCcw } from "react-icons/lu";
import { LuRotateCcw, LuInfo } from "react-icons/lu";
import { useState, useCallback, useMemo, useEffect } from "react";
import ActivityIndicator from "@/components/indicators/activity-indicator";
import axios from "axios";
@ -216,7 +222,6 @@ export default function Step3Validation({
brandTemplate: wizardData.brandTemplate,
customUrl: wizardData.customUrl,
streams: wizardData.streams,
restreamIds: wizardData.restreamIds,
};

onSave(configData);
@ -322,6 +327,51 @@ export default function Step3Validation({
</div>
)}

{result?.success && (
<div className="mb-3 flex items-center justify-between">
<div className="flex items-center gap-2">
<span className="text-sm">
{t("cameraWizard.step3.ffmpegModule")}
</span>
<Popover>
<PopoverTrigger asChild>
<Button
variant="ghost"
size="sm"
className="h-4 w-4 p-0"
>
<LuInfo className="size-3" />
</Button>
</PopoverTrigger>
<PopoverContent className="pointer-events-auto w-80 text-xs">
<div className="space-y-2">
<div className="font-medium">
{t("cameraWizard.step3.ffmpegModule")}
</div>
<div className="text-muted-foreground">
{t(
"cameraWizard.step3.ffmpegModuleDescription",
)}
</div>
</div>
</PopoverContent>
</Popover>
</div>
<Switch
checked={stream.useFfmpeg || false}
onCheckedChange={(checked) => {
onUpdate({
streams: streams.map((s) =>
s.id === stream.id
? { ...s, useFfmpeg: checked }
: s,
),
});
}}
/>
</div>
)}

<div className="mb-2 flex flex-col justify-between gap-1 md:flex-row md:items-center">
<span className="break-all text-sm text-muted-foreground">
{stream.url}
@ -491,8 +541,7 @@ function StreamIssues({

// Restreaming check
if (stream.roles.includes("record")) {
const restreamIds = wizardData.restreamIds || [];
if (restreamIds.includes(stream.id)) {
if (stream.restream) {
result.push({
type: "warning",
message: t("cameraWizard.step3.issues.restreamingWarning"),
@ -660,9 +709,10 @@ function StreamPreview({ stream, onBandwidthUpdate }: StreamPreviewProps) {

useEffect(() => {
// Register stream with go2rtc
const streamUrl = stream.useFfmpeg ? `ffmpeg:${stream.url}` : stream.url;
axios
.put(`go2rtc/streams/${streamId}`, null, {
params: { src: stream.url },
params: { src: streamUrl },
})
.then(() => {
// Add small delay to allow go2rtc api to run and initialize the stream
@ -680,7 +730,7 @@ function StreamPreview({ stream, onBandwidthUpdate }: StreamPreviewProps) {
// do nothing on cleanup errors - go2rtc won't consume the streams
});
};
}, [stream.url, streamId]);
}, [stream.url, stream.useFfmpeg, streamId]);

const resolution = stream.testResult?.resolution;
let aspectRatio = "16/9";

@ -368,7 +368,11 @@ function ReviewGroup({
return (
<div
data-review-id={id}
className="cursor-pointer rounded-lg bg-secondary py-3"
className={`mx-1 cursor-pointer rounded-lg bg-secondary px-0 py-3 outline outline-[2px] -outline-offset-[1.8px] ${
isActive
? "shadow-selected outline-selected"
: "outline-transparent duration-500"
}`}
>
<div
className={cn(
@ -383,10 +387,10 @@ function ReviewGroup({
<div className="ml-4 mr-2 mt-1.5 flex flex-row items-start">
<LuCircle
className={cn(
"size-3",
isActive
? "fill-selected text-selected"
: "fill-muted duration-500 dark:fill-secondary-highlight dark:text-secondary-highlight",
"size-3 duration-500",
review.severity == "alert"
? "fill-severity_alert text-severity_alert"
: "fill-severity_detection text-severity_detection",
)}
/>
</div>
@ -455,6 +459,7 @@ function ReviewGroup({
<EventList
key={event.id}
event={event}
review={review}
effectiveTime={effectiveTime}
annotationOffset={annotationOffset}
onSeek={onSeek}
@ -489,6 +494,7 @@ function ReviewGroup({

type EventListProps = {
event: Event;
review: ReviewSegment;
effectiveTime?: number;
annotationOffset: number;
onSeek: (ts: number, play?: boolean) => void;
@ -496,6 +502,7 @@ type EventListProps = {
};
function EventList({
event,
review,
effectiveTime,
annotationOffset,
onSeek,
@ -614,6 +621,7 @@ function EventList({

<div className="mt-2">
<ObjectTimeline
review={review}
eventId={event.id}
onSeek={handleTimelineClick}
effectiveTime={effectiveTime}
@ -772,6 +780,7 @@ function LifecycleItem({

// Fetch and render timeline entries for a single event id on demand.
function ObjectTimeline({
review,
eventId,
onSeek,
effectiveTime,
@ -779,6 +788,7 @@ function ObjectTimeline({
startTime,
endTime,
}: {
review: ReviewSegment;
eventId: string;
onSeek: (ts: number, play?: boolean) => void;
effectiveTime?: number;
@ -787,13 +797,27 @@ function ObjectTimeline({
endTime?: number;
}) {
const { t } = useTranslation("views/events");
const { data: timeline, isValidating } = useSWR<TrackingDetailsSequence[]>([
const { data: fullTimeline, isValidating } = useSWR<
TrackingDetailsSequence[]
>([
"timeline",
{
source_id: eventId,
},
]);

const timeline = useMemo(() => {
if (!fullTimeline) {
return fullTimeline;
}

return fullTimeline.filter(
(t) =>
t.timestamp >= review.start_time &&
(review.end_time == undefined || t.timestamp <= review.end_time),
);
}, [fullTimeline, review]);

if (isValidating && (!timeline || timeline.length === 0)) {
return <ActivityIndicator className="ml-2 size-3" />;
}

@ -101,7 +101,7 @@ export default function Step1NameAndType({

const form = useForm<z.infer<typeof formSchema>>({
resolver: zodResolver(formSchema),
mode: "onChange",
mode: "onBlur",
defaultValues: {
enabled: true,
name: initialData?.name ?? trigger?.name ?? "",

@ -845,6 +845,7 @@ function FaceAttemptGroup({
selectedItems={selectedFaces}
i18nLibrary="views/faceLibrary"
objectType="person"
noClassificationLabel="details.unknown"
onClick={(data) => {
if (data) {
onClickFaces([data.filename], true);

@ -157,9 +157,11 @@ function MobileMenuItem({
const { t } = useTranslation(["views/settings"]);

return (
<Button
variant="ghost"
className={cn("w-full justify-between pr-2", className)}
<div
className={cn(
"inline-flex h-10 w-full cursor-pointer items-center justify-between whitespace-nowrap rounded-md px-4 py-2 pr-2 text-sm font-medium text-primary-variant disabled:pointer-events-none disabled:opacity-50",
className,
)}
onClick={() => {
onSelect(item.key);
onClose?.();
@ -167,7 +169,7 @@ function MobileMenuItem({
>
<div className="smart-capitalize">{t("menu." + item.key)}</div>
<LuChevronRight className="size-4" />
</Button>
</div>
);
}

@ -273,6 +275,9 @@ export default function Settings() {
} else {
setPageToggle(page as SettingsType);
}
if (isMobile) {
setContentMobileOpen(true);
}
}
// don't clear url params if we're creating a new object mask
return !(searchParams.has("object_mask") || searchParams.has("event_id"));
@ -282,6 +287,9 @@ export default function Settings() {
const cameraNames = cameras.map((c) => c.name);
if (cameraNames.includes(camera)) {
setSelectedCamera(camera);
if (isMobile) {
setContentMobileOpen(true);
}
}
// don't clear url params if we're creating a new object mask or trigger
return !(searchParams.has("object_mask") || searchParams.has("event_id"));

@ -85,6 +85,8 @@ export type StreamConfig = {
quality?: string;
testResult?: TestResult;
userTested?: boolean;
useFfmpeg?: boolean;
restream?: boolean;
};

export type TestResult = {
@ -105,7 +107,6 @@ export type WizardFormData = {
brandTemplate?: CameraBrand;
customUrl?: string;
streams?: StreamConfig[];
restreamIds?: string[];
};

// API Response Types
@ -146,6 +147,7 @@ export type CameraConfigData = {
inputs: {
path: string;
roles: string[];
input_args?: string;
}[];
};
live?: {
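A minimal sketch of a wizard stream entry after this change (not part of the diff; values are illustrative and only the fields shown in this hunk are certain): the go2rtc restream choice and the ffmpeg-module toggle now live on the stream itself instead of a separate restreamIds list on the form data.

// Minimal sketch, illustrative values only.
const stream: Partial<StreamConfig> = {
  quality: "sub",
  userTested: true,
  useFfmpeg: false, // when true, the go2rtc source is prefixed with "ffmpeg:"
  restream: true,   // replaces membership in the removed wizardData.restreamIds list
};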

@ -307,6 +307,7 @@ export type CustomClassificationModelConfig = {
threshold: number;
object_config?: {
objects: string[];
classification_type: string;
};
state_config?: {
cameras: {

@ -43,5 +43,5 @@ export function generateFixedHash(name: string, prefix: string = "id"): string {
* @returns True if the name is valid, false otherwise
*/
export function isValidId(name: string): boolean {
return /^[a-zA-Z0-9_-]+$/.test(name);
return /^[a-zA-Z0-9_-]+$/.test(name) && !/^\d+$/.test(name);
}
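A minimal usage sketch of the tightened check above (not part of the diff; assumes isValidId is imported from the helper shown here): names made up only of digits are now rejected, while the existing character rules are unchanged.

// Minimal sketch: expected results under the new pair of regex tests.
isValidId("front_door"); // true  - letters, digits, "_" and "-" are allowed
isValidId("camera-2");   // true  - digits are fine when mixed with other characters
isValidId("12345");      // false - purely numeric names now fail the second test
isValidId("back yard");  // false - spaces were already rejected by the first test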

@ -1,5 +1,6 @@
import { baseUrl } from "@/api/baseUrl";
import ClassificationModelWizardDialog from "@/components/classification/ClassificationModelWizardDialog";
import ClassificationModelEditDialog from "@/components/classification/ClassificationModelEditDialog";
import ActivityIndicator from "@/components/indicators/activity-indicator";
import { ImageShadowOverlay } from "@/components/overlay/ImageShadowOverlay";
import { Button, buttonVariants } from "@/components/ui/button";
@ -10,18 +11,17 @@ import {
CustomClassificationModelConfig,
FrigateConfig,
} from "@/types/frigateConfig";
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { useCallback, useEffect, useMemo, useState } from "react";
import { useTranslation } from "react-i18next";
import { FaFolderPlus } from "react-icons/fa";
import { MdModelTraining } from "react-icons/md";
import { LuTrash2 } from "react-icons/lu";
import { LuPencil, LuTrash2 } from "react-icons/lu";
import { FiMoreVertical } from "react-icons/fi";
import useSWR from "swr";
import Heading from "@/components/ui/heading";
import { useOverlayState } from "@/hooks/use-overlay-state";
import axios from "axios";
import { toast } from "sonner";
import useKeyboardListener from "@/hooks/use-keyboard-listener";
import {
DropdownMenu,
DropdownMenuContent,
@ -164,6 +164,7 @@ export default function ModelSelectionView({
key={config.name}
config={config}
onClick={() => onClick(config)}
onUpdate={() => refreshConfig()}
onDelete={() => refreshConfig()}
/>
))}
@ -202,9 +203,10 @@ function NoModelsView({
type ModelCardProps = {
config: CustomClassificationModelConfig;
onClick: () => void;
onUpdate: () => void;
onDelete: () => void;
};
function ModelCard({ config, onClick, onDelete }: ModelCardProps) {
function ModelCard({ config, onClick, onUpdate, onDelete }: ModelCardProps) {
const { t } = useTranslation(["views/classificationModel"]);

const { data: dataset } = useSWR<{
@ -212,42 +214,50 @@ function ModelCard({ config, onClick, onDelete }: ModelCardProps) {
}>(`classification/${config.name}/dataset`, { revalidateOnFocus: false });

const [deleteDialogOpen, setDeleteDialogOpen] = useState(false);
const bypassDialogRef = useRef(false);

useKeyboardListener(["Shift"], (_, modifiers) => {
bypassDialogRef.current = modifiers.shift;
return false;
});
const [editDialogOpen, setEditDialogOpen] = useState(false);

const handleDelete = useCallback(async () => {
await axios
.delete(`classification/${config.name}`)
.then((resp) => {
if (resp.status == 200) {
toast.success(t("toast.success.deletedModel", { count: 1 }), {
position: "top-center",
});
onDelete();
}
})
.catch((error) => {
const errorMessage =
error.response?.data?.message ||
error.response?.data?.detail ||
"Unknown error";
toast.error(t("toast.error.deleteModelFailed", { errorMessage }), {
position: "top-center",
});
try {
await axios.delete(`classification/${config.name}`);
await axios.put("/config/set", {
requires_restart: 0,
update_topic: `config/classification/custom/${config.name}`,
config_data: {
classification: {
custom: {
[config.name]: "",
},
},
},
});

toast.success(t("toast.success.deletedModel", { count: 1 }), {
position: "top-center",
});
onDelete();
} catch (err) {
const error = err as {
response?: { data?: { message?: string; detail?: string } };
};
const errorMessage =
error.response?.data?.message ||
error.response?.data?.detail ||
"Unknown error";
toast.error(t("toast.error.deleteModelFailed", { errorMessage }), {
position: "top-center",
});
}
}, [config, onDelete, t]);

const handleDeleteClick = useCallback(() => {
if (bypassDialogRef.current) {
handleDelete();
} else {
setDeleteDialogOpen(true);
}
}, [handleDelete]);
const handleDeleteClick = useCallback((e: React.MouseEvent) => {
e.stopPropagation();
setDeleteDialogOpen(true);
}, []);

const handleEditClick = useCallback((e: React.MouseEvent) => {
e.stopPropagation();
setEditDialogOpen(true);
}, []);

const coverImage = useMemo(() => {
if (!dataset) {
@ -269,6 +279,13 @@ function ModelCard({ config, onClick, onDelete }: ModelCardProps) {

return (
<>
<ClassificationModelEditDialog
open={editDialogOpen}
model={config}
onClose={() => setEditDialogOpen(false)}
onSuccess={() => onUpdate()}
/>

<AlertDialog
open={deleteDialogOpen}
onOpenChange={() => setDeleteDialogOpen(!deleteDialogOpen)}
@ -304,7 +321,7 @@ function ModelCard({ config, onClick, onDelete }: ModelCardProps) {
className="size-full"
src={`${baseUrl}clips/${config.name}/dataset/${coverImage?.name}/${coverImage?.img}`}
/>
<ImageShadowOverlay />
<ImageShadowOverlay lowerClassName="h-[30%] z-0" />
<div className="absolute bottom-2 left-3 text-lg text-white smart-capitalize">
{config.name}
</div>
@ -315,14 +332,17 @@ function ModelCard({ config, onClick, onDelete }: ModelCardProps) {
<FiMoreVertical className="size-5 text-white" />
</BlurredIconButton>
</DropdownMenuTrigger>
<DropdownMenuContent align="end">
<DropdownMenuContent
align="end"
onClick={(e) => e.stopPropagation()}
>
<DropdownMenuItem onClick={handleEditClick}>
<LuPencil className="mr-2 size-4" />
<span>{t("button.edit", { ns: "common" })}</span>
</DropdownMenuItem>
<DropdownMenuItem onClick={handleDeleteClick}>
<LuTrash2 className="mr-2 size-4" />
<span>
{bypassDialogRef.current
? t("button.deleteNow", { ns: "common" })
: t("button.delete", { ns: "common" })}
</span>
<span>{t("button.delete", { ns: "common" })}</span>
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>

@ -327,31 +327,39 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
</AlertDialog>

<div className="flex flex-row justify-between gap-2 p-2 align-middle">
<div className="flex flex-row items-center justify-center gap-2">
<Button
className="flex items-center gap-2.5 rounded-lg"
aria-label={t("label.back", { ns: "common" })}
onClick={() => navigate(-1)}
>
<IoMdArrowRoundBack className="size-5 text-secondary-foreground" />
{isDesktop && (
<div className="text-primary">
{t("button.back", { ns: "common" })}
</div>
)}
</Button>
<LibrarySelector
pageToggle={pageToggle}
dataset={dataset || {}}
trainImages={trainImages || []}
setPageToggle={setPageToggle}
onDelete={onDelete}
onRename={() => {}}
/>
</div>
{(isDesktop || !selectedImages?.length) && (
<div className="flex flex-row items-center justify-center gap-2">
<Button
className="flex items-center gap-2.5 rounded-lg"
aria-label={t("label.back", { ns: "common" })}
onClick={() => navigate(-1)}
>
<IoMdArrowRoundBack className="size-5 text-secondary-foreground" />
{isDesktop && (
<div className="text-primary">
{t("button.back", { ns: "common" })}
</div>
)}
</Button>

<LibrarySelector
pageToggle={pageToggle}
dataset={dataset || {}}
trainImages={trainImages || []}
setPageToggle={setPageToggle}
onDelete={onDelete}
onRename={() => {}}
/>
</div>
)}
{selectedImages?.length > 0 ? (
<div className="flex items-center justify-center gap-2">
<div className="mx-1 flex w-48 items-center justify-center text-sm text-muted-foreground">
<div
className={cn(
"flex w-full items-center justify-end gap-2",
isMobileOnly && "justify-between",
)}
>
<div className="flex w-48 items-center justify-center text-sm text-muted-foreground">
<div className="p-1">{`${selectedImages.length} selected`}</div>
<div className="p-1">{"|"}</div>
<div
@ -961,6 +969,7 @@ function ObjectTrainGrid({
selectedItems={selectedImages}
i18nLibrary="views/classificationModel"
objectType={model.object_config?.objects?.at(0) ?? "Object"}
noClassificationLabel="details.none"
onClick={(data) => {
if (data) {
onClickImages([data.filename], true);

@ -136,7 +136,7 @@ export default function EventView({

const [selectedReviews, setSelectedReviews] = useState<ReviewSegment[]>([]);
const onSelectReview = useCallback(
(review: ReviewSegment, ctrl: boolean) => {
(review: ReviewSegment, ctrl: boolean, detail: boolean) => {
if (selectedReviews.length > 0 || ctrl) {
const index = selectedReviews.findIndex((r) => r.id === review.id);

@ -156,17 +156,31 @@ export default function EventView({
setSelectedReviews(copy);
}
} else {
// If a specific date is selected in the calendar and it's after the event start,
// use the selected date instead of the event start time
const effectiveStartTime =
timeRange.after > review.start_time
? timeRange.after
: review.start_time;

onOpenRecording({
camera: review.camera,
startTime: review.start_time - REVIEW_PADDING,
startTime: effectiveStartTime - REVIEW_PADDING,
severity: review.severity,
timelineType: detail ? "detail" : undefined,
});

review.has_been_reviewed = true;
markItemAsReviewed(review);
}
},
[selectedReviews, setSelectedReviews, onOpenRecording, markItemAsReviewed],
[
selectedReviews,
setSelectedReviews,
onOpenRecording,
markItemAsReviewed,
timeRange.after,
],
);
const onSelectAllReviews = useCallback(() => {
if (!currentReviewItems || currentReviewItems.length == 0) {
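A minimal worked sketch of the seek-start selection above (not part of the diff; the REVIEW_PADDING value is assumed for illustration): when the calendar filter begins after the review item starts, playback opens at the filtered date rather than at the item itself.

// Minimal sketch with illustrative epoch values.
const REVIEW_PADDING = 4; // assumed padding in seconds, for illustration only
const review = { start_time: 1_700_000_000 };
const timeRange = { after: 1_700_003_600 }; // selected calendar day starts an hour later

const effectiveStartTime =
  timeRange.after > review.start_time ? timeRange.after : review.start_time;

// Playback is requested from 1_700_003_596, just before the selected day,
// instead of 1_699_999_996 at the original event start.
console.log(effectiveStartTime - REVIEW_PADDING);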

@ -402,7 +416,6 @@ export default function EventView({
onSelectAllReviews={onSelectAllReviews}
setSelectedReviews={setSelectedReviews}
pullLatestData={pullLatestData}
onOpenRecording={onOpenRecording}
/>
)}
{severity == "significant_motion" && (
@ -442,11 +455,14 @@ type DetectionReviewProps = {
loading: boolean;
markItemAsReviewed: (review: ReviewSegment) => void;
markAllItemsAsReviewed: (currentItems: ReviewSegment[]) => void;
onSelectReview: (review: ReviewSegment, ctrl: boolean) => void;
onSelectReview: (
review: ReviewSegment,
ctrl: boolean,
detail: boolean,
) => void;
onSelectAllReviews: () => void;
setSelectedReviews: (reviews: ReviewSegment[]) => void;
pullLatestData: () => void;
onOpenRecording: (recordingInfo: RecordingStartingPoint) => void;
};
function DetectionReview({
contentRef,
@ -466,7 +482,6 @@ function DetectionReview({
onSelectAllReviews,
setSelectedReviews,
pullLatestData,
onOpenRecording,
}: DetectionReviewProps) {
const { t } = useTranslation(["views/events"]);

@ -758,16 +773,7 @@ function DetectionReview({
ctrl: boolean,
detail: boolean,
) => {
if (detail) {
onOpenRecording({
camera: review.camera,
startTime: review.start_time - REVIEW_PADDING,
severity: review.severity,
timelineType: "detail",
});
} else {
onSelectReview(review, ctrl);
}
onSelectReview(review, ctrl, detail);
}}
/>
</div>

@ -970,7 +970,6 @@ function Timeline({
"relative overflow-hidden",
isDesktop
? cn(
"no-scrollbar overflow-y-auto",
timelineType == "timeline"
? "w-[100px] flex-shrink-0"
: timelineType == "detail"

@ -709,11 +709,11 @@ export default function CameraSettingsView({
<div className="flex w-full flex-row items-center gap-2 pt-2 md:w-[25%]">
<Button
className="flex flex-1"
aria-label={t("button.cancel", { ns: "common" })}
aria-label={t("button.reset", { ns: "common" })}
onClick={onCancel}
type="button"
>
<Trans>button.cancel</Trans>
<Trans>button.reset</Trans>
</Button>
<Button
variant="select"