Mirror of https://github.com/blakeblackshear/frigate.git (synced 2025-12-08 14:25:41 +03:00)

commit 90b7086b1e
Merge branch 'dev' into dev-zones-friendly-name
@@ -5,6 +5,12 @@ set -euxo pipefail
 SQLITE3_VERSION="3.46.1"
 PYSQLITE3_VERSION="0.5.3"

+# Install libsqlite3-dev if not present (needed for some base images like NVIDIA TensorRT)
+if ! dpkg -l | grep -q libsqlite3-dev; then
+    echo "Installing libsqlite3-dev for compilation..."
+    apt-get update && apt-get install -y libsqlite3-dev && rm -rf /var/lib/apt/lists/*
+fi
+
 # Fetch the pre-built sqlite amalgamation instead of building from source
 if [[ ! -d "sqlite" ]]; then
     mkdir sqlite
@@ -2,9 +2,9 @@
 set -e

 # Download the MxAccl for Frigate github release
-wget https://github.com/memryx/mx_accl_frigate/archive/refs/heads/main.zip -O /tmp/mxaccl.zip
+wget https://github.com/memryx/mx_accl_frigate/archive/refs/tags/v2.1.0.zip -O /tmp/mxaccl.zip
 unzip /tmp/mxaccl.zip -d /tmp
-mv /tmp/mx_accl_frigate-main /opt/mx_accl_frigate
+mv /tmp/mx_accl_frigate-2.1.0 /opt/mx_accl_frigate
 rm /tmp/mxaccl.zip

 # Install Python dependencies
@@ -56,7 +56,7 @@ pywebpush == 2.0.*
 # alpr
 pyclipper == 1.3.*
 shapely == 2.0.*
-Levenshtein==0.26.*
+rapidfuzz==3.12.*
 # HailoRT Wheels
 appdirs==1.4.*
 argcomplete==2.0.*
@@ -24,10 +24,13 @@ echo "Adding MemryX GPG key and repository..."
 wget -qO- https://developer.memryx.com/deb/memryx.asc | sudo tee /etc/apt/trusted.gpg.d/memryx.asc >/dev/null
 echo 'deb https://developer.memryx.com/deb stable main' | sudo tee /etc/apt/sources.list.d/memryx.list >/dev/null

-# Update and install memx-drivers
-echo "Installing memx-drivers..."
+# Update and install specific SDK 2.1 packages
+echo "Installing MemryX SDK 2.1 packages..."
 sudo apt update
-sudo apt install -y memx-drivers
+sudo apt install -y memx-drivers=2.1.* memx-accl=2.1.* mxa-manager=2.1.*

+# Hold packages to prevent automatic upgrades
+sudo apt-mark hold memx-drivers memx-accl mxa-manager
+
 # ARM-specific board setup
 if [[ "$arch" == "aarch64" || "$arch" == "arm64" ]]; then
@@ -37,11 +40,5 @@ fi

 echo -e "\n\n\033[1;31mYOU MUST RESTART YOUR COMPUTER NOW\033[0m\n\n"

-# Install other runtime packages
-packages=("memx-accl" "mxa-manager")
-for pkg in "${packages[@]}"; do
-    echo "Installing $pkg..."
-    sudo apt install -y "$pkg"
-done
+echo "MemryX SDK 2.1 installation complete!"

-echo "MemryX installation complete!"
@@ -112,7 +112,7 @@ RUN apt-get update \
     && apt-get install -y protobuf-compiler libprotobuf-dev \
     && rm -rf /var/lib/apt/lists/*
 RUN --mount=type=bind,source=docker/tensorrt/requirements-models-arm64.txt,target=/requirements-tensorrt-models.txt \
-    pip3 wheel --wheel-dir=/trt-model-wheels -r /requirements-tensorrt-models.txt
+    pip3 wheel --wheel-dir=/trt-model-wheels --no-deps -r /requirements-tensorrt-models.txt

 FROM wget AS jetson-ffmpeg
 ARG DEBIAN_FRONTEND
@@ -145,7 +145,8 @@ COPY --from=trt-wheels /etc/TENSORRT_VER /etc/TENSORRT_VER
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
     --mount=type=bind,from=trt-model-wheels,source=/trt-model-wheels,target=/deps/trt-model-wheels \
     pip3 uninstall -y onnxruntime \
-    && pip3 install -U /deps/trt-wheels/*.whl /deps/trt-model-wheels/*.whl \
+    && pip3 install -U /deps/trt-wheels/*.whl \
+    && pip3 install -U /deps/trt-model-wheels/*.whl \
     && ldconfig

 WORKDIR /opt/frigate/
@@ -1 +1,2 @@
 cuda-python == 12.6.*; platform_machine == 'aarch64'
+numpy == 1.26.*; platform_machine == 'aarch64'
@@ -37,7 +37,6 @@ from frigate.stats.prometheus import get_metrics, update_metrics
 from frigate.util.builtin import (
     clean_camera_user_pass,
     flatten_config_data,
-    get_tz_modifiers,
     process_config_query_string,
     update_yaml_file_bulk,
 )
@@ -48,6 +47,7 @@ from frigate.util.services import (
     restart_frigate,
     vainfo_hwaccel,
 )
+from frigate.util.time import get_tz_modifiers
 from frigate.version import VERSION

 logger = logging.getLogger(__name__)
@@ -403,9 +403,10 @@ def config_set(request: Request, body: AppConfigSetBody):
                 settings,
             )
         else:
-            # Handle nested config updates (e.g., config/classification/custom/{name})
+            # Generic handling for global config updates
             settings = config.get_nested_object(body.update_topic)
-            if settings:
+            # Publish None for removal, actual config for add/update
             request.app.config_publisher.publisher.publish(
                 body.update_topic, settings
             )
@@ -31,14 +31,14 @@ from frigate.api.defs.response.generic_response import GenericResponse
 from frigate.api.defs.tags import Tags
 from frigate.config import FrigateConfig
 from frigate.config.camera import DetectConfig
-from frigate.const import CLIPS_DIR, FACE_DIR
+from frigate.const import CLIPS_DIR, FACE_DIR, MODEL_CACHE_DIR
 from frigate.embeddings import EmbeddingsContext
 from frigate.models import Event
 from frigate.util.classification import (
     collect_object_classification_examples,
     collect_state_classification_examples,
 )
-from frigate.util.path import get_event_snapshot
+from frigate.util.file import get_event_snapshot

 logger = logging.getLogger(__name__)

@@ -828,9 +828,13 @@ def delete_classification_model(request: Request, name: str):
             status_code=404,
         )

-    # Delete the classification model's data directory
-    model_dir = os.path.join(CLIPS_DIR, sanitize_filename(name))
+    # Delete the classification model's data directory in clips
+    data_dir = os.path.join(CLIPS_DIR, sanitize_filename(name))
+    if os.path.exists(data_dir):
+        shutil.rmtree(data_dir)

+    # Delete the classification model's files in model_cache
+    model_dir = os.path.join(MODEL_CACHE_DIR, sanitize_filename(name))
     if os.path.exists(model_dir):
         shutil.rmtree(model_dir)
@@ -2,6 +2,7 @@

 import base64
 import datetime
+import json
 import logging
 import os
 import random
@@ -57,8 +58,8 @@ from frigate.const import CLIPS_DIR, TRIGGER_DIR
 from frigate.embeddings import EmbeddingsContext
 from frigate.models import Event, ReviewSegment, Timeline, Trigger
 from frigate.track.object_processing import TrackedObject
-from frigate.util.builtin import get_tz_modifiers
-from frigate.util.path import get_event_thumbnail_bytes
+from frigate.util.file import get_event_thumbnail_bytes
+from frigate.util.time import get_dst_transitions, get_tz_modifiers

 logger = logging.getLogger(__name__)

@@ -813,7 +814,6 @@ def events_summary(
     allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
 ):
     tz_name = params.timezone
-    hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(tz_name)
     has_clip = params.has_clip
     has_snapshot = params.has_snapshot

@@ -828,7 +828,33 @@ def events_summary(
     if len(clauses) == 0:
         clauses.append((True))

-    groups = (
+    time_range_query = (
+        Event.select(
+            fn.MIN(Event.start_time).alias("min_time"),
+            fn.MAX(Event.start_time).alias("max_time"),
+        )
+        .where(reduce(operator.and_, clauses) & (Event.camera << allowed_cameras))
+        .dicts()
+        .get()
+    )
+
+    min_time = time_range_query.get("min_time")
+    max_time = time_range_query.get("max_time")
+
+    if min_time is None or max_time is None:
+        return JSONResponse(content=[])
+
+    dst_periods = get_dst_transitions(tz_name, min_time, max_time)
+
+    grouped: dict[tuple, dict] = {}
+
+    for period_start, period_end, period_offset in dst_periods:
+        hours_offset = int(period_offset / 60 / 60)
+        minutes_offset = int(period_offset / 60 - hours_offset * 60)
+        period_hour_modifier = f"{hours_offset} hour"
+        period_minute_modifier = f"{minutes_offset} minute"
+
+        period_groups = (
         Event.select(
             Event.camera,
             Event.label,
@@ -837,24 +863,56 @@ def events_summary(
             fn.strftime(
                 "%Y-%m-%d",
                 fn.datetime(
-                    Event.start_time, "unixepoch", hour_modifier, minute_modifier
+                    Event.start_time,
+                    "unixepoch",
+                    period_hour_modifier,
+                    period_minute_modifier,
                 ),
             ).alias("day"),
             Event.zones,
             fn.COUNT(Event.id).alias("count"),
         )
-        .where(reduce(operator.and_, clauses) & (Event.camera << allowed_cameras))
+        .where(
+            reduce(operator.and_, clauses)
+            & (Event.camera << allowed_cameras)
+            & (Event.start_time >= period_start)
+            & (Event.start_time <= period_end)
+        )
         .group_by(
             Event.camera,
             Event.label,
             Event.sub_label,
             Event.data,
-            (Event.start_time + seconds_offset).cast("int") / (3600 * 24),
+            (Event.start_time + period_offset).cast("int") / (3600 * 24),
             Event.zones,
         )
+        .namedtuples()
     )

-    return JSONResponse(content=[e for e in groups.dicts()])
+    for g in period_groups:
+        key = (
+            g.camera,
+            g.label,
+            g.sub_label,
+            json.dumps(g.data, sort_keys=True) if g.data is not None else None,
+            g.day,
+            json.dumps(g.zones, sort_keys=True) if g.zones is not None else None,
+        )
+
+        if key in grouped:
+            grouped[key]["count"] += int(g.count or 0)
+        else:
+            grouped[key] = {
+                "camera": g.camera,
+                "label": g.label,
+                "sub_label": g.sub_label,
+                "data": g.data,
+                "day": g.day,
+                "zones": g.zones,
+                "count": int(g.count or 0),
+            }
+
+    return JSONResponse(content=list(grouped.values()))


 @router.get(
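Note on the DST refactor above: the new code relies on get_dst_transitions(), imported from frigate.util.time, whose implementation is not part of this diff. From its usage it appears to return a list of (period_start, period_end, utc_offset_seconds) tuples covering the requested time range, one entry per stretch with a constant UTC offset. The sketch below only illustrates that assumed contract with the standard-library zoneinfo module; it is not the project's actual helper.

    # Illustrative sketch only: approximates the assumed contract of
    # get_dst_transitions(tz_name, start_ts, end_ts) used in the hunks above.
    # The real helper lives in frigate/util/time.py and may differ.
    from datetime import datetime
    from zoneinfo import ZoneInfo


    def get_dst_transitions_sketch(tz_name: str, start_ts: float, end_ts: float):
        """Split [start_ts, end_ts] into periods that share one UTC offset."""
        tz = ZoneInfo(tz_name)
        periods = []
        period_start = start_ts
        current_offset = datetime.fromtimestamp(start_ts, tz).utcoffset().total_seconds()
        ts = start_ts
        while ts < end_ts:
            ts = min(ts + 3600, end_ts)  # scan forward one hour at a time
            offset = datetime.fromtimestamp(ts, tz).utcoffset().total_seconds()
            if offset != current_offset:
                periods.append((period_start, ts, current_offset))
                period_start = ts
                current_offset = offset
        periods.append((period_start, end_ts, current_offset))
        return periods

    # usage sketch: one period per constant-offset stretch across a DST change
    print(get_dst_transitions_sketch("America/New_York", 1699000000, 1700000000))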
@@ -34,7 +34,7 @@ from frigate.record.export import (
     PlaybackSourceEnum,
     RecordingExporter,
 )
-from frigate.util.builtin import is_current_hour
+from frigate.util.time import is_current_hour

 logger = logging.getLogger(__name__)

@@ -44,9 +44,9 @@ from frigate.const import (
 )
 from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
 from frigate.track.object_processing import TrackedObjectProcessor
-from frigate.util.builtin import get_tz_modifiers
+from frigate.util.file import get_event_thumbnail_bytes
 from frigate.util.image import get_image_from_recording
-from frigate.util.path import get_event_thumbnail_bytes
+from frigate.util.time import get_dst_transitions

 logger = logging.getLogger(__name__)

@@ -424,7 +424,6 @@ def all_recordings_summary(
     allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
 ):
     """Returns true/false by day indicating if recordings exist"""
-    hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)

     cameras = params.cameras
     if cameras != "all":
@@ -432,41 +431,70 @@ def all_recordings_summary(
         filtered = requested.intersection(allowed_cameras)
         if not filtered:
             return JSONResponse(content={})
-        cameras = ",".join(filtered)
+        camera_list = list(filtered)
     else:
-        cameras = allowed_cameras
+        camera_list = allowed_cameras

-    query = (
+    time_range_query = (
+        Recordings.select(
+            fn.MIN(Recordings.start_time).alias("min_time"),
+            fn.MAX(Recordings.start_time).alias("max_time"),
+        )
+        .where(Recordings.camera << camera_list)
+        .dicts()
+        .get()
+    )
+
+    min_time = time_range_query.get("min_time")
+    max_time = time_range_query.get("max_time")
+
+    if min_time is None or max_time is None:
+        return JSONResponse(content={})
+
+    dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
+
+    days: dict[str, bool] = {}
+
+    for period_start, period_end, period_offset in dst_periods:
+        hours_offset = int(period_offset / 60 / 60)
+        minutes_offset = int(period_offset / 60 - hours_offset * 60)
+        period_hour_modifier = f"{hours_offset} hour"
+        period_minute_modifier = f"{minutes_offset} minute"
+
+        period_query = (
         Recordings.select(
             fn.strftime(
                 "%Y-%m-%d",
                 fn.datetime(
-                    Recordings.start_time + seconds_offset,
+                    Recordings.start_time,
                     "unixepoch",
-                    hour_modifier,
-                    minute_modifier,
+                    period_hour_modifier,
+                    period_minute_modifier,
                 ),
             ).alias("day")
         )
+        .where(
+            (Recordings.camera << camera_list)
+            & (Recordings.end_time >= period_start)
+            & (Recordings.start_time <= period_end)
+        )
         .group_by(
             fn.strftime(
                 "%Y-%m-%d",
                 fn.datetime(
-                    Recordings.start_time + seconds_offset,
+                    Recordings.start_time,
                     "unixepoch",
-                    hour_modifier,
-                    minute_modifier,
+                    period_hour_modifier,
+                    period_minute_modifier,
                 ),
             )
         )
         .order_by(Recordings.start_time.desc())
+        .namedtuples()
     )

-    if params.cameras != "all":
-        query = query.where(Recordings.camera << cameras.split(","))
+        for g in period_query:
+            days[g.day] = True

-    recording_days = query.namedtuples()
-    days = {day.day: True for day in recording_days}
-
     return JSONResponse(content=days)

@@ -476,21 +504,54 @@ def all_recordings_summary(
 )
 async def recordings_summary(camera_name: str, timezone: str = "utc"):
     """Returns hourly summary for recordings of given camera"""
-    hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(timezone)
+    time_range_query = (
+        Recordings.select(
+            fn.MIN(Recordings.start_time).alias("min_time"),
+            fn.MAX(Recordings.start_time).alias("max_time"),
+        )
+        .where(Recordings.camera == camera_name)
+        .dicts()
+        .get()
+    )
+
+    min_time = time_range_query.get("min_time")
+    max_time = time_range_query.get("max_time")
+
+    days: dict[str, dict] = {}
+
+    if min_time is None or max_time is None:
+        return JSONResponse(content=list(days.values()))
+
+    dst_periods = get_dst_transitions(timezone, min_time, max_time)
+
+    for period_start, period_end, period_offset in dst_periods:
+        hours_offset = int(period_offset / 60 / 60)
+        minutes_offset = int(period_offset / 60 - hours_offset * 60)
+        period_hour_modifier = f"{hours_offset} hour"
+        period_minute_modifier = f"{minutes_offset} minute"
+
     recording_groups = (
         Recordings.select(
             fn.strftime(
                 "%Y-%m-%d %H",
                 fn.datetime(
-                    Recordings.start_time, "unixepoch", hour_modifier, minute_modifier
+                    Recordings.start_time,
+                    "unixepoch",
+                    period_hour_modifier,
+                    period_minute_modifier,
                 ),
             ).alias("hour"),
             fn.SUM(Recordings.duration).alias("duration"),
             fn.SUM(Recordings.motion).alias("motion"),
             fn.SUM(Recordings.objects).alias("objects"),
         )
-        .where(Recordings.camera == camera_name)
-        .group_by((Recordings.start_time + seconds_offset).cast("int") / 3600)
+        .where(
+            (Recordings.camera == camera_name)
+            & (Recordings.end_time >= period_start)
+            & (Recordings.start_time <= period_end)
+        )
+        .group_by((Recordings.start_time + period_offset).cast("int") / 3600)
         .order_by(Recordings.start_time.desc())
         .namedtuples()
     )
@@ -500,20 +561,24 @@ async def recordings_summary(camera_name: str, timezone: str = "utc"):
             fn.strftime(
                 "%Y-%m-%d %H",
                 fn.datetime(
-                    Event.start_time, "unixepoch", hour_modifier, minute_modifier
+                    Event.start_time,
+                    "unixepoch",
+                    period_hour_modifier,
+                    period_minute_modifier,
                 ),
             ).alias("hour"),
             fn.COUNT(Event.id).alias("count"),
         )
         .where(Event.camera == camera_name, Event.has_clip)
-        .group_by((Event.start_time + seconds_offset).cast("int") / 3600)
+        .where(
+            (Event.start_time >= period_start) & (Event.start_time <= period_end)
+        )
+        .group_by((Event.start_time + period_offset).cast("int") / 3600)
         .namedtuples()
     )

     event_map = {g.hour: g.count for g in event_groups}

-    days = {}
-
     for recording_group in recording_groups:
         parts = recording_group.hour.split()
         hour = parts[1]
@@ -526,11 +591,16 @@ async def recordings_summary(camera_name: str, timezone: str = "utc"):
             "objects": recording_group.objects,
             "duration": round(recording_group.duration),
         }
-        if day not in days:
-            days[day] = {"events": events_count, "hours": [hour_data], "day": day}
-        else:
-            days[day]["events"] += events_count
+        if day in days:
+            # merge counts if already present (edge-case at DST boundary)
+            days[day]["events"] += events_count or 0
             days[day]["hours"].append(hour_data)
+        else:
+            days[day] = {
+                "events": events_count or 0,
+                "hours": [hour_data],
+                "day": day,
+            }

     return JSONResponse(content=list(days.values()))

@@ -36,7 +36,7 @@ from frigate.config import FrigateConfig
 from frigate.embeddings import EmbeddingsContext
 from frigate.models import Recordings, ReviewSegment, UserReviewStatus
 from frigate.review.types import SeverityEnum
-from frigate.util.builtin import get_tz_modifiers
+from frigate.util.time import get_dst_transitions

 logger = logging.getLogger(__name__)

@@ -197,7 +197,6 @@ async def review_summary(

     user_id = current_user["username"]

-    hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)
     day_ago = (datetime.datetime.now() - datetime.timedelta(hours=24)).timestamp()

     cameras = params.cameras
@@ -329,16 +328,57 @@ async def review_summary(
         )
         clauses.append(reduce(operator.or_, label_clauses))

+    # Find the time range of available data
+    time_range_query = (
+        ReviewSegment.select(
+            fn.MIN(ReviewSegment.start_time).alias("min_time"),
+            fn.MAX(ReviewSegment.start_time).alias("max_time"),
+        )
+        .where(reduce(operator.and_, clauses) if clauses else True)
+        .dicts()
+        .get()
+    )
+
+    min_time = time_range_query.get("min_time")
+    max_time = time_range_query.get("max_time")
+
+    data = {
+        "last24Hours": last_24_query,
+    }
+
+    # If no data, return early
+    if min_time is None or max_time is None:
+        return JSONResponse(content=data)
+
+    # Get DST transition periods
+    dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
+
     day_in_seconds = 60 * 60 * 24
-    last_month_query = (
+
+    # Query each DST period separately with the correct offset
+    for period_start, period_end, period_offset in dst_periods:
+        # Calculate hour/minute modifiers for this period
+        hours_offset = int(period_offset / 60 / 60)
+        minutes_offset = int(period_offset / 60 - hours_offset * 60)
+        period_hour_modifier = f"{hours_offset} hour"
+        period_minute_modifier = f"{minutes_offset} minute"
+
+        # Build clauses including time range for this period
+        period_clauses = clauses.copy()
+        period_clauses.append(
+            (ReviewSegment.start_time >= period_start)
+            & (ReviewSegment.start_time <= period_end)
+        )
+
+        period_query = (
         ReviewSegment.select(
             fn.strftime(
                 "%Y-%m-%d",
                 fn.datetime(
                     ReviewSegment.start_time,
                     "unixepoch",
-                    hour_modifier,
-                    minute_modifier,
+                    period_hour_modifier,
+                    period_minute_modifier,
                 ),
             ).alias("day"),
             fn.SUM(
@@ -399,19 +439,24 @@ async def review_summary(
                     & (UserReviewStatus.user_id == user_id)
                 ),
             )
-        .where(reduce(operator.and_, clauses) if clauses else True)
+        .where(reduce(operator.and_, period_clauses))
         .group_by(
-            (ReviewSegment.start_time + seconds_offset).cast("int") / day_in_seconds
+            (ReviewSegment.start_time + period_offset).cast("int") / day_in_seconds
         )
         .order_by(ReviewSegment.start_time.desc())
     )

-    data = {
-        "last24Hours": last_24_query,
-    }
-    for e in last_month_query.dicts().iterator():
-        data[e["day"]] = e
+        # Merge results from this period
+        for e in period_query.dicts().iterator():
+            day_key = e["day"]
+            if day_key in data:
+                # Merge counts if day already exists (edge case at DST boundary)
+                data[day_key]["reviewed_alert"] += e["reviewed_alert"] or 0
+                data[day_key]["reviewed_detection"] += e["reviewed_detection"] or 0
+                data[day_key]["total_alert"] += e["total_alert"] or 0
+                data[day_key]["total_detection"] += e["total_detection"] or 0
+            else:
+                data[day_key] = e

     return JSONResponse(content=data)

@@ -14,8 +14,8 @@ from typing import Any, List, Optional, Tuple

 import cv2
 import numpy as np
-from Levenshtein import distance, jaro_winkler
 from pyclipper import ET_CLOSEDPOLYGON, JT_ROUND, PyclipperOffset
+from rapidfuzz.distance import JaroWinkler, Levenshtein
 from shapely.geometry import Polygon

 from frigate.comms.event_metadata_updater import (
@@ -1123,7 +1123,9 @@ class LicensePlateProcessingMixin:
         for i, plate in enumerate(plates):
             merged = False
             for j, cluster in enumerate(clusters):
-                sims = [jaro_winkler(plate["plate"], v["plate"]) for v in cluster]
+                sims = [
+                    JaroWinkler.similarity(plate["plate"], v["plate"]) for v in cluster
+                ]
                 if len(sims) > 0:
                     avg_sim = sum(sims) / len(sims)
                     if avg_sim >= self.cluster_threshold:
@@ -1500,7 +1502,7 @@ class LicensePlateProcessingMixin:
                 and current_time - data["last_seen"]
                 <= self.config.cameras[camera].lpr.expire_time
             ):
-                similarity = jaro_winkler(data["plate"], top_plate)
+                similarity = JaroWinkler.similarity(data["plate"], top_plate)
                 if similarity >= self.similarity_threshold:
                     plate_id = existing_id
                     logger.debug(
@@ -1580,7 +1582,8 @@ class LicensePlateProcessingMixin:
                 for label, plates_list in self.lpr_config.known_plates.items()
                 if any(
                     re.match(f"^{plate}$", rep_plate)
-                    or distance(plate, rep_plate) <= self.lpr_config.match_distance
+                    or Levenshtein.distance(plate, rep_plate)
+                    <= self.lpr_config.match_distance
                     for plate in plates_list
                 )
             ),
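For reference on the dependency swap above: rapidfuzz's rapidfuzz.distance module provides counterparts to the removed Levenshtein package functions, with JaroWinkler.similarity returning a similarity in [0, 1] and Levenshtein.distance returning an integer edit distance. A minimal standalone usage sketch (the plate strings are made up, not taken from the commit):

    # Standalone sketch of the rapidfuzz calls introduced in the hunks above.
    from rapidfuzz.distance import JaroWinkler, Levenshtein

    # similarity in [0, 1]; higher means the strings are more alike
    print(JaroWinkler.similarity("ABC1234", "ABC1284"))

    # classic edit distance; lower means the strings are more alike
    print(Levenshtein.distance("ABC1234", "ABC1284"))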
@@ -20,8 +20,8 @@ from frigate.genai import GenAIClient
 from frigate.models import Event
 from frigate.types import TrackedObjectUpdateTypesEnum
 from frigate.util.builtin import EventsPerSecond, InferenceSpeed
+from frigate.util.file import get_event_thumbnail_bytes
 from frigate.util.image import create_thumbnail, ensure_jpeg_bytes
-from frigate.util.path import get_event_thumbnail_bytes

 if TYPE_CHECKING:
     from frigate.embeddings import Embeddings
@@ -22,7 +22,7 @@ from frigate.db.sqlitevecq import SqliteVecQueueDatabase
 from frigate.embeddings.util import ZScoreNormalization
 from frigate.models import Event, Trigger
 from frigate.util.builtin import cosine_distance
-from frigate.util.path import get_event_thumbnail_bytes
+from frigate.util.file import get_event_thumbnail_bytes

 from ..post.api import PostProcessorApi
 from ..types import DataProcessorMetrics
@@ -466,6 +466,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
                 now,
                 self.labelmap[best_id],
                 score,
+                max_files=200,
             )

         if score < self.model_config.threshold:
@@ -529,6 +530,7 @@ def write_classification_attempt(
     timestamp: float,
     label: str,
     score: float,
+    max_files: int = 100,
 ) -> None:
     if "-" in label:
         label = label.replace("-", "_")
@@ -544,5 +546,5 @@
     )

     # delete oldest face image if maximum is reached
-    if len(files) > 100:
+    if len(files) > max_files:
         os.unlink(os.path.join(folder, files[-1]))
@@ -166,6 +166,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
         camera = obj_data["camera"]

         if not self.config.cameras[camera].face_recognition.enabled:
+            logger.debug(f"Face recognition disabled for camera {camera}, skipping")
             return

         start = datetime.datetime.now().timestamp()
@@ -208,6 +209,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
             person_box = obj_data.get("box")

             if not person_box:
+                logger.debug(f"No person box available for {id}")
                 return

             rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
@@ -233,7 +235,8 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):

             try:
                 face_frame = cv2.cvtColor(face_frame, cv2.COLOR_RGB2BGR)
-            except Exception:
+            except Exception as e:
+                logger.debug(f"Failed to convert face frame color for {id}: {e}")
                 return
         else:
             # don't run for object without attributes
@@ -251,6 +254,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):

             # no faces detected in this frame
             if not face:
+                logger.debug(f"No face attributes found for {id}")
                 return

             face_box = face.get("box")
@@ -274,6 +278,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
         res = self.recognizer.classify(face_frame)

         if not res:
+            logger.debug(f"Face recognizer returned no result for {id}")
             self.__update_metrics(datetime.datetime.now().timestamp() - start)
             return

@@ -330,6 +335,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
     def handle_request(self, topic, request_data) -> dict[str, Any] | None:
         if topic == EmbeddingsRequestEnum.clear_face_classifier.value:
             self.recognizer.clear()
+            return {"success": True, "message": "Face classifier cleared."}
         elif topic == EmbeddingsRequestEnum.recognize_face.value:
             img = cv2.imdecode(
                 np.frombuffer(base64.b64decode(request_data["image"]), dtype=np.uint8),
|||||||
@ -17,6 +17,7 @@ from frigate.detectors.detector_config import (
|
|||||||
BaseDetectorConfig,
|
BaseDetectorConfig,
|
||||||
ModelTypeEnum,
|
ModelTypeEnum,
|
||||||
)
|
)
|
||||||
|
from frigate.util.file import FileLock
|
||||||
from frigate.util.model import post_process_yolo
|
from frigate.util.model import post_process_yolo
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
@@ -177,29 +178,6 @@ class MemryXDetector(DetectionApi):
             logger.error(f"Failed to initialize MemryX model: {e}")
             raise

-    def _acquire_file_lock(self, lock_path: str, timeout: int = 60, poll: float = 0.2):
-        """
-        Create an exclusive lock file. Blocks (with polling) until it can acquire,
-        or raises TimeoutError. Uses only stdlib (os.O_EXCL).
-        """
-        start = time.time()
-        while True:
-            try:
-                fd = os.open(lock_path, os.O_CREAT | os.O_EXCL | os.O_RDWR)
-                os.close(fd)
-                return
-            except FileExistsError:
-                if time.time() - start > timeout:
-                    raise TimeoutError(f"Timeout waiting for lock: {lock_path}")
-                time.sleep(poll)
-
-    def _release_file_lock(self, lock_path: str):
-        """Best-effort removal of the lock file."""
-        try:
-            os.remove(lock_path)
-        except FileNotFoundError:
-            pass
-
     def load_yolo_constants(self):
         base = f"{self.cache_dir}/{self.model_folder}"
         # constants for yolov9 post-processing
@@ -212,9 +190,9 @@ class MemryXDetector(DetectionApi):
         os.makedirs(self.cache_dir, exist_ok=True)

         lock_path = os.path.join(self.cache_dir, f".{self.model_folder}.lock")
-        self._acquire_file_lock(lock_path)
+        lock = FileLock(lock_path, timeout=60)

-        try:
+        with lock:
             # ---------- CASE 1: user provided a custom model path ----------
             if self.memx_model_path:
                 if not self.memx_model_path.endswith(".zip"):
@@ -338,9 +316,6 @@ class MemryXDetector(DetectionApi):
                         f"Failed to remove downloaded zip {zip_path}: {e}"
                     )

-        finally:
-            self._release_file_lock(lock_path)
-
     def send_input(self, connection_id, tensor_input: np.ndarray):
         """Pre-process (if needed) and send frame to MemryX input queue"""
         if tensor_input is None:
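The detector hunks above replace the hand-rolled O_EXCL lock helpers with the shared FileLock from the new frigate/util/file.py, used as a context manager with timeout=60. That class is only partially visible at the end of this diff (the new file imports fcntl), so the sketch below is a hedged approximation of the lock-with-timeout pattern being relied on, not the project's actual implementation.

    # Hedged sketch: a minimal fcntl-based file lock with the same shape as the
    # FileLock used above (constructor timeout + "with" usage). The real class in
    # frigate/util/file.py may differ in details.
    import fcntl
    import os
    import time


    class FileLockSketch:
        def __init__(self, path: str, timeout: int = 60):
            self.path = path
            self.timeout = timeout
            self._fd = None

        def __enter__(self):
            self._fd = os.open(self.path, os.O_CREAT | os.O_RDWR)
            deadline = time.monotonic() + self.timeout
            while True:
                try:
                    # non-blocking exclusive lock; retry until acquired or timed out
                    fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                    return self
                except BlockingIOError:
                    if time.monotonic() > deadline:
                        os.close(self._fd)
                        raise TimeoutError(f"Timeout waiting for lock: {self.path}")
                    time.sleep(0.1)

        def __exit__(self, *exc):
            fcntl.flock(self._fd, fcntl.LOCK_UN)
            os.close(self._fd)

    # usage sketch: guarantees only one process downloads/extracts a model at a time
    with FileLockSketch("/tmp/.example_model.lock", timeout=60):
        pass  # critical section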
|||||||
@ -29,7 +29,7 @@ from frigate.db.sqlitevecq import SqliteVecQueueDatabase
|
|||||||
from frigate.models import Event, Trigger
|
from frigate.models import Event, Trigger
|
||||||
from frigate.types import ModelStatusTypesEnum
|
from frigate.types import ModelStatusTypesEnum
|
||||||
from frigate.util.builtin import EventsPerSecond, InferenceSpeed, serialize
|
from frigate.util.builtin import EventsPerSecond, InferenceSpeed, serialize
|
||||||
from frigate.util.path import get_event_thumbnail_bytes
|
from frigate.util.file import get_event_thumbnail_bytes
|
||||||
|
|
||||||
from .onnx.jina_v1_embedding import JinaV1ImageEmbedding, JinaV1TextEmbedding
|
from .onnx.jina_v1_embedding import JinaV1ImageEmbedding, JinaV1TextEmbedding
|
||||||
from .onnx.jina_v2_embedding import JinaV2Embedding
|
from .onnx.jina_v2_embedding import JinaV2Embedding
|
||||||
|
|||||||
@ -62,8 +62,8 @@ from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum
|
|||||||
from frigate.genai import get_genai_client
|
from frigate.genai import get_genai_client
|
||||||
from frigate.models import Event, Recordings, ReviewSegment, Trigger
|
from frigate.models import Event, Recordings, ReviewSegment, Trigger
|
||||||
from frigate.util.builtin import serialize
|
from frigate.util.builtin import serialize
|
||||||
|
from frigate.util.file import get_event_thumbnail_bytes
|
||||||
from frigate.util.image import SharedMemoryFrameManager
|
from frigate.util.image import SharedMemoryFrameManager
|
||||||
from frigate.util.path import get_event_thumbnail_bytes
|
|
||||||
|
|
||||||
from .embeddings import Embeddings
|
from .embeddings import Embeddings
|
||||||
|
|
||||||
@@ -158,11 +158,13 @@ class EmbeddingMaintainer(threading.Thread):
         self.realtime_processors: list[RealTimeProcessorApi] = []

         if self.config.face_recognition.enabled:
+            logger.debug("Face recognition enabled, initializing FaceRealTimeProcessor")
             self.realtime_processors.append(
                 FaceRealTimeProcessor(
                     self.config, self.requestor, self.event_metadata_publisher, metrics
                 )
             )
+            logger.debug("FaceRealTimeProcessor initialized successfully")

         if self.config.classification.bird.enabled:
             self.realtime_processors.append(
@@ -283,11 +285,32 @@ class EmbeddingMaintainer(threading.Thread):
         logger.info("Exiting embeddings maintenance...")

     def _check_classification_config_updates(self) -> None:
-        """Check for classification config updates and add new processors."""
+        """Check for classification config updates and add/remove processors."""
         topic, model_config = self.classification_config_subscriber.check_for_update()

-        if topic and model_config:
+        if topic:
             model_name = topic.split("/")[-1]
+
+            if model_config is None:
+                self.realtime_processors = [
+                    processor
+                    for processor in self.realtime_processors
+                    if not (
+                        isinstance(
+                            processor,
+                            (
+                                CustomStateClassificationProcessor,
+                                CustomObjectClassificationProcessor,
+                            ),
+                        )
+                        and processor.model_config.name == model_name
+                    )
+                ]
+
+                logger.info(
+                    f"Successfully removed classification processor for model: {model_name}"
+                )
+            else:
             self.config.classification.custom[model_name] = model_config

             # Check if processor already exists
@@ -374,7 +397,14 @@ class EmbeddingMaintainer(threading.Thread):

         source_type, _, camera, frame_name, data = update

+        logger.debug(
+            f"Received update - source_type: {source_type}, camera: {camera}, data label: {data.get('label') if data else 'None'}"
+        )
+
         if not camera or source_type != EventTypeEnum.tracked_object:
+            logger.debug(
+                f"Skipping update - camera: {camera}, source_type: {source_type}"
+            )
             return

         if self.config.semantic_search.enabled:
@@ -384,6 +414,9 @@ class EmbeddingMaintainer(threading.Thread):

         # no need to process updated objects if no processors are active
         if len(self.realtime_processors) == 0 and len(self.post_processors) == 0:
+            logger.debug(
+                f"No processors active - realtime: {len(self.realtime_processors)}, post: {len(self.post_processors)}"
+            )
             return

         # Create our own thumbnail based on the bounding box and the frame time
@@ -392,6 +425,7 @@ class EmbeddingMaintainer(threading.Thread):
                 frame_name, camera_config.frame_shape_yuv
             )
         except FileNotFoundError:
+            logger.debug(f"Frame {frame_name} not found for camera {camera}")
             pass

         if yuv_frame is None:
@@ -400,7 +434,11 @@ class EmbeddingMaintainer(threading.Thread):
             )
             return

+        logger.debug(
+            f"Processing {len(self.realtime_processors)} realtime processors for object {data.get('id')} (label: {data.get('label')})"
+        )
         for processor in self.realtime_processors:
+            logger.debug(f"Calling process_frame on {processor.__class__.__name__}")
             processor.process_frame(data, yuv_frame)

         for processor in self.post_processors:
|||||||
@ -12,7 +12,7 @@ from frigate.config import FrigateConfig
|
|||||||
from frigate.const import CLIPS_DIR
|
from frigate.const import CLIPS_DIR
|
||||||
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
|
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
|
||||||
from frigate.models import Event, Timeline
|
from frigate.models import Event, Timeline
|
||||||
from frigate.util.path import delete_event_snapshot, delete_event_thumbnail
|
from frigate.util.file import delete_event_snapshot, delete_event_thumbnail
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|||||||
@ -9,6 +9,7 @@ from multiprocessing import Queue, Value
|
|||||||
from multiprocessing.synchronize import Event as MpEvent
|
from multiprocessing.synchronize import Event as MpEvent
|
||||||
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
|
import zmq
|
||||||
|
|
||||||
from frigate.comms.object_detector_signaler import (
|
from frigate.comms.object_detector_signaler import (
|
||||||
ObjectDetectorPublisher,
|
ObjectDetectorPublisher,
|
||||||
@@ -377,6 +378,15 @@ class RemoteObjectDetector:
         if self.stop_event.is_set():
             return detections

+        # Drain any stale detection results from the ZMQ buffer before making a new request
+        # This prevents reading detection results from a previous request
+        # NOTE: This should never happen, but can in some rare cases
+        while True:
+            try:
+                self.detector_subscriber.socket.recv_string(flags=zmq.NOBLOCK)
+            except zmq.Again:
+                break
+
         # copy input to shared memory
         self.np_shm[:] = tensor_input[:]
         self.detection_queue.put(self.name)
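The drain loop added above uses standard pyzmq non-blocking semantics: a recv with zmq.NOBLOCK raises zmq.Again once the socket's queue is empty, which ends the loop. Shown in isolation below; the socket type and endpoint are illustrative only and are not Frigate's actual wiring.

    # Illustrative drain pattern in isolation; endpoint and socket type are made up,
    # only the NOBLOCK / zmq.Again handling matches the code above.
    import zmq

    ctx = zmq.Context.instance()
    socket = ctx.socket(zmq.PULL)
    socket.connect("ipc:///tmp/example_detections")  # hypothetical endpoint

    while True:
        try:
            socket.recv_string(flags=zmq.NOBLOCK)  # discard a stale message
        except zmq.Again:
            break  # queue is empty; safe to issue the next request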
|||||||
@ -14,7 +14,8 @@ from frigate.config import CameraConfig, FrigateConfig, RetainModeEnum
|
|||||||
from frigate.const import CACHE_DIR, CLIPS_DIR, MAX_WAL_SIZE, RECORD_DIR
|
from frigate.const import CACHE_DIR, CLIPS_DIR, MAX_WAL_SIZE, RECORD_DIR
|
||||||
from frigate.models import Previews, Recordings, ReviewSegment, UserReviewStatus
|
from frigate.models import Previews, Recordings, ReviewSegment, UserReviewStatus
|
||||||
from frigate.record.util import remove_empty_directories, sync_recordings
|
from frigate.record.util import remove_empty_directories, sync_recordings
|
||||||
from frigate.util.builtin import clear_and_unlink, get_tomorrow_at_time
|
from frigate.util.builtin import clear_and_unlink
|
||||||
|
from frigate.util.time import get_tomorrow_at_time
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|||||||
@ -28,7 +28,7 @@ from frigate.ffmpeg_presets import (
|
|||||||
parse_preset_hardware_acceleration_encode,
|
parse_preset_hardware_acceleration_encode,
|
||||||
)
|
)
|
||||||
from frigate.models import Export, Previews, Recordings
|
from frigate.models import Export, Previews, Recordings
|
||||||
from frigate.util.builtin import is_current_hour
|
from frigate.util.time import is_current_hour
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|||||||
@ -15,12 +15,9 @@ from collections.abc import Mapping
|
|||||||
from multiprocessing.sharedctypes import Synchronized
|
from multiprocessing.sharedctypes import Synchronized
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import Any, Dict, Optional, Tuple, Union
|
from typing import Any, Dict, Optional, Tuple, Union
|
||||||
from zoneinfo import ZoneInfoNotFoundError
|
|
||||||
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import pytz
|
|
||||||
from ruamel.yaml import YAML
|
from ruamel.yaml import YAML
|
||||||
from tzlocal import get_localzone
|
|
||||||
|
|
||||||
from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS
|
from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS
|
||||||
|
|
||||||
@@ -157,17 +154,6 @@ def load_labels(path: Optional[str], encoding="utf-8", prefill=91):
     return labels


-def get_tz_modifiers(tz_name: str) -> Tuple[str, str, float]:
-    seconds_offset = (
-        datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds()
-    )
-    hours_offset = int(seconds_offset / 60 / 60)
-    minutes_offset = int(seconds_offset / 60 - hours_offset * 60)
-    hour_modifier = f"{hours_offset} hour"
-    minute_modifier = f"{minutes_offset} minute"
-    return hour_modifier, minute_modifier, seconds_offset
-
-
 def to_relative_box(
     width: int, height: int, box: Tuple[int, int, int, int]
 ) -> Tuple[int | float, int | float, int | float, int | float]:
@@ -298,34 +284,6 @@ def find_by_key(dictionary, target_key):
     return None


-def get_tomorrow_at_time(hour: int) -> datetime.datetime:
-    """Returns the datetime of the following day at 2am."""
-    try:
-        tomorrow = datetime.datetime.now(get_localzone()) + datetime.timedelta(days=1)
-    except ZoneInfoNotFoundError:
-        tomorrow = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
-            days=1
-        )
-        logger.warning(
-            "Using utc for maintenance due to missing or incorrect timezone set"
-        )
-
-    return tomorrow.replace(hour=hour, minute=0, second=0).astimezone(
-        datetime.timezone.utc
-    )
-
-
-def is_current_hour(timestamp: int) -> bool:
-    """Returns if timestamp is in the current UTC hour."""
-    start_of_next_hour = (
-        datetime.datetime.now(datetime.timezone.utc).replace(
-            minute=0, second=0, microsecond=0
-        )
-        + datetime.timedelta(hours=1)
-    ).timestamp()
-    return timestamp < start_of_next_hour
-
-
 def clear_and_unlink(file: Path, missing_ok: bool = True) -> None:
     """clear file then unlink to avoid space retained by file descriptors."""
     if not missing_ok and not file.exists():
|||||||
@ -20,8 +20,8 @@ from frigate.const import (
|
|||||||
from frigate.log import redirect_output_to_logger
|
from frigate.log import redirect_output_to_logger
|
||||||
from frigate.models import Event, Recordings, ReviewSegment
|
from frigate.models import Event, Recordings, ReviewSegment
|
||||||
from frigate.types import ModelStatusTypesEnum
|
from frigate.types import ModelStatusTypesEnum
|
||||||
|
from frigate.util.file import get_event_thumbnail_bytes
|
||||||
from frigate.util.image import get_image_from_recording
|
from frigate.util.image import get_image_from_recording
|
||||||
from frigate.util.path import get_event_thumbnail_bytes
|
|
||||||
from frigate.util.process import FrigateProcess
|
from frigate.util.process import FrigateProcess
|
||||||
|
|
||||||
BATCH_SIZE = 16
|
BATCH_SIZE = 16
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
 import logging
 import os
 import threading
-import time
 from pathlib import Path
 from typing import Callable, List

@@ -10,40 +9,11 @@ import requests
 from frigate.comms.inter_process import InterProcessRequestor
 from frigate.const import UPDATE_MODEL_STATE
 from frigate.types import ModelStatusTypesEnum
+from frigate.util.file import FileLock

 logger = logging.getLogger(__name__)


-class FileLock:
-    def __init__(self, path):
-        self.path = path
-        self.lock_file = f"{path}.lock"
-
-        # we have not acquired the lock yet so it should not exist
-        if os.path.exists(self.lock_file):
-            try:
-                os.remove(self.lock_file)
-            except Exception:
-                pass
-
-    def acquire(self):
-        parent_dir = os.path.dirname(self.lock_file)
-        os.makedirs(parent_dir, exist_ok=True)
-
-        while True:
-            try:
-                with open(self.lock_file, "x"):
-                    return
-            except FileExistsError:
-                time.sleep(0.1)
-
-    def release(self):
-        try:
-            os.remove(self.lock_file)
-        except FileNotFoundError:
-            pass
-
-
 class ModelDownloader:
     def __init__(
         self,
@ -81,15 +51,13 @@ class ModelDownloader:
|
|||||||
def _download_models(self):
|
def _download_models(self):
|
||||||
for file_name in self.file_names:
|
for file_name in self.file_names:
|
||||||
path = os.path.join(self.download_path, file_name)
|
path = os.path.join(self.download_path, file_name)
|
||||||
lock = FileLock(path)
|
lock_path = f"{path}.lock"
|
||||||
|
lock = FileLock(lock_path, cleanup_stale_on_init=True)
|
||||||
|
|
||||||
if not os.path.exists(path):
|
if not os.path.exists(path):
|
||||||
lock.acquire()
|
with lock:
|
||||||
try:
|
|
||||||
if not os.path.exists(path):
|
if not os.path.exists(path):
|
||||||
self.download_func(path)
|
self.download_func(path)
|
||||||
finally:
|
|
||||||
lock.release()
|
|
||||||
|
|
||||||
self.requestor.send_data(
|
self.requestor.send_data(
|
||||||
UPDATE_MODEL_STATE,
|
UPDATE_MODEL_STATE,
|
||||||
|
|||||||
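The refactor above swaps the hand-rolled acquire/try/finally sequence for a context manager, but the shape of the logic is the classic double-checked pattern: test for the file, take the lock, test again inside the critical section before doing the expensive work. A small sketch of that pattern with the new `FileLock`; the URL and the use of `urlretrieve` here are stand-ins, not Frigate's downloader:

```python
import os
import urllib.request

from frigate.util.file import FileLock


def ensure_model(path: str, url: str) -> None:
    # Fast path: another process may already have produced the file.
    if os.path.exists(path):
        return

    lock = FileLock(f"{path}.lock", cleanup_stale_on_init=True)
    with lock:
        # Re-check inside the lock: a peer may have finished the download
        # while this process was waiting to acquire it.
        if not os.path.exists(path):
            urllib.request.urlretrieve(url, path)
```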
frigate/util/file.py (new file, 276 lines)
@@ -0,0 +1,276 @@
"""Path and file utilities."""

import base64
import fcntl
import logging
import os
import time
from pathlib import Path
from typing import Optional

import cv2
from numpy import ndarray

from frigate.const import CLIPS_DIR, THUMB_DIR
from frigate.models import Event

logger = logging.getLogger(__name__)


def get_event_thumbnail_bytes(event: Event) -> bytes | None:
    if event.thumbnail:
        return base64.b64decode(event.thumbnail)
    else:
        try:
            with open(
                os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp"), "rb"
            ) as f:
                return f.read()
        except Exception:
            return None


def get_event_snapshot(event: Event) -> ndarray:
    media_name = f"{event.camera}-{event.id}"
    return cv2.imread(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")


### Deletion


def delete_event_images(event: Event) -> bool:
    return delete_event_snapshot(event) and delete_event_thumbnail(event)


def delete_event_snapshot(event: Event) -> bool:
    media_name = f"{event.camera}-{event.id}"
    media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")

    try:
        media_path.unlink(missing_ok=True)
        media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.webp")
        media_path.unlink(missing_ok=True)
        # also delete clean.png (legacy) for backward compatibility
        media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png")
        media_path.unlink(missing_ok=True)
        return True
    except OSError:
        return False


def delete_event_thumbnail(event: Event) -> bool:
    if event.thumbnail:
        return True
    else:
        Path(os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp")).unlink(
            missing_ok=True
        )
        return True


### File Locking


class FileLock:
    """
    A file-based lock for coordinating access to resources across processes.

    Uses fcntl.flock() for proper POSIX file locking on Linux. Supports timeouts,
    stale lock detection, and can be used as a context manager.

    Example:
        ```python
        # Using as a context manager (recommended)
        with FileLock("/path/to/resource.lock", timeout=60):
            # Critical section
            do_something()

        # Manual acquisition and release
        lock = FileLock("/path/to/resource.lock")
        if lock.acquire(timeout=60):
            try:
                do_something()
            finally:
                lock.release()
        ```

    Attributes:
        lock_path: Path to the lock file
        timeout: Maximum time to wait for lock acquisition (seconds)
        poll_interval: Time to wait between lock acquisition attempts (seconds)
        stale_timeout: Time after which a lock is considered stale (seconds)
    """

    def __init__(
        self,
        lock_path: str | Path,
        timeout: int = 300,
        poll_interval: float = 1.0,
        stale_timeout: int = 600,
        cleanup_stale_on_init: bool = False,
    ):
        """
        Initialize a FileLock.

        Args:
            lock_path: Path to the lock file
            timeout: Maximum time to wait for lock acquisition in seconds (default: 300)
            poll_interval: Time to wait between lock attempts in seconds (default: 1.0)
            stale_timeout: Time after which a lock is considered stale in seconds (default: 600)
            cleanup_stale_on_init: Whether to clean up stale locks on initialization (default: False)
        """
        self.lock_path = Path(lock_path)
        self.timeout = timeout
        self.poll_interval = poll_interval
        self.stale_timeout = stale_timeout
        self._fd: Optional[int] = None
        self._acquired = False

        if cleanup_stale_on_init:
            self._cleanup_stale_lock()

    def _cleanup_stale_lock(self) -> bool:
        """
        Clean up a stale lock file if it exists and is old.

        Returns:
            True if lock was cleaned up, False otherwise
        """
        try:
            if self.lock_path.exists():
                # Check if lock file is older than stale_timeout
                lock_age = time.time() - self.lock_path.stat().st_mtime
                if lock_age > self.stale_timeout:
                    logger.warning(
                        f"Removing stale lock file: {self.lock_path} (age: {lock_age:.1f}s)"
                    )
                    self.lock_path.unlink()
                    return True
        except Exception as e:
            logger.error(f"Error cleaning up stale lock: {e}")

        return False

    def is_stale(self) -> bool:
        """
        Check if the lock file is stale (older than stale_timeout).

        Returns:
            True if lock is stale, False otherwise
        """
        try:
            if self.lock_path.exists():
                lock_age = time.time() - self.lock_path.stat().st_mtime
                return lock_age > self.stale_timeout
        except Exception:
            pass

        return False

    def acquire(self, timeout: Optional[int] = None) -> bool:
        """
        Acquire the file lock using fcntl.flock().

        Args:
            timeout: Maximum time to wait for lock in seconds (uses instance timeout if None)

        Returns:
            True if lock acquired, False if timeout or error
        """
        if self._acquired:
            logger.warning(f"Lock already acquired: {self.lock_path}")
            return True

        if timeout is None:
            timeout = self.timeout

        # Ensure parent directory exists
        self.lock_path.parent.mkdir(parents=True, exist_ok=True)

        # Clean up stale lock before attempting to acquire
        self._cleanup_stale_lock()

        try:
            self._fd = os.open(self.lock_path, os.O_CREAT | os.O_RDWR)

            start_time = time.time()
            while time.time() - start_time < timeout:
                try:
                    fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                    self._acquired = True
                    logger.debug(f"Acquired lock: {self.lock_path}")
                    return True
                except (OSError, IOError):
                    # Lock is held by another process
                    if time.time() - start_time >= timeout:
                        logger.warning(f"Timeout waiting for lock: {self.lock_path}")
                        os.close(self._fd)
                        self._fd = None
                        return False

                    time.sleep(self.poll_interval)

            # Timeout reached
            if self._fd is not None:
                os.close(self._fd)
                self._fd = None
            return False

        except Exception as e:
            logger.error(f"Error acquiring lock: {e}")
            if self._fd is not None:
                try:
                    os.close(self._fd)
                except Exception:
                    pass
                self._fd = None
            return False

    def release(self) -> None:
        """
        Release the file lock.

        This closes the file descriptor and removes the lock file.
        """
        if not self._acquired:
            return

        try:
            # Close file descriptor and release fcntl lock
            if self._fd is not None:
                try:
                    fcntl.flock(self._fd, fcntl.LOCK_UN)
                    os.close(self._fd)
                except Exception as e:
                    logger.warning(f"Error closing lock file descriptor: {e}")
                finally:
                    self._fd = None

            # Remove lock file
            if self.lock_path.exists():
                self.lock_path.unlink()
                logger.debug(f"Released lock: {self.lock_path}")

        except FileNotFoundError:
            # Lock file already removed, that's fine
            pass
        except Exception as e:
            logger.error(f"Error releasing lock: {e}")
        finally:
            self._acquired = False

    def __enter__(self):
        """Context manager entry - acquire the lock."""
        if not self.acquire():
            raise TimeoutError(f"Failed to acquire lock: {self.lock_path}")
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context manager exit - release the lock."""
        self.release()
        return False

    def __del__(self):
        """Destructor - ensure lock is released."""
        if self._acquired:
            self.release()
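One property of the new class is worth calling out: because the lock is taken with `fcntl.flock()` on an open descriptor, the kernel drops it automatically if the holding process dies, and the `stale_timeout` handling only has to deal with leftover lock files. A small sketch, assuming the module path above is importable and using an arbitrary `/tmp` lock file, that serializes two processes on the same lock:

```python
import multiprocessing
import time

from frigate.util.file import FileLock

LOCK_PATH = "/tmp/frigate-filelock-demo.lock"


def worker(name: str) -> None:
    # Both workers contend for the same lock file; the timeout keeps a
    # stuck peer from blocking this process forever.
    with FileLock(LOCK_PATH, timeout=30, poll_interval=0.5):
        print(f"{name} holds the lock")
        time.sleep(1)


if __name__ == "__main__":
    procs = [
        multiprocessing.Process(target=worker, args=(f"p{i}",)) for i in range(2)
    ]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
```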
@@ -1,62 +0,0 @@
-"""Path utilities."""
-
-import base64
-import os
-from pathlib import Path
-
-import cv2
-from numpy import ndarray
-
-from frigate.const import CLIPS_DIR, THUMB_DIR
-from frigate.models import Event
-
-
-def get_event_thumbnail_bytes(event: Event) -> bytes | None:
-    if event.thumbnail:
-        return base64.b64decode(event.thumbnail)
-    else:
-        try:
-            with open(
-                os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp"), "rb"
-            ) as f:
-                return f.read()
-        except Exception:
-            return None
-
-
-def get_event_snapshot(event: Event) -> ndarray:
-    media_name = f"{event.camera}-{event.id}"
-    return cv2.imread(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
-
-
-### Deletion
-
-
-def delete_event_images(event: Event) -> bool:
-    return delete_event_snapshot(event) and delete_event_thumbnail(event)
-
-
-def delete_event_snapshot(event: Event) -> bool:
-    media_name = f"{event.camera}-{event.id}"
-    media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
-
-    try:
-        media_path.unlink(missing_ok=True)
-        media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.webp")
-        media_path.unlink(missing_ok=True)
-        # also delete clean.png (legacy) for backward compatibility
-        media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png")
-        media_path.unlink(missing_ok=True)
-        return True
-    except OSError:
-        return False
-
-
-def delete_event_thumbnail(event: Event) -> bool:
-    if event.thumbnail:
-        return True
-    else:
-        Path(os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp")).unlink(
-            missing_ok=True
-        )
-        return True
@@ -1,6 +1,5 @@
 """RKNN model conversion utility for Frigate."""

-import fcntl
 import logging
 import os
 import subprocess
@@ -9,6 +8,8 @@ import time
 from pathlib import Path
 from typing import Optional

+from frigate.util.file import FileLock
+
 logger = logging.getLogger(__name__)

 MODEL_TYPE_CONFIGS = {
@@ -245,112 +246,6 @@ def convert_onnx_to_rknn(
             logger.warning(f"Failed to remove temporary ONNX file: {e}")


-def cleanup_stale_lock(lock_file_path: Path) -> bool:
-    """
-    Clean up a stale lock file if it exists and is old.
-
-    Args:
-        lock_file_path: Path to the lock file
-
-    Returns:
-        True if lock was cleaned up, False otherwise
-    """
-    try:
-        if lock_file_path.exists():
-            # Check if lock file is older than 10 minutes (stale)
-            lock_age = time.time() - lock_file_path.stat().st_mtime
-            if lock_age > 600:  # 10 minutes
-                logger.warning(
-                    f"Removing stale lock file: {lock_file_path} (age: {lock_age:.1f}s)"
-                )
-                lock_file_path.unlink()
-                return True
-    except Exception as e:
-        logger.error(f"Error cleaning up stale lock: {e}")
-
-    return False
-
-
-def acquire_conversion_lock(lock_file_path: Path, timeout: int = 300) -> bool:
-    """
-    Acquire a file-based lock for model conversion.
-
-    Args:
-        lock_file_path: Path to the lock file
-        timeout: Maximum time to wait for lock in seconds
-
-    Returns:
-        True if lock acquired, False if timeout or error
-    """
-    try:
-        lock_file_path.parent.mkdir(parents=True, exist_ok=True)
-        cleanup_stale_lock(lock_file_path)
-        lock_fd = os.open(lock_file_path, os.O_CREAT | os.O_RDWR)
-
-        # Try to acquire exclusive lock
-        start_time = time.time()
-        while time.time() - start_time < timeout:
-            try:
-                fcntl.flock(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
-                # Lock acquired successfully
-                logger.debug(f"Acquired conversion lock: {lock_file_path}")
-                return True
-            except (OSError, IOError):
-                # Lock is held by another process, wait and retry
-                if time.time() - start_time >= timeout:
-                    logger.warning(
-                        f"Timeout waiting for conversion lock: {lock_file_path}"
-                    )
-                    os.close(lock_fd)
-                    return False
-
-                logger.debug("Waiting for conversion lock to be released...")
-                time.sleep(1)
-
-        os.close(lock_fd)
-        return False
-
-    except Exception as e:
-        logger.error(f"Error acquiring conversion lock: {e}")
-        return False
-
-
-def release_conversion_lock(lock_file_path: Path) -> None:
-    """
-    Release the conversion lock.
-
-    Args:
-        lock_file_path: Path to the lock file
-    """
-    try:
-        if lock_file_path.exists():
-            lock_file_path.unlink()
-            logger.debug(f"Released conversion lock: {lock_file_path}")
-    except Exception as e:
-        logger.error(f"Error releasing conversion lock: {e}")
-
-
-def is_lock_stale(lock_file_path: Path, max_age: int = 600) -> bool:
-    """
-    Check if a lock file is stale (older than max_age seconds).
-
-    Args:
-        lock_file_path: Path to the lock file
-        max_age: Maximum age in seconds before considering lock stale
-
-    Returns:
-        True if lock is stale, False otherwise
-    """
-    try:
-        if lock_file_path.exists():
-            lock_age = time.time() - lock_file_path.stat().st_mtime
-            return lock_age > max_age
-    except Exception:
-        pass
-
-    return False
-
-
 def wait_for_conversion_completion(
     model_type: str, rknn_path: Path, lock_file_path: Path, timeout: int = 300
 ) -> bool:
@@ -358,6 +253,7 @@ def wait_for_conversion_completion(
     Wait for another process to complete the conversion.

     Args:
+        model_type: Type of model being converted
         rknn_path: Path to the expected RKNN model
         lock_file_path: Path to the lock file to monitor
         timeout: Maximum time to wait in seconds
@@ -366,6 +262,8 @@ def wait_for_conversion_completion(
        True if RKNN model appears, False if timeout
    """
    start_time = time.time()
+    lock = FileLock(lock_file_path, stale_timeout=600)
+
    while time.time() - start_time < timeout:
        # Check if RKNN model appeared
        if rknn_path.exists():
@@ -385,11 +283,14 @@ def wait_for_conversion_completion(
            return False

        # Check if lock is stale
-        if is_lock_stale(lock_file_path):
+        if lock.is_stale():
            logger.warning("Lock file is stale, attempting to clean up and retry...")
-            cleanup_stale_lock(lock_file_path)
+            lock._cleanup_stale_lock()
            # Try to acquire lock again
-            if acquire_conversion_lock(lock_file_path, timeout=60):
+            retry_lock = FileLock(
+                lock_file_path, timeout=60, cleanup_stale_on_init=True
+            )
+            if retry_lock.acquire():
                try:
                    # Check if RKNN file appeared while waiting
                    if rknn_path.exists():
@@ -415,7 +316,7 @@ def wait_for_conversion_completion(
                        return False

                finally:
-                    release_conversion_lock(lock_file_path)
+                    retry_lock.release()

        logger.debug("Waiting for RKNN model to appear...")
        time.sleep(1)
@@ -452,8 +353,9 @@ def auto_convert_model(
        return str(rknn_path)

    lock_file_path = base_path.parent / f"{base_name}.conversion.lock"
+    lock = FileLock(lock_file_path, timeout=300, cleanup_stale_on_init=True)

-    if acquire_conversion_lock(lock_file_path):
+    if lock.acquire():
        try:
            if rknn_path.exists():
                logger.info(
@@ -476,7 +378,7 @@ def auto_convert_model(
                return None

        finally:
-            release_conversion_lock(lock_file_path)
+            lock.release()
    else:
        logger.info(
            f"Another process is converting {model_path}, waiting for completion..."
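The waiting path above reduces to: poll for the converted artifact, and if the lock file ever goes stale, take it over and either find the artifact or give up. A condensed sketch of that loop under the same assumptions (the lock path and timeouts are illustrative, and the real function also re-runs the conversion after a takeover, which is omitted here):

```python
import time
from pathlib import Path

from frigate.util.file import FileLock


def wait_or_take_over(artifact: Path, lock_path: Path, timeout: int = 300) -> bool:
    deadline = time.time() + timeout
    lock = FileLock(lock_path, stale_timeout=600)

    while time.time() < deadline:
        if artifact.exists():
            return True  # another process finished the conversion

        if lock.is_stale():
            # The converting process likely died; try to claim the work.
            retry = FileLock(lock_path, timeout=60, cleanup_stale_on_init=True)
            if retry.acquire():
                try:
                    return artifact.exists()
                finally:
                    retry.release()

        time.sleep(1)

    return False
```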
frigate/util/time.py (new file, 100 lines)
@@ -0,0 +1,100 @@
"""Time utilities."""

import datetime
import logging
from typing import Tuple
from zoneinfo import ZoneInfoNotFoundError

import pytz
from tzlocal import get_localzone

logger = logging.getLogger(__name__)


def get_tz_modifiers(tz_name: str) -> Tuple[str, str, float]:
    seconds_offset = (
        datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds()
    )
    hours_offset = int(seconds_offset / 60 / 60)
    minutes_offset = int(seconds_offset / 60 - hours_offset * 60)
    hour_modifier = f"{hours_offset} hour"
    minute_modifier = f"{minutes_offset} minute"
    return hour_modifier, minute_modifier, seconds_offset


def get_tomorrow_at_time(hour: int) -> datetime.datetime:
    """Returns the datetime of the following day at the given hour."""
    try:
        tomorrow = datetime.datetime.now(get_localzone()) + datetime.timedelta(days=1)
    except ZoneInfoNotFoundError:
        tomorrow = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
            days=1
        )
        logger.warning(
            "Using utc for maintenance due to missing or incorrect timezone set"
        )

    return tomorrow.replace(hour=hour, minute=0, second=0).astimezone(
        datetime.timezone.utc
    )


def is_current_hour(timestamp: int) -> bool:
    """Returns if timestamp is in the current UTC hour."""
    start_of_next_hour = (
        datetime.datetime.now(datetime.timezone.utc).replace(
            minute=0, second=0, microsecond=0
        )
        + datetime.timedelta(hours=1)
    ).timestamp()
    return timestamp < start_of_next_hour


def get_dst_transitions(
    tz_name: str, start_time: float, end_time: float
) -> list[tuple[float, float, float]]:
    """
    Find DST transition points and return time periods with consistent offsets.

    Args:
        tz_name: Timezone name (e.g., 'America/New_York')
        start_time: Start timestamp (UTC)
        end_time: End timestamp (UTC)

    Returns:
        List of (period_start, period_end, seconds_offset) tuples representing
        continuous periods with the same UTC offset
    """
    try:
        tz = pytz.timezone(tz_name)
    except pytz.UnknownTimeZoneError:
        # If timezone is invalid, return single period with no offset
        return [(start_time, end_time, 0)]

    periods = []
    current = start_time

    # Get initial offset
    dt = datetime.datetime.utcfromtimestamp(current).replace(tzinfo=pytz.UTC)
    local_dt = dt.astimezone(tz)
    prev_offset = local_dt.utcoffset().total_seconds()
    period_start = start_time

    # Check each day for offset changes
    while current <= end_time:
        dt = datetime.datetime.utcfromtimestamp(current).replace(tzinfo=pytz.UTC)
        local_dt = dt.astimezone(tz)
        current_offset = local_dt.utcoffset().total_seconds()

        if current_offset != prev_offset:
            # Found a transition - close previous period
            periods.append((period_start, current, prev_offset))
            period_start = current
            prev_offset = current_offset

        current += 86400  # Check daily

    # Add final period
    periods.append((period_start, end_time, prev_offset))

    return periods
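A quick usage sketch for the new module, assuming it is importable in your environment; the timezone and the November 2024 window are arbitrary examples chosen to straddle a US DST transition:

```python
import datetime

from frigate.util.time import get_dst_transitions, get_tz_modifiers

# Window spanning the America/New_York fall-back transition.
start = datetime.datetime(2024, 11, 1, tzinfo=datetime.timezone.utc).timestamp()
end = datetime.datetime(2024, 11, 15, tzinfo=datetime.timezone.utc).timestamp()

# Each period can be handled with a single, constant UTC offset.
for period_start, period_end, offset in get_dst_transitions(
    "America/New_York", start, end
):
    print(period_start, period_end, offset / 3600)

# SQLite-style "N hour" / "N minute" modifiers for the timezone's current offset.
hour_mod, minute_mod, seconds = get_tz_modifiers("America/New_York")
print(hour_mod, minute_mod, seconds)
```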
@@ -34,7 +34,7 @@ from frigate.ptz.autotrack import ptz_moving_at_frame_time
 from frigate.track import ObjectTracker
 from frigate.track.norfair_tracker import NorfairTracker
 from frigate.track.tracked_object import TrackedObjectAttribute
-from frigate.util.builtin import EventsPerSecond, get_tomorrow_at_time
+from frigate.util.builtin import EventsPerSecond
 from frigate.util.image import (
     FrameManager,
     SharedMemoryFrameManager,
@@ -53,6 +53,7 @@ from frigate.util.object import (
     reduce_detections,
 )
 from frigate.util.process import FrigateProcess
+from frigate.util.time import get_tomorrow_at_time

 logger = logging.getLogger(__name__)

@@ -1,5 +1,8 @@
 {
   "documentTitle": "Classification Models",
+  "details": {
+    "scoreInfo": "Score represents the average classification confidence across all detections of this object."
+  },
   "button": {
     "deleteClassificationAttempts": "Delete Classification Images",
     "renameCategory": "Rename Class",
@@ -7,23 +10,27 @@
     "deleteImages": "Delete Images",
     "trainModel": "Train Model",
     "addClassification": "Add Classification",
-    "deleteModels": "Delete Models"
+    "deleteModels": "Delete Models",
+    "editModel": "Edit Model"
   },
   "toast": {
     "success": {
       "deletedCategory": "Deleted Class",
       "deletedImage": "Deleted Images",
-      "deletedModel": "Successfully deleted {{count}} model(s)",
+      "deletedModel_one": "Successfully deleted {{count}} model",
+      "deletedModel_other": "Successfully deleted {{count}} models",
       "categorizedImage": "Successfully Classified Image",
       "trainedModel": "Successfully trained model.",
-      "trainingModel": "Successfully started model training."
+      "trainingModel": "Successfully started model training.",
+      "updatedModel": "Successfully updated model configuration"
     },
     "error": {
       "deleteImageFailed": "Failed to delete: {{errorMessage}}",
       "deleteCategoryFailed": "Failed to delete class: {{errorMessage}}",
       "deleteModelFailed": "Failed to delete model: {{errorMessage}}",
       "categorizeFailed": "Failed to categorize image: {{errorMessage}}",
-      "trainingFailed": "Failed to start model training: {{errorMessage}}"
+      "trainingFailed": "Failed to start model training: {{errorMessage}}",
+      "updateModelFailed": "Failed to update model: {{errorMessage}}"
     }
   },
   "deleteCategory": {
@@ -35,6 +42,12 @@
     "single": "Are you sure you want to delete {{name}}? This will permanently delete all associated data including images and training data. This action cannot be undone.",
     "desc": "Are you sure you want to delete {{count}} model(s)? This will permanently delete all associated data including images and training data. This action cannot be undone."
   },
+  "edit": {
+    "title": "Edit Classification Model",
+    "descriptionState": "Edit the classes for this state classification model. Changes will require retraining the model.",
+    "descriptionObject": "Edit the object type and classification type for this object classification model.",
+    "stateClassesInfo": "Note: Changing state classes requires retraining the model with the updated classes."
+  },
   "deleteDatasetImages": {
     "title": "Delete Dataset Images",
     "desc": "Are you sure you want to delete {{count}} images from {{dataset}}? This action cannot be undone and will require re-training the model."
@@ -6,7 +6,8 @@
   },
   "details": {
     "timestamp": "Timestamp",
-    "unknown": "Unknown"
+    "unknown": "Unknown",
+    "scoreInfo": "Score is a weighted average of all face scores, weighted by the size of the face in each image."
   },
   "documentTitle": "Face Library - Frigate",
   "uploadFaceImage": {
@@ -271,6 +271,8 @@
     "disconnectStream": "Disconnect",
     "estimatedBandwidth": "Estimated Bandwidth",
     "roles": "Roles",
+    "ffmpegModule": "Use stream compatibility mode",
+    "ffmpegModuleDescription": "If the stream does not load after several attempts, try enabling this. When enabled, Frigate will use the ffmpeg module with go2rtc. This may provide better compatibility with some camera streams.",
     "none": "None",
     "error": "Error",
     "streamValidated": "Stream {{number}} validated successfully",
@ -7,11 +7,12 @@ import {
|
|||||||
} from "@/types/classification";
|
} from "@/types/classification";
|
||||||
import { Event } from "@/types/event";
|
import { Event } from "@/types/event";
|
||||||
import { forwardRef, useMemo, useRef, useState } from "react";
|
import { forwardRef, useMemo, useRef, useState } from "react";
|
||||||
import { isDesktop, isMobile } from "react-device-detect";
|
import { isDesktop, isMobile, isMobileOnly } from "react-device-detect";
|
||||||
import { useTranslation } from "react-i18next";
|
import { useTranslation } from "react-i18next";
|
||||||
import TimeAgo from "../dynamic/TimeAgo";
|
import TimeAgo from "../dynamic/TimeAgo";
|
||||||
import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip";
|
import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip";
|
||||||
import { LuSearch } from "react-icons/lu";
|
import { Popover, PopoverContent, PopoverTrigger } from "../ui/popover";
|
||||||
|
import { LuSearch, LuInfo } from "react-icons/lu";
|
||||||
import { TooltipPortal } from "@radix-ui/react-tooltip";
|
import { TooltipPortal } from "@radix-ui/react-tooltip";
|
||||||
import { useNavigate } from "react-router-dom";
|
import { useNavigate } from "react-router-dom";
|
||||||
import { HiSquare2Stack } from "react-icons/hi2";
|
import { HiSquare2Stack } from "react-icons/hi2";
|
||||||
@ -181,6 +182,7 @@ type GroupedClassificationCardProps = {
|
|||||||
selectedItems: string[];
|
selectedItems: string[];
|
||||||
i18nLibrary: string;
|
i18nLibrary: string;
|
||||||
objectType: string;
|
objectType: string;
|
||||||
|
noClassificationLabel?: string;
|
||||||
onClick: (data: ClassificationItemData | undefined) => void;
|
onClick: (data: ClassificationItemData | undefined) => void;
|
||||||
children?: (data: ClassificationItemData) => React.ReactNode;
|
children?: (data: ClassificationItemData) => React.ReactNode;
|
||||||
};
|
};
|
||||||
@ -190,6 +192,7 @@ export function GroupedClassificationCard({
|
|||||||
threshold,
|
threshold,
|
||||||
selectedItems,
|
selectedItems,
|
||||||
i18nLibrary,
|
i18nLibrary,
|
||||||
|
noClassificationLabel = "details.none",
|
||||||
onClick,
|
onClick,
|
||||||
children,
|
children,
|
||||||
}: GroupedClassificationCardProps) {
|
}: GroupedClassificationCardProps) {
|
||||||
@ -222,10 +225,14 @@ export function GroupedClassificationCard({
|
|||||||
const bestTyped: ClassificationItemData = best;
|
const bestTyped: ClassificationItemData = best;
|
||||||
return {
|
return {
|
||||||
...bestTyped,
|
...bestTyped,
|
||||||
name: event ? (event.sub_label ?? t("details.unknown")) : bestTyped.name,
|
name: event
|
||||||
|
? event.sub_label && event.sub_label !== "none"
|
||||||
|
? event.sub_label
|
||||||
|
: t(noClassificationLabel)
|
||||||
|
: bestTyped.name,
|
||||||
score: event?.data?.sub_label_score || bestTyped.score,
|
score: event?.data?.sub_label_score || bestTyped.score,
|
||||||
};
|
};
|
||||||
}, [group, event, t]);
|
}, [group, event, noClassificationLabel, t]);
|
||||||
|
|
||||||
const bestScoreStatus = useMemo(() => {
|
const bestScoreStatus = useMemo(() => {
|
||||||
if (!bestItem?.score || !threshold) {
|
if (!bestItem?.score || !threshold) {
|
||||||
@ -257,8 +264,8 @@ export function GroupedClassificationCard({
|
|||||||
|
|
||||||
const Overlay = isDesktop ? Dialog : MobilePage;
|
const Overlay = isDesktop ? Dialog : MobilePage;
|
||||||
const Trigger = isDesktop ? DialogTrigger : MobilePageTrigger;
|
const Trigger = isDesktop ? DialogTrigger : MobilePageTrigger;
|
||||||
const Header = isDesktop ? DialogHeader : MobilePageHeader;
|
|
||||||
const Content = isDesktop ? DialogContent : MobilePageContent;
|
const Content = isDesktop ? DialogContent : MobilePageContent;
|
||||||
|
const Header = isDesktop ? DialogHeader : MobilePageHeader;
|
||||||
const ContentTitle = isDesktop ? DialogTitle : MobilePageTitle;
|
const ContentTitle = isDesktop ? DialogTitle : MobilePageTitle;
|
||||||
const ContentDescription = isDesktop
|
const ContentDescription = isDesktop
|
||||||
? DialogDescription
|
? DialogDescription
|
||||||
@ -291,9 +298,9 @@ export function GroupedClassificationCard({
|
|||||||
<Trigger asChild></Trigger>
|
<Trigger asChild></Trigger>
|
||||||
<Content
|
<Content
|
||||||
className={cn(
|
className={cn(
|
||||||
"",
|
"scrollbar-container",
|
||||||
isDesktop && "min-w-[50%] max-w-[65%]",
|
isDesktop && "min-w-[50%] max-w-[65%]",
|
||||||
isMobile && "flex flex-col",
|
isMobile && "overflow-y-auto",
|
||||||
)}
|
)}
|
||||||
onOpenAutoFocus={(e) => e.preventDefault()}
|
onOpenAutoFocus={(e) => e.preventDefault()}
|
||||||
>
|
>
|
||||||
@ -301,18 +308,21 @@ export function GroupedClassificationCard({
|
|||||||
<Header
|
<Header
|
||||||
className={cn(
|
className={cn(
|
||||||
"mx-2 flex flex-row items-center gap-4",
|
"mx-2 flex flex-row items-center gap-4",
|
||||||
isMobile && "flex-shrink-0",
|
isMobileOnly && "top-0 mx-4",
|
||||||
)}
|
)}
|
||||||
>
|
>
|
||||||
<div>
|
<div
|
||||||
<ContentTitle
|
|
||||||
className={cn(
|
className={cn(
|
||||||
"flex items-center gap-2 font-normal capitalize",
|
"",
|
||||||
isMobile && "px-2",
|
isMobile && "flex flex-col items-center justify-center",
|
||||||
)}
|
)}
|
||||||
>
|
>
|
||||||
{event?.sub_label ? event.sub_label : t("details.unknown")}
|
<ContentTitle className="flex items-center gap-2 font-normal capitalize">
|
||||||
{event?.sub_label && (
|
{event?.sub_label && event.sub_label !== "none"
|
||||||
|
? event.sub_label
|
||||||
|
: t(noClassificationLabel)}
|
||||||
|
{event?.sub_label && event.sub_label !== "none" && (
|
||||||
|
<div className="flex items-center gap-1">
|
||||||
<div
|
<div
|
||||||
className={cn(
|
className={cn(
|
||||||
"",
|
"",
|
||||||
@ -321,6 +331,22 @@ export function GroupedClassificationCard({
|
|||||||
bestScoreStatus == "unknown" && "text-danger",
|
bestScoreStatus == "unknown" && "text-danger",
|
||||||
)}
|
)}
|
||||||
>{`${Math.round((event.data.sub_label_score || 0) * 100)}%`}</div>
|
>{`${Math.round((event.data.sub_label_score || 0) * 100)}%`}</div>
|
||||||
|
<Popover>
|
||||||
|
<PopoverTrigger asChild>
|
||||||
|
<button
|
||||||
|
className="focus:outline-none"
|
||||||
|
aria-label={t("details.scoreInfo", {
|
||||||
|
ns: i18nLibrary,
|
||||||
|
})}
|
||||||
|
>
|
||||||
|
<LuInfo className="size-3" />
|
||||||
|
</button>
|
||||||
|
</PopoverTrigger>
|
||||||
|
<PopoverContent className="w-80 text-sm">
|
||||||
|
{t("details.scoreInfo", { ns: i18nLibrary })}
|
||||||
|
</PopoverContent>
|
||||||
|
</Popover>
|
||||||
|
</div>
|
||||||
)}
|
)}
|
||||||
</ContentTitle>
|
</ContentTitle>
|
||||||
<ContentDescription className={cn("", isMobile && "px-2")}>
|
<ContentDescription className={cn("", isMobile && "px-2")}>
|
||||||
@ -364,7 +390,7 @@ export function GroupedClassificationCard({
|
|||||||
className={cn(
|
className={cn(
|
||||||
"grid w-full auto-rows-min grid-cols-2 gap-2 sm:grid-cols-3 md:grid-cols-4 lg:grid-cols-6 xl:grid-cols-6 2xl:grid-cols-8",
|
"grid w-full auto-rows-min grid-cols-2 gap-2 sm:grid-cols-3 md:grid-cols-4 lg:grid-cols-6 xl:grid-cols-6 2xl:grid-cols-8",
|
||||||
isDesktop && "p-2",
|
isDesktop && "p-2",
|
||||||
isMobile && "scrollbar-container flex-1 overflow-y-auto",
|
isMobile && "px-4 pb-4",
|
||||||
)}
|
)}
|
||||||
>
|
>
|
||||||
{group.map((data: ClassificationItemData) => (
|
{group.map((data: ClassificationItemData) => (
|
||||||
|
|||||||
@@ -37,6 +37,7 @@ import { capitalizeFirstLetter } from "@/utils/stringUtil";
 import { Button, buttonVariants } from "../ui/button";
 import { Trans, useTranslation } from "react-i18next";
 import { cn } from "@/lib/utils";
+import { LuCircle } from "react-icons/lu";

 type ReviewCardProps = {
   event: ReviewSegment;
@@ -142,7 +143,7 @@ export default function ReviewCard({
         className={cn(
           "size-full rounded-lg",
           activeReviewItem?.id == event.id &&
-            "outline outline-[3px] outline-offset-1 outline-selected",
+            "outline outline-[3px] -outline-offset-[2.8px] outline-selected duration-200",
           imgLoaded ? "visible" : "invisible",
         )}
         src={`${baseUrl}${event.thumb_path.replace("/media/frigate/", "")}`}
@@ -165,6 +166,14 @@ export default function ReviewCard({
           <TooltipTrigger asChild>
             <div className="flex items-center justify-evenly gap-1">
               <>
+                <LuCircle
+                  className={cn(
+                    "size-2",
+                    event.severity == "alert"
+                      ? "fill-severity_alert text-severity_alert"
+                      : "fill-severity_detection text-severity_detection",
+                  )}
+                />
                 {event.data.objects.map((object) => {
                   return getIconForLabel(
                     object,
@ -0,0 +1,477 @@
|
|||||||
|
import { Button } from "@/components/ui/button";
|
||||||
|
import {
|
||||||
|
Dialog,
|
||||||
|
DialogContent,
|
||||||
|
DialogDescription,
|
||||||
|
DialogHeader,
|
||||||
|
DialogTitle,
|
||||||
|
} from "@/components/ui/dialog";
|
||||||
|
import {
|
||||||
|
Form,
|
||||||
|
FormControl,
|
||||||
|
FormField,
|
||||||
|
FormItem,
|
||||||
|
FormLabel,
|
||||||
|
FormMessage,
|
||||||
|
} from "@/components/ui/form";
|
||||||
|
import { Input } from "@/components/ui/input";
|
||||||
|
import { Label } from "@/components/ui/label";
|
||||||
|
import { RadioGroup, RadioGroupItem } from "@/components/ui/radio-group";
|
||||||
|
import {
|
||||||
|
Select,
|
||||||
|
SelectContent,
|
||||||
|
SelectItem,
|
||||||
|
SelectTrigger,
|
||||||
|
SelectValue,
|
||||||
|
} from "@/components/ui/select";
|
||||||
|
import {
|
||||||
|
CustomClassificationModelConfig,
|
||||||
|
FrigateConfig,
|
||||||
|
} from "@/types/frigateConfig";
|
||||||
|
import { getTranslatedLabel } from "@/utils/i18n";
|
||||||
|
import { zodResolver } from "@hookform/resolvers/zod";
|
||||||
|
import axios from "axios";
|
||||||
|
import { useCallback, useEffect, useMemo, useState } from "react";
|
||||||
|
import { useForm } from "react-hook-form";
|
||||||
|
import { useTranslation } from "react-i18next";
|
||||||
|
import { LuPlus, LuX } from "react-icons/lu";
|
||||||
|
import { toast } from "sonner";
|
||||||
|
import useSWR from "swr";
|
||||||
|
import { z } from "zod";
|
||||||
|
|
||||||
|
type ClassificationModelEditDialogProps = {
|
||||||
|
open: boolean;
|
||||||
|
model: CustomClassificationModelConfig;
|
||||||
|
onClose: () => void;
|
||||||
|
onSuccess: () => void;
|
||||||
|
};
|
||||||
|
|
||||||
|
type ObjectClassificationType = "sub_label" | "attribute";
|
||||||
|
|
||||||
|
type ObjectFormData = {
|
||||||
|
objectLabel: string;
|
||||||
|
objectType: ObjectClassificationType;
|
||||||
|
};
|
||||||
|
|
||||||
|
type StateFormData = {
|
||||||
|
classes: string[];
|
||||||
|
};
|
||||||
|
|
||||||
|
export default function ClassificationModelEditDialog({
|
||||||
|
open,
|
||||||
|
model,
|
||||||
|
onClose,
|
||||||
|
onSuccess,
|
||||||
|
}: ClassificationModelEditDialogProps) {
|
||||||
|
const { t } = useTranslation(["views/classificationModel"]);
|
||||||
|
const { data: config } = useSWR<FrigateConfig>("config");
|
||||||
|
const [isSaving, setIsSaving] = useState(false);
|
||||||
|
|
||||||
|
const isStateModel = model.state_config !== undefined;
|
||||||
|
const isObjectModel = model.object_config !== undefined;
|
||||||
|
|
||||||
|
const objectLabels = useMemo(() => {
|
||||||
|
if (!config) return [];
|
||||||
|
|
||||||
|
const labels = new Set<string>();
|
||||||
|
|
||||||
|
Object.values(config.cameras).forEach((cameraConfig) => {
|
||||||
|
if (!cameraConfig.enabled || !cameraConfig.enabled_in_config) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
cameraConfig.objects.track.forEach((label) => {
|
||||||
|
if (!config.model.all_attributes.includes(label)) {
|
||||||
|
labels.add(label);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
return [...labels].sort();
|
||||||
|
}, [config]);
|
||||||
|
|
||||||
|
// Define form schema based on model type
|
||||||
|
const formSchema = useMemo(() => {
|
||||||
|
if (isObjectModel) {
|
||||||
|
return z.object({
|
||||||
|
objectLabel: z
|
||||||
|
.string()
|
||||||
|
.min(1, t("wizard.step1.errors.objectLabelRequired")),
|
||||||
|
objectType: z.enum(["sub_label", "attribute"]),
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
// State model
|
||||||
|
return z.object({
|
||||||
|
classes: z
|
||||||
|
.array(z.string())
|
||||||
|
.min(1, t("wizard.step1.errors.classRequired"))
|
||||||
|
.refine(
|
||||||
|
(classes) => {
|
||||||
|
const nonEmpty = classes.filter((c) => c.trim().length > 0);
|
||||||
|
return nonEmpty.length >= 2;
|
||||||
|
},
|
||||||
|
{ message: t("wizard.step1.errors.stateRequiresTwoClasses") },
|
||||||
|
)
|
||||||
|
.refine(
|
||||||
|
(classes) => {
|
||||||
|
const nonEmpty = classes.filter((c) => c.trim().length > 0);
|
||||||
|
const unique = new Set(nonEmpty.map((c) => c.toLowerCase()));
|
||||||
|
return unique.size === nonEmpty.length;
|
||||||
|
},
|
||||||
|
{ message: t("wizard.step1.errors.classesUnique") },
|
||||||
|
),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}, [isObjectModel, t]);
|
||||||
|
|
||||||
|
const form = useForm<ObjectFormData | StateFormData>({
|
||||||
|
resolver: zodResolver(formSchema),
|
||||||
|
defaultValues: isObjectModel
|
||||||
|
? ({
|
||||||
|
objectLabel: model.object_config?.objects?.[0] || "",
|
||||||
|
objectType:
|
||||||
|
(model.object_config
|
||||||
|
?.classification_type as ObjectClassificationType) || "sub_label",
|
||||||
|
} as ObjectFormData)
|
||||||
|
: ({
|
||||||
|
classes: [""], // Will be populated from dataset
|
||||||
|
} as StateFormData),
|
||||||
|
mode: "onChange",
|
||||||
|
});
|
||||||
|
|
||||||
|
// Fetch dataset to get current classes for state models
|
||||||
|
const { data: dataset } = useSWR<{
|
||||||
|
[id: string]: string[];
|
||||||
|
}>(isStateModel ? `classification/${model.name}/dataset` : null, {
|
||||||
|
revalidateOnFocus: false,
|
||||||
|
});
|
||||||
|
|
||||||
|
// Update form with classes from dataset when loaded
|
||||||
|
useEffect(() => {
|
||||||
|
if (isStateModel && dataset) {
|
||||||
|
const classes = Object.keys(dataset).filter((key) => key !== "none");
|
||||||
|
if (classes.length > 0) {
|
||||||
|
(form as ReturnType<typeof useForm<StateFormData>>).setValue(
|
||||||
|
"classes",
|
||||||
|
classes,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}, [dataset, isStateModel, form]);
|
||||||
|
|
||||||
|
const watchedClasses = isStateModel
|
||||||
|
? (form as ReturnType<typeof useForm<StateFormData>>).watch("classes")
|
||||||
|
: undefined;
|
||||||
|
const watchedObjectType = isObjectModel
|
||||||
|
? (form as ReturnType<typeof useForm<ObjectFormData>>).watch("objectType")
|
||||||
|
: undefined;
|
||||||
|
|
||||||
|
const handleAddClass = useCallback(() => {
|
||||||
|
const currentClasses = (
|
||||||
|
form as ReturnType<typeof useForm<StateFormData>>
|
||||||
|
).getValues("classes");
|
||||||
|
(form as ReturnType<typeof useForm<StateFormData>>).setValue(
|
||||||
|
"classes",
|
||||||
|
[...currentClasses, ""],
|
||||||
|
{
|
||||||
|
shouldValidate: true,
|
||||||
|
},
|
||||||
|
);
|
||||||
|
}, [form]);
|
||||||
|
|
||||||
|
const handleRemoveClass = useCallback(
|
||||||
|
(index: number) => {
|
||||||
|
const currentClasses = (
|
||||||
|
form as ReturnType<typeof useForm<StateFormData>>
|
||||||
|
).getValues("classes");
|
||||||
|
const newClasses = currentClasses.filter((_, i) => i !== index);
|
||||||
|
|
||||||
|
// Ensure at least one field remains (even if empty)
|
||||||
|
if (newClasses.length === 0) {
|
||||||
|
(form as ReturnType<typeof useForm<StateFormData>>).setValue(
|
||||||
|
"classes",
|
||||||
|
[""],
|
||||||
|
{ shouldValidate: true },
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
(form as ReturnType<typeof useForm<StateFormData>>).setValue(
|
||||||
|
"classes",
|
||||||
|
newClasses,
|
||||||
|
{ shouldValidate: true },
|
||||||
|
);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
[form],
|
||||||
|
);
|
||||||
|
|
||||||
|
const onSubmit = useCallback(
|
||||||
|
async (data: ObjectFormData | StateFormData) => {
|
||||||
|
setIsSaving(true);
|
||||||
|
try {
|
||||||
|
if (isObjectModel) {
|
||||||
|
const objectData = data as ObjectFormData;
|
||||||
|
|
||||||
|
// Update the config
|
||||||
|
await axios.put("/config/set", {
|
||||||
|
requires_restart: 0,
|
||||||
|
update_topic: `config/classification/custom/${model.name}`,
|
||||||
|
config_data: {
|
||||||
|
classification: {
|
||||||
|
custom: {
|
||||||
|
[model.name]: {
|
||||||
|
enabled: model.enabled,
|
||||||
|
name: model.name,
|
||||||
|
threshold: model.threshold,
|
||||||
|
object_config: {
|
||||||
|
objects: [objectData.objectLabel],
|
||||||
|
classification_type: objectData.objectType,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
toast.success(t("toast.success.updatedModel"), {
|
||||||
|
position: "top-center",
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
// State model - update classes
|
||||||
|
// Note: For state models, updating classes requires renaming categories
|
||||||
|
// which is handled through the dataset API, not the config API
|
||||||
|
// We'll need to implement this by calling the rename endpoint for each class
|
||||||
|
// For now, we just show a message that this requires retraining
|
||||||
|
|
||||||
|
toast.info(t("edit.stateClassesInfo"), {
|
||||||
|
position: "top-center",
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
onSuccess();
|
||||||
|
onClose();
|
||||||
|
} catch (err) {
|
||||||
|
const error = err as {
|
||||||
|
response?: { data?: { message?: string; detail?: string } };
|
||||||
|
};
|
||||||
|
const errorMessage =
|
||||||
|
error.response?.data?.message ||
|
||||||
|
error.response?.data?.detail ||
|
||||||
|
"Unknown error";
|
||||||
|
toast.error(t("toast.error.updateModelFailed", { errorMessage }), {
|
||||||
|
position: "top-center",
|
||||||
|
});
|
||||||
|
} finally {
|
||||||
|
setIsSaving(false);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
[isObjectModel, model, t, onSuccess, onClose],
|
||||||
|
);
|
||||||
|
|
||||||
|
const handleCancel = useCallback(() => {
|
||||||
|
form.reset();
|
||||||
|
onClose();
|
||||||
|
}, [form, onClose]);
|
||||||
|
|
||||||
|
return (
|
||||||
|
<Dialog open={open} onOpenChange={(open) => !open && handleCancel()}>
|
||||||
|
<DialogContent>
|
||||||
|
<DialogHeader>
|
||||||
|
<DialogTitle>{t("edit.title")}</DialogTitle>
|
||||||
|
<DialogDescription>
|
||||||
|
{isStateModel
|
||||||
|
? t("edit.descriptionState")
|
||||||
|
: t("edit.descriptionObject")}
|
||||||
|
</DialogDescription>
|
||||||
|
</DialogHeader>
|
||||||
|
|
||||||
|
<div className="space-y-6">
|
||||||
|
<Form {...form}>
|
||||||
|
<form onSubmit={form.handleSubmit(onSubmit)} className="space-y-4">
|
||||||
|
{isObjectModel && (
|
||||||
|
<>
|
||||||
|
<FormField
|
||||||
|
control={form.control}
|
||||||
|
name="objectLabel"
|
||||||
|
render={({ field }) => (
|
||||||
|
<FormItem>
|
||||||
|
<FormLabel className="text-primary-variant">
|
||||||
|
{t("wizard.step1.objectLabel")}
|
||||||
|
</FormLabel>
|
||||||
|
<Select
|
||||||
|
onValueChange={field.onChange}
|
||||||
|
defaultValue={field.value}
|
||||||
|
>
|
||||||
|
<FormControl>
|
||||||
|
<SelectTrigger className="h-8">
|
||||||
|
<SelectValue
|
||||||
|
placeholder={t(
|
||||||
|
"wizard.step1.objectLabelPlaceholder",
|
||||||
|
)}
|
||||||
|
/>
|
||||||
|
</SelectTrigger>
|
||||||
|
</FormControl>
|
||||||
|
<SelectContent>
|
||||||
|
{objectLabels.map((label) => (
|
||||||
|
<SelectItem
|
||||||
|
key={label}
|
||||||
|
value={label}
|
||||||
|
className="cursor-pointer hover:bg-secondary-highlight"
|
||||||
|
>
|
||||||
|
{getTranslatedLabel(label)}
|
||||||
|
</SelectItem>
|
||||||
|
))}
|
||||||
|
</SelectContent>
|
||||||
|
</Select>
|
||||||
|
<FormMessage />
|
||||||
|
</FormItem>
|
||||||
|
)}
|
||||||
|
/>
|
||||||
|
|
||||||
|
<FormField
|
||||||
|
control={form.control}
|
||||||
|
name="objectType"
|
||||||
|
render={({ field }) => (
|
||||||
|
<FormItem>
|
||||||
|
<FormLabel className="text-primary-variant">
|
||||||
|
{t("wizard.step1.classificationType")}
|
||||||
|
</FormLabel>
|
||||||
|
<FormControl>
|
||||||
|
<RadioGroup
|
||||||
|
onValueChange={field.onChange}
|
||||||
|
defaultValue={field.value}
|
||||||
|
className="flex flex-col gap-4 pt-2"
|
||||||
|
>
|
||||||
|
<div className="flex items-center gap-2">
|
||||||
|
<RadioGroupItem
|
||||||
|
className={
|
||||||
|
watchedObjectType === "sub_label"
|
||||||
|
? "bg-selected from-selected/50 to-selected/90 text-selected"
|
||||||
|
: "bg-secondary from-secondary/50 to-secondary/90 text-secondary"
|
||||||
|
}
|
||||||
|
id="sub_label"
|
||||||
|
value="sub_label"
|
||||||
|
/>
|
||||||
|
<Label
|
||||||
|
className="cursor-pointer"
|
||||||
|
htmlFor="sub_label"
|
||||||
|
>
|
||||||
|
{t("wizard.step1.classificationSubLabel")}
|
||||||
|
</Label>
|
||||||
|
</div>
|
||||||
|
<div className="flex items-center gap-2">
|
||||||
|
<RadioGroupItem
|
||||||
|
className={
|
||||||
|
watchedObjectType === "attribute"
|
||||||
|
? "bg-selected from-selected/50 to-selected/90 text-selected"
|
||||||
|
: "bg-secondary from-secondary/50 to-secondary/90 text-secondary"
|
||||||
|
}
|
||||||
|
id="attribute"
|
||||||
|
value="attribute"
|
||||||
|
/>
|
||||||
|
<Label
|
||||||
|
className="cursor-pointer"
|
||||||
|
htmlFor="attribute"
|
||||||
|
>
|
||||||
|
{t("wizard.step1.classificationAttribute")}
|
||||||
|
</Label>
|
||||||
|
</div>
|
||||||
|
</RadioGroup>
|
||||||
|
</FormControl>
|
||||||
|
<FormMessage />
|
||||||
|
</FormItem>
|
||||||
|
)}
|
||||||
|
/>
|
||||||
|
</>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{isStateModel && (
|
||||||
|
<div className="space-y-2">
|
||||||
|
<div className="flex items-center justify-between">
|
||||||
|
<FormLabel className="text-primary-variant">
|
||||||
|
{t("wizard.step1.states")}
|
||||||
|
</FormLabel>
|
||||||
|
<Button
|
||||||
|
type="button"
|
||||||
|
variant="secondary"
|
||||||
|
className="size-6 rounded-md bg-secondary-foreground p-1 text-background"
|
||||||
|
onClick={handleAddClass}
|
||||||
|
>
|
||||||
|
<LuPlus />
|
||||||
|
</Button>
|
||||||
|
</div>
|
||||||
|
<div className="space-y-2">
|
||||||
|
{watchedClasses?.map((_: string, index: number) => (
|
||||||
|
<FormField
|
||||||
|
key={index}
|
||||||
|
control={
|
||||||
|
(form as ReturnType<typeof useForm<StateFormData>>)
|
||||||
|
.control
|
||||||
|
}
|
||||||
|
name={`classes.${index}` as const}
|
||||||
|
render={({ field }) => (
|
||||||
|
<FormItem>
|
||||||
|
<FormControl>
|
||||||
|
<div className="flex items-center gap-2">
|
||||||
|
<Input
|
||||||
|
className="text-md h-8"
|
||||||
|
placeholder={t(
|
||||||
|
"wizard.step1.classPlaceholder",
|
||||||
|
)}
|
||||||
|
{...field}
|
||||||
|
/>
|
||||||
|
{watchedClasses &&
|
||||||
|
watchedClasses.length > 1 && (
|
||||||
|
<Button
|
||||||
|
type="button"
|
||||||
|
variant="ghost"
|
||||||
|
size="sm"
|
||||||
|
className="h-8 w-8 p-0"
|
||||||
|
onClick={() => handleRemoveClass(index)}
|
||||||
|
>
|
||||||
|
<LuX className="size-4" />
|
||||||
|
</Button>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
</FormControl>
|
||||||
|
</FormItem>
|
||||||
|
)}
|
||||||
|
/>
|
||||||
|
))}
|
||||||
|
</div>
|
||||||
|
{isStateModel &&
|
||||||
|
"classes" in form.formState.errors &&
|
||||||
|
form.formState.errors.classes && (
|
||||||
|
<p className="text-sm font-medium text-destructive">
|
||||||
|
{form.formState.errors.classes.message}
|
||||||
|
</p>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
<div className="flex flex-col gap-3 pt-3 sm:flex-row sm:justify-end sm:gap-4">
|
||||||
|
<Button
|
||||||
|
type="button"
|
||||||
|
onClick={handleCancel}
|
||||||
|
className="sm:flex-1"
|
||||||
|
disabled={isSaving}
|
||||||
|
>
|
||||||
|
{t("button.cancel", { ns: "common" })}
|
||||||
|
</Button>
|
||||||
|
<Button
|
||||||
|
type="submit"
|
||||||
|
variant="select"
|
||||||
|
className="flex items-center justify-center gap-2 sm:flex-1"
|
||||||
|
disabled={!form.formState.isValid || isSaving}
|
||||||
|
>
|
||||||
|
{isSaving
|
||||||
|
? t("button.saving", { ns: "common" })
|
||||||
|
: t("button.save", { ns: "common" })}
|
||||||
|
</Button>
|
||||||
|
</div>
|
||||||
|
</form>
|
||||||
|
</Form>
|
||||||
|
</div>
|
||||||
|
</DialogContent>
|
||||||
|
</Dialog>
|
||||||
|
);
|
||||||
|
}
|
||||||
@@ -317,6 +317,21 @@ export default function Step3ChooseExamples({
     return unclassifiedImages.length === 0;
   }, [unclassifiedImages]);

+  const handleBack = useCallback(() => {
+    if (currentClassIndex > 0) {
+      const previousClass = allClasses[currentClassIndex - 1];
+      setCurrentClassIndex((prev) => prev - 1);
+
+      // Restore selections for the previous class
+      const previousSelections = Object.entries(imageClassifications)
+        .filter(([_, className]) => className === previousClass)
+        .map(([imageName, _]) => imageName);
+      setSelectedImages(new Set(previousSelections));
+    } else {
+      onBack();
+    }
+  }, [currentClassIndex, allClasses, imageClassifications, onBack]);
+
   return (
     <div className="flex flex-col gap-6">
       {isTraining ? (
@@ -420,7 +435,7 @@ export default function Step3ChooseExamples({

       {!isTraining && (
         <div className="flex flex-col gap-3 pt-3 sm:flex-row sm:justify-end sm:gap-4">
-          <Button type="button" onClick={onBack} className="sm:flex-1">
+          <Button type="button" onClick={handleBack} className="sm:flex-1">
             {t("button.back", { ns: "common" })}
           </Button>
           <Button
@@ -348,6 +348,26 @@ export function GeneralFilterContent({
   onClose,
 }: GeneralFilterContentProps) {
   const { t } = useTranslation(["components/filter"]);
+  const { data: config } = useSWR<FrigateConfig>("config", {
+    revalidateOnFocus: false,
+  });
+
+  const allAudioListenLabels = useMemo<string[]>(() => {
+    if (!config) {
+      return [];
+    }
+
+    const labels = new Set<string>();
+    Object.values(config.cameras).forEach((camera) => {
+      if (camera?.audio?.enabled) {
+        camera.audio.listen.forEach((label) => {
+          labels.add(label);
+        });
+      }
+    });
+    return [...labels].sort();
+  }, [config]);
+
   return (
     <>
       <div className="overflow-x-hidden">
@@ -373,7 +393,10 @@ export function GeneralFilterContent({
         {allLabels.map((item) => (
           <FilterSwitch
             key={item}
-            label={getTranslatedLabel(item)}
+            label={getTranslatedLabel(
+              item,
+              allAudioListenLabels.includes(item) ? "audio" : "object",
+            )}
             isChecked={currentLabels?.includes(item) ?? false}
             onCheckedChange={(isChecked) => {
               if (isChecked) {
@@ -8,7 +8,7 @@ import {
   FormMessage,
 } from "@/components/ui/form";
 import { Input } from "@/components/ui/input";
-import { useState, useEffect } from "react";
+import { useState, useEffect, useRef } from "react";
 import { useFormContext } from "react-hook-form";
 import { generateFixedHash, isValidId } from "@/utils/stringUtil";
 import { useTranslation } from "react-i18next";
@@ -25,6 +25,7 @@ type NameAndIdFieldsProps<T extends FieldValues = FieldValues> = {
   processId?: (name: string) => string;
   placeholderName?: string;
   placeholderId?: string;
+  idVisible?: boolean;
 };
 
 export default function NameAndIdFields<T extends FieldValues = FieldValues>({
@@ -39,10 +40,12 @@ export default function NameAndIdFields<T extends FieldValues = FieldValues>({
   processId,
   placeholderName,
   placeholderId,
+  idVisible,
 }: NameAndIdFieldsProps<T>) {
   const { t } = useTranslation(["common"]);
-  const { watch, setValue, trigger } = useFormContext<T>();
-  const [isIdVisible, setIsIdVisible] = useState(false);
+  const { watch, setValue, trigger, formState } = useFormContext<T>();
+  const [isIdVisible, setIsIdVisible] = useState(idVisible ?? false);
+  const hasUserTypedRef = useRef(false);
 
   const defaultProcessId = (name: string) => {
     const normalized = name.replace(/\s+/g, "_").toLowerCase();
@@ -58,6 +61,7 @@ export default function NameAndIdFields<T extends FieldValues = FieldValues>({
   useEffect(() => {
     const subscription = watch((value, { name }) => {
       if (name === nameField) {
+        hasUserTypedRef.current = true;
         const processedId = effectiveProcessId(value[nameField] || "");
         setValue(idField, processedId as PathValue<T, Path<T>>);
         trigger(idField);
@@ -66,6 +70,14 @@ export default function NameAndIdFields<T extends FieldValues = FieldValues>({
     return () => subscription.unsubscribe();
   }, [watch, setValue, trigger, nameField, idField, effectiveProcessId]);
 
+  // Auto-expand if there's an error on the ID field after user has typed
+  useEffect(() => {
+    const idError = formState.errors[idField];
+    if (idError && hasUserTypedRef.current && !isIdVisible) {
+      setIsIdVisible(true);
+    }
+  }, [formState.errors, idField, isIdVisible]);
+
   return (
     <>
       <FormField
@@ -258,6 +258,7 @@ export default function CreateTriggerDialog({
           nameLabel={t("triggers.dialog.form.name.title")}
           nameDescription={t("triggers.dialog.form.name.description")}
           placeholderName={t("triggers.dialog.form.name.placeholder")}
+          idVisible={!!trigger}
         />
 
         <FormField
@@ -59,6 +59,47 @@ export default function ObjectTrackOverlay({
 
   const effectiveCurrentTime = currentTime - annotationOffset / 1000;
 
+  const {
+    pathStroke,
+    pointRadius,
+    pointStroke,
+    zoneStroke,
+    boxStroke,
+    highlightRadius,
+  } = useMemo(() => {
+    const BASE_WIDTH = 1280;
+    const BASE_HEIGHT = 720;
+    const BASE_PATH_STROKE = 5;
+    const BASE_POINT_RADIUS = 7;
+    const BASE_POINT_STROKE = 3;
+    const BASE_ZONE_STROKE = 5;
+    const BASE_BOX_STROKE = 5;
+    const BASE_HIGHLIGHT_RADIUS = 5;
+
+    const scale = Math.sqrt(
+      (videoWidth * videoHeight) / (BASE_WIDTH * BASE_HEIGHT),
+    );
+
+    const pathStroke = Math.max(1, Math.round(BASE_PATH_STROKE * scale));
+    const pointRadius = Math.max(2, Math.round(BASE_POINT_RADIUS * scale));
+    const pointStroke = Math.max(1, Math.round(BASE_POINT_STROKE * scale));
+    const zoneStroke = Math.max(1, Math.round(BASE_ZONE_STROKE * scale));
+    const boxStroke = Math.max(1, Math.round(BASE_BOX_STROKE * scale));
+    const highlightRadius = Math.max(
+      2,
+      Math.round(BASE_HIGHLIGHT_RADIUS * scale),
+    );
+
+    return {
+      pathStroke,
+      pointRadius,
+      pointStroke,
+      zoneStroke,
+      boxStroke,
+      highlightRadius,
+    };
+  }, [videoWidth, videoHeight]);
+
   // Fetch all event data in a single request (CSV ids)
   const { data: eventsData } = useSWR<Event[]>(
     selectedObjectIds.length > 0
@@ -214,16 +255,21 @@ export default function ObjectTrackOverlay({
           b.timestamp - a.timestamp,
       )[0]?.data?.zones || [];
 
-      // bounding box (with tolerance for browsers with seek precision by-design issues)
-      const boxCandidates = timelineData?.filter(
-        (event: TrackingDetailsSequence) =>
-          event.timestamp <= effectiveCurrentTime + TOLERANCE &&
-          event.data.box,
-      );
-      const currentBox = boxCandidates?.sort(
-        (a: TrackingDetailsSequence, b: TrackingDetailsSequence) =>
-          b.timestamp - a.timestamp,
-      )[0]?.data?.box;
+      // bounding box - only show if there's a timeline event at/near the current time with a box
+      // Search all timeline events (not just those before current time) to find one matching the seek position
+      const nearbyTimelineEvent = timelineData
+        ?.filter((event: TrackingDetailsSequence) => event.data.box)
+        .sort(
+          (a: TrackingDetailsSequence, b: TrackingDetailsSequence) =>
+            Math.abs(a.timestamp - effectiveCurrentTime) -
+            Math.abs(b.timestamp - effectiveCurrentTime),
+        )
+        .find(
+          (event: TrackingDetailsSequence) =>
+            Math.abs(event.timestamp - effectiveCurrentTime) <= TOLERANCE,
+        );
+
+      const currentBox = nearbyTimelineEvent?.data?.box;
 
       return {
         objectId,
@@ -349,7 +395,7 @@ export default function ObjectTrackOverlay({
             points={zone.points}
             fill={zone.fill}
             stroke={zone.stroke}
-            strokeWidth="5"
+            strokeWidth={zoneStroke}
             opacity="0.7"
           />
         ))}
@@ -369,7 +415,7 @@ export default function ObjectTrackOverlay({
             d={generateStraightPath(absolutePositions)}
             fill="none"
             stroke={objData.color}
-            strokeWidth="5"
+            strokeWidth={pathStroke}
             strokeLinecap="round"
             strokeLinejoin="round"
           />
@@ -381,13 +427,13 @@ export default function ObjectTrackOverlay({
             <circle
               cx={pos.x}
               cy={pos.y}
-              r="7"
+              r={pointRadius}
               fill={getPointColor(
                 objData.color,
                 pos.lifecycle_item?.class_type,
               )}
               stroke="white"
-              strokeWidth="3"
+              strokeWidth={pointStroke}
               style={{ cursor: onSeekToTime ? "pointer" : "default" }}
               onClick={() => handlePointClick(pos.timestamp)}
             />
@@ -416,7 +462,7 @@ export default function ObjectTrackOverlay({
             height={objData.currentBox[3] * videoHeight}
             fill="none"
             stroke={objData.color}
-            strokeWidth="5"
+            strokeWidth={boxStroke}
             opacity="0.9"
           />
           <circle
@@ -428,10 +474,10 @@ export default function ObjectTrackOverlay({
               (objData.currentBox[1] + objData.currentBox[3]) *
                 videoHeight
             }
-            r="5"
+            r={highlightRadius}
             fill="rgb(255, 255, 0)" // yellow highlight
             stroke={objData.color}
-            strokeWidth="5"
+            strokeWidth={boxStroke}
             opacity="1"
           />
         </g>
@@ -8,7 +8,7 @@ import Heading from "@/components/ui/heading";
 import { FrigateConfig } from "@/types/frigateConfig";
 import { formatUnixTimestampToDateTime } from "@/utils/dateUtil";
 import { getIconForLabel } from "@/utils/iconUtil";
-import { LuCircle, LuSettings } from "react-icons/lu";
+import { LuCircle, LuFolderX, LuSettings } from "react-icons/lu";
 import { cn } from "@/lib/utils";
 import {
   Tooltip,
@@ -38,9 +38,12 @@ import { HiDotsHorizontal } from "react-icons/hi";
 import axios from "axios";
 import { toast } from "sonner";
 import { useDetailStream } from "@/context/detail-stream-context";
-import { isDesktop, isIOS } from "react-device-detect";
+import { isDesktop, isIOS, isMobileOnly, isSafari } from "react-device-detect";
 import Chip from "@/components/indicators/Chip";
 import { FaDownload, FaHistory } from "react-icons/fa";
+import { useApiHost } from "@/api";
+import ImageLoadingIndicator from "@/components/indicators/ImageLoadingIndicator";
+import ObjectTrackOverlay from "../ObjectTrackOverlay";
 
 type TrackingDetailsProps = {
   className?: string;
@@ -57,9 +60,19 @@ export function TrackingDetails({
   const videoRef = useRef<HTMLVideoElement | null>(null);
   const { t } = useTranslation(["views/explore"]);
   const navigate = useNavigate();
+  const apiHost = useApiHost();
+  const imgRef = useRef<HTMLImageElement | null>(null);
+  const [imgLoaded, setImgLoaded] = useState(false);
+  const [displaySource, _setDisplaySource] = useState<"video" | "image">(
+    "video",
+  );
   const { setSelectedObjectIds, annotationOffset, setAnnotationOffset } =
     useDetailStream();
 
+  // manualOverride holds a record-stream timestamp explicitly chosen by the
+  // user (eg, clicking a lifecycle row). When null we display `currentTime`.
+  const [manualOverride, setManualOverride] = useState<number | null>(null);
+
   // event.start_time is detect time, convert to record, then subtract padding
   const [currentTime, setCurrentTime] = useState(
     (event.start_time ?? 0) + annotationOffset / 1000 - REVIEW_PADDING,
@@ -80,9 +93,13 @@ export function TrackingDetails({
     });
   });
 
+  // Use manualOverride (set when seeking in image mode) if present so
+  // lifecycle rows and overlays follow image-mode seeks. Otherwise fall
+  // back to currentTime used for video mode.
   const effectiveTime = useMemo(() => {
-    return currentTime - annotationOffset / 1000;
-  }, [currentTime, annotationOffset]);
+    const displayedRecordTime = manualOverride ?? currentTime;
+    return displayedRecordTime - annotationOffset / 1000;
+  }, [manualOverride, currentTime, annotationOffset]);
 
   const containerRef = useRef<HTMLDivElement | null>(null);
   const [_selectedZone, setSelectedZone] = useState("");
@@ -125,20 +142,30 @@ export function TrackingDetails({
 
   const handleLifecycleClick = useCallback(
     (item: TrackingDetailsSequence) => {
-      if (!videoRef.current) return;
+      if (!videoRef.current && !imgRef.current) return;
 
       // Convert lifecycle timestamp (detect stream) to record stream time
       const targetTimeRecord = item.timestamp + annotationOffset / 1000;
 
-      // Convert to video-relative time for seeking
+      if (displaySource === "image") {
+        // For image mode: set a manual override timestamp and update
+        // currentTime so overlays render correctly.
+        setManualOverride(targetTimeRecord);
+        setCurrentTime(targetTimeRecord);
+        return;
+      }
+
+      // For video mode: convert to video-relative time and seek player
       const eventStartRecord =
         (event.start_time ?? 0) + annotationOffset / 1000;
       const videoStartTime = eventStartRecord - REVIEW_PADDING;
       const relativeTime = targetTimeRecord - videoStartTime;
 
+      if (videoRef.current) {
        videoRef.current.currentTime = relativeTime;
+      }
     },
-    [event.start_time, annotationOffset],
+    [event.start_time, annotationOffset, displaySource],
   );
 
   const formattedStart = config
@@ -179,11 +206,20 @@ export function TrackingDetails({
   }, [eventSequence]);
 
   useEffect(() => {
-    if (seekToTimestamp === null || !videoRef.current) return;
+    if (seekToTimestamp === null) return;
+
+    if (displaySource === "image") {
+      // For image mode, set the manual override so the snapshot updates to
+      // the exact record timestamp.
+      setManualOverride(seekToTimestamp);
+      setSeekToTimestamp(null);
+      return;
+    }
 
     // seekToTimestamp is a record stream timestamp
     // event.start_time is detect stream time, convert to record
     // The video clip starts at (eventStartRecord - REVIEW_PADDING)
+    if (!videoRef.current) return;
     const eventStartRecord = event.start_time + annotationOffset / 1000;
     const videoStartTime = eventStartRecord - REVIEW_PADDING;
     const relativeTime = seekToTimestamp - videoStartTime;
@@ -191,7 +227,14 @@ export function TrackingDetails({
       videoRef.current.currentTime = relativeTime;
     }
     setSeekToTimestamp(null);
-  }, [seekToTimestamp, event.start_time, annotationOffset]);
+  }, [
+    seekToTimestamp,
+    event.start_time,
+    annotationOffset,
+    apiHost,
+    event.camera,
+    displaySource,
+  ]);
 
   const isWithinEventRange =
     effectiveTime !== undefined &&
@@ -294,6 +337,27 @@ export function TrackingDetails({
     [event.start_time, annotationOffset],
   );
 
+  const [src, setSrc] = useState(
+    `${apiHost}api/${event.camera}/recordings/${currentTime + REVIEW_PADDING}/snapshot.jpg?height=500`,
+  );
+  const [hasError, setHasError] = useState(false);
+
+  // Derive the record timestamp to display: manualOverride if present,
+  // otherwise use currentTime.
+  const displayedRecordTime = manualOverride ?? currentTime;
+
+  useEffect(() => {
+    if (displayedRecordTime) {
+      const newSrc = `${apiHost}api/${event.camera}/recordings/${displayedRecordTime}/snapshot.jpg?height=500`;
+      setSrc(newSrc);
+    }
+    setImgLoaded(false);
+    setHasError(false);
+
+    // we know that these deps are correct
+    // eslint-disable-next-line react-hooks/exhaustive-deps
+  }, [displayedRecordTime]);
+
   if (!config) {
     return <ActivityIndicator />;
   }
@@ -311,9 +375,10 @@ export function TrackingDetails({
 
       <div
         className={cn(
-          "flex w-full items-center justify-center",
+          "flex items-center justify-center",
           isDesktop && "overflow-hidden",
           cameraAspect === "tall" ? "max-h-[50dvh] lg:max-h-[70dvh]" : "w-full",
+          cameraAspect === "tall" && isMobileOnly && "w-full",
          cameraAspect !== "tall" && isDesktop && "flex-[3]",
        )}
        style={{ aspectRatio: aspectRatio }}
@@ -325,6 +390,7 @@ export function TrackingDetails({
           cameraAspect === "tall" ? "h-full" : "w-full",
         )}
       >
+        {displaySource == "video" && (
         <HlsVideoPlayer
           videoRef={videoRef}
           containerRef={containerRef}
@@ -340,6 +406,59 @@ export function TrackingDetails({
           camera={event.camera}
           currentTimeOverride={currentTime}
         />
+        )}
+        {displaySource == "image" && (
+          <>
+            <ImageLoadingIndicator
+              className="absolute inset-0"
+              imgLoaded={imgLoaded}
+            />
+            {hasError && (
+              <div className="relative aspect-video">
+                <div className="flex flex-col items-center justify-center p-20 text-center">
+                  <LuFolderX className="size-16" />
+                  {t("objectLifecycle.noImageFound")}
+                </div>
+              </div>
+            )}
+            <div
+              className={cn("relative", imgLoaded ? "visible" : "invisible")}
+            >
+              <div className="absolute z-50 size-full">
+                <ObjectTrackOverlay
+                  key={`overlay-${displayedRecordTime}`}
+                  camera={event.camera}
+                  showBoundingBoxes={true}
+                  currentTime={displayedRecordTime}
+                  videoWidth={imgRef?.current?.naturalWidth ?? 0}
+                  videoHeight={imgRef?.current?.naturalHeight ?? 0}
+                  className="absolute inset-0 z-10"
+                  onSeekToTime={handleSeekToTime}
+                />
+              </div>
+              <img
+                key={event.id}
+                ref={imgRef}
+                className={cn(
+                  "max-h-[50dvh] max-w-full select-none rounded-lg object-contain",
+                )}
+                loading={isSafari ? "eager" : "lazy"}
+                style={
+                  isIOS
+                    ? {
+                        WebkitUserSelect: "none",
+                        WebkitTouchCallout: "none",
+                      }
+                    : undefined
+                }
+                draggable={false}
+                src={src}
+                onLoad={() => setImgLoaded(true)}
+                onError={() => setHasError(true)}
+              />
+            </div>
+          </>
+        )}
         <div
           className={cn(
             "absolute top-2 z-[5] flex items-center gap-2",
@@ -289,6 +289,7 @@ export default function VideoControls({
           }}
           onUploadFrame={onUploadFrame}
           containerRef={containerRef}
+          fullscreen={fullscreen}
         />
       )}
       {features.fullscreen && toggleFullscreen && (
@@ -306,6 +307,7 @@ type FrigatePlusUploadButtonProps = {
   onClose: () => void;
   onUploadFrame: () => void;
   containerRef?: React.MutableRefObject<HTMLDivElement | null>;
+  fullscreen?: boolean;
 };
 function FrigatePlusUploadButton({
   video,
@@ -313,6 +315,7 @@ function FrigatePlusUploadButton({
   onClose,
   onUploadFrame,
   containerRef,
+  fullscreen,
 }: FrigatePlusUploadButtonProps) {
   const { t } = useTranslation(["components/player"]);
 
@@ -349,7 +352,11 @@ function FrigatePlusUploadButton({
         />
       </AlertDialogTrigger>
       <AlertDialogContent
-        portalProps={{ container: containerRef?.current }}
+        portalProps={
+          fullscreen && containerRef?.current
+            ? { container: containerRef.current }
+            : undefined
+        }
         className="md:max-w-2xl lg:max-w-3xl xl:max-w-4xl"
       >
         <AlertDialogHeader>
@@ -174,9 +174,7 @@ export default function CameraWizardDialog({
       ...(friendlyName && { friendly_name: friendlyName }),
       ffmpeg: {
         inputs: wizardData.streams.map((stream, index) => {
-          const isRestreamed =
-            wizardData.restreamIds?.includes(stream.id) ?? false;
-          if (isRestreamed) {
+          if (stream.restream) {
             const go2rtcStreamName =
               wizardData.streams!.length === 1
                 ? finalCameraName
@@ -234,7 +232,11 @@ export default function CameraWizardDialog({
           wizardData.streams!.length === 1
             ? finalCameraName
             : `${finalCameraName}_${index + 1}`;
-        go2rtcStreams[streamName] = [stream.url];
+
+        const streamUrl = stream.useFfmpeg
+          ? `ffmpeg:${stream.url}`
+          : stream.url;
+        go2rtcStreams[streamName] = [streamUrl];
       });
 
       if (Object.keys(go2rtcStreams).length > 0) {
@@ -385,7 +385,7 @@ export default function Step1NameCamera({
               </FormLabel>
               <FormControl>
                 <Input
-                  className="h-8"
+                  className="text-md h-8"
                   placeholder={t(
                     "cameraWizard.step1.cameraNamePlaceholder",
                   )}
@@ -475,7 +475,7 @@ export default function Step1NameCamera({
               </FormLabel>
               <FormControl>
                 <Input
-                  className="h-8"
+                  className="text-md h-8"
                   placeholder="192.168.1.100"
                   {...field}
                 />
@@ -495,7 +495,7 @@ export default function Step1NameCamera({
               </FormLabel>
               <FormControl>
                 <Input
-                  className="h-8"
+                  className="text-md h-8"
                   placeholder={t(
                     "cameraWizard.step1.usernamePlaceholder",
                   )}
@@ -518,7 +518,7 @@ export default function Step1NameCamera({
               <FormControl>
                 <div className="relative">
                   <Input
-                    className="h-8 pr-10"
+                    className="text-md h-8 pr-10"
                     type={showPassword ? "text" : "password"}
                     placeholder={t(
                       "cameraWizard.step1.passwordPlaceholder",
@@ -558,7 +558,7 @@ export default function Step1NameCamera({
               </FormLabel>
               <FormControl>
                 <Input
-                  className="h-8"
+                  className="text-md h-8"
                   placeholder="rtsp://username:password@host:port/path"
                   {...field}
                 />
@@ -608,6 +608,12 @@ export default function Step1NameCamera({
           </div>
         )}
 
+        {isTesting && (
+          <div className="flex items-center gap-2 text-sm text-muted-foreground">
+            <ActivityIndicator className="size-4" />
+            {testStatus}
+          </div>
+        )}
         <div className="flex flex-col gap-3 pt-3 sm:flex-row sm:justify-end sm:gap-4">
           <Button
             type="button"
@@ -635,10 +641,7 @@ export default function Step1NameCamera({
               variant="select"
               className="flex items-center justify-center gap-2 sm:flex-1"
             >
-              {isTesting && <ActivityIndicator className="size-4" />}
-              {isTesting && testStatus
-                ? testStatus
-                : t("cameraWizard.step1.testConnection")}
+              {t("cameraWizard.step1.testConnection")}
             </Button>
           )}
         </div>
@@ -201,16 +201,12 @@ export default function Step2StreamConfig({
 
   const setRestream = useCallback(
     (streamId: string) => {
-      const currentIds = wizardData.restreamIds || [];
-      const isSelected = currentIds.includes(streamId);
-      const newIds = isSelected
-        ? currentIds.filter((id) => id !== streamId)
-        : [...currentIds, streamId];
-      onUpdate({
-        restreamIds: newIds,
-      });
+      const stream = streams.find((s) => s.id === streamId);
+      if (!stream) return;
+
+      updateStream(streamId, { restream: !stream.restream });
     },
-    [wizardData.restreamIds, onUpdate],
+    [streams, updateStream],
   );
 
   const hasDetectRole = streams.some((s) => s.roles.includes("detect"));
@@ -435,9 +431,7 @@ export default function Step2StreamConfig({
               {t("cameraWizard.step2.go2rtc")}
             </span>
             <Switch
-              checked={(wizardData.restreamIds || []).includes(
-                stream.id,
-              )}
+              checked={stream.restream || false}
               onCheckedChange={() => setRestream(stream.id)}
             />
           </div>
@@ -1,7 +1,13 @@
 import { Button } from "@/components/ui/button";
 import { Badge } from "@/components/ui/badge";
+import { Switch } from "@/components/ui/switch";
+import {
+  Popover,
+  PopoverContent,
+  PopoverTrigger,
+} from "@/components/ui/popover";
 import { useTranslation } from "react-i18next";
-import { LuRotateCcw } from "react-icons/lu";
+import { LuRotateCcw, LuInfo } from "react-icons/lu";
 import { useState, useCallback, useMemo, useEffect } from "react";
 import ActivityIndicator from "@/components/indicators/activity-indicator";
 import axios from "axios";
@@ -216,7 +222,6 @@ export default function Step3Validation({
       brandTemplate: wizardData.brandTemplate,
       customUrl: wizardData.customUrl,
       streams: wizardData.streams,
-      restreamIds: wizardData.restreamIds,
     };
 
     onSave(configData);
@@ -322,6 +327,51 @@ export default function Step3Validation({
           </div>
         )}
 
+        {result?.success && (
+          <div className="mb-3 flex items-center justify-between">
+            <div className="flex items-center gap-2">
+              <span className="text-sm">
+                {t("cameraWizard.step3.ffmpegModule")}
+              </span>
+              <Popover>
+                <PopoverTrigger asChild>
+                  <Button
+                    variant="ghost"
+                    size="sm"
+                    className="h-4 w-4 p-0"
+                  >
+                    <LuInfo className="size-3" />
+                  </Button>
+                </PopoverTrigger>
+                <PopoverContent className="pointer-events-auto w-80 text-xs">
+                  <div className="space-y-2">
+                    <div className="font-medium">
+                      {t("cameraWizard.step3.ffmpegModule")}
+                    </div>
+                    <div className="text-muted-foreground">
+                      {t(
+                        "cameraWizard.step3.ffmpegModuleDescription",
+                      )}
+                    </div>
+                  </div>
+                </PopoverContent>
+              </Popover>
+            </div>
+            <Switch
+              checked={stream.useFfmpeg || false}
+              onCheckedChange={(checked) => {
+                onUpdate({
+                  streams: streams.map((s) =>
+                    s.id === stream.id
+                      ? { ...s, useFfmpeg: checked }
+                      : s,
+                  ),
+                });
+              }}
+            />
+          </div>
+        )}
+
         <div className="mb-2 flex flex-col justify-between gap-1 md:flex-row md:items-center">
           <span className="break-all text-sm text-muted-foreground">
             {stream.url}
@@ -491,8 +541,7 @@ function StreamIssues({
 
   // Restreaming check
   if (stream.roles.includes("record")) {
-    const restreamIds = wizardData.restreamIds || [];
-    if (restreamIds.includes(stream.id)) {
+    if (stream.restream) {
       result.push({
         type: "warning",
         message: t("cameraWizard.step3.issues.restreamingWarning"),
@@ -660,9 +709,10 @@ function StreamPreview({ stream, onBandwidthUpdate }: StreamPreviewProps) {
 
   useEffect(() => {
     // Register stream with go2rtc
+    const streamUrl = stream.useFfmpeg ? `ffmpeg:${stream.url}` : stream.url;
     axios
       .put(`go2rtc/streams/${streamId}`, null, {
-        params: { src: stream.url },
+        params: { src: streamUrl },
       })
       .then(() => {
         // Add small delay to allow go2rtc api to run and initialize the stream
@@ -680,7 +730,7 @@ function StreamPreview({ stream, onBandwidthUpdate }: StreamPreviewProps) {
         // do nothing on cleanup errors - go2rtc won't consume the streams
       });
     };
-  }, [stream.url, streamId]);
+  }, [stream.url, stream.useFfmpeg, streamId]);
 
   const resolution = stream.testResult?.resolution;
   let aspectRatio = "16/9";
@@ -368,7 +368,11 @@ function ReviewGroup({
   return (
     <div
       data-review-id={id}
-      className="cursor-pointer rounded-lg bg-secondary py-3"
+      className={`mx-1 cursor-pointer rounded-lg bg-secondary px-0 py-3 outline outline-[2px] -outline-offset-[1.8px] ${
+        isActive
+          ? "shadow-selected outline-selected"
+          : "outline-transparent duration-500"
+      }`}
     >
       <div
         className={cn(
@@ -383,10 +387,10 @@ function ReviewGroup({
       <div className="ml-4 mr-2 mt-1.5 flex flex-row items-start">
         <LuCircle
           className={cn(
-            "size-3",
-            isActive
-              ? "fill-selected text-selected"
-              : "fill-muted duration-500 dark:fill-secondary-highlight dark:text-secondary-highlight",
+            "size-3 duration-500",
+            review.severity == "alert"
+              ? "fill-severity_alert text-severity_alert"
+              : "fill-severity_detection text-severity_detection",
           )}
         />
       </div>
@@ -455,6 +459,7 @@ function ReviewGroup({
           <EventList
             key={event.id}
             event={event}
+            review={review}
             effectiveTime={effectiveTime}
             annotationOffset={annotationOffset}
             onSeek={onSeek}
@@ -489,6 +494,7 @@ function ReviewGroup({
 
 type EventListProps = {
   event: Event;
+  review: ReviewSegment;
   effectiveTime?: number;
   annotationOffset: number;
   onSeek: (ts: number, play?: boolean) => void;
@@ -496,6 +502,7 @@ type EventListProps = {
 };
 function EventList({
   event,
+  review,
   effectiveTime,
   annotationOffset,
   onSeek,
@@ -614,6 +621,7 @@ function EventList({
 
       <div className="mt-2">
         <ObjectTimeline
+          review={review}
           eventId={event.id}
           onSeek={handleTimelineClick}
           effectiveTime={effectiveTime}
@@ -772,6 +780,7 @@ function LifecycleItem({
 
 // Fetch and render timeline entries for a single event id on demand.
 function ObjectTimeline({
+  review,
   eventId,
   onSeek,
   effectiveTime,
@@ -779,6 +788,7 @@ function ObjectTimeline({
   startTime,
   endTime,
 }: {
+  review: ReviewSegment;
   eventId: string;
   onSeek: (ts: number, play?: boolean) => void;
   effectiveTime?: number;
@@ -787,13 +797,27 @@ function ObjectTimeline({
   endTime?: number;
 }) {
   const { t } = useTranslation("views/events");
-  const { data: timeline, isValidating } = useSWR<TrackingDetailsSequence[]>([
+  const { data: fullTimeline, isValidating } = useSWR<
+    TrackingDetailsSequence[]
+  >([
     "timeline",
     {
       source_id: eventId,
     },
   ]);
 
+  const timeline = useMemo(() => {
+    if (!fullTimeline) {
+      return fullTimeline;
+    }
+
+    return fullTimeline.filter(
+      (t) =>
+        t.timestamp >= review.start_time &&
+        (review.end_time == undefined || t.timestamp <= review.end_time),
+    );
+  }, [fullTimeline, review]);
+
   if (isValidating && (!timeline || timeline.length === 0)) {
     return <ActivityIndicator className="ml-2 size-3" />;
   }
@@ -101,7 +101,7 @@ export default function Step1NameAndType({
 
   const form = useForm<z.infer<typeof formSchema>>({
     resolver: zodResolver(formSchema),
-    mode: "onChange",
+    mode: "onBlur",
     defaultValues: {
       enabled: true,
       name: initialData?.name ?? trigger?.name ?? "",
@@ -845,6 +845,7 @@ function FaceAttemptGroup({
           selectedItems={selectedFaces}
           i18nLibrary="views/faceLibrary"
           objectType="person"
+          noClassificationLabel="details.unknown"
           onClick={(data) => {
             if (data) {
               onClickFaces([data.filename], true);
@@ -157,9 +157,11 @@ function MobileMenuItem({
   const { t } = useTranslation(["views/settings"]);
 
   return (
-    <Button
-      variant="ghost"
-      className={cn("w-full justify-between pr-2", className)}
+    <div
+      className={cn(
+        "inline-flex h-10 w-full cursor-pointer items-center justify-between whitespace-nowrap rounded-md px-4 py-2 pr-2 text-sm font-medium text-primary-variant disabled:pointer-events-none disabled:opacity-50",
+        className,
+      )}
       onClick={() => {
         onSelect(item.key);
         onClose?.();
@@ -167,7 +169,7 @@ function MobileMenuItem({
     >
       <div className="smart-capitalize">{t("menu." + item.key)}</div>
       <LuChevronRight className="size-4" />
-    </Button>
+    </div>
   );
 }
 
@@ -273,6 +275,9 @@ export default function Settings() {
       } else {
         setPageToggle(page as SettingsType);
       }
+      if (isMobile) {
+        setContentMobileOpen(true);
+      }
     }
     // don't clear url params if we're creating a new object mask
     return !(searchParams.has("object_mask") || searchParams.has("event_id"));
@@ -282,6 +287,9 @@ export default function Settings() {
       const cameraNames = cameras.map((c) => c.name);
       if (cameraNames.includes(camera)) {
         setSelectedCamera(camera);
+        if (isMobile) {
+          setContentMobileOpen(true);
+        }
       }
     // don't clear url params if we're creating a new object mask or trigger
     return !(searchParams.has("object_mask") || searchParams.has("event_id"));
@@ -85,6 +85,8 @@ export type StreamConfig = {
   quality?: string;
   testResult?: TestResult;
   userTested?: boolean;
+  useFfmpeg?: boolean;
+  restream?: boolean;
 };
 
 export type TestResult = {
@@ -105,7 +107,6 @@ export type WizardFormData = {
   brandTemplate?: CameraBrand;
   customUrl?: string;
   streams?: StreamConfig[];
-  restreamIds?: string[];
 };
 
 // API Response Types
@@ -146,6 +147,7 @@ export type CameraConfigData = {
     inputs: {
       path: string;
       roles: string[];
+      input_args?: string;
     }[];
   };
   live?: {
@@ -307,6 +307,7 @@ export type CustomClassificationModelConfig = {
   threshold: number;
   object_config?: {
     objects: string[];
+    classification_type: string;
   };
   state_config?: {
     cameras: {
@@ -43,5 +43,5 @@ export function generateFixedHash(name: string, prefix: string = "id"): string {
  * @returns True if the name is valid, false otherwise
  */
 export function isValidId(name: string): boolean {
-  return /^[a-zA-Z0-9_-]+$/.test(name);
+  return /^[a-zA-Z0-9_-]+$/.test(name) && !/^\d+$/.test(name);
 }
@ -1,5 +1,6 @@
|
|||||||
import { baseUrl } from "@/api/baseUrl";
|
import { baseUrl } from "@/api/baseUrl";
|
||||||
import ClassificationModelWizardDialog from "@/components/classification/ClassificationModelWizardDialog";
|
import ClassificationModelWizardDialog from "@/components/classification/ClassificationModelWizardDialog";
|
||||||
|
import ClassificationModelEditDialog from "@/components/classification/ClassificationModelEditDialog";
|
||||||
import ActivityIndicator from "@/components/indicators/activity-indicator";
|
import ActivityIndicator from "@/components/indicators/activity-indicator";
|
||||||
import { ImageShadowOverlay } from "@/components/overlay/ImageShadowOverlay";
|
import { ImageShadowOverlay } from "@/components/overlay/ImageShadowOverlay";
|
||||||
import { Button, buttonVariants } from "@/components/ui/button";
|
import { Button, buttonVariants } from "@/components/ui/button";
|
||||||
@ -10,18 +11,17 @@ import {
|
|||||||
CustomClassificationModelConfig,
|
CustomClassificationModelConfig,
|
||||||
FrigateConfig,
|
FrigateConfig,
|
||||||
} from "@/types/frigateConfig";
|
} from "@/types/frigateConfig";
|
||||||
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
|
import { useCallback, useEffect, useMemo, useState } from "react";
|
||||||
import { useTranslation } from "react-i18next";
|
import { useTranslation } from "react-i18next";
|
||||||
import { FaFolderPlus } from "react-icons/fa";
|
import { FaFolderPlus } from "react-icons/fa";
|
||||||
import { MdModelTraining } from "react-icons/md";
|
import { MdModelTraining } from "react-icons/md";
|
||||||
import { LuTrash2 } from "react-icons/lu";
|
import { LuPencil, LuTrash2 } from "react-icons/lu";
|
||||||
import { FiMoreVertical } from "react-icons/fi";
|
import { FiMoreVertical } from "react-icons/fi";
|
||||||
import useSWR from "swr";
|
import useSWR from "swr";
|
||||||
import Heading from "@/components/ui/heading";
|
import Heading from "@/components/ui/heading";
|
||||||
import { useOverlayState } from "@/hooks/use-overlay-state";
|
import { useOverlayState } from "@/hooks/use-overlay-state";
|
||||||
import axios from "axios";
|
import axios from "axios";
|
||||||
import { toast } from "sonner";
|
import { toast } from "sonner";
|
||||||
import useKeyboardListener from "@/hooks/use-keyboard-listener";
|
|
||||||
import {
|
import {
|
||||||
DropdownMenu,
|
DropdownMenu,
|
||||||
DropdownMenuContent,
|
DropdownMenuContent,
|
||||||
@ -164,6 +164,7 @@ export default function ModelSelectionView({
|
|||||||
key={config.name}
|
key={config.name}
|
||||||
config={config}
|
config={config}
|
||||||
onClick={() => onClick(config)}
|
onClick={() => onClick(config)}
|
||||||
|
onUpdate={() => refreshConfig()}
|
||||||
onDelete={() => refreshConfig()}
|
onDelete={() => refreshConfig()}
|
||||||
/>
|
/>
|
||||||
))}
|
))}
|
||||||
@ -202,9 +203,10 @@ function NoModelsView({
|
|||||||
type ModelCardProps = {
|
type ModelCardProps = {
|
||||||
config: CustomClassificationModelConfig;
|
config: CustomClassificationModelConfig;
|
||||||
onClick: () => void;
|
onClick: () => void;
|
||||||
|
onUpdate: () => void;
|
||||||
onDelete: () => void;
|
onDelete: () => void;
|
||||||
};
|
};
|
||||||
function ModelCard({ config, onClick, onDelete }: ModelCardProps) {
|
function ModelCard({ config, onClick, onUpdate, onDelete }: ModelCardProps) {
|
||||||
const { t } = useTranslation(["views/classificationModel"]);
|
const { t } = useTranslation(["views/classificationModel"]);
|
||||||
|
|
||||||
const { data: dataset } = useSWR<{
|
const { data: dataset } = useSWR<{
|
||||||
@ -212,25 +214,31 @@ function ModelCard({ config, onClick, onDelete }: ModelCardProps) {
|
|||||||
}>(`classification/${config.name}/dataset`, { revalidateOnFocus: false });
|
}>(`classification/${config.name}/dataset`, { revalidateOnFocus: false });
|
||||||
|
|
||||||
const [deleteDialogOpen, setDeleteDialogOpen] = useState(false);
|
const [deleteDialogOpen, setDeleteDialogOpen] = useState(false);
|
||||||
const bypassDialogRef = useRef(false);
|
const [editDialogOpen, setEditDialogOpen] = useState(false);
|
||||||
|
|
||||||
useKeyboardListener(["Shift"], (_, modifiers) => {
|
|
||||||
bypassDialogRef.current = modifiers.shift;
|
|
||||||
return false;
|
|
||||||
});
|
|
||||||
|
|
||||||
const handleDelete = useCallback(async () => {
|
const handleDelete = useCallback(async () => {
|
||||||
await axios
|
try {
|
||||||
.delete(`classification/${config.name}`)
|
await axios.delete(`classification/${config.name}`);
|
||||||
.then((resp) => {
|
await axios.put("/config/set", {
|
||||||
if (resp.status == 200) {
|
requires_restart: 0,
|
||||||
|
update_topic: `config/classification/custom/${config.name}`,
|
||||||
|
config_data: {
|
||||||
|
classification: {
|
||||||
|
custom: {
|
||||||
|
[config.name]: "",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
toast.success(t("toast.success.deletedModel", { count: 1 }), {
|
toast.success(t("toast.success.deletedModel", { count: 1 }), {
|
||||||
position: "top-center",
|
position: "top-center",
|
||||||
});
|
});
|
||||||
onDelete();
|
onDelete();
|
||||||
}
|
} catch (err) {
|
||||||
})
|
const error = err as {
|
||||||
.catch((error) => {
|
response?: { data?: { message?: string; detail?: string } };
|
||||||
|
};
|
||||||
const errorMessage =
|
const errorMessage =
|
||||||
error.response?.data?.message ||
|
error.response?.data?.message ||
|
||||||
error.response?.data?.detail ||
|
error.response?.data?.detail ||
|
||||||
@ -238,16 +246,18 @@ function ModelCard({ config, onClick, onDelete }: ModelCardProps) {
|
|||||||
toast.error(t("toast.error.deleteModelFailed", { errorMessage }), {
|
toast.error(t("toast.error.deleteModelFailed", { errorMessage }), {
|
||||||
position: "top-center",
|
position: "top-center",
|
||||||
});
|
});
|
||||||
});
|
}
|
||||||
}, [config, onDelete, t]);
|
}, [config, onDelete, t]);
|
||||||
|
|
||||||
const handleDeleteClick = useCallback(() => {
|
const handleDeleteClick = useCallback((e: React.MouseEvent) => {
|
||||||
if (bypassDialogRef.current) {
|
e.stopPropagation();
|
||||||
handleDelete();
|
|
||||||
} else {
|
|
||||||
setDeleteDialogOpen(true);
|
setDeleteDialogOpen(true);
|
||||||
}
|
}, []);
|
||||||
}, [handleDelete]);
|
|
||||||
|
const handleEditClick = useCallback((e: React.MouseEvent) => {
|
||||||
|
e.stopPropagation();
|
||||||
|
setEditDialogOpen(true);
|
||||||
|
}, []);
|
||||||
|
|
||||||
const coverImage = useMemo(() => {
|
const coverImage = useMemo(() => {
|
||||||
if (!dataset) {
|
if (!dataset) {
|
||||||
@ -269,6 +279,13 @@ function ModelCard({ config, onClick, onDelete }: ModelCardProps) {
|

   return (
     <>
+      <ClassificationModelEditDialog
+        open={editDialogOpen}
+        model={config}
+        onClose={() => setEditDialogOpen(false)}
+        onSuccess={() => onUpdate()}
+      />
+
       <AlertDialog
         open={deleteDialogOpen}
         onOpenChange={() => setDeleteDialogOpen(!deleteDialogOpen)}
@@ -304,7 +321,7 @@ function ModelCard({ config, onClick, onDelete }: ModelCardProps) {
             className="size-full"
             src={`${baseUrl}clips/${config.name}/dataset/${coverImage?.name}/${coverImage?.img}`}
           />
-          <ImageShadowOverlay />
+          <ImageShadowOverlay lowerClassName="h-[30%] z-0" />
           <div className="absolute bottom-2 left-3 text-lg text-white smart-capitalize">
             {config.name}
           </div>
@@ -315,14 +332,17 @@ function ModelCard({ config, onClick, onDelete }: ModelCardProps) {
               <FiMoreVertical className="size-5 text-white" />
             </BlurredIconButton>
           </DropdownMenuTrigger>
-          <DropdownMenuContent align="end">
+          <DropdownMenuContent
+            align="end"
+            onClick={(e) => e.stopPropagation()}
+          >
+            <DropdownMenuItem onClick={handleEditClick}>
+              <LuPencil className="mr-2 size-4" />
+              <span>{t("button.edit", { ns: "common" })}</span>
+            </DropdownMenuItem>
             <DropdownMenuItem onClick={handleDeleteClick}>
               <LuTrash2 className="mr-2 size-4" />
-              <span>
-                {bypassDialogRef.current
-                  ? t("button.deleteNow", { ns: "common" })
-                  : t("button.delete", { ns: "common" })}
-              </span>
+              <span>{t("button.delete", { ns: "common" })}</span>
             </DropdownMenuItem>
           </DropdownMenuContent>
         </DropdownMenu>
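The refactor above replaces the promise chain in handleDelete with try/catch: it deletes the model, blanks the model's entry out of the classification config via a /config/set PUT, and narrows the caught value before reading a message from the response. A minimal sketch of that error-narrowing step as a standalone TypeScript helper follows; the helper name and fallback string are illustrative assumptions, not part of this change.

// Hypothetical helper mirroring the narrowing used in the catch block above.
type ApiError = {
  response?: { data?: { message?: string; detail?: string } };
};

function extractApiErrorMessage(err: unknown, fallback = "Unknown error"): string {
  const error = err as ApiError;
  return (
    error.response?.data?.message ??
    error.response?.data?.detail ??
    fallback
  );
}

// Example (assuming an axios-style client):
//   try {
//     await axios.delete(`classification/${name}`);
//   } catch (err) {
//     toast.error(extractApiErrorMessage(err));
//   }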
@@ -327,6 +327,7 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
       </AlertDialog>

       <div className="flex flex-row justify-between gap-2 p-2 align-middle">
+        {(isDesktop || !selectedImages?.length) && (
           <div className="flex flex-row items-center justify-center gap-2">
             <Button
               className="flex items-center gap-2.5 rounded-lg"
@@ -340,6 +341,7 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
               </div>
             )}
           </Button>
+
           <LibrarySelector
             pageToggle={pageToggle}
             dataset={dataset || {}}
@@ -349,9 +351,15 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
             onRename={() => {}}
           />
         </div>
+        )}
         {selectedImages?.length > 0 ? (
-          <div className="flex items-center justify-center gap-2">
-            <div className="mx-1 flex w-48 items-center justify-center text-sm text-muted-foreground">
+          <div
+            className={cn(
+              "flex w-full items-center justify-end gap-2",
+              isMobileOnly && "justify-between",
+            )}
+          >
+            <div className="flex w-48 items-center justify-center text-sm text-muted-foreground">
              <div className="p-1">{`${selectedImages.length} selected`}</div>
              <div className="p-1">{"|"}</div>
              <div
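The selection toolbar above now builds its classes with cn(), keeping the actions right-aligned on desktop and spreading them across the full row on mobile. A minimal sketch of that conditional-class pattern, assuming the project's usual cn helper (clsx plus tailwind-merge, imported here from a hypothetical "@/lib/utils" path) and react-device-detect's isMobileOnly flag:

import { isMobileOnly } from "react-device-detect";
import { cn } from "@/lib/utils"; // assumed location of the clsx/tailwind-merge wrapper

// Sketch of a selection toolbar whose alignment flips on mobile.
export function SelectionToolbar({ count }: { count: number }) {
  return (
    <div
      className={cn(
        // desktop: push the actions to the right edge
        "flex w-full items-center justify-end gap-2",
        // mobile: spread the count and the actions across the row
        isMobileOnly && "justify-between",
      )}
    >
      <span className="text-sm text-muted-foreground">{`${count} selected`}</span>
      {/* action buttons would follow here */}
    </div>
  );
}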
@@ -961,6 +969,7 @@ function ObjectTrainGrid({
           selectedItems={selectedImages}
           i18nLibrary="views/classificationModel"
           objectType={model.object_config?.objects?.at(0) ?? "Object"}
+          noClassificationLabel="details.none"
           onClick={(data) => {
             if (data) {
               onClickImages([data.filename], true);
@@ -136,7 +136,7 @@ export default function EventView({

   const [selectedReviews, setSelectedReviews] = useState<ReviewSegment[]>([]);
   const onSelectReview = useCallback(
-    (review: ReviewSegment, ctrl: boolean) => {
+    (review: ReviewSegment, ctrl: boolean, detail: boolean) => {
       if (selectedReviews.length > 0 || ctrl) {
         const index = selectedReviews.findIndex((r) => r.id === review.id);

@@ -156,17 +156,31 @@ export default function EventView({
           setSelectedReviews(copy);
         }
       } else {
+        // If a specific date is selected in the calendar and it's after the event start,
+        // use the selected date instead of the event start time
+        const effectiveStartTime =
+          timeRange.after > review.start_time
+            ? timeRange.after
+            : review.start_time;
+
         onOpenRecording({
           camera: review.camera,
-          startTime: review.start_time - REVIEW_PADDING,
+          startTime: effectiveStartTime - REVIEW_PADDING,
           severity: review.severity,
+          timelineType: detail ? "detail" : undefined,
         });

         review.has_been_reviewed = true;
         markItemAsReviewed(review);
       }
     },
-    [selectedReviews, setSelectedReviews, onOpenRecording, markItemAsReviewed],
+    [
+      selectedReviews,
+      setSelectedReviews,
+      onOpenRecording,
+      markItemAsReviewed,
+      timeRange.after,
+    ],
   );
   const onSelectAllReviews = useCallback(() => {
     if (!currentReviewItems || currentReviewItems.length == 0) {
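The hunk above clamps the recording start time: when a calendar date has been selected and its range starts after the review item began, playback opens at the selected range start instead of the event start. A minimal sketch of that clamp, with placeholder names and an assumed padding value:

// REVIEW_PADDING and the argument names are placeholders mirroring the diff,
// not Frigate's actual constants.
const REVIEW_PADDING = 4; // seconds of lead-in before the event

function recordingStartTime(rangeAfter: number, reviewStart: number): number {
  // Use the later of the selected range start and the event start,
  // then back off by the padding so the clip begins slightly before the event.
  const effectiveStartTime = Math.max(rangeAfter, reviewStart);
  return effectiveStartTime - REVIEW_PADDING;
}

// Example: a review that began before the selected day opens at the day's start.
//   recordingStartTime(1_700_000_000, 1_699_999_000) === 1_700_000_000 - REVIEW_PADDING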
@@ -402,7 +416,6 @@ export default function EventView({
           onSelectAllReviews={onSelectAllReviews}
           setSelectedReviews={setSelectedReviews}
           pullLatestData={pullLatestData}
-          onOpenRecording={onOpenRecording}
         />
       )}
       {severity == "significant_motion" && (
@@ -442,11 +455,14 @@ type DetectionReviewProps = {
   loading: boolean;
   markItemAsReviewed: (review: ReviewSegment) => void;
   markAllItemsAsReviewed: (currentItems: ReviewSegment[]) => void;
-  onSelectReview: (review: ReviewSegment, ctrl: boolean) => void;
+  onSelectReview: (
+    review: ReviewSegment,
+    ctrl: boolean,
+    detail: boolean,
+  ) => void;
   onSelectAllReviews: () => void;
   setSelectedReviews: (reviews: ReviewSegment[]) => void;
   pullLatestData: () => void;
-  onOpenRecording: (recordingInfo: RecordingStartingPoint) => void;
 };
 function DetectionReview({
   contentRef,
@@ -466,7 +482,6 @@ function DetectionReview({
   onSelectAllReviews,
   setSelectedReviews,
   pullLatestData,
-  onOpenRecording,
 }: DetectionReviewProps) {
   const { t } = useTranslation(["views/events"]);

@@ -758,16 +773,7 @@ function DetectionReview({
                   ctrl: boolean,
                   detail: boolean,
                 ) => {
-                  if (detail) {
-                    onOpenRecording({
-                      camera: review.camera,
-                      startTime: review.start_time - REVIEW_PADDING,
-                      severity: review.severity,
-                      timelineType: "detail",
-                    });
-                  } else {
-                    onSelectReview(review, ctrl);
-                  }
+                  onSelectReview(review, ctrl, detail);
                 }}
               />
             </div>
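The hunks above drop the onOpenRecording prop from DetectionReview and instead thread a detail flag up through onSelectReview, so the parent EventView decides whether to open the recording in detail mode. A minimal sketch of the resulting callback shape; the factory name and the trimmed types are assumptions for illustration:

// openRecording stands in for the parent's onOpenRecording handler.
type ReviewSegmentLite = { camera: string; start_time: number; severity: string };

function makeOnSelectReview(
  openRecording: (opts: {
    camera: string;
    startTime: number;
    severity: string;
    timelineType?: "detail";
  }) => void,
) {
  return (review: ReviewSegmentLite, ctrl: boolean, detail: boolean) => {
    if (ctrl) {
      // multi-select path; handled by the selection logic in the real component
      return;
    }
    openRecording({
      camera: review.camera,
      startTime: review.start_time,
      severity: review.severity,
      timelineType: detail ? "detail" : undefined,
    });
  };
}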
@@ -970,7 +970,6 @@ function Timeline({
           "relative overflow-hidden",
           isDesktop
             ? cn(
-                "no-scrollbar overflow-y-auto",
                 timelineType == "timeline"
                   ? "w-[100px] flex-shrink-0"
                   : timelineType == "detail"
@@ -709,11 +709,11 @@ export default function CameraSettingsView({
             <div className="flex w-full flex-row items-center gap-2 pt-2 md:w-[25%]">
               <Button
                 className="flex flex-1"
-                aria-label={t("button.cancel", { ns: "common" })}
+                aria-label={t("button.reset", { ns: "common" })}
                 onClick={onCancel}
                 type="button"
               >
-                <Trans>button.cancel</Trans>
+                <Trans>button.reset</Trans>
               </Button>
               <Button
                 variant="select"