Mirror of https://github.com/blakeblackshear/frigate.git (synced 2025-12-19 19:46:43 +03:00)

Compare commits: 90b7086b1e...303ebff4e3

No commits in common. "90b7086b1e1fa261a748c2867aa32621ea1b6491" and "303ebff4e3021f3b82bf35f29267086af18c2bef" have entirely different histories.
@@ -5,12 +5,6 @@ set -euxo pipefail
 SQLITE3_VERSION="3.46.1"
 PYSQLITE3_VERSION="0.5.3"
 
-# Install libsqlite3-dev if not present (needed for some base images like NVIDIA TensorRT)
-if ! dpkg -l | grep -q libsqlite3-dev; then
-    echo "Installing libsqlite3-dev for compilation..."
-    apt-get update && apt-get install -y libsqlite3-dev && rm -rf /var/lib/apt/lists/*
-fi
-
 # Fetch the pre-built sqlite amalgamation instead of building from source
 if [[ ! -d "sqlite" ]]; then
     mkdir sqlite
@@ -2,9 +2,9 @@
 set -e
 
 # Download the MxAccl for Frigate github release
-wget https://github.com/memryx/mx_accl_frigate/archive/refs/tags/v2.1.0.zip -O /tmp/mxaccl.zip
+wget https://github.com/memryx/mx_accl_frigate/archive/refs/heads/main.zip -O /tmp/mxaccl.zip
 unzip /tmp/mxaccl.zip -d /tmp
-mv /tmp/mx_accl_frigate-2.1.0 /opt/mx_accl_frigate
+mv /tmp/mx_accl_frigate-main /opt/mx_accl_frigate
 rm /tmp/mxaccl.zip
 
 # Install Python dependencies
@@ -56,7 +56,7 @@ pywebpush == 2.0.*
 # alpr
 pyclipper == 1.3.*
 shapely == 2.0.*
-rapidfuzz==3.12.*
+Levenshtein==0.26.*
 # HailoRT Wheels
 appdirs==1.4.*
 argcomplete==2.0.*
@@ -24,13 +24,10 @@ echo "Adding MemryX GPG key and repository..."
 wget -qO- https://developer.memryx.com/deb/memryx.asc | sudo tee /etc/apt/trusted.gpg.d/memryx.asc >/dev/null
 echo 'deb https://developer.memryx.com/deb stable main' | sudo tee /etc/apt/sources.list.d/memryx.list >/dev/null
 
-# Update and install specific SDK 2.1 packages
-echo "Installing MemryX SDK 2.1 packages..."
+# Update and install memx-drivers
+echo "Installing memx-drivers..."
 sudo apt update
-sudo apt install -y memx-drivers=2.1.* memx-accl=2.1.* mxa-manager=2.1.*
+sudo apt install -y memx-drivers
 
-# Hold packages to prevent automatic upgrades
-sudo apt-mark hold memx-drivers memx-accl mxa-manager
-
 # ARM-specific board setup
 if [[ "$arch" == "aarch64" || "$arch" == "arm64" ]]; then
@@ -40,5 +37,11 @@ fi
 
 echo -e "\n\n\033[1;31mYOU MUST RESTART YOUR COMPUTER NOW\033[0m\n\n"
 
-echo "MemryX SDK 2.1 installation complete!"
+# Install other runtime packages
+packages=("memx-accl" "mxa-manager")
+for pkg in "${packages[@]}"; do
+    echo "Installing $pkg..."
+    sudo apt install -y "$pkg"
+done
 
+echo "MemryX installation complete!"
@@ -112,7 +112,7 @@ RUN apt-get update \
     && apt-get install -y protobuf-compiler libprotobuf-dev \
     && rm -rf /var/lib/apt/lists/*
 RUN --mount=type=bind,source=docker/tensorrt/requirements-models-arm64.txt,target=/requirements-tensorrt-models.txt \
-    pip3 wheel --wheel-dir=/trt-model-wheels --no-deps -r /requirements-tensorrt-models.txt
+    pip3 wheel --wheel-dir=/trt-model-wheels -r /requirements-tensorrt-models.txt
 
 FROM wget AS jetson-ffmpeg
 ARG DEBIAN_FRONTEND
@@ -145,8 +145,7 @@ COPY --from=trt-wheels /etc/TENSORRT_VER /etc/TENSORRT_VER
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
     --mount=type=bind,from=trt-model-wheels,source=/trt-model-wheels,target=/deps/trt-model-wheels \
     pip3 uninstall -y onnxruntime \
-    && pip3 install -U /deps/trt-wheels/*.whl \
-    && pip3 install -U /deps/trt-model-wheels/*.whl \
+    && pip3 install -U /deps/trt-wheels/*.whl /deps/trt-model-wheels/*.whl \
     && ldconfig
 
 WORKDIR /opt/frigate/
@@ -1,2 +1 @@
 cuda-python == 12.6.*; platform_machine == 'aarch64'
-numpy == 1.26.*; platform_machine == 'aarch64'
@@ -16,7 +16,15 @@ To create a zone, follow [the steps for a "Motion mask"](masks.md), but use the
 
 ### Restricting alerts and detections to specific zones
 
-Often you will only want alerts to be created when an object enters areas of interest. This is done using zones along with setting required_zones. Let's say you only want to have an alert created when an object enters your entire_yard zone, the config would be:
+You can flexibly define alert or detection zones, allowing you to focus on what matters most.
+
+Often you will only want alerts to be created when an object enters areas of interest. This is done using zones along with setting the review classification.
+
+For example, if you only want an alert created when an object enters your `Entire Yard` zone, go to the `Camera → Review → Alerts` settings, check the `Entire Yard` zone you just created, and save the changes.
+
+![zones-review](/img/zones-review.png)
+
+The resulting config would be:
 
 ```yaml
 cameras:
@@ -27,11 +35,13 @@
           - entire_yard
     zones:
       entire_yard:
-        friendly_name: Entire yard # You can use characters from any language text
+        friendly_name: Entire yard🏡 # You can use characters from any language, including emojis.
         coordinates: ...
 ```
 
-You may also want to filter detections to only be created when an object enters a secondary area of interest. This is done using zones along with setting required_zones. Let's say you want alerts when an object enters the inner area of the yard but detections when an object enters the edge of the yard, the config would be
+You may also want to filter detections to only be created when an object enters a secondary area of interest. This is done using zones along with setting required_zones. Let's say you want alerts when an object enters the inner area of the yard but detections when an object enters the edge of the yard. Simply go to the `Detections` option on the previous page, check `Limit detections to specific zones`, and then select the desired zones.
+
+The resulting config would be:
 
 ```yaml
 cameras:
@@ -45,10 +55,10 @@
           - edge_yard
     zones:
       edge_yard:
-        friendly_name: Edge yard # You can use characters from any language text
+        friendly_name: Edge yard🚗 # You can use characters from any language, including emojis.
         coordinates: ...
       inner_yard:
-        friendly_name: Inner yard # You can use characters from any language text
+        friendly_name: Inner yard🪵 # You can use characters from any language, including emojis.
         coordinates: ...
 ```
 
@@ -62,7 +72,7 @@
           - entire_yard
     zones:
       entire_yard:
-        friendly_name: Entire yard
+        friendly_name: Entire yard🏡
         coordinates: ...
 ```
 
@@ -86,6 +96,7 @@
 
 Only car objects can trigger the `front_yard_street` zone and only person can trigger the `entire_yard`. Objects will be tracked for any `person` that enter anywhere in the yard, and for cars only if they enter the street.
 
+Of course, you can also manually select the desired tracked object when editing zones on the `Masks / Zones` page.
 
 ### Zone Loitering
 
@@ -94,6 +105,7 @@ Sometimes objects are expected to be passing through a zone, but an object loit
 :::note
 
 When using loitering zones, a review item will behave in the following way:
 
 - When a person is in a loitering zone, the review item will remain active until the person leaves the loitering zone, regardless of if they are stationary.
 - When any other object is in a loitering zone, the review item will remain active until the loitering time is met. Then if the object is stationary the review item will end.
+
BIN docs/static/img/zones-review.png (vendored, new file). Binary file not shown. Size: 108 KiB.
@@ -37,6 +37,7 @@ from frigate.stats.prometheus import get_metrics, update_metrics
 from frigate.util.builtin import (
     clean_camera_user_pass,
     flatten_config_data,
+    get_tz_modifiers,
     process_config_query_string,
     update_yaml_file_bulk,
 )
@@ -47,7 +48,6 @@ from frigate.util.services import (
     restart_frigate,
     vainfo_hwaccel,
 )
-from frigate.util.time import get_tz_modifiers
 from frigate.version import VERSION
 
 logger = logging.getLogger(__name__)
@@ -403,13 +403,12 @@ def config_set(request: Request, body: AppConfigSetBody):
                 settings,
             )
         else:
-            # Generic handling for global config updates
+            # Handle nested config updates (e.g., config/classification/custom/{name})
             settings = config.get_nested_object(body.update_topic)
-
-            # Publish None for removal, actual config for add/update
-            request.app.config_publisher.publisher.publish(
-                body.update_topic, settings
-            )
+            if settings:
+                request.app.config_publisher.publisher.publish(
+                    body.update_topic, settings
+                )
 
     return JSONResponse(
         content=(
@@ -31,14 +31,14 @@ from frigate.api.defs.response.generic_response import GenericResponse
 from frigate.api.defs.tags import Tags
 from frigate.config import FrigateConfig
 from frigate.config.camera import DetectConfig
-from frigate.const import CLIPS_DIR, FACE_DIR, MODEL_CACHE_DIR
+from frigate.const import CLIPS_DIR, FACE_DIR
 from frigate.embeddings import EmbeddingsContext
 from frigate.models import Event
 from frigate.util.classification import (
     collect_object_classification_examples,
     collect_state_classification_examples,
 )
-from frigate.util.file import get_event_snapshot
+from frigate.util.path import get_event_snapshot
 
 logger = logging.getLogger(__name__)
 
@@ -828,13 +828,9 @@ def delete_classification_model(request: Request, name: str):
             status_code=404,
         )
 
-    # Delete the classification model's data directory in clips
-    data_dir = os.path.join(CLIPS_DIR, sanitize_filename(name))
-    if os.path.exists(data_dir):
-        shutil.rmtree(data_dir)
-
-    # Delete the classification model's files in model_cache
-    model_dir = os.path.join(MODEL_CACHE_DIR, sanitize_filename(name))
+    # Delete the classification model's data directory
+    model_dir = os.path.join(CLIPS_DIR, sanitize_filename(name))
     if os.path.exists(model_dir):
         shutil.rmtree(model_dir)
 
@@ -2,7 +2,6 @@
 
 import base64
 import datetime
-import json
 import logging
 import os
 import random
@@ -58,8 +57,8 @@ from frigate.const import CLIPS_DIR, TRIGGER_DIR
 from frigate.embeddings import EmbeddingsContext
 from frigate.models import Event, ReviewSegment, Timeline, Trigger
 from frigate.track.object_processing import TrackedObject
-from frigate.util.file import get_event_thumbnail_bytes
-from frigate.util.time import get_dst_transitions, get_tz_modifiers
+from frigate.util.builtin import get_tz_modifiers
+from frigate.util.path import get_event_thumbnail_bytes
 
 logger = logging.getLogger(__name__)
 
@@ -814,6 +813,7 @@ def events_summary(
     allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
 ):
     tz_name = params.timezone
+    hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(tz_name)
     has_clip = params.has_clip
     has_snapshot = params.has_snapshot
 
@@ -828,91 +828,33 @@
     if len(clauses) == 0:
         clauses.append((True))
 
-    time_range_query = (
+    groups = (
         Event.select(
-            fn.MIN(Event.start_time).alias("min_time"),
-            fn.MAX(Event.start_time).alias("max_time"),
+            Event.camera,
+            Event.label,
+            Event.sub_label,
+            Event.data,
+            fn.strftime(
+                "%Y-%m-%d",
+                fn.datetime(
+                    Event.start_time, "unixepoch", hour_modifier, minute_modifier
+                ),
+            ).alias("day"),
+            Event.zones,
+            fn.COUNT(Event.id).alias("count"),
         )
         .where(reduce(operator.and_, clauses) & (Event.camera << allowed_cameras))
-        .dicts()
-        .get()
+        .group_by(
+            Event.camera,
+            Event.label,
+            Event.sub_label,
+            Event.data,
+            (Event.start_time + seconds_offset).cast("int") / (3600 * 24),
+            Event.zones,
+        )
     )
 
-    min_time = time_range_query.get("min_time")
-    max_time = time_range_query.get("max_time")
-
-    if min_time is None or max_time is None:
-        return JSONResponse(content=[])
-
-    dst_periods = get_dst_transitions(tz_name, min_time, max_time)
-
-    grouped: dict[tuple, dict] = {}
-
-    for period_start, period_end, period_offset in dst_periods:
-        hours_offset = int(period_offset / 60 / 60)
-        minutes_offset = int(period_offset / 60 - hours_offset * 60)
-        period_hour_modifier = f"{hours_offset} hour"
-        period_minute_modifier = f"{minutes_offset} minute"
-
-        period_groups = (
-            Event.select(
-                Event.camera,
-                Event.label,
-                Event.sub_label,
-                Event.data,
-                fn.strftime(
-                    "%Y-%m-%d",
-                    fn.datetime(
-                        Event.start_time,
-                        "unixepoch",
-                        period_hour_modifier,
-                        period_minute_modifier,
-                    ),
-                ).alias("day"),
-                Event.zones,
-                fn.COUNT(Event.id).alias("count"),
-            )
-            .where(
-                reduce(operator.and_, clauses)
-                & (Event.camera << allowed_cameras)
-                & (Event.start_time >= period_start)
-                & (Event.start_time <= period_end)
-            )
-            .group_by(
-                Event.camera,
-                Event.label,
-                Event.sub_label,
-                Event.data,
-                (Event.start_time + period_offset).cast("int") / (3600 * 24),
-                Event.zones,
-            )
-            .namedtuples()
-        )
-
-        for g in period_groups:
-            key = (
-                g.camera,
-                g.label,
-                g.sub_label,
-                json.dumps(g.data, sort_keys=True) if g.data is not None else None,
-                g.day,
-                json.dumps(g.zones, sort_keys=True) if g.zones is not None else None,
-            )
-
-            if key in grouped:
-                grouped[key]["count"] += int(g.count or 0)
-            else:
-                grouped[key] = {
-                    "camera": g.camera,
-                    "label": g.label,
-                    "sub_label": g.sub_label,
-                    "data": g.data,
-                    "day": g.day,
-                    "zones": g.zones,
-                    "count": int(g.count or 0),
-                }
-
-    return JSONResponse(content=list(grouped.values()))
+    return JSONResponse(content=[e for e in groups.dicts()])
 
 
 @router.get(
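The removed implementation above groups events per DST period by turning each period's UTC offset (in seconds) into the hour/minute modifier strings that SQLite's `datetime()` accepts. A minimal standalone sketch of that conversion, with the helper name `offset_to_modifiers` invented here for illustration:

```python
def offset_to_modifiers(offset_seconds: int) -> tuple[str, str]:
    # Same arithmetic as the removed code: truncate toward zero,
    # e.g. -16200 seconds (UTC-4:30) -> ("-4 hour", "-30 minute")
    hours = int(offset_seconds / 60 / 60)
    minutes = int(offset_seconds / 60 - hours * 60)
    return f"{hours} hour", f"{minutes} minute"


# Example: US Eastern offsets on either side of a DST transition
assert offset_to_modifiers(-5 * 3600) == ("-5 hour", "0 minute")
assert offset_to_modifiers(-4 * 3600) == ("-4 hour", "0 minute")
```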
@@ -34,7 +34,7 @@ from frigate.record.export import (
     PlaybackSourceEnum,
     RecordingExporter,
 )
-from frigate.util.time import is_current_hour
+from frigate.util.builtin import is_current_hour
 
 logger = logging.getLogger(__name__)
 
@@ -44,9 +44,9 @@ from frigate.const import (
 )
 from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
 from frigate.track.object_processing import TrackedObjectProcessor
-from frigate.util.file import get_event_thumbnail_bytes
+from frigate.util.builtin import get_tz_modifiers
 from frigate.util.image import get_image_from_recording
-from frigate.util.time import get_dst_transitions
+from frigate.util.path import get_event_thumbnail_bytes
 
 logger = logging.getLogger(__name__)
 
@@ -424,6 +424,7 @@ def all_recordings_summary(
     allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
 ):
     """Returns true/false by day indicating if recordings exist"""
+    hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)
 
     cameras = params.cameras
     if cameras != "all":
@@ -431,70 +432,41 @@ def all_recordings_summary(
         filtered = requested.intersection(allowed_cameras)
         if not filtered:
             return JSONResponse(content={})
-        camera_list = list(filtered)
+        cameras = ",".join(filtered)
     else:
-        camera_list = allowed_cameras
+        cameras = allowed_cameras
 
-    time_range_query = (
+    query = (
         Recordings.select(
-            fn.MIN(Recordings.start_time).alias("min_time"),
-            fn.MAX(Recordings.start_time).alias("max_time"),
+            fn.strftime(
+                "%Y-%m-%d",
+                fn.datetime(
+                    Recordings.start_time + seconds_offset,
+                    "unixepoch",
+                    hour_modifier,
+                    minute_modifier,
+                ),
+            ).alias("day")
         )
-        .where(Recordings.camera << camera_list)
-        .dicts()
-        .get()
+        .group_by(
+            fn.strftime(
+                "%Y-%m-%d",
+                fn.datetime(
+                    Recordings.start_time + seconds_offset,
+                    "unixepoch",
+                    hour_modifier,
+                    minute_modifier,
+                ),
+            )
+        )
+        .order_by(Recordings.start_time.desc())
     )
 
-    min_time = time_range_query.get("min_time")
-    max_time = time_range_query.get("max_time")
+    if params.cameras != "all":
+        query = query.where(Recordings.camera << cameras.split(","))
 
-    if min_time is None or max_time is None:
-        return JSONResponse(content={})
-
-    dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
-
-    days: dict[str, bool] = {}
-
-    for period_start, period_end, period_offset in dst_periods:
-        hours_offset = int(period_offset / 60 / 60)
-        minutes_offset = int(period_offset / 60 - hours_offset * 60)
-        period_hour_modifier = f"{hours_offset} hour"
-        period_minute_modifier = f"{minutes_offset} minute"
-
-        period_query = (
-            Recordings.select(
-                fn.strftime(
-                    "%Y-%m-%d",
-                    fn.datetime(
-                        Recordings.start_time,
-                        "unixepoch",
-                        period_hour_modifier,
-                        period_minute_modifier,
-                    ),
-                ).alias("day")
-            )
-            .where(
-                (Recordings.camera << camera_list)
-                & (Recordings.end_time >= period_start)
-                & (Recordings.start_time <= period_end)
-            )
-            .group_by(
-                fn.strftime(
-                    "%Y-%m-%d",
-                    fn.datetime(
-                        Recordings.start_time,
-                        "unixepoch",
-                        period_hour_modifier,
-                        period_minute_modifier,
-                    ),
-                )
-            )
-            .order_by(Recordings.start_time.desc())
-            .namedtuples()
-        )
-
-        for g in period_query:
-            days[g.day] = True
+    recording_days = query.namedtuples()
+    days = {day.day: True for day in recording_days}
 
     return JSONResponse(content=days)
 
@@ -504,103 +476,61 @@
 )
 async def recordings_summary(camera_name: str, timezone: str = "utc"):
     """Returns hourly summary for recordings of given camera"""
-    time_range_query = (
+    hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(timezone)
+    recording_groups = (
         Recordings.select(
-            fn.MIN(Recordings.start_time).alias("min_time"),
-            fn.MAX(Recordings.start_time).alias("max_time"),
+            fn.strftime(
+                "%Y-%m-%d %H",
+                fn.datetime(
+                    Recordings.start_time, "unixepoch", hour_modifier, minute_modifier
+                ),
+            ).alias("hour"),
+            fn.SUM(Recordings.duration).alias("duration"),
+            fn.SUM(Recordings.motion).alias("motion"),
+            fn.SUM(Recordings.objects).alias("objects"),
         )
         .where(Recordings.camera == camera_name)
-        .dicts()
-        .get()
+        .group_by((Recordings.start_time + seconds_offset).cast("int") / 3600)
+        .order_by(Recordings.start_time.desc())
+        .namedtuples()
     )
 
-    min_time = time_range_query.get("min_time")
-    max_time = time_range_query.get("max_time")
-
-    days: dict[str, dict] = {}
-
-    if min_time is None or max_time is None:
-        return JSONResponse(content=list(days.values()))
-
-    dst_periods = get_dst_transitions(timezone, min_time, max_time)
-
-    for period_start, period_end, period_offset in dst_periods:
-        hours_offset = int(period_offset / 60 / 60)
-        minutes_offset = int(period_offset / 60 - hours_offset * 60)
-        period_hour_modifier = f"{hours_offset} hour"
-        period_minute_modifier = f"{minutes_offset} minute"
-
-        recording_groups = (
-            Recordings.select(
-                fn.strftime(
-                    "%Y-%m-%d %H",
-                    fn.datetime(
-                        Recordings.start_time,
-                        "unixepoch",
-                        period_hour_modifier,
-                        period_minute_modifier,
-                    ),
-                ).alias("hour"),
-                fn.SUM(Recordings.duration).alias("duration"),
-                fn.SUM(Recordings.motion).alias("motion"),
-                fn.SUM(Recordings.objects).alias("objects"),
-            )
-            .where(
-                (Recordings.camera == camera_name)
-                & (Recordings.end_time >= period_start)
-                & (Recordings.start_time <= period_end)
-            )
-            .group_by((Recordings.start_time + period_offset).cast("int") / 3600)
-            .order_by(Recordings.start_time.desc())
-            .namedtuples()
-        )
+    event_groups = (
+        Event.select(
+            fn.strftime(
+                "%Y-%m-%d %H",
+                fn.datetime(
+                    Event.start_time, "unixepoch", hour_modifier, minute_modifier
+                ),
+            ).alias("hour"),
+            fn.COUNT(Event.id).alias("count"),
+        )
+        .where(Event.camera == camera_name, Event.has_clip)
+        .group_by((Event.start_time + seconds_offset).cast("int") / 3600)
+        .namedtuples()
+    )
 
-        event_groups = (
-            Event.select(
-                fn.strftime(
-                    "%Y-%m-%d %H",
-                    fn.datetime(
-                        Event.start_time,
-                        "unixepoch",
-                        period_hour_modifier,
-                        period_minute_modifier,
-                    ),
-                ).alias("hour"),
-                fn.COUNT(Event.id).alias("count"),
-            )
-            .where(Event.camera == camera_name, Event.has_clip)
-            .where(
-                (Event.start_time >= period_start) & (Event.start_time <= period_end)
-            )
-            .group_by((Event.start_time + period_offset).cast("int") / 3600)
-            .namedtuples()
-        )
+    event_map = {g.hour: g.count for g in event_groups}
 
-        event_map = {g.hour: g.count for g in event_groups}
+    days = {}
 
-        for recording_group in recording_groups:
-            parts = recording_group.hour.split()
-            hour = parts[1]
-            day = parts[0]
-            events_count = event_map.get(recording_group.hour, 0)
-            hour_data = {
-                "hour": hour,
-                "events": events_count,
-                "motion": recording_group.motion,
-                "objects": recording_group.objects,
-                "duration": round(recording_group.duration),
-            }
-            if day in days:
-                # merge counts if already present (edge-case at DST boundary)
-                days[day]["events"] += events_count or 0
-                days[day]["hours"].append(hour_data)
-            else:
-                days[day] = {
-                    "events": events_count or 0,
-                    "hours": [hour_data],
-                    "day": day,
-                }
+    for recording_group in recording_groups:
+        parts = recording_group.hour.split()
+        hour = parts[1]
+        day = parts[0]
+        events_count = event_map.get(recording_group.hour, 0)
+        hour_data = {
+            "hour": hour,
+            "events": events_count,
+            "motion": recording_group.motion,
+            "objects": recording_group.objects,
+            "duration": round(recording_group.duration),
+        }
+        if day not in days:
+            days[day] = {"events": events_count, "hours": [hour_data], "day": day}
+        else:
+            days[day]["events"] += events_count
+            days[day]["hours"].append(hour_data)
 
     return JSONResponse(content=list(days.values()))
 
@@ -36,7 +36,7 @@ from frigate.config import FrigateConfig
 from frigate.embeddings import EmbeddingsContext
 from frigate.models import Recordings, ReviewSegment, UserReviewStatus
 from frigate.review.types import SeverityEnum
-from frigate.util.time import get_dst_transitions
+from frigate.util.builtin import get_tz_modifiers
 
 logger = logging.getLogger(__name__)
 
@@ -197,6 +197,7 @@ async def review_summary(
 
     user_id = current_user["username"]
 
+    hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)
     day_ago = (datetime.datetime.now() - datetime.timedelta(hours=24)).timestamp()
 
     cameras = params.cameras
@@ -328,135 +329,89 @@
     )
     clauses.append(reduce(operator.or_, label_clauses))
 
-    # Find the time range of available data
-    time_range_query = (
+    day_in_seconds = 60 * 60 * 24
+    last_month_query = (
         ReviewSegment.select(
-            fn.MIN(ReviewSegment.start_time).alias("min_time"),
-            fn.MAX(ReviewSegment.start_time).alias("max_time"),
+            fn.strftime(
+                "%Y-%m-%d",
+                fn.datetime(
+                    ReviewSegment.start_time,
+                    "unixepoch",
+                    hour_modifier,
+                    minute_modifier,
+                ),
+            ).alias("day"),
+            fn.SUM(
+                Case(
+                    None,
+                    [
+                        (
+                            (ReviewSegment.severity == SeverityEnum.alert)
+                            & (UserReviewStatus.has_been_reviewed == True),
+                            1,
+                        )
+                    ],
+                    0,
+                )
+            ).alias("reviewed_alert"),
+            fn.SUM(
+                Case(
+                    None,
+                    [
+                        (
+                            (ReviewSegment.severity == SeverityEnum.detection)
+                            & (UserReviewStatus.has_been_reviewed == True),
+                            1,
+                        )
+                    ],
+                    0,
+                )
+            ).alias("reviewed_detection"),
+            fn.SUM(
+                Case(
+                    None,
+                    [
+                        (
+                            (ReviewSegment.severity == SeverityEnum.alert),
+                            1,
+                        )
+                    ],
+                    0,
+                )
+            ).alias("total_alert"),
+            fn.SUM(
+                Case(
+                    None,
+                    [
+                        (
+                            (ReviewSegment.severity == SeverityEnum.detection),
+                            1,
+                        )
+                    ],
+                    0,
+                )
+            ).alias("total_detection"),
         )
+        .left_outer_join(
+            UserReviewStatus,
+            on=(
+                (ReviewSegment.id == UserReviewStatus.review_segment)
+                & (UserReviewStatus.user_id == user_id)
+            ),
+        )
         .where(reduce(operator.and_, clauses) if clauses else True)
-        .dicts()
-        .get()
+        .group_by(
+            (ReviewSegment.start_time + seconds_offset).cast("int") / day_in_seconds
+        )
+        .order_by(ReviewSegment.start_time.desc())
     )
 
-    min_time = time_range_query.get("min_time")
-    max_time = time_range_query.get("max_time")
-
     data = {
         "last24Hours": last_24_query,
     }
 
-    # If no data, return early
-    if min_time is None or max_time is None:
-        return JSONResponse(content=data)
-
-    # Get DST transition periods
-    dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
-
-    day_in_seconds = 60 * 60 * 24
-
-    # Query each DST period separately with the correct offset
-    for period_start, period_end, period_offset in dst_periods:
-        # Calculate hour/minute modifiers for this period
-        hours_offset = int(period_offset / 60 / 60)
-        minutes_offset = int(period_offset / 60 - hours_offset * 60)
-        period_hour_modifier = f"{hours_offset} hour"
-        period_minute_modifier = f"{minutes_offset} minute"
-
-        # Build clauses including time range for this period
-        period_clauses = clauses.copy()
-        period_clauses.append(
-            (ReviewSegment.start_time >= period_start)
-            & (ReviewSegment.start_time <= period_end)
-        )
-
-        period_query = (
-            ReviewSegment.select(
-                fn.strftime(
-                    "%Y-%m-%d",
-                    fn.datetime(
-                        ReviewSegment.start_time,
-                        "unixepoch",
-                        period_hour_modifier,
-                        period_minute_modifier,
-                    ),
-                ).alias("day"),
-                fn.SUM(
-                    Case(
-                        None,
-                        [
-                            (
-                                (ReviewSegment.severity == SeverityEnum.alert)
-                                & (UserReviewStatus.has_been_reviewed == True),
-                                1,
-                            )
-                        ],
-                        0,
-                    )
-                ).alias("reviewed_alert"),
-                fn.SUM(
-                    Case(
-                        None,
-                        [
-                            (
-                                (ReviewSegment.severity == SeverityEnum.detection)
-                                & (UserReviewStatus.has_been_reviewed == True),
-                                1,
-                            )
-                        ],
-                        0,
-                    )
-                ).alias("reviewed_detection"),
-                fn.SUM(
-                    Case(
-                        None,
-                        [
-                            (
-                                (ReviewSegment.severity == SeverityEnum.alert),
-                                1,
-                            )
-                        ],
-                        0,
-                    )
-                ).alias("total_alert"),
-                fn.SUM(
-                    Case(
-                        None,
-                        [
-                            (
-                                (ReviewSegment.severity == SeverityEnum.detection),
-                                1,
-                            )
-                        ],
-                        0,
-                    )
-                ).alias("total_detection"),
-            )
-            .left_outer_join(
-                UserReviewStatus,
-                on=(
-                    (ReviewSegment.id == UserReviewStatus.review_segment)
-                    & (UserReviewStatus.user_id == user_id)
-                ),
-            )
-            .where(reduce(operator.and_, period_clauses))
-            .group_by(
-                (ReviewSegment.start_time + period_offset).cast("int") / day_in_seconds
-            )
-            .order_by(ReviewSegment.start_time.desc())
-        )
-
-        # Merge results from this period
-        for e in period_query.dicts().iterator():
-            day_key = e["day"]
-            if day_key in data:
-                # Merge counts if day already exists (edge case at DST boundary)
-                data[day_key]["reviewed_alert"] += e["reviewed_alert"] or 0
-                data[day_key]["reviewed_detection"] += e["reviewed_detection"] or 0
-                data[day_key]["total_alert"] += e["total_alert"] or 0
-                data[day_key]["total_detection"] += e["total_detection"] or 0
-            else:
-                data[day_key] = e
+    for e in last_month_query.dicts().iterator():
+        data[e["day"]] = e
 
     return JSONResponse(content=data)
 
@@ -14,8 +14,8 @@ from typing import Any, List, Optional, Tuple
 
 import cv2
 import numpy as np
+from Levenshtein import distance, jaro_winkler
 from pyclipper import ET_CLOSEDPOLYGON, JT_ROUND, PyclipperOffset
-from rapidfuzz.distance import JaroWinkler, Levenshtein
 from shapely.geometry import Polygon
 
 from frigate.comms.event_metadata_updater import (
@@ -1123,9 +1123,7 @@ class LicensePlateProcessingMixin:
         for i, plate in enumerate(plates):
             merged = False
             for j, cluster in enumerate(clusters):
-                sims = [
-                    JaroWinkler.similarity(plate["plate"], v["plate"]) for v in cluster
-                ]
+                sims = [jaro_winkler(plate["plate"], v["plate"]) for v in cluster]
                 if len(sims) > 0:
                     avg_sim = sum(sims) / len(sims)
                     if avg_sim >= self.cluster_threshold:
@@ -1502,7 +1500,7 @@ class LicensePlateProcessingMixin:
                 and current_time - data["last_seen"]
                 <= self.config.cameras[camera].lpr.expire_time
             ):
-                similarity = JaroWinkler.similarity(data["plate"], top_plate)
+                similarity = jaro_winkler(data["plate"], top_plate)
                 if similarity >= self.similarity_threshold:
                     plate_id = existing_id
                     logger.debug(
@@ -1582,8 +1580,7 @@ class LicensePlateProcessingMixin:
                 for label, plates_list in self.lpr_config.known_plates.items()
                 if any(
                     re.match(f"^{plate}$", rep_plate)
-                    or Levenshtein.distance(plate, rep_plate)
-                    <= self.lpr_config.match_distance
+                    or distance(plate, rep_plate) <= self.lpr_config.match_distance
                     for plate in plates_list
                 )
             ),
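These hunks replace the `rapidfuzz` distance classes with the module-level functions from the `Levenshtein` package. A small sketch comparing the two call styles (the example strings are arbitrary, and both packages must be installed for it to run):

```python
# Old style: rapidfuzz distance classes
from rapidfuzz.distance import JaroWinkler, Levenshtein

JaroWinkler.similarity("ABC123", "ABC124")  # similarity score in [0, 1]
Levenshtein.distance("ABC123", "ABC124")    # edit distance, here 1

# New style: python-Levenshtein module-level functions
from Levenshtein import distance, jaro_winkler

jaro_winkler("ABC123", "ABC124")  # similarity score in [0, 1]
distance("ABC123", "ABC124")      # edit distance, here 1
```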
@@ -20,8 +20,8 @@ from frigate.genai import GenAIClient
 from frigate.models import Event
 from frigate.types import TrackedObjectUpdateTypesEnum
 from frigate.util.builtin import EventsPerSecond, InferenceSpeed
-from frigate.util.file import get_event_thumbnail_bytes
 from frigate.util.image import create_thumbnail, ensure_jpeg_bytes
+from frigate.util.path import get_event_thumbnail_bytes
 
 if TYPE_CHECKING:
     from frigate.embeddings import Embeddings
@@ -22,7 +22,7 @@ from frigate.db.sqlitevecq import SqliteVecQueueDatabase
 from frigate.embeddings.util import ZScoreNormalization
 from frigate.models import Event, Trigger
 from frigate.util.builtin import cosine_distance
-from frigate.util.file import get_event_thumbnail_bytes
+from frigate.util.path import get_event_thumbnail_bytes
 
 from ..post.api import PostProcessorApi
 from ..types import DataProcessorMetrics
@@ -466,7 +466,6 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
                 now,
                 self.labelmap[best_id],
                 score,
-                max_files=200,
             )
 
         if score < self.model_config.threshold:
@@ -530,7 +529,6 @@ def write_classification_attempt(
     timestamp: float,
     label: str,
     score: float,
-    max_files: int = 100,
 ) -> None:
     if "-" in label:
         label = label.replace("-", "_")
@@ -546,5 +544,5 @@
     )
 
     # delete oldest face image if maximum is reached
-    if len(files) > max_files:
+    if len(files) > 100:
         os.unlink(os.path.join(folder, files[-1]))
@@ -166,7 +166,6 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
         camera = obj_data["camera"]
 
         if not self.config.cameras[camera].face_recognition.enabled:
-            logger.debug(f"Face recognition disabled for camera {camera}, skipping")
             return
 
         start = datetime.datetime.now().timestamp()
@@ -209,7 +208,6 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
             person_box = obj_data.get("box")
 
             if not person_box:
-                logger.debug(f"No person box available for {id}")
                 return
 
             rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
@@ -235,8 +233,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
 
             try:
                 face_frame = cv2.cvtColor(face_frame, cv2.COLOR_RGB2BGR)
-            except Exception as e:
-                logger.debug(f"Failed to convert face frame color for {id}: {e}")
+            except Exception:
                 return
         else:
             # don't run for object without attributes
@@ -254,7 +251,6 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
 
             # no faces detected in this frame
             if not face:
-                logger.debug(f"No face attributes found for {id}")
                 return
 
             face_box = face.get("box")
@@ -278,7 +274,6 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
         res = self.recognizer.classify(face_frame)
 
         if not res:
-            logger.debug(f"Face recognizer returned no result for {id}")
             self.__update_metrics(datetime.datetime.now().timestamp() - start)
             return
 
@@ -335,7 +330,6 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
     def handle_request(self, topic, request_data) -> dict[str, Any] | None:
         if topic == EmbeddingsRequestEnum.clear_face_classifier.value:
             self.recognizer.clear()
-            return {"success": True, "message": "Face classifier cleared."}
         elif topic == EmbeddingsRequestEnum.recognize_face.value:
             img = cv2.imdecode(
                 np.frombuffer(base64.b64decode(request_data["image"]), dtype=np.uint8),
@@ -17,7 +17,6 @@ from frigate.detectors.detector_config import (
     BaseDetectorConfig,
     ModelTypeEnum,
 )
-from frigate.util.file import FileLock
 from frigate.util.model import post_process_yolo
 
 logger = logging.getLogger(__name__)
@@ -178,6 +177,29 @@ class MemryXDetector(DetectionApi):
             logger.error(f"Failed to initialize MemryX model: {e}")
             raise
 
+    def _acquire_file_lock(self, lock_path: str, timeout: int = 60, poll: float = 0.2):
+        """
+        Create an exclusive lock file. Blocks (with polling) until it can acquire,
+        or raises TimeoutError. Uses only stdlib (os.O_EXCL).
+        """
+        start = time.time()
+        while True:
+            try:
+                fd = os.open(lock_path, os.O_CREAT | os.O_EXCL | os.O_RDWR)
+                os.close(fd)
+                return
+            except FileExistsError:
+                if time.time() - start > timeout:
+                    raise TimeoutError(f"Timeout waiting for lock: {lock_path}")
+                time.sleep(poll)
+
+    def _release_file_lock(self, lock_path: str):
+        """Best-effort removal of the lock file."""
+        try:
+            os.remove(lock_path)
+        except FileNotFoundError:
+            pass
+
     def load_yolo_constants(self):
         base = f"{self.cache_dir}/{self.model_folder}"
         # constants for yolov9 post-processing
@@ -190,9 +212,9 @@ class MemryXDetector(DetectionApi):
         os.makedirs(self.cache_dir, exist_ok=True)
 
         lock_path = os.path.join(self.cache_dir, f".{self.model_folder}.lock")
-        lock = FileLock(lock_path, timeout=60)
+        self._acquire_file_lock(lock_path)
 
-        with lock:
+        try:
             # ---------- CASE 1: user provided a custom model path ----------
             if self.memx_model_path:
                 if not self.memx_model_path.endswith(".zip"):
@@ -316,6 +338,9 @@ class MemryXDetector(DetectionApi):
                     f"Failed to remove downloaded zip {zip_path}: {e}"
                 )
 
+        finally:
+            self._release_file_lock(lock_path)
+
     def send_input(self, connection_id, tensor_input: np.ndarray):
         """Pre-process (if needed) and send frame to MemryX input queue"""
         if tensor_input is None:
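The detector now guards model download and extraction with the stdlib lock helpers added above, releasing the lock in a `finally` block. A self-contained sketch of the same acquire/work/release pattern, using standalone functions and an invented lock path:

```python
import os
import time


def acquire_file_lock(lock_path: str, timeout: float = 60, poll: float = 0.2) -> None:
    # Same O_CREAT | O_EXCL approach as the detector's helper above.
    start = time.time()
    while True:
        try:
            os.close(os.open(lock_path, os.O_CREAT | os.O_EXCL | os.O_RDWR))
            return
        except FileExistsError:
            if time.time() - start > timeout:
                raise TimeoutError(f"Timeout waiting for lock: {lock_path}")
            time.sleep(poll)


def release_file_lock(lock_path: str) -> None:
    try:
        os.remove(lock_path)
    except FileNotFoundError:
        pass


lock_path = "/tmp/.example_model.lock"  # hypothetical path for this sketch
acquire_file_lock(lock_path)
try:
    pass  # download / unpack model files here
finally:
    release_file_lock(lock_path)
```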
@@ -29,7 +29,7 @@ from frigate.db.sqlitevecq import SqliteVecQueueDatabase
 from frigate.models import Event, Trigger
 from frigate.types import ModelStatusTypesEnum
 from frigate.util.builtin import EventsPerSecond, InferenceSpeed, serialize
-from frigate.util.file import get_event_thumbnail_bytes
+from frigate.util.path import get_event_thumbnail_bytes
 
 from .onnx.jina_v1_embedding import JinaV1ImageEmbedding, JinaV1TextEmbedding
 from .onnx.jina_v2_embedding import JinaV2Embedding
@@ -62,8 +62,8 @@ from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum
 from frigate.genai import get_genai_client
 from frigate.models import Event, Recordings, ReviewSegment, Trigger
 from frigate.util.builtin import serialize
-from frigate.util.file import get_event_thumbnail_bytes
 from frigate.util.image import SharedMemoryFrameManager
+from frigate.util.path import get_event_thumbnail_bytes
 
 from .embeddings import Embeddings
 
@@ -158,13 +158,11 @@ class EmbeddingMaintainer(threading.Thread):
         self.realtime_processors: list[RealTimeProcessorApi] = []
 
         if self.config.face_recognition.enabled:
-            logger.debug("Face recognition enabled, initializing FaceRealTimeProcessor")
             self.realtime_processors.append(
                 FaceRealTimeProcessor(
                     self.config, self.requestor, self.event_metadata_publisher, metrics
                 )
             )
-            logger.debug("FaceRealTimeProcessor initialized successfully")
 
         if self.config.classification.bird.enabled:
             self.realtime_processors.append(
@@ -285,66 +283,45 @@ class EmbeddingMaintainer(threading.Thread):
         logger.info("Exiting embeddings maintenance...")
 
     def _check_classification_config_updates(self) -> None:
-        """Check for classification config updates and add/remove processors."""
+        """Check for classification config updates and add new processors."""
         topic, model_config = self.classification_config_subscriber.check_for_update()
 
-        if topic:
+        if topic and model_config:
             model_name = topic.split("/")[-1]
+            self.config.classification.custom[model_name] = model_config
 
-            if model_config is None:
-                self.realtime_processors = [
-                    processor
-                    for processor in self.realtime_processors
-                    if not (
-                        isinstance(
-                            processor,
-                            (
-                                CustomStateClassificationProcessor,
-                                CustomObjectClassificationProcessor,
-                            ),
-                        )
-                        and processor.model_config.name == model_name
-                    )
-                ]
-
-                logger.info(
-                    f"Successfully removed classification processor for model: {model_name}"
-                )
-            else:
-                self.config.classification.custom[model_name] = model_config
-
-                # Check if processor already exists
-                for processor in self.realtime_processors:
-                    if isinstance(
-                        processor,
-                        (
-                            CustomStateClassificationProcessor,
-                            CustomObjectClassificationProcessor,
-                        ),
-                    ):
-                        if processor.model_config.name == model_name:
-                            logger.debug(
-                                f"Classification processor for model {model_name} already exists, skipping"
-                            )
-                            return
-
-                if model_config.state_config is not None:
-                    processor = CustomStateClassificationProcessor(
-                        self.config, model_config, self.requestor, self.metrics
-                    )
-                else:
-                    processor = CustomObjectClassificationProcessor(
-                        self.config,
-                        model_config,
-                        self.event_metadata_publisher,
-                        self.metrics,
-                    )
-
-                self.realtime_processors.append(processor)
-                logger.info(
-                    f"Added classification processor for model: {model_name} (type: {type(processor).__name__})"
-                )
+            # Check if processor already exists
+            for processor in self.realtime_processors:
+                if isinstance(
+                    processor,
+                    (
+                        CustomStateClassificationProcessor,
+                        CustomObjectClassificationProcessor,
+                    ),
+                ):
+                    if processor.model_config.name == model_name:
+                        logger.debug(
+                            f"Classification processor for model {model_name} already exists, skipping"
+                        )
+                        return
+
+            if model_config.state_config is not None:
+                processor = CustomStateClassificationProcessor(
+                    self.config, model_config, self.requestor, self.metrics
+                )
+            else:
+                processor = CustomObjectClassificationProcessor(
+                    self.config,
+                    model_config,
+                    self.event_metadata_publisher,
+                    self.metrics,
+                )
+
+            self.realtime_processors.append(processor)
+            logger.info(
+                f"Added classification processor for model: {model_name} (type: {type(processor).__name__})"
+            )
 
     def _process_requests(self) -> None:
         """Process embeddings requests"""
@@ -397,14 +374,7 @@ class EmbeddingMaintainer(threading.Thread):
 
         source_type, _, camera, frame_name, data = update
 
-        logger.debug(
-            f"Received update - source_type: {source_type}, camera: {camera}, data label: {data.get('label') if data else 'None'}"
-        )
-
         if not camera or source_type != EventTypeEnum.tracked_object:
-            logger.debug(
-                f"Skipping update - camera: {camera}, source_type: {source_type}"
-            )
             return
 
         if self.config.semantic_search.enabled:
@@ -414,9 +384,6 @@ class EmbeddingMaintainer(threading.Thread):
 
         # no need to process updated objects if no processors are active
        if len(self.realtime_processors) == 0 and len(self.post_processors) == 0:
-            logger.debug(
-                f"No processors active - realtime: {len(self.realtime_processors)}, post: {len(self.post_processors)}"
-            )
             return
 
         # Create our own thumbnail based on the bounding box and the frame time
@@ -425,7 +392,6 @@ class EmbeddingMaintainer(threading.Thread):
                 frame_name, camera_config.frame_shape_yuv
             )
         except FileNotFoundError:
-            logger.debug(f"Frame {frame_name} not found for camera {camera}")
            pass
 
        if yuv_frame is None:
@@ -434,11 +400,7 @@ class EmbeddingMaintainer(threading.Thread):
             )
             return
 
-        logger.debug(
-            f"Processing {len(self.realtime_processors)} realtime processors for object {data.get('id')} (label: {data.get('label')})"
-        )
         for processor in self.realtime_processors:
-            logger.debug(f"Calling process_frame on {processor.__class__.__name__}")
             processor.process_frame(data, yuv_frame)
 
         for processor in self.post_processors:
@@ -12,7 +12,7 @@ from frigate.config import FrigateConfig
 from frigate.const import CLIPS_DIR
 from frigate.db.sqlitevecq import SqliteVecQueueDatabase
 from frigate.models import Event, Timeline
-from frigate.util.file import delete_event_snapshot, delete_event_thumbnail
+from frigate.util.path import delete_event_snapshot, delete_event_thumbnail

 logger = logging.getLogger(__name__)

@@ -9,7 +9,6 @@ from multiprocessing import Queue, Value
 from multiprocessing.synchronize import Event as MpEvent

 import numpy as np
-import zmq

 from frigate.comms.object_detector_signaler import (
     ObjectDetectorPublisher,

@@ -378,15 +377,6 @@ class RemoteObjectDetector:
         if self.stop_event.is_set():
             return detections

-        # Drain any stale detection results from the ZMQ buffer before making a new request
-        # This prevents reading detection results from a previous request
-        # NOTE: This should never happen, but can in some rare cases
-        while True:
-            try:
-                self.detector_subscriber.socket.recv_string(flags=zmq.NOBLOCK)
-            except zmq.Again:
-                break
-
         # copy input to shared memory
         self.np_shm[:] = tensor_input[:]
         self.detection_queue.put(self.name)
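The drained-before-request pattern in the hunk above is plain pyzmq; a minimal standalone sketch, with the socket type and endpoint as placeholders rather than Frigate's actual wiring:

```python
import zmq

# Discard stale results from a ZMQ socket before issuing a new request.
ctx = zmq.Context.instance()
sock = ctx.socket(zmq.PULL)
sock.connect("ipc:///tmp/example_detection_results")  # hypothetical endpoint


def drain(socket: zmq.Socket) -> int:
    """Discard queued messages without blocking; return how many were dropped."""
    dropped = 0
    while True:
        try:
            socket.recv_string(flags=zmq.NOBLOCK)
            dropped += 1
        except zmq.Again:
            # receive buffer is empty
            return dropped
```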
@@ -14,8 +14,7 @@ from frigate.config import CameraConfig, FrigateConfig, RetainModeEnum
 from frigate.const import CACHE_DIR, CLIPS_DIR, MAX_WAL_SIZE, RECORD_DIR
 from frigate.models import Previews, Recordings, ReviewSegment, UserReviewStatus
 from frigate.record.util import remove_empty_directories, sync_recordings
-from frigate.util.builtin import clear_and_unlink
-from frigate.util.time import get_tomorrow_at_time
+from frigate.util.builtin import clear_and_unlink, get_tomorrow_at_time

 logger = logging.getLogger(__name__)

@@ -28,7 +28,7 @@ from frigate.ffmpeg_presets import (
     parse_preset_hardware_acceleration_encode,
 )
 from frigate.models import Export, Previews, Recordings
-from frigate.util.time import is_current_hour
+from frigate.util.builtin import is_current_hour

 logger = logging.getLogger(__name__)

@@ -15,9 +15,12 @@ from collections.abc import Mapping
 from multiprocessing.sharedctypes import Synchronized
 from pathlib import Path
 from typing import Any, Dict, Optional, Tuple, Union
+from zoneinfo import ZoneInfoNotFoundError

 import numpy as np
+import pytz
 from ruamel.yaml import YAML
+from tzlocal import get_localzone

 from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS


@@ -154,6 +157,17 @@ def load_labels(path: Optional[str], encoding="utf-8", prefill=91):
     return labels


+def get_tz_modifiers(tz_name: str) -> Tuple[str, str, float]:
+    seconds_offset = (
+        datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds()
+    )
+    hours_offset = int(seconds_offset / 60 / 60)
+    minutes_offset = int(seconds_offset / 60 - hours_offset * 60)
+    hour_modifier = f"{hours_offset} hour"
+    minute_modifier = f"{minutes_offset} minute"
+    return hour_modifier, minute_modifier, seconds_offset
+
+
 def to_relative_box(
     width: int, height: int, box: Tuple[int, int, int, int]
 ) -> Tuple[int | float, int | float, int | float, int | float]:

@@ -284,6 +298,34 @@ def find_by_key(dictionary, target_key):
     return None


+def get_tomorrow_at_time(hour: int) -> datetime.datetime:
+    """Returns the datetime of the following day at 2am."""
+    try:
+        tomorrow = datetime.datetime.now(get_localzone()) + datetime.timedelta(days=1)
+    except ZoneInfoNotFoundError:
+        tomorrow = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
+            days=1
+        )
+        logger.warning(
+            "Using utc for maintenance due to missing or incorrect timezone set"
+        )
+
+    return tomorrow.replace(hour=hour, minute=0, second=0).astimezone(
+        datetime.timezone.utc
+    )
+
+
+def is_current_hour(timestamp: int) -> bool:
+    """Returns if timestamp is in the current UTC hour."""
+    start_of_next_hour = (
+        datetime.datetime.now(datetime.timezone.utc).replace(
+            minute=0, second=0, microsecond=0
+        )
+        + datetime.timedelta(hours=1)
+    ).timestamp()
+    return timestamp < start_of_next_hour
+
+
 def clear_and_unlink(file: Path, missing_ok: bool = True) -> None:
     """clear file then unlink to avoid space retained by file descriptors."""
     if not missing_ok and not file.exists():
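For orientation, a small sketch of how these helpers behave; the import path assumes the frigate.util.builtin location on the 303ebff side, and the printed values are only indicative since they depend on the host timezone and current date:

```python
import datetime

from frigate.util.builtin import (
    get_tomorrow_at_time,
    get_tz_modifiers,
    is_current_hour,
)

# e.g. ("-5 hour", "0 minute", -18000.0) for US Eastern outside DST
print(get_tz_modifiers("America/New_York"))

# UTC datetime corresponding to 02:00 local time tomorrow (used for nightly maintenance)
print(get_tomorrow_at_time(2))

# True for a timestamp taken in the current UTC hour
now_ts = int(datetime.datetime.now(datetime.timezone.utc).timestamp())
print(is_current_hour(now_ts))
```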
@@ -20,8 +20,8 @@ from frigate.const import (
 from frigate.log import redirect_output_to_logger
 from frigate.models import Event, Recordings, ReviewSegment
 from frigate.types import ModelStatusTypesEnum
-from frigate.util.file import get_event_thumbnail_bytes
 from frigate.util.image import get_image_from_recording
+from frigate.util.path import get_event_thumbnail_bytes
 from frigate.util.process import FrigateProcess

 BATCH_SIZE = 16
@@ -1,6 +1,7 @@
 import logging
 import os
 import threading
+import time
 from pathlib import Path
 from typing import Callable, List


@@ -9,11 +10,40 @@ import requests
 from frigate.comms.inter_process import InterProcessRequestor
 from frigate.const import UPDATE_MODEL_STATE
 from frigate.types import ModelStatusTypesEnum
-from frigate.util.file import FileLock

 logger = logging.getLogger(__name__)


+class FileLock:
+    def __init__(self, path):
+        self.path = path
+        self.lock_file = f"{path}.lock"
+
+        # we have not acquired the lock yet so it should not exist
+        if os.path.exists(self.lock_file):
+            try:
+                os.remove(self.lock_file)
+            except Exception:
+                pass
+
+    def acquire(self):
+        parent_dir = os.path.dirname(self.lock_file)
+        os.makedirs(parent_dir, exist_ok=True)
+
+        while True:
+            try:
+                with open(self.lock_file, "x"):
+                    return
+            except FileExistsError:
+                time.sleep(0.1)
+
+    def release(self):
+        try:
+            os.remove(self.lock_file)
+        except FileNotFoundError:
+            pass
+
+
 class ModelDownloader:
     def __init__(
         self,

@@ -51,13 +81,15 @@ class ModelDownloader:
     def _download_models(self):
         for file_name in self.file_names:
             path = os.path.join(self.download_path, file_name)
-            lock_path = f"{path}.lock"
-            lock = FileLock(lock_path, cleanup_stale_on_init=True)
+            lock = FileLock(path)

             if not os.path.exists(path):
-                with lock:
+                lock.acquire()
+                try:
                     if not os.path.exists(path):
                         self.download_func(path)
+                finally:
+                    lock.release()

             self.requestor.send_data(
                 UPDATE_MODEL_STATE,
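A minimal sketch of the intended use of this lock-file based FileLock, mirroring _download_models above; the model path and download callable are placeholders:

```python
import os

# Hypothetical model file; only one process should perform the download.
path = "/config/model_cache/example_model.tflite"
lock = FileLock(path)

if not os.path.exists(path):
    lock.acquire()
    try:
        # re-check after winning the race, then do the expensive work
        if not os.path.exists(path):
            download_func(path)  # placeholder for the real download callable
    finally:
        lock.release()
```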
@@ -1,276 +0,0 @@
"""Path and file utilities."""

import base64
import fcntl
import logging
import os
import time
from pathlib import Path
from typing import Optional

import cv2
from numpy import ndarray

from frigate.const import CLIPS_DIR, THUMB_DIR
from frigate.models import Event

logger = logging.getLogger(__name__)


def get_event_thumbnail_bytes(event: Event) -> bytes | None:
    if event.thumbnail:
        return base64.b64decode(event.thumbnail)
    else:
        try:
            with open(
                os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp"), "rb"
            ) as f:
                return f.read()
        except Exception:
            return None


def get_event_snapshot(event: Event) -> ndarray:
    media_name = f"{event.camera}-{event.id}"
    return cv2.imread(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")


### Deletion


def delete_event_images(event: Event) -> bool:
    return delete_event_snapshot(event) and delete_event_thumbnail(event)


def delete_event_snapshot(event: Event) -> bool:
    media_name = f"{event.camera}-{event.id}"
    media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")

    try:
        media_path.unlink(missing_ok=True)
        media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.webp")
        media_path.unlink(missing_ok=True)
        # also delete clean.png (legacy) for backward compatibility
        media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png")
        media_path.unlink(missing_ok=True)
        return True
    except OSError:
        return False


def delete_event_thumbnail(event: Event) -> bool:
    if event.thumbnail:
        return True
    else:
        Path(os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp")).unlink(
            missing_ok=True
        )
        return True


### File Locking


class FileLock:
    """
    A file-based lock for coordinating access to resources across processes.

    Uses fcntl.flock() for proper POSIX file locking on Linux. Supports timeouts,
    stale lock detection, and can be used as a context manager.

    Example:
    ```python
    # Using as a context manager (recommended)
    with FileLock("/path/to/resource.lock", timeout=60):
        # Critical section
        do_something()

    # Manual acquisition and release
    lock = FileLock("/path/to/resource.lock")
    if lock.acquire(timeout=60):
        try:
            do_something()
        finally:
            lock.release()
    ```

    Attributes:
        lock_path: Path to the lock file
        timeout: Maximum time to wait for lock acquisition (seconds)
        poll_interval: Time to wait between lock acquisition attempts (seconds)
        stale_timeout: Time after which a lock is considered stale (seconds)
    """

    def __init__(
        self,
        lock_path: str | Path,
        timeout: int = 300,
        poll_interval: float = 1.0,
        stale_timeout: int = 600,
        cleanup_stale_on_init: bool = False,
    ):
        """
        Initialize a FileLock.

        Args:
            lock_path: Path to the lock file
            timeout: Maximum time to wait for lock acquisition in seconds (default: 300)
            poll_interval: Time to wait between lock attempts in seconds (default: 1.0)
            stale_timeout: Time after which a lock is considered stale in seconds (default: 600)
            cleanup_stale_on_init: Whether to clean up stale locks on initialization (default: False)
        """
        self.lock_path = Path(lock_path)
        self.timeout = timeout
        self.poll_interval = poll_interval
        self.stale_timeout = stale_timeout
        self._fd: Optional[int] = None
        self._acquired = False

        if cleanup_stale_on_init:
            self._cleanup_stale_lock()

    def _cleanup_stale_lock(self) -> bool:
        """
        Clean up a stale lock file if it exists and is old.

        Returns:
            True if lock was cleaned up, False otherwise
        """
        try:
            if self.lock_path.exists():
                # Check if lock file is older than stale_timeout
                lock_age = time.time() - self.lock_path.stat().st_mtime
                if lock_age > self.stale_timeout:
                    logger.warning(
                        f"Removing stale lock file: {self.lock_path} (age: {lock_age:.1f}s)"
                    )
                    self.lock_path.unlink()
                    return True
        except Exception as e:
            logger.error(f"Error cleaning up stale lock: {e}")

        return False

    def is_stale(self) -> bool:
        """
        Check if the lock file is stale (older than stale_timeout).

        Returns:
            True if lock is stale, False otherwise
        """
        try:
            if self.lock_path.exists():
                lock_age = time.time() - self.lock_path.stat().st_mtime
                return lock_age > self.stale_timeout
        except Exception:
            pass

        return False

    def acquire(self, timeout: Optional[int] = None) -> bool:
        """
        Acquire the file lock using fcntl.flock().

        Args:
            timeout: Maximum time to wait for lock in seconds (uses instance timeout if None)

        Returns:
            True if lock acquired, False if timeout or error
        """
        if self._acquired:
            logger.warning(f"Lock already acquired: {self.lock_path}")
            return True

        if timeout is None:
            timeout = self.timeout

        # Ensure parent directory exists
        self.lock_path.parent.mkdir(parents=True, exist_ok=True)

        # Clean up stale lock before attempting to acquire
        self._cleanup_stale_lock()

        try:
            self._fd = os.open(self.lock_path, os.O_CREAT | os.O_RDWR)

            start_time = time.time()
            while time.time() - start_time < timeout:
                try:
                    fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                    self._acquired = True
                    logger.debug(f"Acquired lock: {self.lock_path}")
                    return True
                except (OSError, IOError):
                    # Lock is held by another process
                    if time.time() - start_time >= timeout:
                        logger.warning(f"Timeout waiting for lock: {self.lock_path}")
                        os.close(self._fd)
                        self._fd = None
                        return False

                    time.sleep(self.poll_interval)

            # Timeout reached
            if self._fd is not None:
                os.close(self._fd)
                self._fd = None
            return False

        except Exception as e:
            logger.error(f"Error acquiring lock: {e}")
            if self._fd is not None:
                try:
                    os.close(self._fd)
                except Exception:
                    pass
                self._fd = None
            return False

    def release(self) -> None:
        """
        Release the file lock.

        This closes the file descriptor and removes the lock file.
        """
        if not self._acquired:
            return

        try:
            # Close file descriptor and release fcntl lock
            if self._fd is not None:
                try:
                    fcntl.flock(self._fd, fcntl.LOCK_UN)
                    os.close(self._fd)
                except Exception as e:
                    logger.warning(f"Error closing lock file descriptor: {e}")
                finally:
                    self._fd = None

            # Remove lock file
            if self.lock_path.exists():
                self.lock_path.unlink()
                logger.debug(f"Released lock: {self.lock_path}")

        except FileNotFoundError:
            # Lock file already removed, that's fine
            pass
        except Exception as e:
            logger.error(f"Error releasing lock: {e}")
        finally:
            self._acquired = False

    def __enter__(self):
        """Context manager entry - acquire the lock."""
        if not self.acquire():
            raise TimeoutError(f"Failed to acquire lock: {self.lock_path}")
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context manager exit - release the lock."""
        self.release()
        return False

    def __del__(self):
        """Destructor - ensure lock is released."""
        if self._acquired:
            self.release()
frigate/util/path.py (new file, 62 lines)
@@ -0,0 +1,62 @@
"""Path utilities."""

import base64
import os
from pathlib import Path

import cv2
from numpy import ndarray

from frigate.const import CLIPS_DIR, THUMB_DIR
from frigate.models import Event


def get_event_thumbnail_bytes(event: Event) -> bytes | None:
    if event.thumbnail:
        return base64.b64decode(event.thumbnail)
    else:
        try:
            with open(
                os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp"), "rb"
            ) as f:
                return f.read()
        except Exception:
            return None


def get_event_snapshot(event: Event) -> ndarray:
    media_name = f"{event.camera}-{event.id}"
    return cv2.imread(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")


### Deletion


def delete_event_images(event: Event) -> bool:
    return delete_event_snapshot(event) and delete_event_thumbnail(event)


def delete_event_snapshot(event: Event) -> bool:
    media_name = f"{event.camera}-{event.id}"
    media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")

    try:
        media_path.unlink(missing_ok=True)
        media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.webp")
        media_path.unlink(missing_ok=True)
        # also delete clean.png (legacy) for backward compatibility
        media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png")
        media_path.unlink(missing_ok=True)
        return True
    except OSError:
        return False


def delete_event_thumbnail(event: Event) -> bool:
    if event.thumbnail:
        return True
    else:
        Path(os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp")).unlink(
            missing_ok=True
        )
        return True
@@ -1,5 +1,6 @@
 """RKNN model conversion utility for Frigate."""

+import fcntl
 import logging
 import os
 import subprocess

@@ -8,8 +9,6 @@ import time
 from pathlib import Path
 from typing import Optional

-from frigate.util.file import FileLock
-
 logger = logging.getLogger(__name__)

 MODEL_TYPE_CONFIGS = {

@@ -246,6 +245,112 @@ def convert_onnx_to_rknn(
         logger.warning(f"Failed to remove temporary ONNX file: {e}")


+def cleanup_stale_lock(lock_file_path: Path) -> bool:
+    """
+    Clean up a stale lock file if it exists and is old.
+
+    Args:
+        lock_file_path: Path to the lock file
+
+    Returns:
+        True if lock was cleaned up, False otherwise
+    """
+    try:
+        if lock_file_path.exists():
+            # Check if lock file is older than 10 minutes (stale)
+            lock_age = time.time() - lock_file_path.stat().st_mtime
+            if lock_age > 600:  # 10 minutes
+                logger.warning(
+                    f"Removing stale lock file: {lock_file_path} (age: {lock_age:.1f}s)"
+                )
+                lock_file_path.unlink()
+                return True
+    except Exception as e:
+        logger.error(f"Error cleaning up stale lock: {e}")
+
+    return False
+
+
+def acquire_conversion_lock(lock_file_path: Path, timeout: int = 300) -> bool:
+    """
+    Acquire a file-based lock for model conversion.
+
+    Args:
+        lock_file_path: Path to the lock file
+        timeout: Maximum time to wait for lock in seconds
+
+    Returns:
+        True if lock acquired, False if timeout or error
+    """
+    try:
+        lock_file_path.parent.mkdir(parents=True, exist_ok=True)
+        cleanup_stale_lock(lock_file_path)
+        lock_fd = os.open(lock_file_path, os.O_CREAT | os.O_RDWR)
+
+        # Try to acquire exclusive lock
+        start_time = time.time()
+        while time.time() - start_time < timeout:
+            try:
+                fcntl.flock(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+                # Lock acquired successfully
+                logger.debug(f"Acquired conversion lock: {lock_file_path}")
+                return True
+            except (OSError, IOError):
+                # Lock is held by another process, wait and retry
+                if time.time() - start_time >= timeout:
+                    logger.warning(
+                        f"Timeout waiting for conversion lock: {lock_file_path}"
+                    )
+                    os.close(lock_fd)
+                    return False
+
+                logger.debug("Waiting for conversion lock to be released...")
+                time.sleep(1)
+
+        os.close(lock_fd)
+        return False
+
+    except Exception as e:
+        logger.error(f"Error acquiring conversion lock: {e}")
+        return False
+
+
+def release_conversion_lock(lock_file_path: Path) -> None:
+    """
+    Release the conversion lock.
+
+    Args:
+        lock_file_path: Path to the lock file
+    """
+    try:
+        if lock_file_path.exists():
+            lock_file_path.unlink()
+            logger.debug(f"Released conversion lock: {lock_file_path}")
+    except Exception as e:
+        logger.error(f"Error releasing conversion lock: {e}")
+
+
+def is_lock_stale(lock_file_path: Path, max_age: int = 600) -> bool:
+    """
+    Check if a lock file is stale (older than max_age seconds).
+
+    Args:
+        lock_file_path: Path to the lock file
+        max_age: Maximum age in seconds before considering lock stale
+
+    Returns:
+        True if lock is stale, False otherwise
+    """
+    try:
+        if lock_file_path.exists():
+            lock_age = time.time() - lock_file_path.stat().st_mtime
+            return lock_age > max_age
+    except Exception:
+        pass
+
+    return False
+
+
 def wait_for_conversion_completion(
     model_type: str, rknn_path: Path, lock_file_path: Path, timeout: int = 300
 ) -> bool:

@@ -253,7 +358,6 @@ def wait_for_conversion_completion(
     Wait for another process to complete the conversion.

     Args:
-        model_type: Type of model being converted
         rknn_path: Path to the expected RKNN model
         lock_file_path: Path to the lock file to monitor
         timeout: Maximum time to wait in seconds

@@ -262,8 +366,6 @@ def wait_for_conversion_completion(
         True if RKNN model appears, False if timeout
     """
     start_time = time.time()
-    lock = FileLock(lock_file_path, stale_timeout=600)

     while time.time() - start_time < timeout:
         # Check if RKNN model appeared
         if rknn_path.exists():

@@ -283,14 +385,11 @@ def wait_for_conversion_completion(
                 return False

         # Check if lock is stale
-        if lock.is_stale():
+        if is_lock_stale(lock_file_path):
             logger.warning("Lock file is stale, attempting to clean up and retry...")
-            lock._cleanup_stale_lock()
+            cleanup_stale_lock(lock_file_path)
             # Try to acquire lock again
-            retry_lock = FileLock(
-                lock_file_path, timeout=60, cleanup_stale_on_init=True
-            )
-            if retry_lock.acquire():
+            if acquire_conversion_lock(lock_file_path, timeout=60):
                 try:
                     # Check if RKNN file appeared while waiting
                     if rknn_path.exists():

@@ -316,7 +415,7 @@ def wait_for_conversion_completion(
                         return False

                 finally:
-                    retry_lock.release()
+                    release_conversion_lock(lock_file_path)

         logger.debug("Waiting for RKNN model to appear...")
         time.sleep(1)

@@ -353,9 +452,8 @@ def auto_convert_model(
         return str(rknn_path)

     lock_file_path = base_path.parent / f"{base_name}.conversion.lock"
-    lock = FileLock(lock_file_path, timeout=300, cleanup_stale_on_init=True)

-    if lock.acquire():
+    if acquire_conversion_lock(lock_file_path):
         try:
             if rknn_path.exists():
                 logger.info(

@@ -378,7 +476,7 @@ def auto_convert_model(
             return None

         finally:
-            lock.release()
+            release_conversion_lock(lock_file_path)
     else:
         logger.info(
             f"Another process is converting {model_path}, waiting for completion..."
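A brief sketch of how these lock helpers compose, including the stale-lock recovery branch; the paths and the conversion callable are placeholders:

```python
from pathlib import Path

# Hypothetical paths; auto_convert_model derives these from the real model path.
rknn_path = Path("/config/model_cache/rknn_cache/example.rknn")
lock_file_path = rknn_path.parent / "example.conversion.lock"

if acquire_conversion_lock(lock_file_path):
    try:
        if not rknn_path.exists():
            run_conversion(rknn_path)  # placeholder for convert_onnx_to_rknn(...)
    finally:
        release_conversion_lock(lock_file_path)
elif is_lock_stale(lock_file_path):
    # a previous converter likely died mid-run; clear the lock so the next attempt can proceed
    cleanup_stale_lock(lock_file_path)
```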
@@ -1,100 +0,0 @@
"""Time utilities."""

import datetime
import logging
from typing import Tuple
from zoneinfo import ZoneInfoNotFoundError

import pytz
from tzlocal import get_localzone

logger = logging.getLogger(__name__)


def get_tz_modifiers(tz_name: str) -> Tuple[str, str, float]:
    seconds_offset = (
        datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds()
    )
    hours_offset = int(seconds_offset / 60 / 60)
    minutes_offset = int(seconds_offset / 60 - hours_offset * 60)
    hour_modifier = f"{hours_offset} hour"
    minute_modifier = f"{minutes_offset} minute"
    return hour_modifier, minute_modifier, seconds_offset


def get_tomorrow_at_time(hour: int) -> datetime.datetime:
    """Returns the datetime of the following day at 2am."""
    try:
        tomorrow = datetime.datetime.now(get_localzone()) + datetime.timedelta(days=1)
    except ZoneInfoNotFoundError:
        tomorrow = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
            days=1
        )
        logger.warning(
            "Using utc for maintenance due to missing or incorrect timezone set"
        )

    return tomorrow.replace(hour=hour, minute=0, second=0).astimezone(
        datetime.timezone.utc
    )


def is_current_hour(timestamp: int) -> bool:
    """Returns if timestamp is in the current UTC hour."""
    start_of_next_hour = (
        datetime.datetime.now(datetime.timezone.utc).replace(
            minute=0, second=0, microsecond=0
        )
        + datetime.timedelta(hours=1)
    ).timestamp()
    return timestamp < start_of_next_hour


def get_dst_transitions(
    tz_name: str, start_time: float, end_time: float
) -> list[tuple[float, float]]:
    """
    Find DST transition points and return time periods with consistent offsets.

    Args:
        tz_name: Timezone name (e.g., 'America/New_York')
        start_time: Start timestamp (UTC)
        end_time: End timestamp (UTC)

    Returns:
        List of (period_start, period_end, seconds_offset) tuples representing
        continuous periods with the same UTC offset
    """
    try:
        tz = pytz.timezone(tz_name)
    except pytz.UnknownTimeZoneError:
        # If timezone is invalid, return single period with no offset
        return [(start_time, end_time, 0)]

    periods = []
    current = start_time

    # Get initial offset
    dt = datetime.datetime.utcfromtimestamp(current).replace(tzinfo=pytz.UTC)
    local_dt = dt.astimezone(tz)
    prev_offset = local_dt.utcoffset().total_seconds()
    period_start = start_time

    # Check each day for offset changes
    while current <= end_time:
        dt = datetime.datetime.utcfromtimestamp(current).replace(tzinfo=pytz.UTC)
        local_dt = dt.astimezone(tz)
        current_offset = local_dt.utcoffset().total_seconds()

        if current_offset != prev_offset:
            # Found a transition - close previous period
            periods.append((period_start, current, prev_offset))
            period_start = current
            prev_offset = current_offset

        current += 86400  # Check daily

    # Add final period
    periods.append((period_start, end_time, prev_offset))

    return periods
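An illustrative call to get_dst_transitions across the November 2024 US "fall back" transition; the exact split point depends on the daily sampling step the function uses, so the values shown are indicative only:

```python
import datetime

# A window spanning the 2024-11-03 DST end in the US.
start = datetime.datetime(2024, 11, 1, tzinfo=datetime.timezone.utc).timestamp()
end = datetime.datetime(2024, 11, 8, tzinfo=datetime.timezone.utc).timestamp()

# Expected shape: roughly two periods, the first at a -4h offset (EDT, -14400.0)
# and the second at -5h (EST, -18000.0).
for period_start, period_end, offset in get_dst_transitions(
    "America/New_York", start, end
):
    print(period_start, period_end, offset)
```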
@@ -34,7 +34,7 @@ from frigate.ptz.autotrack import ptz_moving_at_frame_time
 from frigate.track import ObjectTracker
 from frigate.track.norfair_tracker import NorfairTracker
 from frigate.track.tracked_object import TrackedObjectAttribute
-from frigate.util.builtin import EventsPerSecond
+from frigate.util.builtin import EventsPerSecond, get_tomorrow_at_time
 from frigate.util.image import (
     FrameManager,
     SharedMemoryFrameManager,

@@ -53,7 +53,6 @@ from frigate.util.object import (
     reduce_detections,
 )
 from frigate.util.process import FrigateProcess
-from frigate.util.time import get_tomorrow_at_time

 logger = logging.getLogger(__name__)

@@ -1,8 +1,5 @@
 {
   "documentTitle": "Classification Models",
-  "details": {
-    "scoreInfo": "Score represents the average classification confidence across all detections of this object."
-  },
   "button": {
     "deleteClassificationAttempts": "Delete Classification Images",
     "renameCategory": "Rename Class",

@@ -10,27 +7,23 @@
     "deleteImages": "Delete Images",
     "trainModel": "Train Model",
     "addClassification": "Add Classification",
-    "deleteModels": "Delete Models",
-    "editModel": "Edit Model"
+    "deleteModels": "Delete Models"
   },
   "toast": {
     "success": {
       "deletedCategory": "Deleted Class",
       "deletedImage": "Deleted Images",
-      "deletedModel_one": "Successfully deleted {{count}} model",
-      "deletedModel_other": "Successfully deleted {{count}} models",
+      "deletedModel": "Successfully deleted {{count}} model(s)",
       "categorizedImage": "Successfully Classified Image",
       "trainedModel": "Successfully trained model.",
-      "trainingModel": "Successfully started model training.",
-      "updatedModel": "Successfully updated model configuration"
+      "trainingModel": "Successfully started model training."
     },
     "error": {
       "deleteImageFailed": "Failed to delete: {{errorMessage}}",
       "deleteCategoryFailed": "Failed to delete class: {{errorMessage}}",
       "deleteModelFailed": "Failed to delete model: {{errorMessage}}",
       "categorizeFailed": "Failed to categorize image: {{errorMessage}}",
-      "trainingFailed": "Failed to start model training: {{errorMessage}}",
-      "updateModelFailed": "Failed to update model: {{errorMessage}}"
+      "trainingFailed": "Failed to start model training: {{errorMessage}}"
     }
   },
   "deleteCategory": {

@@ -42,12 +35,6 @@
     "single": "Are you sure you want to delete {{name}}? This will permanently delete all associated data including images and training data. This action cannot be undone.",
     "desc": "Are you sure you want to delete {{count}} model(s)? This will permanently delete all associated data including images and training data. This action cannot be undone."
   },
-  "edit": {
-    "title": "Edit Classification Model",
-    "descriptionState": "Edit the classes for this state classification model. Changes will require retraining the model.",
-    "descriptionObject": "Edit the object type and classification type for this object classification model.",
-    "stateClassesInfo": "Note: Changing state classes requires retraining the model with the updated classes."
-  },
   "deleteDatasetImages": {
     "title": "Delete Dataset Images",
     "desc": "Are you sure you want to delete {{count}} images from {{dataset}}? This action cannot be undone and will require re-training the model."
@@ -6,8 +6,7 @@
   },
   "details": {
     "timestamp": "Timestamp",
-    "unknown": "Unknown",
-    "scoreInfo": "Score is a weighted average of all face scores, weighted by the size of the face in each image."
+    "unknown": "Unknown"
   },
   "documentTitle": "Face Library - Frigate",
   "uploadFaceImage": {
@@ -271,8 +271,6 @@
     "disconnectStream": "Disconnect",
     "estimatedBandwidth": "Estimated Bandwidth",
     "roles": "Roles",
-    "ffmpegModule": "Use stream compatibility mode",
-    "ffmpegModuleDescription": "If the stream does not load after several attempts, try enabling this. When enabled, Frigate will use the ffmpeg module with go2rtc. This may provide better compatibility with some camera streams.",
     "none": "None",
     "error": "Error",
     "streamValidated": "Stream {{number}} validated successfully",
@@ -7,12 +7,11 @@ import {
 } from "@/types/classification";
 import { Event } from "@/types/event";
 import { forwardRef, useMemo, useRef, useState } from "react";
-import { isDesktop, isMobile, isMobileOnly } from "react-device-detect";
+import { isDesktop, isMobile } from "react-device-detect";
 import { useTranslation } from "react-i18next";
 import TimeAgo from "../dynamic/TimeAgo";
 import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip";
-import { Popover, PopoverContent, PopoverTrigger } from "../ui/popover";
-import { LuSearch, LuInfo } from "react-icons/lu";
+import { LuSearch } from "react-icons/lu";
 import { TooltipPortal } from "@radix-ui/react-tooltip";
 import { useNavigate } from "react-router-dom";
 import { HiSquare2Stack } from "react-icons/hi2";

@@ -182,7 +181,6 @@ type GroupedClassificationCardProps = {
   selectedItems: string[];
   i18nLibrary: string;
   objectType: string;
-  noClassificationLabel?: string;
   onClick: (data: ClassificationItemData | undefined) => void;
   children?: (data: ClassificationItemData) => React.ReactNode;
 };

@@ -192,7 +190,6 @@ export function GroupedClassificationCard({
   threshold,
   selectedItems,
   i18nLibrary,
-  noClassificationLabel = "details.none",
   onClick,
   children,
 }: GroupedClassificationCardProps) {

@@ -225,14 +222,10 @@ export function GroupedClassificationCard({
     const bestTyped: ClassificationItemData = best;
     return {
       ...bestTyped,
-      name: event
-        ? event.sub_label && event.sub_label !== "none"
-          ? event.sub_label
-          : t(noClassificationLabel)
-        : bestTyped.name,
+      name: event ? (event.sub_label ?? t("details.unknown")) : bestTyped.name,
       score: event?.data?.sub_label_score || bestTyped.score,
     };
-  }, [group, event, noClassificationLabel, t]);
+  }, [group, event, t]);

   const bestScoreStatus = useMemo(() => {
     if (!bestItem?.score || !threshold) {

@@ -264,8 +257,8 @@ export function GroupedClassificationCard({

   const Overlay = isDesktop ? Dialog : MobilePage;
   const Trigger = isDesktop ? DialogTrigger : MobilePageTrigger;
-  const Content = isDesktop ? DialogContent : MobilePageContent;
   const Header = isDesktop ? DialogHeader : MobilePageHeader;
+  const Content = isDesktop ? DialogContent : MobilePageContent;
   const ContentTitle = isDesktop ? DialogTitle : MobilePageTitle;
   const ContentDescription = isDesktop
     ? DialogDescription

@@ -298,9 +291,9 @@ export function GroupedClassificationCard({
       <Trigger asChild></Trigger>
       <Content
         className={cn(
-          "scrollbar-container",
+          "",
           isDesktop && "min-w-[50%] max-w-[65%]",
-          isMobile && "overflow-y-auto",
+          isMobile && "flex flex-col",
         )}
         onOpenAutoFocus={(e) => e.preventDefault()}
       >

@@ -308,45 +301,26 @@ export function GroupedClassificationCard({
         <Header
           className={cn(
             "mx-2 flex flex-row items-center gap-4",
-            isMobileOnly && "top-0 mx-4",
+            isMobile && "flex-shrink-0",
           )}
         >
-          <div
-            className={cn(
-              "",
-              isMobile && "flex flex-col items-center justify-center",
-            )}
-          >
-            <ContentTitle className="flex items-center gap-2 font-normal capitalize">
-              {event?.sub_label && event.sub_label !== "none"
-                ? event.sub_label
-                : t(noClassificationLabel)}
-              {event?.sub_label && event.sub_label !== "none" && (
-                <div className="flex items-center gap-1">
-                  <div
-                    className={cn(
-                      "",
-                      bestScoreStatus == "match" && "text-success",
-                      bestScoreStatus == "potential" && "text-orange-400",
-                      bestScoreStatus == "unknown" && "text-danger",
-                    )}
-                  >{`${Math.round((event.data.sub_label_score || 0) * 100)}%`}</div>
-                  <Popover>
-                    <PopoverTrigger asChild>
-                      <button
-                        className="focus:outline-none"
-                        aria-label={t("details.scoreInfo", {
-                          ns: i18nLibrary,
-                        })}
-                      >
-                        <LuInfo className="size-3" />
-                      </button>
-                    </PopoverTrigger>
-                    <PopoverContent className="w-80 text-sm">
-                      {t("details.scoreInfo", { ns: i18nLibrary })}
-                    </PopoverContent>
-                  </Popover>
-                </div>
+          <div>
+            <ContentTitle
+              className={cn(
+                "flex items-center gap-2 font-normal capitalize",
+                isMobile && "px-2",
+              )}
+            >
+              {event?.sub_label ? event.sub_label : t("details.unknown")}
+              {event?.sub_label && (
+                <div
+                  className={cn(
+                    "",
+                    bestScoreStatus == "match" && "text-success",
+                    bestScoreStatus == "potential" && "text-orange-400",
+                    bestScoreStatus == "unknown" && "text-danger",
+                  )}
+                >{`${Math.round((event.data.sub_label_score || 0) * 100)}%`}</div>
               )}
             </ContentTitle>
             <ContentDescription className={cn("", isMobile && "px-2")}>

@@ -390,7 +364,7 @@ export function GroupedClassificationCard({
           className={cn(
             "grid w-full auto-rows-min grid-cols-2 gap-2 sm:grid-cols-3 md:grid-cols-4 lg:grid-cols-6 xl:grid-cols-6 2xl:grid-cols-8",
             isDesktop && "p-2",
-            isMobile && "px-4 pb-4",
+            isMobile && "scrollbar-container flex-1 overflow-y-auto",
           )}
         >
           {group.map((data: ClassificationItemData) => (

@@ -37,7 +37,6 @@ import { capitalizeFirstLetter } from "@/utils/stringUtil";
 import { Button, buttonVariants } from "../ui/button";
 import { Trans, useTranslation } from "react-i18next";
 import { cn } from "@/lib/utils";
-import { LuCircle } from "react-icons/lu";

 type ReviewCardProps = {
   event: ReviewSegment;

@@ -143,7 +142,7 @@ export default function ReviewCard({
         className={cn(
           "size-full rounded-lg",
           activeReviewItem?.id == event.id &&
-            "outline outline-[3px] -outline-offset-[2.8px] outline-selected duration-200",
+            "outline outline-[3px] outline-offset-1 outline-selected",
           imgLoaded ? "visible" : "invisible",
         )}
         src={`${baseUrl}${event.thumb_path.replace("/media/frigate/", "")}`}

@@ -166,14 +165,6 @@ export default function ReviewCard({
           <TooltipTrigger asChild>
             <div className="flex items-center justify-evenly gap-1">
               <>
-                <LuCircle
-                  className={cn(
-                    "size-2",
-                    event.severity == "alert"
-                      ? "fill-severity_alert text-severity_alert"
-                      : "fill-severity_detection text-severity_detection",
-                  )}
-                />
                 {event.data.objects.map((object) => {
                   return getIconForLabel(
                     object,
@@ -1,477 +0,0 @@
import { Button } from "@/components/ui/button";
import {
  Dialog,
  DialogContent,
  DialogDescription,
  DialogHeader,
  DialogTitle,
} from "@/components/ui/dialog";
import {
  Form,
  FormControl,
  FormField,
  FormItem,
  FormLabel,
  FormMessage,
} from "@/components/ui/form";
import { Input } from "@/components/ui/input";
import { Label } from "@/components/ui/label";
import { RadioGroup, RadioGroupItem } from "@/components/ui/radio-group";
import {
  Select,
  SelectContent,
  SelectItem,
  SelectTrigger,
  SelectValue,
} from "@/components/ui/select";
import {
  CustomClassificationModelConfig,
  FrigateConfig,
} from "@/types/frigateConfig";
import { getTranslatedLabel } from "@/utils/i18n";
import { zodResolver } from "@hookform/resolvers/zod";
import axios from "axios";
import { useCallback, useEffect, useMemo, useState } from "react";
import { useForm } from "react-hook-form";
import { useTranslation } from "react-i18next";
import { LuPlus, LuX } from "react-icons/lu";
import { toast } from "sonner";
import useSWR from "swr";
import { z } from "zod";

type ClassificationModelEditDialogProps = {
  open: boolean;
  model: CustomClassificationModelConfig;
  onClose: () => void;
  onSuccess: () => void;
};

type ObjectClassificationType = "sub_label" | "attribute";

type ObjectFormData = {
  objectLabel: string;
  objectType: ObjectClassificationType;
};

type StateFormData = {
  classes: string[];
};

export default function ClassificationModelEditDialog({
  open,
  model,
  onClose,
  onSuccess,
}: ClassificationModelEditDialogProps) {
  const { t } = useTranslation(["views/classificationModel"]);
  const { data: config } = useSWR<FrigateConfig>("config");
  const [isSaving, setIsSaving] = useState(false);

  const isStateModel = model.state_config !== undefined;
  const isObjectModel = model.object_config !== undefined;

  const objectLabels = useMemo(() => {
    if (!config) return [];

    const labels = new Set<string>();

    Object.values(config.cameras).forEach((cameraConfig) => {
      if (!cameraConfig.enabled || !cameraConfig.enabled_in_config) {
        return;
      }

      cameraConfig.objects.track.forEach((label) => {
        if (!config.model.all_attributes.includes(label)) {
          labels.add(label);
        }
      });
    });

    return [...labels].sort();
  }, [config]);

  // Define form schema based on model type
  const formSchema = useMemo(() => {
    if (isObjectModel) {
      return z.object({
        objectLabel: z
          .string()
          .min(1, t("wizard.step1.errors.objectLabelRequired")),
        objectType: z.enum(["sub_label", "attribute"]),
      });
    } else {
      // State model
      return z.object({
        classes: z
          .array(z.string())
          .min(1, t("wizard.step1.errors.classRequired"))
          .refine(
            (classes) => {
              const nonEmpty = classes.filter((c) => c.trim().length > 0);
              return nonEmpty.length >= 2;
            },
            { message: t("wizard.step1.errors.stateRequiresTwoClasses") },
          )
          .refine(
            (classes) => {
              const nonEmpty = classes.filter((c) => c.trim().length > 0);
              const unique = new Set(nonEmpty.map((c) => c.toLowerCase()));
              return unique.size === nonEmpty.length;
            },
            { message: t("wizard.step1.errors.classesUnique") },
          ),
      });
    }
  }, [isObjectModel, t]);

  const form = useForm<ObjectFormData | StateFormData>({
    resolver: zodResolver(formSchema),
    defaultValues: isObjectModel
      ? ({
          objectLabel: model.object_config?.objects?.[0] || "",
          objectType:
            (model.object_config
              ?.classification_type as ObjectClassificationType) || "sub_label",
        } as ObjectFormData)
      : ({
          classes: [""], // Will be populated from dataset
        } as StateFormData),
    mode: "onChange",
  });

  // Fetch dataset to get current classes for state models
  const { data: dataset } = useSWR<{
    [id: string]: string[];
  }>(isStateModel ? `classification/${model.name}/dataset` : null, {
    revalidateOnFocus: false,
  });

  // Update form with classes from dataset when loaded
  useEffect(() => {
    if (isStateModel && dataset) {
      const classes = Object.keys(dataset).filter((key) => key !== "none");
      if (classes.length > 0) {
        (form as ReturnType<typeof useForm<StateFormData>>).setValue(
          "classes",
          classes,
        );
      }
    }
  }, [dataset, isStateModel, form]);

  const watchedClasses = isStateModel
    ? (form as ReturnType<typeof useForm<StateFormData>>).watch("classes")
    : undefined;
  const watchedObjectType = isObjectModel
    ? (form as ReturnType<typeof useForm<ObjectFormData>>).watch("objectType")
    : undefined;

  const handleAddClass = useCallback(() => {
    const currentClasses = (
      form as ReturnType<typeof useForm<StateFormData>>
    ).getValues("classes");
    (form as ReturnType<typeof useForm<StateFormData>>).setValue(
      "classes",
      [...currentClasses, ""],
      {
        shouldValidate: true,
      },
    );
  }, [form]);

  const handleRemoveClass = useCallback(
    (index: number) => {
      const currentClasses = (
        form as ReturnType<typeof useForm<StateFormData>>
      ).getValues("classes");
      const newClasses = currentClasses.filter((_, i) => i !== index);

      // Ensure at least one field remains (even if empty)
      if (newClasses.length === 0) {
        (form as ReturnType<typeof useForm<StateFormData>>).setValue(
          "classes",
          [""],
          { shouldValidate: true },
        );
      } else {
        (form as ReturnType<typeof useForm<StateFormData>>).setValue(
          "classes",
          newClasses,
          { shouldValidate: true },
        );
      }
    },
    [form],
  );

  const onSubmit = useCallback(
    async (data: ObjectFormData | StateFormData) => {
      setIsSaving(true);
      try {
        if (isObjectModel) {
          const objectData = data as ObjectFormData;

          // Update the config
          await axios.put("/config/set", {
            requires_restart: 0,
            update_topic: `config/classification/custom/${model.name}`,
            config_data: {
              classification: {
                custom: {
                  [model.name]: {
                    enabled: model.enabled,
                    name: model.name,
                    threshold: model.threshold,
                    object_config: {
                      objects: [objectData.objectLabel],
                      classification_type: objectData.objectType,
                    },
                  },
                },
              },
            },
          });

          toast.success(t("toast.success.updatedModel"), {
            position: "top-center",
          });
        } else {
          // State model - update classes
          // Note: For state models, updating classes requires renaming categories
          // which is handled through the dataset API, not the config API
          // We'll need to implement this by calling the rename endpoint for each class
// For now, we just show a message that this requires retraining
|
|
||||||
|
|
||||||
toast.info(t("edit.stateClassesInfo"), {
|
|
||||||
position: "top-center",
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
onSuccess();
|
|
||||||
onClose();
|
|
||||||
} catch (err) {
|
|
||||||
const error = err as {
|
|
||||||
response?: { data?: { message?: string; detail?: string } };
|
|
||||||
};
|
|
||||||
const errorMessage =
|
|
||||||
error.response?.data?.message ||
|
|
||||||
error.response?.data?.detail ||
|
|
||||||
"Unknown error";
|
|
||||||
toast.error(t("toast.error.updateModelFailed", { errorMessage }), {
|
|
||||||
position: "top-center",
|
|
||||||
});
|
|
||||||
} finally {
|
|
||||||
setIsSaving(false);
|
|
||||||
}
|
|
||||||
},
|
|
||||||
[isObjectModel, model, t, onSuccess, onClose],
|
|
||||||
);
|
|
||||||
|
|
||||||
const handleCancel = useCallback(() => {
|
|
||||||
form.reset();
|
|
||||||
onClose();
|
|
||||||
}, [form, onClose]);
|
|
||||||
|
|
||||||
return (
|
|
||||||
<Dialog open={open} onOpenChange={(open) => !open && handleCancel()}>
|
|
||||||
<DialogContent>
|
|
||||||
<DialogHeader>
|
|
||||||
<DialogTitle>{t("edit.title")}</DialogTitle>
|
|
||||||
<DialogDescription>
|
|
||||||
{isStateModel
|
|
||||||
? t("edit.descriptionState")
|
|
||||||
: t("edit.descriptionObject")}
|
|
||||||
</DialogDescription>
|
|
||||||
</DialogHeader>
|
|
||||||
|
|
||||||
<div className="space-y-6">
|
|
||||||
<Form {...form}>
|
|
||||||
<form onSubmit={form.handleSubmit(onSubmit)} className="space-y-4">
|
|
||||||
{isObjectModel && (
|
|
||||||
<>
|
|
||||||
<FormField
|
|
||||||
control={form.control}
|
|
||||||
name="objectLabel"
|
|
||||||
render={({ field }) => (
|
|
||||||
<FormItem>
|
|
||||||
<FormLabel className="text-primary-variant">
|
|
||||||
{t("wizard.step1.objectLabel")}
|
|
||||||
</FormLabel>
|
|
||||||
<Select
|
|
||||||
onValueChange={field.onChange}
|
|
||||||
defaultValue={field.value}
|
|
||||||
>
|
|
||||||
<FormControl>
|
|
||||||
<SelectTrigger className="h-8">
|
|
||||||
<SelectValue
|
|
||||||
placeholder={t(
|
|
||||||
"wizard.step1.objectLabelPlaceholder",
|
|
||||||
)}
|
|
||||||
/>
|
|
||||||
</SelectTrigger>
|
|
||||||
</FormControl>
|
|
||||||
<SelectContent>
|
|
||||||
{objectLabels.map((label) => (
|
|
||||||
<SelectItem
|
|
||||||
key={label}
|
|
||||||
value={label}
|
|
||||||
className="cursor-pointer hover:bg-secondary-highlight"
|
|
||||||
>
|
|
||||||
{getTranslatedLabel(label)}
|
|
||||||
</SelectItem>
|
|
||||||
))}
|
|
||||||
</SelectContent>
|
|
||||||
</Select>
|
|
||||||
<FormMessage />
|
|
||||||
</FormItem>
|
|
||||||
)}
|
|
||||||
/>
|
|
||||||
|
|
||||||
<FormField
|
|
||||||
control={form.control}
|
|
||||||
name="objectType"
|
|
||||||
render={({ field }) => (
|
|
||||||
<FormItem>
|
|
||||||
<FormLabel className="text-primary-variant">
|
|
||||||
{t("wizard.step1.classificationType")}
|
|
||||||
</FormLabel>
|
|
||||||
<FormControl>
|
|
||||||
<RadioGroup
|
|
||||||
onValueChange={field.onChange}
|
|
||||||
defaultValue={field.value}
|
|
||||||
className="flex flex-col gap-4 pt-2"
|
|
||||||
>
|
|
||||||
<div className="flex items-center gap-2">
|
|
||||||
<RadioGroupItem
|
|
||||||
className={
|
|
||||||
watchedObjectType === "sub_label"
|
|
||||||
? "bg-selected from-selected/50 to-selected/90 text-selected"
|
|
||||||
: "bg-secondary from-secondary/50 to-secondary/90 text-secondary"
|
|
||||||
}
|
|
||||||
id="sub_label"
|
|
||||||
value="sub_label"
|
|
||||||
/>
|
|
||||||
<Label
|
|
||||||
className="cursor-pointer"
|
|
||||||
htmlFor="sub_label"
|
|
||||||
>
|
|
||||||
{t("wizard.step1.classificationSubLabel")}
|
|
||||||
</Label>
|
|
||||||
</div>
|
|
||||||
<div className="flex items-center gap-2">
|
|
||||||
<RadioGroupItem
|
|
||||||
className={
|
|
||||||
watchedObjectType === "attribute"
|
|
||||||
? "bg-selected from-selected/50 to-selected/90 text-selected"
|
|
||||||
: "bg-secondary from-secondary/50 to-secondary/90 text-secondary"
|
|
||||||
}
|
|
||||||
id="attribute"
|
|
||||||
value="attribute"
|
|
||||||
/>
|
|
||||||
<Label
|
|
||||||
className="cursor-pointer"
|
|
||||||
htmlFor="attribute"
|
|
||||||
>
|
|
||||||
{t("wizard.step1.classificationAttribute")}
|
|
||||||
</Label>
|
|
||||||
</div>
|
|
||||||
</RadioGroup>
|
|
||||||
</FormControl>
|
|
||||||
<FormMessage />
|
|
||||||
</FormItem>
|
|
||||||
)}
|
|
||||||
/>
|
|
||||||
</>
|
|
||||||
)}
|
|
||||||
|
|
||||||
{isStateModel && (
|
|
||||||
<div className="space-y-2">
|
|
||||||
<div className="flex items-center justify-between">
|
|
||||||
<FormLabel className="text-primary-variant">
|
|
||||||
{t("wizard.step1.states")}
|
|
||||||
</FormLabel>
|
|
||||||
<Button
|
|
||||||
type="button"
|
|
||||||
variant="secondary"
|
|
||||||
className="size-6 rounded-md bg-secondary-foreground p-1 text-background"
|
|
||||||
onClick={handleAddClass}
|
|
||||||
>
|
|
||||||
<LuPlus />
|
|
||||||
</Button>
|
|
||||||
</div>
|
|
||||||
<div className="space-y-2">
|
|
||||||
{watchedClasses?.map((_: string, index: number) => (
|
|
||||||
<FormField
|
|
||||||
key={index}
|
|
||||||
control={
|
|
||||||
(form as ReturnType<typeof useForm<StateFormData>>)
|
|
||||||
.control
|
|
||||||
}
|
|
||||||
name={`classes.${index}` as const}
|
|
||||||
render={({ field }) => (
|
|
||||||
<FormItem>
|
|
||||||
<FormControl>
|
|
||||||
<div className="flex items-center gap-2">
|
|
||||||
<Input
|
|
||||||
className="text-md h-8"
|
|
||||||
placeholder={t(
|
|
||||||
"wizard.step1.classPlaceholder",
|
|
||||||
)}
|
|
||||||
{...field}
|
|
||||||
/>
|
|
||||||
{watchedClasses &&
|
|
||||||
watchedClasses.length > 1 && (
|
|
||||||
<Button
|
|
||||||
type="button"
|
|
||||||
variant="ghost"
|
|
||||||
size="sm"
|
|
||||||
className="h-8 w-8 p-0"
|
|
||||||
onClick={() => handleRemoveClass(index)}
|
|
||||||
>
|
|
||||||
<LuX className="size-4" />
|
|
||||||
</Button>
|
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
</FormControl>
|
|
||||||
</FormItem>
|
|
||||||
)}
|
|
||||||
/>
|
|
||||||
))}
|
|
||||||
</div>
|
|
||||||
{isStateModel &&
|
|
||||||
"classes" in form.formState.errors &&
|
|
||||||
form.formState.errors.classes && (
|
|
||||||
<p className="text-sm font-medium text-destructive">
|
|
||||||
{form.formState.errors.classes.message}
|
|
||||||
</p>
|
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
)}
|
|
||||||
|
|
||||||
<div className="flex flex-col gap-3 pt-3 sm:flex-row sm:justify-end sm:gap-4">
|
|
||||||
<Button
|
|
||||||
type="button"
|
|
||||||
onClick={handleCancel}
|
|
||||||
className="sm:flex-1"
|
|
||||||
disabled={isSaving}
|
|
||||||
>
|
|
||||||
{t("button.cancel", { ns: "common" })}
|
|
||||||
</Button>
|
|
||||||
<Button
|
|
||||||
type="submit"
|
|
||||||
variant="select"
|
|
||||||
className="flex items-center justify-center gap-2 sm:flex-1"
|
|
||||||
disabled={!form.formState.isValid || isSaving}
|
|
||||||
>
|
|
||||||
{isSaving
|
|
||||||
? t("button.saving", { ns: "common" })
|
|
||||||
: t("button.save", { ns: "common" })}
|
|
||||||
</Button>
|
|
||||||
</div>
|
|
||||||
</form>
|
|
||||||
</Form>
|
|
||||||
</div>
|
|
||||||
</DialogContent>
|
|
||||||
</Dialog>
|
|
||||||
);
|
|
||||||
}
|
|
||||||
@ -317,21 +317,6 @@ export default function Step3ChooseExamples({
|
|||||||
return unclassifiedImages.length === 0;
|
return unclassifiedImages.length === 0;
|
||||||
}, [unclassifiedImages]);
|
}, [unclassifiedImages]);
|
||||||
|
|
||||||
const handleBack = useCallback(() => {
|
|
||||||
if (currentClassIndex > 0) {
|
|
||||||
const previousClass = allClasses[currentClassIndex - 1];
|
|
||||||
setCurrentClassIndex((prev) => prev - 1);
|
|
||||||
|
|
||||||
// Restore selections for the previous class
|
|
||||||
const previousSelections = Object.entries(imageClassifications)
|
|
||||||
.filter(([_, className]) => className === previousClass)
|
|
||||||
.map(([imageName, _]) => imageName);
|
|
||||||
setSelectedImages(new Set(previousSelections));
|
|
||||||
} else {
|
|
||||||
onBack();
|
|
||||||
}
|
|
||||||
}, [currentClassIndex, allClasses, imageClassifications, onBack]);
|
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<div className="flex flex-col gap-6">
|
<div className="flex flex-col gap-6">
|
||||||
{isTraining ? (
|
{isTraining ? (
|
||||||
@ -435,7 +420,7 @@ export default function Step3ChooseExamples({
|
|||||||
|
|
||||||
{!isTraining && (
|
{!isTraining && (
|
||||||
<div className="flex flex-col gap-3 pt-3 sm:flex-row sm:justify-end sm:gap-4">
|
<div className="flex flex-col gap-3 pt-3 sm:flex-row sm:justify-end sm:gap-4">
|
||||||
<Button type="button" onClick={handleBack} className="sm:flex-1">
|
<Button type="button" onClick={onBack} className="sm:flex-1">
|
||||||
{t("button.back", { ns: "common" })}
|
{t("button.back", { ns: "common" })}
|
||||||
</Button>
|
</Button>
|
||||||
<Button
|
<Button
|
||||||
|
|||||||
@ -348,26 +348,6 @@ export function GeneralFilterContent({
|
|||||||
onClose,
|
onClose,
|
||||||
}: GeneralFilterContentProps) {
|
}: GeneralFilterContentProps) {
|
||||||
const { t } = useTranslation(["components/filter"]);
|
const { t } = useTranslation(["components/filter"]);
|
||||||
const { data: config } = useSWR<FrigateConfig>("config", {
|
|
||||||
revalidateOnFocus: false,
|
|
||||||
});
|
|
||||||
|
|
||||||
const allAudioListenLabels = useMemo<string[]>(() => {
|
|
||||||
if (!config) {
|
|
||||||
return [];
|
|
||||||
}
|
|
||||||
|
|
||||||
const labels = new Set<string>();
|
|
||||||
Object.values(config.cameras).forEach((camera) => {
|
|
||||||
if (camera?.audio?.enabled) {
|
|
||||||
camera.audio.listen.forEach((label) => {
|
|
||||||
labels.add(label);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
});
|
|
||||||
return [...labels].sort();
|
|
||||||
}, [config]);
|
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<>
|
<>
|
||||||
<div className="overflow-x-hidden">
|
<div className="overflow-x-hidden">
|
||||||
@ -393,10 +373,7 @@ export function GeneralFilterContent({
|
|||||||
{allLabels.map((item) => (
|
{allLabels.map((item) => (
|
||||||
<FilterSwitch
|
<FilterSwitch
|
||||||
key={item}
|
key={item}
|
||||||
label={getTranslatedLabel(
|
label={getTranslatedLabel(item)}
|
||||||
item,
|
|
||||||
allAudioListenLabels.includes(item) ? "audio" : "object",
|
|
||||||
)}
|
|
||||||
isChecked={currentLabels?.includes(item) ?? false}
|
isChecked={currentLabels?.includes(item) ?? false}
|
||||||
onCheckedChange={(isChecked) => {
|
onCheckedChange={(isChecked) => {
|
||||||
if (isChecked) {
|
if (isChecked) {
|
||||||
|
|||||||
@ -8,7 +8,7 @@ import {
|
|||||||
FormMessage,
|
FormMessage,
|
||||||
} from "@/components/ui/form";
|
} from "@/components/ui/form";
|
||||||
import { Input } from "@/components/ui/input";
|
import { Input } from "@/components/ui/input";
|
||||||
import { useState, useEffect, useRef } from "react";
|
import { useState, useEffect } from "react";
|
||||||
import { useFormContext } from "react-hook-form";
|
import { useFormContext } from "react-hook-form";
|
||||||
import { generateFixedHash, isValidId } from "@/utils/stringUtil";
|
import { generateFixedHash, isValidId } from "@/utils/stringUtil";
|
||||||
import { useTranslation } from "react-i18next";
|
import { useTranslation } from "react-i18next";
|
||||||
@ -25,7 +25,6 @@ type NameAndIdFieldsProps<T extends FieldValues = FieldValues> = {
|
|||||||
processId?: (name: string) => string;
|
processId?: (name: string) => string;
|
||||||
placeholderName?: string;
|
placeholderName?: string;
|
||||||
placeholderId?: string;
|
placeholderId?: string;
|
||||||
idVisible?: boolean;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
export default function NameAndIdFields<T extends FieldValues = FieldValues>({
|
export default function NameAndIdFields<T extends FieldValues = FieldValues>({
|
||||||
@ -40,12 +39,10 @@ export default function NameAndIdFields<T extends FieldValues = FieldValues>({
|
|||||||
processId,
|
processId,
|
||||||
placeholderName,
|
placeholderName,
|
||||||
placeholderId,
|
placeholderId,
|
||||||
idVisible,
|
|
||||||
}: NameAndIdFieldsProps<T>) {
|
}: NameAndIdFieldsProps<T>) {
|
||||||
const { t } = useTranslation(["common"]);
|
const { t } = useTranslation(["common"]);
|
||||||
const { watch, setValue, trigger, formState } = useFormContext<T>();
|
const { watch, setValue, trigger } = useFormContext<T>();
|
||||||
const [isIdVisible, setIsIdVisible] = useState(idVisible ?? false);
|
const [isIdVisible, setIsIdVisible] = useState(false);
|
||||||
const hasUserTypedRef = useRef(false);
|
|
||||||
|
|
||||||
const defaultProcessId = (name: string) => {
|
const defaultProcessId = (name: string) => {
|
||||||
const normalized = name.replace(/\s+/g, "_").toLowerCase();
|
const normalized = name.replace(/\s+/g, "_").toLowerCase();
|
||||||
@ -61,7 +58,6 @@ export default function NameAndIdFields<T extends FieldValues = FieldValues>({
|
|||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
const subscription = watch((value, { name }) => {
|
const subscription = watch((value, { name }) => {
|
||||||
if (name === nameField) {
|
if (name === nameField) {
|
||||||
hasUserTypedRef.current = true;
|
|
||||||
const processedId = effectiveProcessId(value[nameField] || "");
|
const processedId = effectiveProcessId(value[nameField] || "");
|
||||||
setValue(idField, processedId as PathValue<T, Path<T>>);
|
setValue(idField, processedId as PathValue<T, Path<T>>);
|
||||||
trigger(idField);
|
trigger(idField);
|
||||||
@ -70,14 +66,6 @@ export default function NameAndIdFields<T extends FieldValues = FieldValues>({
|
|||||||
return () => subscription.unsubscribe();
|
return () => subscription.unsubscribe();
|
||||||
}, [watch, setValue, trigger, nameField, idField, effectiveProcessId]);
|
}, [watch, setValue, trigger, nameField, idField, effectiveProcessId]);
|
||||||
|
|
||||||
// Auto-expand if there's an error on the ID field after user has typed
|
|
||||||
useEffect(() => {
|
|
||||||
const idError = formState.errors[idField];
|
|
||||||
if (idError && hasUserTypedRef.current && !isIdVisible) {
|
|
||||||
setIsIdVisible(true);
|
|
||||||
}
|
|
||||||
}, [formState.errors, idField, isIdVisible]);
|
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<>
|
<>
|
||||||
<FormField
|
<FormField
|
||||||
|
|||||||
@ -258,7 +258,6 @@ export default function CreateTriggerDialog({
|
|||||||
nameLabel={t("triggers.dialog.form.name.title")}
|
nameLabel={t("triggers.dialog.form.name.title")}
|
||||||
nameDescription={t("triggers.dialog.form.name.description")}
|
nameDescription={t("triggers.dialog.form.name.description")}
|
||||||
placeholderName={t("triggers.dialog.form.name.placeholder")}
|
placeholderName={t("triggers.dialog.form.name.placeholder")}
|
||||||
idVisible={!!trigger}
|
|
||||||
/>
|
/>
|
||||||
|
|
||||||
<FormField
|
<FormField
|
||||||
|
|||||||
@ -59,47 +59,6 @@ export default function ObjectTrackOverlay({
|
|||||||
|
|
||||||
const effectiveCurrentTime = currentTime - annotationOffset / 1000;
|
const effectiveCurrentTime = currentTime - annotationOffset / 1000;
|
||||||
|
|
||||||
const {
|
|
||||||
pathStroke,
|
|
||||||
pointRadius,
|
|
||||||
pointStroke,
|
|
||||||
zoneStroke,
|
|
||||||
boxStroke,
|
|
||||||
highlightRadius,
|
|
||||||
} = useMemo(() => {
|
|
||||||
const BASE_WIDTH = 1280;
|
|
||||||
const BASE_HEIGHT = 720;
|
|
||||||
const BASE_PATH_STROKE = 5;
|
|
||||||
const BASE_POINT_RADIUS = 7;
|
|
||||||
const BASE_POINT_STROKE = 3;
|
|
||||||
const BASE_ZONE_STROKE = 5;
|
|
||||||
const BASE_BOX_STROKE = 5;
|
|
||||||
const BASE_HIGHLIGHT_RADIUS = 5;
|
|
||||||
|
|
||||||
const scale = Math.sqrt(
|
|
||||||
(videoWidth * videoHeight) / (BASE_WIDTH * BASE_HEIGHT),
|
|
||||||
);
|
|
||||||
|
|
||||||
const pathStroke = Math.max(1, Math.round(BASE_PATH_STROKE * scale));
|
|
||||||
const pointRadius = Math.max(2, Math.round(BASE_POINT_RADIUS * scale));
|
|
||||||
const pointStroke = Math.max(1, Math.round(BASE_POINT_STROKE * scale));
|
|
||||||
const zoneStroke = Math.max(1, Math.round(BASE_ZONE_STROKE * scale));
|
|
||||||
const boxStroke = Math.max(1, Math.round(BASE_BOX_STROKE * scale));
|
|
||||||
const highlightRadius = Math.max(
|
|
||||||
2,
|
|
||||||
Math.round(BASE_HIGHLIGHT_RADIUS * scale),
|
|
||||||
);
|
|
||||||
|
|
||||||
return {
|
|
||||||
pathStroke,
|
|
||||||
pointRadius,
|
|
||||||
pointStroke,
|
|
||||||
zoneStroke,
|
|
||||||
boxStroke,
|
|
||||||
highlightRadius,
|
|
||||||
};
|
|
||||||
}, [videoWidth, videoHeight]);
|
|
||||||
|
|
||||||
// Fetch all event data in a single request (CSV ids)
|
// Fetch all event data in a single request (CSV ids)
|
||||||
const { data: eventsData } = useSWR<Event[]>(
|
const { data: eventsData } = useSWR<Event[]>(
|
||||||
selectedObjectIds.length > 0
|
selectedObjectIds.length > 0
|
||||||
@ -255,21 +214,16 @@ export default function ObjectTrackOverlay({
|
|||||||
b.timestamp - a.timestamp,
|
b.timestamp - a.timestamp,
|
||||||
)[0]?.data?.zones || [];
|
)[0]?.data?.zones || [];
|
||||||
|
|
||||||
// bounding box - only show if there's a timeline event at/near the current time with a box
|
// bounding box (with tolerance for browsers with seek precision by-design issues)
|
||||||
// Search all timeline events (not just those before current time) to find one matching the seek position
|
const boxCandidates = timelineData?.filter(
|
||||||
const nearbyTimelineEvent = timelineData
|
(event: TrackingDetailsSequence) =>
|
||||||
?.filter((event: TrackingDetailsSequence) => event.data.box)
|
event.timestamp <= effectiveCurrentTime + TOLERANCE &&
|
||||||
.sort(
|
event.data.box,
|
||||||
(a: TrackingDetailsSequence, b: TrackingDetailsSequence) =>
|
);
|
||||||
Math.abs(a.timestamp - effectiveCurrentTime) -
|
const currentBox = boxCandidates?.sort(
|
||||||
Math.abs(b.timestamp - effectiveCurrentTime),
|
(a: TrackingDetailsSequence, b: TrackingDetailsSequence) =>
|
||||||
)
|
b.timestamp - a.timestamp,
|
||||||
.find(
|
)[0]?.data?.box;
|
||||||
(event: TrackingDetailsSequence) =>
|
|
||||||
Math.abs(event.timestamp - effectiveCurrentTime) <= TOLERANCE,
|
|
||||||
);
|
|
||||||
|
|
||||||
const currentBox = nearbyTimelineEvent?.data?.box;
|
|
||||||
|
|
||||||
return {
|
return {
|
||||||
objectId,
|
objectId,
|
||||||
@ -395,7 +349,7 @@ export default function ObjectTrackOverlay({
|
|||||||
points={zone.points}
|
points={zone.points}
|
||||||
fill={zone.fill}
|
fill={zone.fill}
|
||||||
stroke={zone.stroke}
|
stroke={zone.stroke}
|
||||||
strokeWidth={zoneStroke}
|
strokeWidth="5"
|
||||||
opacity="0.7"
|
opacity="0.7"
|
||||||
/>
|
/>
|
||||||
))}
|
))}
|
||||||
@ -415,7 +369,7 @@ export default function ObjectTrackOverlay({
|
|||||||
d={generateStraightPath(absolutePositions)}
|
d={generateStraightPath(absolutePositions)}
|
||||||
fill="none"
|
fill="none"
|
||||||
stroke={objData.color}
|
stroke={objData.color}
|
||||||
strokeWidth={pathStroke}
|
strokeWidth="5"
|
||||||
strokeLinecap="round"
|
strokeLinecap="round"
|
||||||
strokeLinejoin="round"
|
strokeLinejoin="round"
|
||||||
/>
|
/>
|
||||||
@ -427,13 +381,13 @@ export default function ObjectTrackOverlay({
|
|||||||
<circle
|
<circle
|
||||||
cx={pos.x}
|
cx={pos.x}
|
||||||
cy={pos.y}
|
cy={pos.y}
|
||||||
r={pointRadius}
|
r="7"
|
||||||
fill={getPointColor(
|
fill={getPointColor(
|
||||||
objData.color,
|
objData.color,
|
||||||
pos.lifecycle_item?.class_type,
|
pos.lifecycle_item?.class_type,
|
||||||
)}
|
)}
|
||||||
stroke="white"
|
stroke="white"
|
||||||
strokeWidth={pointStroke}
|
strokeWidth="3"
|
||||||
style={{ cursor: onSeekToTime ? "pointer" : "default" }}
|
style={{ cursor: onSeekToTime ? "pointer" : "default" }}
|
||||||
onClick={() => handlePointClick(pos.timestamp)}
|
onClick={() => handlePointClick(pos.timestamp)}
|
||||||
/>
|
/>
|
||||||
@ -462,7 +416,7 @@ export default function ObjectTrackOverlay({
|
|||||||
height={objData.currentBox[3] * videoHeight}
|
height={objData.currentBox[3] * videoHeight}
|
||||||
fill="none"
|
fill="none"
|
||||||
stroke={objData.color}
|
stroke={objData.color}
|
||||||
strokeWidth={boxStroke}
|
strokeWidth="5"
|
||||||
opacity="0.9"
|
opacity="0.9"
|
||||||
/>
|
/>
|
||||||
<circle
|
<circle
|
||||||
@ -474,10 +428,10 @@ export default function ObjectTrackOverlay({
|
|||||||
(objData.currentBox[1] + objData.currentBox[3]) *
|
(objData.currentBox[1] + objData.currentBox[3]) *
|
||||||
videoHeight
|
videoHeight
|
||||||
}
|
}
|
||||||
r={highlightRadius}
|
r="5"
|
||||||
fill="rgb(255, 255, 0)" // yellow highlight
|
fill="rgb(255, 255, 0)" // yellow highlight
|
||||||
stroke={objData.color}
|
stroke={objData.color}
|
||||||
strokeWidth={boxStroke}
|
strokeWidth="5"
|
||||||
opacity="1"
|
opacity="1"
|
||||||
/>
|
/>
|
||||||
</g>
|
</g>
|
||||||
|
|||||||
@ -8,7 +8,7 @@ import Heading from "@/components/ui/heading";
|
|||||||
import { FrigateConfig } from "@/types/frigateConfig";
|
import { FrigateConfig } from "@/types/frigateConfig";
|
||||||
import { formatUnixTimestampToDateTime } from "@/utils/dateUtil";
|
import { formatUnixTimestampToDateTime } from "@/utils/dateUtil";
|
||||||
import { getIconForLabel } from "@/utils/iconUtil";
|
import { getIconForLabel } from "@/utils/iconUtil";
|
||||||
import { LuCircle, LuFolderX, LuSettings } from "react-icons/lu";
|
import { LuCircle, LuSettings } from "react-icons/lu";
|
||||||
import { cn } from "@/lib/utils";
|
import { cn } from "@/lib/utils";
|
||||||
import {
|
import {
|
||||||
Tooltip,
|
Tooltip,
|
||||||
@ -32,18 +32,14 @@ import { Link, useNavigate } from "react-router-dom";
|
|||||||
import { getLifecycleItemDescription } from "@/utils/lifecycleUtil";
|
import { getLifecycleItemDescription } from "@/utils/lifecycleUtil";
|
||||||
import { useTranslation } from "react-i18next";
|
import { useTranslation } from "react-i18next";
|
||||||
import { getTranslatedLabel } from "@/utils/i18n";
|
import { getTranslatedLabel } from "@/utils/i18n";
|
||||||
import { resolveZoneName } from "@/hooks/use-zone-friendly-name";
|
|
||||||
import { Badge } from "@/components/ui/badge";
|
import { Badge } from "@/components/ui/badge";
|
||||||
import { HiDotsHorizontal } from "react-icons/hi";
|
import { HiDotsHorizontal } from "react-icons/hi";
|
||||||
import axios from "axios";
|
import axios from "axios";
|
||||||
import { toast } from "sonner";
|
import { toast } from "sonner";
|
||||||
import { useDetailStream } from "@/context/detail-stream-context";
|
import { useDetailStream } from "@/context/detail-stream-context";
|
||||||
import { isDesktop, isIOS, isMobileOnly, isSafari } from "react-device-detect";
|
import { isDesktop, isIOS } from "react-device-detect";
|
||||||
import Chip from "@/components/indicators/Chip";
|
import Chip from "@/components/indicators/Chip";
|
||||||
import { FaDownload, FaHistory } from "react-icons/fa";
|
import { FaDownload, FaHistory } from "react-icons/fa";
|
||||||
import { useApiHost } from "@/api";
|
|
||||||
import ImageLoadingIndicator from "@/components/indicators/ImageLoadingIndicator";
|
|
||||||
import ObjectTrackOverlay from "../ObjectTrackOverlay";
|
|
||||||
|
|
||||||
type TrackingDetailsProps = {
|
type TrackingDetailsProps = {
|
||||||
className?: string;
|
className?: string;
|
||||||
@ -60,19 +56,9 @@ export function TrackingDetails({
|
|||||||
const videoRef = useRef<HTMLVideoElement | null>(null);
|
const videoRef = useRef<HTMLVideoElement | null>(null);
|
||||||
const { t } = useTranslation(["views/explore"]);
|
const { t } = useTranslation(["views/explore"]);
|
||||||
const navigate = useNavigate();
|
const navigate = useNavigate();
|
||||||
const apiHost = useApiHost();
|
|
||||||
const imgRef = useRef<HTMLImageElement | null>(null);
|
|
||||||
const [imgLoaded, setImgLoaded] = useState(false);
|
|
||||||
const [displaySource, _setDisplaySource] = useState<"video" | "image">(
|
|
||||||
"video",
|
|
||||||
);
|
|
||||||
const { setSelectedObjectIds, annotationOffset, setAnnotationOffset } =
|
const { setSelectedObjectIds, annotationOffset, setAnnotationOffset } =
|
||||||
useDetailStream();
|
useDetailStream();
|
||||||
|
|
||||||
// manualOverride holds a record-stream timestamp explicitly chosen by the
|
|
||||||
// user (eg, clicking a lifecycle row). When null we display `currentTime`.
|
|
||||||
const [manualOverride, setManualOverride] = useState<number | null>(null);
|
|
||||||
|
|
||||||
// event.start_time is detect time, convert to record, then subtract padding
|
// event.start_time is detect time, convert to record, then subtract padding
|
||||||
const [currentTime, setCurrentTime] = useState(
|
const [currentTime, setCurrentTime] = useState(
|
||||||
(event.start_time ?? 0) + annotationOffset / 1000 - REVIEW_PADDING,
|
(event.start_time ?? 0) + annotationOffset / 1000 - REVIEW_PADDING,
|
||||||
@ -87,19 +73,9 @@ export function TrackingDetails({
|
|||||||
|
|
||||||
const { data: config } = useSWR<FrigateConfig>("config");
|
const { data: config } = useSWR<FrigateConfig>("config");
|
||||||
|
|
||||||
eventSequence?.map((event) => {
|
|
||||||
event.data.zones_friendly_names = event.data?.zones?.map((zone) => {
|
|
||||||
return resolveZoneName(config, zone);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
// Use manualOverride (set when seeking in image mode) if present so
|
|
||||||
// lifecycle rows and overlays follow image-mode seeks. Otherwise fall
|
|
||||||
// back to currentTime used for video mode.
|
|
||||||
const effectiveTime = useMemo(() => {
|
const effectiveTime = useMemo(() => {
|
||||||
const displayedRecordTime = manualOverride ?? currentTime;
|
return currentTime - annotationOffset / 1000;
|
||||||
return displayedRecordTime - annotationOffset / 1000;
|
}, [currentTime, annotationOffset]);
|
||||||
}, [manualOverride, currentTime, annotationOffset]);
|
|
||||||
|
|
||||||
const containerRef = useRef<HTMLDivElement | null>(null);
|
const containerRef = useRef<HTMLDivElement | null>(null);
|
||||||
const [_selectedZone, setSelectedZone] = useState("");
|
const [_selectedZone, setSelectedZone] = useState("");
|
||||||
@ -142,30 +118,20 @@ export function TrackingDetails({
|
|||||||
|
|
||||||
const handleLifecycleClick = useCallback(
|
const handleLifecycleClick = useCallback(
|
||||||
(item: TrackingDetailsSequence) => {
|
(item: TrackingDetailsSequence) => {
|
||||||
if (!videoRef.current && !imgRef.current) return;
|
if (!videoRef.current) return;
|
||||||
|
|
||||||
// Convert lifecycle timestamp (detect stream) to record stream time
|
// Convert lifecycle timestamp (detect stream) to record stream time
|
||||||
const targetTimeRecord = item.timestamp + annotationOffset / 1000;
|
const targetTimeRecord = item.timestamp + annotationOffset / 1000;
|
||||||
|
|
||||||
if (displaySource === "image") {
|
// Convert to video-relative time for seeking
|
||||||
// For image mode: set a manual override timestamp and update
|
|
||||||
// currentTime so overlays render correctly.
|
|
||||||
setManualOverride(targetTimeRecord);
|
|
||||||
setCurrentTime(targetTimeRecord);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// For video mode: convert to video-relative time and seek player
|
|
||||||
const eventStartRecord =
|
const eventStartRecord =
|
||||||
(event.start_time ?? 0) + annotationOffset / 1000;
|
(event.start_time ?? 0) + annotationOffset / 1000;
|
||||||
const videoStartTime = eventStartRecord - REVIEW_PADDING;
|
const videoStartTime = eventStartRecord - REVIEW_PADDING;
|
||||||
const relativeTime = targetTimeRecord - videoStartTime;
|
const relativeTime = targetTimeRecord - videoStartTime;
|
||||||
|
|
||||||
if (videoRef.current) {
|
videoRef.current.currentTime = relativeTime;
|
||||||
videoRef.current.currentTime = relativeTime;
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
[event.start_time, annotationOffset, displaySource],
|
[event.start_time, annotationOffset],
|
||||||
);
|
);
|
||||||
|
|
||||||
const formattedStart = config
|
const formattedStart = config
|
||||||
@ -206,20 +172,11 @@ export function TrackingDetails({
|
|||||||
}, [eventSequence]);
|
}, [eventSequence]);
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
if (seekToTimestamp === null) return;
|
if (seekToTimestamp === null || !videoRef.current) return;
|
||||||
|
|
||||||
if (displaySource === "image") {
|
|
||||||
// For image mode, set the manual override so the snapshot updates to
|
|
||||||
// the exact record timestamp.
|
|
||||||
setManualOverride(seekToTimestamp);
|
|
||||||
setSeekToTimestamp(null);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// seekToTimestamp is a record stream timestamp
|
// seekToTimestamp is a record stream timestamp
|
||||||
// event.start_time is detect stream time, convert to record
|
// event.start_time is detect stream time, convert to record
|
||||||
// The video clip starts at (eventStartRecord - REVIEW_PADDING)
|
// The video clip starts at (eventStartRecord - REVIEW_PADDING)
|
||||||
if (!videoRef.current) return;
|
|
||||||
const eventStartRecord = event.start_time + annotationOffset / 1000;
|
const eventStartRecord = event.start_time + annotationOffset / 1000;
|
||||||
const videoStartTime = eventStartRecord - REVIEW_PADDING;
|
const videoStartTime = eventStartRecord - REVIEW_PADDING;
|
||||||
const relativeTime = seekToTimestamp - videoStartTime;
|
const relativeTime = seekToTimestamp - videoStartTime;
|
||||||
@ -227,14 +184,7 @@ export function TrackingDetails({
|
|||||||
videoRef.current.currentTime = relativeTime;
|
videoRef.current.currentTime = relativeTime;
|
||||||
}
|
}
|
||||||
setSeekToTimestamp(null);
|
setSeekToTimestamp(null);
|
||||||
}, [
|
}, [seekToTimestamp, event.start_time, annotationOffset]);
|
||||||
seekToTimestamp,
|
|
||||||
event.start_time,
|
|
||||||
annotationOffset,
|
|
||||||
apiHost,
|
|
||||||
event.camera,
|
|
||||||
displaySource,
|
|
||||||
]);
|
|
||||||
|
|
||||||
const isWithinEventRange =
|
const isWithinEventRange =
|
||||||
effectiveTime !== undefined &&
|
effectiveTime !== undefined &&
|
||||||
@ -337,27 +287,6 @@ export function TrackingDetails({
|
|||||||
[event.start_time, annotationOffset],
|
[event.start_time, annotationOffset],
|
||||||
);
|
);
|
||||||
|
|
||||||
const [src, setSrc] = useState(
|
|
||||||
`${apiHost}api/${event.camera}/recordings/${currentTime + REVIEW_PADDING}/snapshot.jpg?height=500`,
|
|
||||||
);
|
|
||||||
const [hasError, setHasError] = useState(false);
|
|
||||||
|
|
||||||
// Derive the record timestamp to display: manualOverride if present,
|
|
||||||
// otherwise use currentTime.
|
|
||||||
const displayedRecordTime = manualOverride ?? currentTime;
|
|
||||||
|
|
||||||
useEffect(() => {
|
|
||||||
if (displayedRecordTime) {
|
|
||||||
const newSrc = `${apiHost}api/${event.camera}/recordings/${displayedRecordTime}/snapshot.jpg?height=500`;
|
|
||||||
setSrc(newSrc);
|
|
||||||
}
|
|
||||||
setImgLoaded(false);
|
|
||||||
setHasError(false);
|
|
||||||
|
|
||||||
// we know that these deps are correct
|
|
||||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
|
||||||
}, [displayedRecordTime]);
|
|
||||||
|
|
||||||
if (!config) {
|
if (!config) {
|
||||||
return <ActivityIndicator />;
|
return <ActivityIndicator />;
|
||||||
}
|
}
|
||||||
@ -375,10 +304,9 @@ export function TrackingDetails({
|
|||||||
|
|
||||||
<div
|
<div
|
||||||
className={cn(
|
className={cn(
|
||||||
"flex items-center justify-center",
|
"flex w-full items-center justify-center",
|
||||||
isDesktop && "overflow-hidden",
|
isDesktop && "overflow-hidden",
|
||||||
cameraAspect === "tall" ? "max-h-[50dvh] lg:max-h-[70dvh]" : "w-full",
|
cameraAspect === "tall" ? "max-h-[50dvh] lg:max-h-[70dvh]" : "w-full",
|
||||||
cameraAspect === "tall" && isMobileOnly && "w-full",
|
|
||||||
cameraAspect !== "tall" && isDesktop && "flex-[3]",
|
cameraAspect !== "tall" && isDesktop && "flex-[3]",
|
||||||
)}
|
)}
|
||||||
style={{ aspectRatio: aspectRatio }}
|
style={{ aspectRatio: aspectRatio }}
|
||||||
@ -390,75 +318,21 @@ export function TrackingDetails({
|
|||||||
cameraAspect === "tall" ? "h-full" : "w-full",
|
cameraAspect === "tall" ? "h-full" : "w-full",
|
||||||
)}
|
)}
|
||||||
>
|
>
|
||||||
{displaySource == "video" && (
|
<HlsVideoPlayer
|
||||||
<HlsVideoPlayer
|
videoRef={videoRef}
|
||||||
videoRef={videoRef}
|
containerRef={containerRef}
|
||||||
containerRef={containerRef}
|
visible={true}
|
||||||
visible={true}
|
currentSource={videoSource}
|
||||||
currentSource={videoSource}
|
hotKeys={false}
|
||||||
hotKeys={false}
|
supportsFullscreen={false}
|
||||||
supportsFullscreen={false}
|
fullscreen={false}
|
||||||
fullscreen={false}
|
frigateControls={true}
|
||||||
frigateControls={true}
|
onTimeUpdate={handleTimeUpdate}
|
||||||
onTimeUpdate={handleTimeUpdate}
|
onSeekToTime={handleSeekToTime}
|
||||||
onSeekToTime={handleSeekToTime}
|
isDetailMode={true}
|
||||||
isDetailMode={true}
|
camera={event.camera}
|
||||||
camera={event.camera}
|
currentTimeOverride={currentTime}
|
||||||
currentTimeOverride={currentTime}
|
/>
|
||||||
/>
|
|
||||||
)}
|
|
||||||
{displaySource == "image" && (
|
|
||||||
<>
|
|
||||||
<ImageLoadingIndicator
|
|
||||||
className="absolute inset-0"
|
|
||||||
imgLoaded={imgLoaded}
|
|
||||||
/>
|
|
||||||
{hasError && (
|
|
||||||
<div className="relative aspect-video">
|
|
||||||
<div className="flex flex-col items-center justify-center p-20 text-center">
|
|
||||||
<LuFolderX className="size-16" />
|
|
||||||
{t("objectLifecycle.noImageFound")}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
)}
|
|
||||||
<div
|
|
||||||
className={cn("relative", imgLoaded ? "visible" : "invisible")}
|
|
||||||
>
|
|
||||||
<div className="absolute z-50 size-full">
|
|
||||||
<ObjectTrackOverlay
|
|
||||||
key={`overlay-${displayedRecordTime}`}
|
|
||||||
camera={event.camera}
|
|
||||||
showBoundingBoxes={true}
|
|
||||||
currentTime={displayedRecordTime}
|
|
||||||
videoWidth={imgRef?.current?.naturalWidth ?? 0}
|
|
||||||
videoHeight={imgRef?.current?.naturalHeight ?? 0}
|
|
||||||
className="absolute inset-0 z-10"
|
|
||||||
onSeekToTime={handleSeekToTime}
|
|
||||||
/>
|
|
||||||
</div>
|
|
||||||
<img
|
|
||||||
key={event.id}
|
|
||||||
ref={imgRef}
|
|
||||||
className={cn(
|
|
||||||
"max-h-[50dvh] max-w-full select-none rounded-lg object-contain",
|
|
||||||
)}
|
|
||||||
loading={isSafari ? "eager" : "lazy"}
|
|
||||||
style={
|
|
||||||
isIOS
|
|
||||||
? {
|
|
||||||
WebkitUserSelect: "none",
|
|
||||||
WebkitTouchCallout: "none",
|
|
||||||
}
|
|
||||||
: undefined
|
|
||||||
}
|
|
||||||
draggable={false}
|
|
||||||
src={src}
|
|
||||||
onLoad={() => setImgLoaded(true)}
|
|
||||||
onError={() => setHasError(true)}
|
|
||||||
/>
|
|
||||||
</div>
|
|
||||||
</>
|
|
||||||
)}
|
|
||||||
<div
|
<div
|
||||||
className={cn(
|
className={cn(
|
||||||
"absolute top-2 z-[5] flex items-center gap-2",
|
"absolute top-2 z-[5] flex items-center gap-2",
|
||||||
@ -829,8 +703,7 @@ function LifecycleIconRow({
|
|||||||
}}
|
}}
|
||||||
/>
|
/>
|
||||||
<span className="smart-capitalize">
|
<span className="smart-capitalize">
|
||||||
{item.data?.zones_friendly_names?.[zidx] ??
|
{zone.replaceAll("_", " ")}
|
||||||
zone.replaceAll("_", " ")}
|
|
||||||
</span>
|
</span>
|
||||||
</Badge>
|
</Badge>
|
||||||
);
|
);
|
||||||
|
|||||||
@ -289,7 +289,6 @@ export default function VideoControls({
|
|||||||
}}
|
}}
|
||||||
onUploadFrame={onUploadFrame}
|
onUploadFrame={onUploadFrame}
|
||||||
containerRef={containerRef}
|
containerRef={containerRef}
|
||||||
fullscreen={fullscreen}
|
|
||||||
/>
|
/>
|
||||||
)}
|
)}
|
||||||
{features.fullscreen && toggleFullscreen && (
|
{features.fullscreen && toggleFullscreen && (
|
||||||
@ -307,7 +306,6 @@ type FrigatePlusUploadButtonProps = {
|
|||||||
onClose: () => void;
|
onClose: () => void;
|
||||||
onUploadFrame: () => void;
|
onUploadFrame: () => void;
|
||||||
containerRef?: React.MutableRefObject<HTMLDivElement | null>;
|
containerRef?: React.MutableRefObject<HTMLDivElement | null>;
|
||||||
fullscreen?: boolean;
|
|
||||||
};
|
};
|
||||||
function FrigatePlusUploadButton({
|
function FrigatePlusUploadButton({
|
||||||
video,
|
video,
|
||||||
@ -315,7 +313,6 @@ function FrigatePlusUploadButton({
|
|||||||
onClose,
|
onClose,
|
||||||
onUploadFrame,
|
onUploadFrame,
|
||||||
containerRef,
|
containerRef,
|
||||||
fullscreen,
|
|
||||||
}: FrigatePlusUploadButtonProps) {
|
}: FrigatePlusUploadButtonProps) {
|
||||||
const { t } = useTranslation(["components/player"]);
|
const { t } = useTranslation(["components/player"]);
|
||||||
|
|
||||||
@ -352,11 +349,7 @@ function FrigatePlusUploadButton({
|
|||||||
/>
|
/>
|
||||||
</AlertDialogTrigger>
|
</AlertDialogTrigger>
|
||||||
<AlertDialogContent
|
<AlertDialogContent
|
||||||
portalProps={
|
portalProps={{ container: containerRef?.current }}
|
||||||
fullscreen && containerRef?.current
|
|
||||||
? { container: containerRef.current }
|
|
||||||
: undefined
|
|
||||||
}
|
|
||||||
className="md:max-w-2xl lg:max-w-3xl xl:max-w-4xl"
|
className="md:max-w-2xl lg:max-w-3xl xl:max-w-4xl"
|
||||||
>
|
>
|
||||||
<AlertDialogHeader>
|
<AlertDialogHeader>
|
||||||
|
|||||||
@ -174,7 +174,9 @@ export default function CameraWizardDialog({
|
|||||||
...(friendlyName && { friendly_name: friendlyName }),
|
...(friendlyName && { friendly_name: friendlyName }),
|
||||||
ffmpeg: {
|
ffmpeg: {
|
||||||
inputs: wizardData.streams.map((stream, index) => {
|
inputs: wizardData.streams.map((stream, index) => {
|
||||||
if (stream.restream) {
|
const isRestreamed =
|
||||||
|
wizardData.restreamIds?.includes(stream.id) ?? false;
|
||||||
|
if (isRestreamed) {
|
||||||
const go2rtcStreamName =
|
const go2rtcStreamName =
|
||||||
wizardData.streams!.length === 1
|
wizardData.streams!.length === 1
|
||||||
? finalCameraName
|
? finalCameraName
|
||||||
@ -232,11 +234,7 @@ export default function CameraWizardDialog({
|
|||||||
wizardData.streams!.length === 1
|
wizardData.streams!.length === 1
|
||||||
? finalCameraName
|
? finalCameraName
|
||||||
: `${finalCameraName}_${index + 1}`;
|
: `${finalCameraName}_${index + 1}`;
|
||||||
|
go2rtcStreams[streamName] = [stream.url];
|
||||||
const streamUrl = stream.useFfmpeg
|
|
||||||
? `ffmpeg:${stream.url}`
|
|
||||||
: stream.url;
|
|
||||||
go2rtcStreams[streamName] = [streamUrl];
|
|
||||||
});
|
});
|
||||||
|
|
||||||
if (Object.keys(go2rtcStreams).length > 0) {
|
if (Object.keys(go2rtcStreams).length > 0) {
|
||||||
|
|||||||
@ -262,7 +262,7 @@ export function PolygonCanvas({
|
|||||||
};
|
};
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
if (activePolygonIndex === undefined || !polygons?.length) {
|
if (activePolygonIndex === undefined || !polygons) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -270,7 +270,7 @@ export function PolygonCanvas({
|
|||||||
const activePolygon = updatedPolygons[activePolygonIndex];
|
const activePolygon = updatedPolygons[activePolygonIndex];
|
||||||
|
|
||||||
// add default points order for already completed polygons
|
// add default points order for already completed polygons
|
||||||
if (!activePolygon.pointsOrder && activePolygon.isFinished) {
|
if (!activePolygon?.pointsOrder && activePolygon?.isFinished) {
|
||||||
updatedPolygons[activePolygonIndex] = {
|
updatedPolygons[activePolygonIndex] = {
|
||||||
...activePolygon,
|
...activePolygon,
|
||||||
pointsOrder: activePolygon.points.map((_, index) => index),
|
pointsOrder: activePolygon.points.map((_, index) => index),
|
||||||
|
|||||||
@ -179,7 +179,7 @@ export default function PolygonItem({
|
|||||||
if (res.status === 200) {
|
if (res.status === 200) {
|
||||||
toast.success(
|
toast.success(
|
||||||
t("masksAndZones.form.polygonDrawing.delete.success", {
|
t("masksAndZones.form.polygonDrawing.delete.success", {
|
||||||
name: polygon?.friendly_name ?? polygon?.name,
|
name: polygon?.name,
|
||||||
}),
|
}),
|
||||||
{
|
{
|
||||||
position: "top-center",
|
position: "top-center",
|
||||||
|
|||||||
@ -385,7 +385,7 @@ export default function Step1NameCamera({
|
|||||||
</FormLabel>
|
</FormLabel>
|
||||||
<FormControl>
|
<FormControl>
|
||||||
<Input
|
<Input
|
||||||
className="text-md h-8"
|
className="h-8"
|
||||||
placeholder={t(
|
placeholder={t(
|
||||||
"cameraWizard.step1.cameraNamePlaceholder",
|
"cameraWizard.step1.cameraNamePlaceholder",
|
||||||
)}
|
)}
|
||||||
@ -475,7 +475,7 @@ export default function Step1NameCamera({
|
|||||||
</FormLabel>
|
</FormLabel>
|
||||||
<FormControl>
|
<FormControl>
|
||||||
<Input
|
<Input
|
||||||
className="text-md h-8"
|
className="h-8"
|
||||||
placeholder="192.168.1.100"
|
placeholder="192.168.1.100"
|
||||||
{...field}
|
{...field}
|
||||||
/>
|
/>
|
||||||
@ -495,7 +495,7 @@ export default function Step1NameCamera({
|
|||||||
</FormLabel>
|
</FormLabel>
|
||||||
<FormControl>
|
<FormControl>
|
||||||
<Input
|
<Input
|
||||||
className="text-md h-8"
|
className="h-8"
|
||||||
placeholder={t(
|
placeholder={t(
|
||||||
"cameraWizard.step1.usernamePlaceholder",
|
"cameraWizard.step1.usernamePlaceholder",
|
||||||
)}
|
)}
|
||||||
@ -518,7 +518,7 @@ export default function Step1NameCamera({
|
|||||||
<FormControl>
|
<FormControl>
|
||||||
<div className="relative">
|
<div className="relative">
|
||||||
<Input
|
<Input
|
||||||
className="text-md h-8 pr-10"
|
className="h-8 pr-10"
|
||||||
type={showPassword ? "text" : "password"}
|
type={showPassword ? "text" : "password"}
|
||||||
placeholder={t(
|
placeholder={t(
|
||||||
"cameraWizard.step1.passwordPlaceholder",
|
"cameraWizard.step1.passwordPlaceholder",
|
||||||
@ -558,7 +558,7 @@ export default function Step1NameCamera({
|
|||||||
</FormLabel>
|
</FormLabel>
|
||||||
<FormControl>
|
<FormControl>
|
||||||
<Input
|
<Input
|
||||||
className="text-md h-8"
|
className="h-8"
|
||||||
placeholder="rtsp://username:password@host:port/path"
|
placeholder="rtsp://username:password@host:port/path"
|
||||||
{...field}
|
{...field}
|
||||||
/>
|
/>
|
||||||
@ -608,12 +608,6 @@ export default function Step1NameCamera({
|
|||||||
</div>
|
</div>
|
||||||
)}
|
)}
|
||||||
|
|
||||||
{isTesting && (
|
|
||||||
<div className="flex items-center gap-2 text-sm text-muted-foreground">
|
|
||||||
<ActivityIndicator className="size-4" />
|
|
||||||
{testStatus}
|
|
||||||
</div>
|
|
||||||
)}
|
|
||||||
<div className="flex flex-col gap-3 pt-3 sm:flex-row sm:justify-end sm:gap-4">
|
<div className="flex flex-col gap-3 pt-3 sm:flex-row sm:justify-end sm:gap-4">
|
||||||
<Button
|
<Button
|
||||||
type="button"
|
type="button"
|
||||||
@ -641,7 +635,10 @@ export default function Step1NameCamera({
|
|||||||
variant="select"
|
variant="select"
|
||||||
className="flex items-center justify-center gap-2 sm:flex-1"
|
className="flex items-center justify-center gap-2 sm:flex-1"
|
||||||
>
|
>
|
||||||
{t("cameraWizard.step1.testConnection")}
|
{isTesting && <ActivityIndicator className="size-4" />}
|
||||||
|
{isTesting && testStatus
|
||||||
|
? testStatus
|
||||||
|
: t("cameraWizard.step1.testConnection")}
|
||||||
</Button>
|
</Button>
|
||||||
)}
|
)}
|
||||||
</div>
|
</div>
|
||||||
|
|||||||
@ -201,12 +201,16 @@ export default function Step2StreamConfig({
|
|||||||
|
|
||||||
const setRestream = useCallback(
|
const setRestream = useCallback(
|
||||||
(streamId: string) => {
|
(streamId: string) => {
|
||||||
const stream = streams.find((s) => s.id === streamId);
|
const currentIds = wizardData.restreamIds || [];
|
||||||
if (!stream) return;
|
const isSelected = currentIds.includes(streamId);
|
||||||
|
const newIds = isSelected
|
||||||
updateStream(streamId, { restream: !stream.restream });
|
? currentIds.filter((id) => id !== streamId)
|
||||||
|
: [...currentIds, streamId];
|
||||||
|
onUpdate({
|
||||||
|
restreamIds: newIds,
|
||||||
|
});
|
||||||
},
|
},
|
||||||
[streams, updateStream],
|
[wizardData.restreamIds, onUpdate],
|
||||||
);
|
);
|
||||||
|
|
||||||
const hasDetectRole = streams.some((s) => s.roles.includes("detect"));
|
const hasDetectRole = streams.some((s) => s.roles.includes("detect"));
|
||||||
@ -431,7 +435,9 @@ export default function Step2StreamConfig({
|
|||||||
{t("cameraWizard.step2.go2rtc")}
|
{t("cameraWizard.step2.go2rtc")}
|
||||||
</span>
|
</span>
|
||||||
<Switch
|
<Switch
|
||||||
checked={stream.restream || false}
|
checked={(wizardData.restreamIds || []).includes(
|
||||||
|
stream.id,
|
||||||
|
)}
|
||||||
onCheckedChange={() => setRestream(stream.id)}
|
onCheckedChange={() => setRestream(stream.id)}
|
||||||
/>
|
/>
|
||||||
</div>
|
</div>
|
||||||
|
|||||||
@ -1,13 +1,7 @@
|
|||||||
import { Button } from "@/components/ui/button";
|
import { Button } from "@/components/ui/button";
|
||||||
import { Badge } from "@/components/ui/badge";
|
import { Badge } from "@/components/ui/badge";
|
||||||
import { Switch } from "@/components/ui/switch";
|
|
||||||
import {
|
|
||||||
Popover,
|
|
||||||
PopoverContent,
|
|
||||||
PopoverTrigger,
|
|
||||||
} from "@/components/ui/popover";
|
|
||||||
import { useTranslation } from "react-i18next";
|
import { useTranslation } from "react-i18next";
|
||||||
import { LuRotateCcw, LuInfo } from "react-icons/lu";
|
import { LuRotateCcw } from "react-icons/lu";
|
||||||
import { useState, useCallback, useMemo, useEffect } from "react";
|
import { useState, useCallback, useMemo, useEffect } from "react";
|
||||||
import ActivityIndicator from "@/components/indicators/activity-indicator";
|
import ActivityIndicator from "@/components/indicators/activity-indicator";
|
||||||
import axios from "axios";
|
import axios from "axios";
|
||||||
@ -222,6 +216,7 @@ export default function Step3Validation({
|
|||||||
brandTemplate: wizardData.brandTemplate,
|
brandTemplate: wizardData.brandTemplate,
|
||||||
customUrl: wizardData.customUrl,
|
customUrl: wizardData.customUrl,
|
||||||
streams: wizardData.streams,
|
streams: wizardData.streams,
|
||||||
|
restreamIds: wizardData.restreamIds,
|
||||||
};
|
};
|
||||||
|
|
||||||
onSave(configData);
|
onSave(configData);
|
||||||
@ -327,51 +322,6 @@ export default function Step3Validation({
|
|||||||
</div>
|
</div>
|
||||||
)}
|
)}
|
||||||
|
|
||||||
{result?.success && (
|
|
||||||
<div className="mb-3 flex items-center justify-between">
|
|
||||||
<div className="flex items-center gap-2">
|
|
||||||
<span className="text-sm">
|
|
||||||
{t("cameraWizard.step3.ffmpegModule")}
|
|
||||||
</span>
|
|
||||||
<Popover>
|
|
||||||
<PopoverTrigger asChild>
|
|
||||||
<Button
|
|
||||||
variant="ghost"
|
|
||||||
size="sm"
|
|
||||||
className="h-4 w-4 p-0"
|
|
||||||
>
|
|
||||||
<LuInfo className="size-3" />
|
|
||||||
</Button>
|
|
||||||
</PopoverTrigger>
|
|
||||||
<PopoverContent className="pointer-events-auto w-80 text-xs">
|
|
||||||
<div className="space-y-2">
|
|
||||||
<div className="font-medium">
|
|
||||||
{t("cameraWizard.step3.ffmpegModule")}
|
|
||||||
</div>
|
|
||||||
<div className="text-muted-foreground">
|
|
||||||
{t(
|
|
||||||
"cameraWizard.step3.ffmpegModuleDescription",
|
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</PopoverContent>
|
|
||||||
</Popover>
|
|
||||||
</div>
|
|
||||||
<Switch
|
|
||||||
checked={stream.useFfmpeg || false}
|
|
||||||
onCheckedChange={(checked) => {
|
|
||||||
onUpdate({
|
|
||||||
streams: streams.map((s) =>
|
|
||||||
s.id === stream.id
|
|
||||||
? { ...s, useFfmpeg: checked }
|
|
||||||
: s,
|
|
||||||
),
|
|
||||||
});
|
|
||||||
}}
|
|
||||||
/>
|
|
||||||
</div>
|
|
||||||
)}
|
|
||||||
|
|
||||||
<div className="mb-2 flex flex-col justify-between gap-1 md:flex-row md:items-center">
|
<div className="mb-2 flex flex-col justify-between gap-1 md:flex-row md:items-center">
|
||||||
<span className="break-all text-sm text-muted-foreground">
|
<span className="break-all text-sm text-muted-foreground">
|
||||||
{stream.url}
|
{stream.url}
|
||||||
@ -541,7 +491,8 @@ function StreamIssues({
|
|||||||
|
|
||||||
// Restreaming check
|
// Restreaming check
|
||||||
if (stream.roles.includes("record")) {
|
if (stream.roles.includes("record")) {
|
||||||
if (stream.restream) {
|
const restreamIds = wizardData.restreamIds || [];
|
||||||
|
if (restreamIds.includes(stream.id)) {
|
||||||
result.push({
|
result.push({
|
||||||
type: "warning",
|
type: "warning",
|
||||||
message: t("cameraWizard.step3.issues.restreamingWarning"),
|
message: t("cameraWizard.step3.issues.restreamingWarning"),
|
||||||
@ -709,10 +660,9 @@ function StreamPreview({ stream, onBandwidthUpdate }: StreamPreviewProps) {
|
|||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
// Register stream with go2rtc
|
// Register stream with go2rtc
|
||||||
const streamUrl = stream.useFfmpeg ? `ffmpeg:${stream.url}` : stream.url;
|
|
||||||
axios
|
axios
|
||||||
.put(`go2rtc/streams/${streamId}`, null, {
|
.put(`go2rtc/streams/${streamId}`, null, {
|
||||||
params: { src: streamUrl },
|
params: { src: stream.url },
|
||||||
})
|
})
|
||||||
.then(() => {
|
.then(() => {
|
||||||
// Add small delay to allow go2rtc api to run and initialize the stream
|
// Add small delay to allow go2rtc api to run and initialize the stream
|
||||||
@ -730,7 +680,7 @@ function StreamPreview({ stream, onBandwidthUpdate }: StreamPreviewProps) {
|
|||||||
// do nothing on cleanup errors - go2rtc won't consume the streams
|
// do nothing on cleanup errors - go2rtc won't consume the streams
|
||||||
});
|
});
|
||||||
};
|
};
|
||||||
}, [stream.url, stream.useFfmpeg, streamId]);
|
}, [stream.url, streamId]);
|
||||||
|
|
||||||
const resolution = stream.testResult?.resolution;
|
const resolution = stream.testResult?.resolution;
|
||||||
let aspectRatio = "16/9";
|
let aspectRatio = "16/9";
|
||||||
|
|||||||
@ -31,7 +31,6 @@ import { Link } from "react-router-dom";
|
|||||||
import { Switch } from "@/components/ui/switch";
|
import { Switch } from "@/components/ui/switch";
|
||||||
import { usePersistence } from "@/hooks/use-persistence";
|
import { usePersistence } from "@/hooks/use-persistence";
|
||||||
import { isDesktop } from "react-device-detect";
|
import { isDesktop } from "react-device-detect";
|
||||||
import { resolveZoneName } from "@/hooks/use-zone-friendly-name";
|
|
||||||
|
|
||||||
type DetailStreamProps = {
|
type DetailStreamProps = {
|
||||||
reviewItems?: ReviewSegment[];
|
reviewItems?: ReviewSegment[];
|
||||||
@ -368,11 +367,7 @@ function ReviewGroup({
|
|||||||
return (
|
return (
|
||||||
<div
|
<div
|
||||||
data-review-id={id}
|
data-review-id={id}
|
||||||
className={`mx-1 cursor-pointer rounded-lg bg-secondary px-0 py-3 outline outline-[2px] -outline-offset-[1.8px] ${
|
className="cursor-pointer rounded-lg bg-secondary py-3"
|
||||||
isActive
|
|
||||||
? "shadow-selected outline-selected"
|
|
||||||
: "outline-transparent duration-500"
|
|
||||||
}`}
|
|
||||||
>
|
>
|
||||||
<div
|
<div
|
||||||
className={cn(
|
className={cn(
|
||||||
@ -387,10 +382,10 @@ function ReviewGroup({
|
|||||||
<div className="ml-4 mr-2 mt-1.5 flex flex-row items-start">
|
<div className="ml-4 mr-2 mt-1.5 flex flex-row items-start">
|
||||||
<LuCircle
|
<LuCircle
|
||||||
className={cn(
|
className={cn(
|
||||||
"size-3 duration-500",
|
"size-3",
|
||||||
review.severity == "alert"
|
isActive
|
||||||
? "fill-severity_alert text-severity_alert"
|
? "fill-selected text-selected"
|
||||||
: "fill-severity_detection text-severity_detection",
|
: "fill-muted duration-500 dark:fill-secondary-highlight dark:text-secondary-highlight",
|
||||||
)}
|
)}
|
||||||
/>
|
/>
|
||||||
</div>
|
</div>
|
||||||
@ -459,7 +454,6 @@ function ReviewGroup({
|
|||||||
<EventList
|
<EventList
|
||||||
key={event.id}
|
key={event.id}
|
||||||
event={event}
|
event={event}
|
||||||
review={review}
|
|
||||||
effectiveTime={effectiveTime}
|
effectiveTime={effectiveTime}
|
||||||
annotationOffset={annotationOffset}
|
annotationOffset={annotationOffset}
|
||||||
onSeek={onSeek}
|
onSeek={onSeek}
|
||||||
@ -494,7 +488,6 @@ function ReviewGroup({
|
|||||||
|
|
||||||
type EventListProps = {
|
type EventListProps = {
|
||||||
event: Event;
|
event: Event;
|
||||||
review: ReviewSegment;
|
|
||||||
effectiveTime?: number;
|
effectiveTime?: number;
|
||||||
annotationOffset: number;
|
annotationOffset: number;
|
||||||
onSeek: (ts: number, play?: boolean) => void;
|
onSeek: (ts: number, play?: boolean) => void;
|
||||||
@ -502,7 +495,6 @@ type EventListProps = {
|
|||||||
};
|
};
|
||||||
function EventList({
|
function EventList({
|
||||||
event,
|
event,
|
||||||
review,
|
|
||||||
effectiveTime,
|
effectiveTime,
|
||||||
annotationOffset,
|
annotationOffset,
|
||||||
onSeek,
|
onSeek,
|
||||||
@ -621,7 +613,6 @@ function EventList({
|
|||||||
|
|
||||||
<div className="mt-2">
|
<div className="mt-2">
|
||||||
<ObjectTimeline
|
<ObjectTimeline
|
||||||
review={review}
|
|
||||||
eventId={event.id}
|
eventId={event.id}
|
||||||
onSeek={handleTimelineClick}
|
onSeek={handleTimelineClick}
|
||||||
effectiveTime={effectiveTime}
|
effectiveTime={effectiveTime}
|
||||||
@@ -655,16 +646,6 @@ function LifecycleItem({
   const { t } = useTranslation("views/events");
   const { data: config } = useSWR<FrigateConfig>("config");
 
-  item = {
-    ...item,
-    data: {
-      ...item.data,
-      zones_friendly_names: item?.data?.zones?.map((zone) => {
-        return resolveZoneName(config, zone);
-      }),
-    },
-  };
-
   const aspectRatio = useMemo(() => {
     if (!config || !item?.camera) {
       return 16 / 9;
@@ -780,7 +761,6 @@ function LifecycleItem({
 
 // Fetch and render timeline entries for a single event id on demand.
 function ObjectTimeline({
-  review,
   eventId,
   onSeek,
   effectiveTime,
@@ -788,7 +768,6 @@ function ObjectTimeline({
   startTime,
   endTime,
 }: {
-  review: ReviewSegment;
   eventId: string;
   onSeek: (ts: number, play?: boolean) => void;
   effectiveTime?: number;
@@ -797,27 +776,13 @@ function ObjectTimeline({
   endTime?: number;
 }) {
   const { t } = useTranslation("views/events");
-  const { data: fullTimeline, isValidating } = useSWR<
-    TrackingDetailsSequence[]
-  >([
+  const { data: timeline, isValidating } = useSWR<TrackingDetailsSequence[]>([
     "timeline",
     {
       source_id: eventId,
     },
   ]);
 
-  const timeline = useMemo(() => {
-    if (!fullTimeline) {
-      return fullTimeline;
-    }
-
-    return fullTimeline.filter(
-      (t) =>
-        t.timestamp >= review.start_time &&
-        (review.end_time == undefined || t.timestamp <= review.end_time),
-    );
-  }, [fullTimeline, review]);
-
   if (isValidating && (!timeline || timeline.length === 0)) {
     return <ActivityIndicator className="ml-2 size-3" />;
   }
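Editor's note: the left-hand side of the hunk above narrows the fetched timeline to the entries that fall inside the current review segment before rendering. The snippet below is a minimal standalone sketch of that windowing pattern only; the simplified TimelineEntry and ReviewWindow types are illustrative stand-ins for the app's TrackingDetailsSequence and ReviewSegment, not code from either commit.

// Sketch: keep only timeline entries inside a review window.
type TimelineEntry = { timestamp: number };
type ReviewWindow = { start_time: number; end_time?: number };

function filterToReviewWindow(
  entries: TimelineEntry[] | undefined,
  review: ReviewWindow,
): TimelineEntry[] | undefined {
  if (!entries) {
    // Data not loaded yet; pass the undefined through unchanged.
    return entries;
  }
  // An open-ended review (no end_time) keeps everything after start_time.
  return entries.filter(
    (entry) =>
      entry.timestamp >= review.start_time &&
      (review.end_time === undefined || entry.timestamp <= review.end_time),
  );
}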
@@ -101,7 +101,7 @@ export default function Step1NameAndType({
 
   const form = useForm<z.infer<typeof formSchema>>({
     resolver: zodResolver(formSchema),
-    mode: "onBlur",
+    mode: "onChange",
     defaultValues: {
       enabled: true,
       name: initialData?.name ?? trigger?.name ?? "",
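Editor's note: this hunk only flips react-hook-form's validation mode between "onBlur" (validate when a field loses focus) and "onChange" (validate on every keystroke). A minimal hedged sketch of what that option controls follows; the useNameForm hook and FormValues type are hypothetical names, not from either commit.

// Sketch: react-hook-form's `mode` decides when validation first runs.
import { useForm } from "react-hook-form";

type FormValues = { name: string };

function useNameForm() {
  return useForm<FormValues>({
    // "onChange" surfaces errors immediately and keeps formState.isValid
    // up to date as the user types; "onBlur" waits until focus leaves.
    mode: "onChange",
    defaultValues: { name: "" },
  });
}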
@@ -845,7 +845,6 @@ function FaceAttemptGroup({
           selectedItems={selectedFaces}
           i18nLibrary="views/faceLibrary"
           objectType="person"
-          noClassificationLabel="details.unknown"
           onClick={(data) => {
             if (data) {
               onClickFaces([data.filename], true);
@@ -157,11 +157,9 @@ function MobileMenuItem({
   const { t } = useTranslation(["views/settings"]);
 
   return (
-    <div
-      className={cn(
-        "inline-flex h-10 w-full cursor-pointer items-center justify-between whitespace-nowrap rounded-md px-4 py-2 pr-2 text-sm font-medium text-primary-variant disabled:pointer-events-none disabled:opacity-50",
-        className,
-      )}
+    <Button
+      variant="ghost"
+      className={cn("w-full justify-between pr-2", className)}
       onClick={() => {
         onSelect(item.key);
         onClose?.();
@@ -169,7 +167,7 @@ function MobileMenuItem({
     >
       <div className="smart-capitalize">{t("menu." + item.key)}</div>
       <LuChevronRight className="size-4" />
-    </div>
+    </Button>
   );
 }
 
@@ -275,9 +273,6 @@ export default function Settings() {
       } else {
         setPageToggle(page as SettingsType);
       }
-      if (isMobile) {
-        setContentMobileOpen(true);
-      }
     }
     // don't clear url params if we're creating a new object mask
     return !(searchParams.has("object_mask") || searchParams.has("event_id"));
@@ -287,9 +282,6 @@ export default function Settings() {
       const cameraNames = cameras.map((c) => c.name);
       if (cameraNames.includes(camera)) {
         setSelectedCamera(camera);
-        if (isMobile) {
-          setContentMobileOpen(true);
-        }
       }
     // don't clear url params if we're creating a new object mask or trigger
     return !(searchParams.has("object_mask") || searchParams.has("event_id"));
@@ -85,8 +85,6 @@ export type StreamConfig = {
   quality?: string;
   testResult?: TestResult;
   userTested?: boolean;
-  useFfmpeg?: boolean;
-  restream?: boolean;
 };
 
 export type TestResult = {
@@ -107,6 +105,7 @@ export type WizardFormData = {
   brandTemplate?: CameraBrand;
   customUrl?: string;
   streams?: StreamConfig[];
+  restreamIds?: string[];
 };
 
 // API Response Types
@@ -147,7 +146,6 @@ export type CameraConfigData = {
     inputs: {
       path: string;
       roles: string[];
-      input_args?: string;
     }[];
   };
   live?: {
@@ -307,7 +307,6 @@ export type CustomClassificationModelConfig = {
   threshold: number;
   object_config?: {
     objects: string[];
-    classification_type: string;
   };
   state_config?: {
     cameras: {
@@ -42,9 +42,7 @@ export function getLifecycleItemDescription(
       return t("trackingDetails.lifecycleItemDesc.entered_zone", {
         ns: "views/explore",
         label,
-        zones: formatZonesList(
-          lifecycleItem.data.zones_friendly_names ?? lifecycleItem.data.zones,
-        ),
+        zones: formatZonesList(lifecycleItem.data.zones),
       });
     case "active":
       return t("trackingDetails.lifecycleItemDesc.active", {
@@ -43,5 +43,5 @@ export function generateFixedHash(name: string, prefix: string = "id"): string {
  * @returns True if the name is valid, false otherwise
  */
 export function isValidId(name: string): boolean {
-  return /^[a-zA-Z0-9_-]+$/.test(name) && !/^\d+$/.test(name);
+  return /^[a-zA-Z0-9_-]+$/.test(name);
 }
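Editor's note: the left-hand version of isValidId additionally rejects names made up only of digits. A quick sketch of the behavioral difference follows; the sample inputs are hypothetical and not taken from either commit.

// Sketch: same character whitelist, but the left-hand variant also
// refuses purely numeric ids.
const idPattern = /^[a-zA-Z0-9_-]+$/;
const digitsOnly = /^\d+$/;

const leftIsValidId = (name: string) =>
  idPattern.test(name) && !digitsOnly.test(name); // "cam_1" -> true, "1234" -> false
const rightIsValidId = (name: string) =>
  idPattern.test(name); // "cam_1" -> true, "1234" -> true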
@@ -1,6 +1,5 @@
 import { baseUrl } from "@/api/baseUrl";
 import ClassificationModelWizardDialog from "@/components/classification/ClassificationModelWizardDialog";
-import ClassificationModelEditDialog from "@/components/classification/ClassificationModelEditDialog";
 import ActivityIndicator from "@/components/indicators/activity-indicator";
 import { ImageShadowOverlay } from "@/components/overlay/ImageShadowOverlay";
 import { Button, buttonVariants } from "@/components/ui/button";
@@ -11,17 +10,18 @@ import {
   CustomClassificationModelConfig,
   FrigateConfig,
 } from "@/types/frigateConfig";
-import { useCallback, useEffect, useMemo, useState } from "react";
+import { useCallback, useEffect, useMemo, useRef, useState } from "react";
 import { useTranslation } from "react-i18next";
 import { FaFolderPlus } from "react-icons/fa";
 import { MdModelTraining } from "react-icons/md";
-import { LuPencil, LuTrash2 } from "react-icons/lu";
+import { LuTrash2 } from "react-icons/lu";
 import { FiMoreVertical } from "react-icons/fi";
 import useSWR from "swr";
 import Heading from "@/components/ui/heading";
 import { useOverlayState } from "@/hooks/use-overlay-state";
 import axios from "axios";
 import { toast } from "sonner";
+import useKeyboardListener from "@/hooks/use-keyboard-listener";
 import {
   DropdownMenu,
   DropdownMenuContent,
@@ -164,7 +164,6 @@ export default function ModelSelectionView({
             key={config.name}
             config={config}
             onClick={() => onClick(config)}
-            onUpdate={() => refreshConfig()}
             onDelete={() => refreshConfig()}
           />
         ))}
@@ -203,10 +202,9 @@ function NoModelsView({
 type ModelCardProps = {
   config: CustomClassificationModelConfig;
   onClick: () => void;
-  onUpdate: () => void;
   onDelete: () => void;
 };
-function ModelCard({ config, onClick, onUpdate, onDelete }: ModelCardProps) {
+function ModelCard({ config, onClick, onDelete }: ModelCardProps) {
   const { t } = useTranslation(["views/classificationModel"]);
 
   const { data: dataset } = useSWR<{
@@ -214,50 +212,42 @@ function ModelCard({ config, onClick, onUpdate, onDelete }: ModelCardProps) {
   }>(`classification/${config.name}/dataset`, { revalidateOnFocus: false });
 
   const [deleteDialogOpen, setDeleteDialogOpen] = useState(false);
-  const [editDialogOpen, setEditDialogOpen] = useState(false);
+  const bypassDialogRef = useRef(false);
 
+  useKeyboardListener(["Shift"], (_, modifiers) => {
+    bypassDialogRef.current = modifiers.shift;
+    return false;
+  });
+
   const handleDelete = useCallback(async () => {
-    try {
-      await axios.delete(`classification/${config.name}`);
-      await axios.put("/config/set", {
-        requires_restart: 0,
-        update_topic: `config/classification/custom/${config.name}`,
-        config_data: {
-          classification: {
-            custom: {
-              [config.name]: "",
-            },
-          },
-        },
+    await axios
+      .delete(`classification/${config.name}`)
+      .then((resp) => {
+        if (resp.status == 200) {
+          toast.success(t("toast.success.deletedModel", { count: 1 }), {
+            position: "top-center",
+          });
+          onDelete();
+        }
+      })
+      .catch((error) => {
+        const errorMessage =
+          error.response?.data?.message ||
+          error.response?.data?.detail ||
+          "Unknown error";
+        toast.error(t("toast.error.deleteModelFailed", { errorMessage }), {
+          position: "top-center",
+        });
       });
-
-      toast.success(t("toast.success.deletedModel", { count: 1 }), {
-        position: "top-center",
-      });
-      onDelete();
-    } catch (err) {
-      const error = err as {
-        response?: { data?: { message?: string; detail?: string } };
-      };
-      const errorMessage =
-        error.response?.data?.message ||
-        error.response?.data?.detail ||
-        "Unknown error";
-      toast.error(t("toast.error.deleteModelFailed", { errorMessage }), {
-        position: "top-center",
-      });
-    }
   }, [config, onDelete, t]);
 
-  const handleDeleteClick = useCallback((e: React.MouseEvent) => {
-    e.stopPropagation();
-    setDeleteDialogOpen(true);
-  }, []);
-
-  const handleEditClick = useCallback((e: React.MouseEvent) => {
-    e.stopPropagation();
-    setEditDialogOpen(true);
-  }, []);
+  const handleDeleteClick = useCallback(() => {
+    if (bypassDialogRef.current) {
+      handleDelete();
+    } else {
+      setDeleteDialogOpen(true);
+    }
+  }, [handleDelete]);
 
   const coverImage = useMemo(() => {
     if (!dataset) {
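Editor's note: one side of the hunk above tracks whether Shift is held in a ref so the delete action can skip the confirmation dialog. The following is a minimal standalone sketch of that hold-a-modifier-to-bypass pattern; it uses plain window key events rather than the app's useKeyboardListener hook, and the useBypassableDelete name is illustrative only.

// Sketch: call doDelete() immediately while Shift is held, otherwise
// open a confirmation dialog first.
import { useCallback, useEffect, useRef } from "react";

function useBypassableDelete(doDelete: () => void, openConfirm: () => void) {
  const bypassRef = useRef(false);

  useEffect(() => {
    // A ref (not state) avoids re-rendering on every keydown/keyup.
    const update = (e: KeyboardEvent) => {
      bypassRef.current = e.shiftKey;
    };
    window.addEventListener("keydown", update);
    window.addEventListener("keyup", update);
    return () => {
      window.removeEventListener("keydown", update);
      window.removeEventListener("keyup", update);
    };
  }, []);

  return useCallback(() => {
    if (bypassRef.current) {
      doDelete();
    } else {
      openConfirm();
    }
  }, [doDelete, openConfirm]);
}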
@@ -279,13 +269,6 @@ function ModelCard({ config, onClick, onUpdate, onDelete }: ModelCardProps) {
 
   return (
     <>
-      <ClassificationModelEditDialog
-        open={editDialogOpen}
-        model={config}
-        onClose={() => setEditDialogOpen(false)}
-        onSuccess={() => onUpdate()}
-      />
-
       <AlertDialog
         open={deleteDialogOpen}
         onOpenChange={() => setDeleteDialogOpen(!deleteDialogOpen)}
@@ -321,7 +304,7 @@ function ModelCard({ config, onClick, onUpdate, onDelete }: ModelCardProps) {
             className="size-full"
             src={`${baseUrl}clips/${config.name}/dataset/${coverImage?.name}/${coverImage?.img}`}
           />
-          <ImageShadowOverlay lowerClassName="h-[30%] z-0" />
+          <ImageShadowOverlay />
           <div className="absolute bottom-2 left-3 text-lg text-white smart-capitalize">
             {config.name}
           </div>
@@ -332,17 +315,14 @@ function ModelCard({ config, onClick, onUpdate, onDelete }: ModelCardProps) {
               <FiMoreVertical className="size-5 text-white" />
             </BlurredIconButton>
           </DropdownMenuTrigger>
-          <DropdownMenuContent
-            align="end"
-            onClick={(e) => e.stopPropagation()}
-          >
-            <DropdownMenuItem onClick={handleEditClick}>
-              <LuPencil className="mr-2 size-4" />
-              <span>{t("button.edit", { ns: "common" })}</span>
-            </DropdownMenuItem>
+          <DropdownMenuContent align="end">
             <DropdownMenuItem onClick={handleDeleteClick}>
               <LuTrash2 className="mr-2 size-4" />
-              <span>{t("button.delete", { ns: "common" })}</span>
+              <span>
+                {bypassDialogRef.current
+                  ? t("button.deleteNow", { ns: "common" })
+                  : t("button.delete", { ns: "common" })}
+              </span>
             </DropdownMenuItem>
           </DropdownMenuContent>
         </DropdownMenu>
@@ -327,39 +327,31 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
       </AlertDialog>
 
       <div className="flex flex-row justify-between gap-2 p-2 align-middle">
-        {(isDesktop || !selectedImages?.length) && (
-          <div className="flex flex-row items-center justify-center gap-2">
-            <Button
-              className="flex items-center gap-2.5 rounded-lg"
-              aria-label={t("label.back", { ns: "common" })}
-              onClick={() => navigate(-1)}
-            >
-              <IoMdArrowRoundBack className="size-5 text-secondary-foreground" />
-              {isDesktop && (
-                <div className="text-primary">
-                  {t("button.back", { ns: "common" })}
-                </div>
-              )}
-            </Button>
-
-            <LibrarySelector
-              pageToggle={pageToggle}
-              dataset={dataset || {}}
-              trainImages={trainImages || []}
-              setPageToggle={setPageToggle}
-              onDelete={onDelete}
-              onRename={() => {}}
-            />
-          </div>
-        )}
-        {selectedImages?.length > 0 ? (
-          <div
-            className={cn(
-              "flex w-full items-center justify-end gap-2",
-              isMobileOnly && "justify-between",
-            )}
+        <div className="flex flex-row items-center justify-center gap-2">
+          <Button
+            className="flex items-center gap-2.5 rounded-lg"
+            aria-label={t("label.back", { ns: "common" })}
+            onClick={() => navigate(-1)}
           >
-            <div className="flex w-48 items-center justify-center text-sm text-muted-foreground">
+            <IoMdArrowRoundBack className="size-5 text-secondary-foreground" />
+            {isDesktop && (
+              <div className="text-primary">
+                {t("button.back", { ns: "common" })}
+              </div>
+            )}
+          </Button>
+          <LibrarySelector
+            pageToggle={pageToggle}
+            dataset={dataset || {}}
+            trainImages={trainImages || []}
+            setPageToggle={setPageToggle}
+            onDelete={onDelete}
+            onRename={() => {}}
+          />
+        </div>
+        {selectedImages?.length > 0 ? (
+          <div className="flex items-center justify-center gap-2">
+            <div className="mx-1 flex w-48 items-center justify-center text-sm text-muted-foreground">
               <div className="p-1">{`${selectedImages.length} selected`}</div>
               <div className="p-1">{"|"}</div>
               <div
@@ -969,7 +961,6 @@ function ObjectTrainGrid({
           selectedItems={selectedImages}
           i18nLibrary="views/classificationModel"
           objectType={model.object_config?.objects?.at(0) ?? "Object"}
-          noClassificationLabel="details.none"
           onClick={(data) => {
             if (data) {
               onClickImages([data.filename], true);
@@ -136,7 +136,7 @@ export default function EventView({
 
   const [selectedReviews, setSelectedReviews] = useState<ReviewSegment[]>([]);
   const onSelectReview = useCallback(
-    (review: ReviewSegment, ctrl: boolean, detail: boolean) => {
+    (review: ReviewSegment, ctrl: boolean) => {
       if (selectedReviews.length > 0 || ctrl) {
         const index = selectedReviews.findIndex((r) => r.id === review.id);
 
@@ -156,31 +156,17 @@ export default function EventView({
           setSelectedReviews(copy);
         }
       } else {
-        // If a specific date is selected in the calendar and it's after the event start,
-        // use the selected date instead of the event start time
-        const effectiveStartTime =
-          timeRange.after > review.start_time
-            ? timeRange.after
-            : review.start_time;
-
         onOpenRecording({
           camera: review.camera,
-          startTime: effectiveStartTime - REVIEW_PADDING,
+          startTime: review.start_time - REVIEW_PADDING,
           severity: review.severity,
-          timelineType: detail ? "detail" : undefined,
         });
 
         review.has_been_reviewed = true;
         markItemAsReviewed(review);
       }
     },
-    [
-      selectedReviews,
-      setSelectedReviews,
-      onOpenRecording,
-      markItemAsReviewed,
-      timeRange.after,
-    ],
+    [selectedReviews, setSelectedReviews, onOpenRecording, markItemAsReviewed],
   );
   const onSelectAllReviews = useCallback(() => {
     if (!currentReviewItems || currentReviewItems.length == 0) {
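Editor's note: the left-hand side of the hunk above clamps the recording start to the calendar-selected date when that date is later than the review's start time, then backs off by the review padding. The sketch below is equivalent arithmetic only; the REVIEW_PADDING value and the sample timestamps are illustrative, not taken from either commit.

// Sketch: choose an effective recording start for a review segment.
const REVIEW_PADDING = 4; // seconds; illustrative value

function recordingStart(reviewStart: number, selectedAfter: number): number {
  // Use the later of the review start and the calendar selection, then
  // start playback slightly before the activity.
  return Math.max(reviewStart, selectedAfter) - REVIEW_PADDING;
}

// recordingStart(1_700_000_100, 1_700_000_000) === 1_700_000_096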
@@ -416,6 +402,7 @@ export default function EventView({
           onSelectAllReviews={onSelectAllReviews}
           setSelectedReviews={setSelectedReviews}
           pullLatestData={pullLatestData}
+          onOpenRecording={onOpenRecording}
         />
       )}
       {severity == "significant_motion" && (
@@ -455,14 +442,11 @@ type DetectionReviewProps = {
   loading: boolean;
   markItemAsReviewed: (review: ReviewSegment) => void;
   markAllItemsAsReviewed: (currentItems: ReviewSegment[]) => void;
-  onSelectReview: (
-    review: ReviewSegment,
-    ctrl: boolean,
-    detail: boolean,
-  ) => void;
+  onSelectReview: (review: ReviewSegment, ctrl: boolean) => void;
   onSelectAllReviews: () => void;
   setSelectedReviews: (reviews: ReviewSegment[]) => void;
   pullLatestData: () => void;
+  onOpenRecording: (recordingInfo: RecordingStartingPoint) => void;
 };
 function DetectionReview({
   contentRef,
@@ -482,6 +466,7 @@ function DetectionReview({
   onSelectAllReviews,
   setSelectedReviews,
   pullLatestData,
+  onOpenRecording,
 }: DetectionReviewProps) {
   const { t } = useTranslation(["views/events"]);
 
@@ -773,7 +758,16 @@ function DetectionReview({
                 ctrl: boolean,
                 detail: boolean,
               ) => {
-                onSelectReview(review, ctrl, detail);
+                if (detail) {
+                  onOpenRecording({
+                    camera: review.camera,
+                    startTime: review.start_time - REVIEW_PADDING,
+                    severity: review.severity,
+                    timelineType: "detail",
+                  });
+                } else {
+                  onSelectReview(review, ctrl);
+                }
               }}
             />
           </div>
@@ -970,6 +970,7 @@ function Timeline({
         "relative overflow-hidden",
         isDesktop
           ? cn(
+              "no-scrollbar overflow-y-auto",
               timelineType == "timeline"
                 ? "w-[100px] flex-shrink-0"
                 : timelineType == "detail"
@@ -709,11 +709,11 @@ export default function CameraSettingsView({
             <div className="flex w-full flex-row items-center gap-2 pt-2 md:w-[25%]">
               <Button
                 className="flex flex-1"
-                aria-label={t("button.reset", { ns: "common" })}
+                aria-label={t("button.cancel", { ns: "common" })}
                 onClick={onCancel}
                 type="button"
               >
-                <Trans>button.reset</Trans>
+                <Trans>button.cancel</Trans>
               </Button>
               <Button
                 variant="select"