Compare commits

..

12 Commits

Author SHA1 Message Date
Josh Hawkins
7f9a782c94 add enabled field to camera profiles for enabling/disabling cameras 2026-03-06 15:47:17 -06:00
Josh Hawkins
1b0d78c40e consolidate 2026-03-05 13:28:07 -06:00
Josh Hawkins
81e39c3806 fix CameraLiveConfig JSON serialization error on profile activation
refactor _publish_updates to only publish ZMQ updates for
sections that actually changed, not all sections on affected cameras.
2026-03-05 13:08:54 -06:00
Josh Hawkins
bd3539fb31 formatting 2026-03-05 13:01:10 -06:00
Josh Hawkins
54f39ede4e add tests for invalid profile values and keys
Tests that Pydantic rejects: invalid field values (fps: "not_a_number"),
unknown section keys (ffmpeg in profile), invalid nested values, and
invalid profiles in full config parsing.
2026-03-05 12:59:43 -06:00
Josh Hawkins
e90754ddf6 wire ProfileManager into app startup and FastAPI
- Create ProfileManager after dispatcher init
- Restore persisted profile on startup
- Pass dispatcher and profile_manager to FastAPI app
2026-03-05 12:56:53 -06:00
Josh Hawkins
e2ea836761 add MQTT and dispatcher integration for profiles
- Subscribe to frigate/profile/set MQTT topic
- Publish profile/state and profiles/available on connect
- Add _on_profile_command handler to dispatcher
- Broadcast active profile state on WebSocket connect
2026-03-05 12:54:32 -06:00
Josh Hawkins
3641bb24eb add profile API endpoints (GET /profiles, GET/PUT /profile) 2026-03-05 12:52:17 -06:00
Josh Hawkins
2e6d83e94e add ProfileManager for profile activation and persistence
Handles snapshotting base configs, applying profile overrides via
deep_merge + apply_section_update, publishing ZMQ updates, and
persisting active profile to /config/.active_profile.
2026-03-05 12:51:22 -06:00
Josh Hawkins
0ce12a5185 add active_profile field to FrigateConfig
Runtime-only field excluded from YAML serialization, tracks which
profile is currently active.
2026-03-05 12:49:13 -06:00
Josh Hawkins
439d9607ec add profiles field to CameraConfig 2026-03-05 12:48:33 -06:00
Josh Hawkins
3610366744 add CameraProfileConfig model for named config overrides 2026-03-05 12:46:45 -06:00
112 changed files with 993 additions and 8857 deletions
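Taken together, these commits describe a per-camera profile system: named override blocks on each camera, an `enabled` flag for turning cameras on or off per profile, REST endpoints (`GET /profiles`, `GET/PUT /profile`), a `frigate/profile/set` MQTT topic, and persistence to `/config/.active_profile`. A minimal sketch of what such a config might look like, assuming the profile name is an arbitrary key and the override sections mirror `CameraProfileConfig` (the camera name, stream URL, and profile names below are hypothetical):

```yaml
cameras:
  front_door:                                    # hypothetical camera
    ffmpeg:
      inputs:
        - path: rtsp://192.168.0.10:554/stream   # hypothetical stream URL
          roles:
            - detect
            - record
    profiles:
      away:                                      # named override block
        enabled: true
        record:
          enabled: true
      home:
        enabled: false                           # disable this camera while "home" is active
```

Activating a profile would then amount to publishing its name to `frigate/profile/set` or calling `PUT /profile`; per the ProfileManager commit, base section configs are snapshotted, profile overrides are merged on top, and only the sections that actually changed are republished over ZMQ.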

4
.gitignore vendored
View File

@ -3,8 +3,6 @@ __pycache__
.mypy_cache
*.swp
debug
.claude/*
.mcp.json
.vscode/*
!.vscode/launch.json
config/*
@ -21,4 +19,4 @@ web/.env
core
!/web/**/*.ts
.idea/*
.ipynb_checkpoints
.ipynb_checkpoints

View File

@ -1,18 +1,18 @@
# Nvidia ONNX Runtime GPU Support
# NVidia TensorRT Support (amd64 only)
--extra-index-url 'https://pypi.nvidia.com'
cython==3.0.*; platform_machine == 'x86_64'
nvidia-cuda-cupti-cu12==12.9.79; platform_machine == 'x86_64'
nvidia-cublas-cu12==12.9.1.*; platform_machine == 'x86_64'
nvidia-cudnn-cu12==9.19.0.*; platform_machine == 'x86_64'
nvidia-cufft-cu12==11.4.1.*; platform_machine == 'x86_64'
nvidia-curand-cu12==10.3.10.*; platform_machine == 'x86_64'
nvidia-cuda-nvcc-cu12==12.9.86; platform_machine == 'x86_64'
nvidia-cuda-nvrtc-cu12==12.9.86; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu12==12.9.79; platform_machine == 'x86_64'
nvidia-cusolver-cu12==11.7.5.*; platform_machine == 'x86_64'
nvidia-cusparse-cu12==12.5.10.*; platform_machine == 'x86_64'
nvidia-nccl-cu12==2.29.7; platform_machine == 'x86_64'
nvidia-nvjitlink-cu12==12.9.86; platform_machine == 'x86_64'
nvidia_cuda_cupti_cu12==12.5.82; platform_machine == 'x86_64'
nvidia-cublas-cu12==12.5.3.*; platform_machine == 'x86_64'
nvidia-cudnn-cu12==9.3.0.*; platform_machine == 'x86_64'
nvidia-cufft-cu12==11.2.3.*; platform_machine == 'x86_64'
nvidia-curand-cu12==10.3.6.*; platform_machine == 'x86_64'
nvidia_cuda_nvcc_cu12==12.5.82; platform_machine == 'x86_64'
nvidia-cuda-nvrtc-cu12==12.5.82; platform_machine == 'x86_64'
nvidia_cuda_runtime_cu12==12.5.82; platform_machine == 'x86_64'
nvidia_cusolver_cu12==11.6.3.*; platform_machine == 'x86_64'
nvidia_cusparse_cu12==12.5.1.*; platform_machine == 'x86_64'
nvidia_nccl_cu12==2.23.4; platform_machine == 'x86_64'
nvidia_nvjitlink_cu12==12.5.82; platform_machine == 'x86_64'
onnx==1.16.*; platform_machine == 'x86_64'
onnxruntime-gpu==1.24.*; platform_machine == 'x86_64'
onnxruntime-gpu==1.22.*; platform_machine == 'x86_64'
protobuf==3.20.3; platform_machine == 'x86_64'

View File

@ -38,6 +38,7 @@ Remember that motion detection is just used to determine when object detection s
The threshold value dictates how much of a change in a pixel's luminance is required to be considered motion.
```yaml
# default threshold value
motion:
# Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below)
# Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
@ -52,6 +53,7 @@ Watching the motion boxes in the debug view, increase the threshold until you on
### Contour Area
```yaml
# default contour_area value
motion:
# Optional: Minimum size in pixels in the resized motion image that counts as motion (default: shown below)
# Increasing this value will prevent smaller areas of motion from being detected. Decreasing will
@ -79,49 +81,27 @@ However, if the preferred day settings do not work well at night it is recommend
## Tuning For Large Changes In Motion
### Lightning Threshold
```yaml
# default lightning_threshold:
motion:
# Optional: The percentage of the image used to detect lightning or
# other substantial changes where motion detection needs to
# recalibrate. (default: shown below)
# Increasing this value will make motion detection more likely
# to consider lightning or IR mode changes as valid motion.
# Decreasing this value will make motion detection more likely
# to ignore large amounts of motion such as a person
# approaching a doorbell camera.
# Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection
# needs to recalibrate. (default: shown below)
# Increasing this value will make motion detection more likely to consider lightning or ir mode changes as valid motion.
# Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching
# a doorbell camera.
lightning_threshold: 0.8
```
Large changes in motion like PTZ moves and camera switches between Color and IR mode should result in a pause in object detection. `lightning_threshold` defines the percentage of the image used to detect these substantial changes. Increasing this value makes motion detection more likely to treat large changes (like IR mode switches) as valid motion. Decreasing it makes motion detection more likely to ignore large amounts of motion, such as a person approaching a doorbell camera.
Note that `lightning_threshold` does **not** stop motion-based recordings from being saved — it only prevents additional motion analysis after the threshold is exceeded, reducing false positive object detections during high-motion periods (e.g. storms or PTZ sweeps) without interfering with recordings.
:::warning
Some cameras, like doorbell cameras, may have missed detections when someone walks directly in front of the camera and the `lightning_threshold` causes motion detection to recalibrate. In this case, it may be desirable to increase the `lightning_threshold` to ensure these objects are not missed.
Some cameras like doorbell cameras may have missed detections when someone walks directly in front of the camera and the lightning_threshold causes motion detection to be re-calibrated. In this case, it may be desirable to increase the `lightning_threshold` to ensure these objects are not missed.
:::
### Skip Motion On Large Scene Changes
:::note
```yaml
motion:
# Optional: Fraction of the frame that must change in a single update
# before Frigate will completely ignore any motion in that frame.
# Values range between 0.0 and 1.0, leave unset (null) to disable.
# Setting this to 0.7 would cause Frigate to **skip** reporting
# motion boxes when more than 70% of the image appears to change
# (e.g. during lightning storms, IR/color mode switches, or other
# sudden lighting events).
skip_motion_threshold: 0.7
```
This option is handy when you want to prevent large transient changes from triggering recordings or object detection. It differs from `lightning_threshold` because it completely suppresses motion instead of just forcing a recalibration.
:::warning
When the skip threshold is exceeded, **no motion is reported** for that frame, meaning **nothing is recorded** for that frame. That means you can miss something important, like a PTZ camera auto-tracking an object or activity while the camera is moving. If you prefer to guarantee that every frame is saved, leave this unset and accept occasional recordings containing scene noise — they typically only take up a few megabytes and are quick to scan in the timeline UI.
Lightning threshold does not stop motion based recordings from being saved.
:::
Large changes in motion like PTZ moves and camera switches between Color and IR mode should result in a pause in object detection. This is done via the `lightning_threshold` configuration. It is defined as the percentage of the image used to detect lightning or other substantial changes where motion detection needs to recalibrate. Increasing this value will make motion detection more likely to consider lightning or IR mode changes as valid motion. Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching a doorbell camera.

View File

@ -480,16 +480,12 @@ motion:
# Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
# The value should be between 1 and 255.
threshold: 30
# Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection needs
# to recalibrate and motion checks stop for that frame. Recordings are unaffected. (default: shown below)
# Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection
# needs to recalibrate. (default: shown below)
# Increasing this value will make motion detection more likely to consider lightning or ir mode changes as valid motion.
# Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching a doorbell camera.
# Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching
# a doorbell camera.
lightning_threshold: 0.8
# Optional: Fraction of the frame that must change in a single update before motion boxes are completely
# ignored. Values range between 0.0 and 1.0. When exceeded, no motion boxes are reported and **no motion
# recording** is created for that frame. Leave unset (null) to disable this feature. Use with care on PTZ
# cameras or other situations where you require guaranteed frame capture.
skip_motion_threshold: null
# Optional: Minimum size in pixels in the resized motion image that counts as motion (default: shown below)
# Increasing this value will prevent smaller areas of motion from being detected. Decreasing will
# make motion detection more sensitive to smaller moving objects.

View File

@ -76,40 +76,6 @@ Switching between V1 and V2 requires reindexing your embeddings. The embeddings
:::
### GenAI Provider
Frigate can use a GenAI provider for semantic search embeddings when that provider has the `embeddings` role. Currently, only **llama.cpp** supports multimodal embeddings (both text and images).
To use llama.cpp for semantic search:
1. Configure a GenAI provider in your config with `embeddings` in its `roles`.
2. Set `semantic_search.model` to the GenAI config key (e.g. `default`).
3. Start the llama.cpp server with `--embeddings` and `--mmproj` for image support:
```yaml
genai:
default:
provider: llamacpp
base_url: http://localhost:8080
model: your-model-name
roles:
- embeddings
- vision
- tools
semantic_search:
enabled: True
model: default
```
The llama.cpp server must be started with `--embeddings` to enable the embeddings API and must serve a multimodal embeddings model. See the [llama.cpp server documentation](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md) for details.
:::note
Switching between Jina models and a GenAI provider requires reindexing. Embeddings from different backends are incompatible.
:::
### GPU Acceleration
The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU hardware, when available. This depends on the Docker build that is used. You can also target a specific device in a multi-GPU installation.

View File

@ -159,8 +159,7 @@ Published when a license plate is recognized on a car object. See the [License P
"plate": "123ABC",
"score": 0.95,
"camera": "driveway_cam",
"timestamp": 1607123958.748393,
"plate_box": [917, 487, 1029, 529] // box coordinates of the detected license plate in the frame
"timestamp": 1607123958.748393
}
```

View File

@ -3,67 +3,17 @@ id: dummy-camera
title: Analyzing Object Detection
---
Frigate provides several tools for investigating object detection and tracking behavior: reviewing recorded detections through the UI, using the built-in Debug Replay feature, and manually setting up a dummy camera for advanced scenarios.
When investigating object detection or tracking problems, it can be helpful to replay an exported video as a temporary "dummy" camera. This lets you reproduce issues locally, iterate on configuration (detections, zones, enrichment settings), and capture logs and clips for analysis.
## Reviewing Detections in the UI
## When to use
Before setting up a replay, you can often diagnose detection issues by reviewing existing recordings directly in the Frigate UI.
- Replaying an exported clip to reproduce incorrect detections
- Testing configuration changes (model settings, trackers, filters) against a known clip
- Gathering deterministic logs and recordings for debugging or issue reports
### Detail View (History)
## Example Config
The **Detail Stream** view in History shows recorded video with detection overlays (bounding boxes, path points, and zone highlights) drawn on top. Select a review item to see its tracked objects and lifecycle events. Clicking a lifecycle event seeks the video to that point so you can see exactly what the detector saw.
### Tracking Details (Explore)
In **Explore**, clicking a thumbnail opens the **Tracking Details** pane, which shows the full lifecycle of a single tracked object: every detection, zone entry/exit, and attribute change. The video plays back with the bounding box overlaid, letting you step through the object's entire lifecycle.
### Annotation Offset
Both views support an **Annotation Offset** setting (`detect.annotation_offset` in your camera config) that shifts the detection overlay in time relative to the recorded video. This compensates for the timing drift between the `detect` and `record` pipelines.
These streams use fundamentally different clocks with different buffering and latency characteristics, so the detection data and the recorded video are never perfectly synchronized. The annotation offset shifts the overlay to visually align the bounding boxes with the objects in the recorded video.
#### Why the offset varies between clips
The base timing drift between detect and record is roughly constant for a given camera, so a single offset value works well on average. However, you may notice the alignment is not pixel-perfect in every clip. This is normal and caused by several factors:
- **Keyframe-constrained seeking**: When the browser seeks to a timestamp, it can only land on the nearest keyframe. Each recording segment has keyframes at different positions relative to the detection timestamps, so the same offset may land slightly early in one clip and slightly late in another.
- **Segment boundary trimming**: When a recording range starts mid-segment, the video is trimmed to the requested start point. This trim may not align with a keyframe, shifting the effective reference point.
- **Capture-time jitter**: Network buffering, camera buffer flushes, and ffmpeg's own buffering mean the system-clock timestamp and the corresponding recorded frame are not always offset by exactly the same amount.
The per-clip variation is typically quite low and is mostly an artifact of keyframe granularity rather than a change in the true drift. A "perfect" alignment would require per-frame, keyframe-aware offset compensation, which is not practical. Treat the annotation offset as a best-effort average for your camera.
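For reference, the offset is a per-camera setting under `detect`; a minimal sketch, assuming the value is in milliseconds as in current Frigate releases (the camera name and value are illustrative):

```yaml
cameras:
  back_yard:                 # hypothetical camera
    detect:
      # Shifts detection overlays in time relative to the recorded video.
      # Tune by eye in the Detail/Tracking views until boxes track objects.
      annotation_offset: 0
```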
## Debug Replay
Debug Replay lets you re-run Frigate's detection pipeline against a section of recorded video without manually configuring a dummy camera. It automatically extracts the recording, creates a temporary camera with the same detection settings as the original, and loops the clip through the pipeline so you can observe detections in real time.
### When to use
- Reproducing a detection or tracking issue from a specific time range
- Testing configuration changes (model settings, zones, filters, motion) against a known clip
- Gathering logs and debug overlays for a bug report
:::note
Only one replay session can be active at a time. If a session is already running, you will be prompted to navigate to it or stop it first.
:::
### Variables to consider
- The replay will not always produce identical results to the original run. Different frames may be selected on replay, which can change detections and tracking.
- Motion detection depends on the exact frames used; small frame shifts can change motion regions and therefore what gets passed to the detector.
- Object detection is not fully deterministic: models and post-processing can yield slightly different results across runs.
Treat the replay as a close approximation rather than an exact reproduction. Run multiple loops and examine the debug overlays and logs to understand the behavior.
## Manual Dummy Camera
For advanced scenarios — such as testing with a clip from a different source, debugging ffmpeg behavior, or running a clip through a completely custom configuration — you can set up a dummy camera manually.
### Example config
Place the clip you want to replay in a location accessible to Frigate (for example `/media/frigate/` or the repository `debug/` folder when developing). Then add a temporary camera to your `config/config.yml`:
Place the clip you want to replay in a location accessible to Frigate (for example `/media/frigate/` or the repository `debug/` folder when developing). Then add a temporary camera to your `config/config.yml` like this:
```yaml
cameras:
@ -82,10 +32,10 @@ cameras:
enabled: false
```
- `-re -stream_loop -1` tells ffmpeg to play the file in real time and loop indefinitely.
- `-fflags +genpts` generates presentation timestamps when they are missing in the file.
- `-re -stream_loop -1` tells `ffmpeg` to play the file in realtime and loop indefinitely, which is useful for long debugging sessions.
- `-fflags +genpts` helps generate presentation timestamps when they are missing in the file.
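Because the diff elides the middle of the example above, a hedged sketch of a complete dummy-camera entry is shown here (the clip path, camera name, and detect resolution are hypothetical; adjust them for your file):

```yaml
cameras:
  replay_camera:             # use a unique, easy-to-remove name
    ffmpeg:
      inputs:
        - path: /media/frigate/exported_clip.mp4   # hypothetical clip path
          input_args: -re -stream_loop -1 -fflags +genpts
          roles:
            - detect
    detect:
      width: 1920
      height: 1080
      fps: 5
```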
### Steps
## Steps
1. Export or copy the clip you want to replay to the Frigate host (e.g., `/media/frigate/` or `debug/clips/`). Depending on what you are looking to debug, it is often helpful to add some "pre-capture" time (where the tracked object is not yet visible) to the clip when exporting.
2. Add the temporary camera to `config/config.yml` (example above). Use a unique name such as `test` or `replay_camera` so it's easy to remove later.
@ -95,8 +45,16 @@ cameras:
5. Iterate on camera or enrichment settings (model, fps, zones, filters) and re-check the replay until the behavior is resolved.
6. Remove the temporary camera from your config after debugging to avoid spurious telemetry or recordings.
### Troubleshooting
## Variables to consider in object tracking
- **No video**: verify the file path is correct and accessible from the Frigate process/container.
- **FFmpeg errors**: check the log output and adjust `input_args` for your file format. You may also need to disable hardware acceleration (`hwaccel_args: ""`) for the dummy camera.
- **No detections**: confirm the camera `roles` include `detect` and that the model/detector configuration is enabled.
- The exported video will not always line up exactly with how it originally ran through Frigate (or even with the last loop). Different frames may be used on replay, which can change detections and tracking.
- Motion detection depends on the frames used; small frame shifts can change motion regions and therefore what gets passed to the detector.
- Object detection is not deterministic: models and post-processing can yield different results across runs, so you may not get identical detections or track IDs every time.
When debugging, treat the replay as a close approximation rather than a byte-for-byte replay. Capture multiple runs, enable recording if helpful, and examine logs and saved event clips to understand variability.
## Troubleshooting
- No video: verify the path is correct and accessible from the Frigate process/container.
- FFmpeg errors: check the log output for ffmpeg-specific flags and adjust `input_args` accordingly for your file/container. You may also need to disable hardware acceleration (`hwaccel_args: ""`) for the dummy camera.
- No detections: confirm the camera `roles` include `detect`, and model/detector configuration is enabled.

View File

@ -628,38 +628,23 @@ def config_set(request: Request, body: AppConfigSetBody):
request.app.frigate_config = config
request.app.genai_manager.update_config(config)
if request.app.stats_emitter is not None:
request.app.stats_emitter.config = config
if body.update_topic:
if body.update_topic.startswith("config/cameras/"):
_, _, camera, field = body.update_topic.split("/")
if camera == "*":
# Wildcard: fan out update to all cameras
enum_value = CameraConfigUpdateEnum[field]
for camera_name in config.cameras:
settings = config.get_nested_object(
f"config/cameras/{camera_name}/{field}"
)
request.app.config_publisher.publish_update(
CameraConfigUpdateTopic(enum_value, camera_name),
settings,
)
if field == "add":
settings = config.cameras[camera]
elif field == "remove":
settings = old_config.cameras[camera]
else:
if field == "add":
settings = config.cameras[camera]
elif field == "remove":
settings = old_config.cameras[camera]
else:
settings = config.get_nested_object(body.update_topic)
settings = config.get_nested_object(body.update_topic)
request.app.config_publisher.publish_update(
CameraConfigUpdateTopic(
CameraConfigUpdateEnum[field], camera
),
settings,
)
request.app.config_publisher.publish_update(
CameraConfigUpdateTopic(
CameraConfigUpdateEnum[field], camera
),
settings,
)
else:
# Generic handling for global config updates
settings = config.get_nested_object(body.update_topic)

View File

@ -32,12 +32,6 @@ from frigate.models import User
logger = logging.getLogger(__name__)
# In-memory cache to track which clients we've logged for an anonymous access event.
# Keyed by a hashed value combining remote address + user-agent. The value is
# an expiration timestamp (float).
FIRST_LOAD_TTL_SECONDS = 60 * 60 * 24 * 7 # 7 days
_first_load_seen: dict[str, float] = {}
def require_admin_by_default():
"""
@ -290,15 +284,6 @@ def get_remote_addr(request: Request):
return remote_addr or "127.0.0.1"
def _cleanup_first_load_seen() -> None:
"""Cleanup expired entries in the in-memory first-load cache."""
now = time.time()
# Build list for removal to avoid mutating dict during iteration
expired = [k for k, exp in _first_load_seen.items() if exp <= now]
for k in expired:
del _first_load_seen[k]
def get_jwt_secret() -> str:
jwt_secret = None
# check env var
@ -759,30 +744,10 @@ def profile(request: Request):
roles_dict = request.app.frigate_config.auth.roles
allowed_cameras = User.get_allowed_cameras(role, roles_dict, all_camera_names)
response = JSONResponse(
return JSONResponse(
content={"username": username, "role": role, "allowed_cameras": allowed_cameras}
)
if username == "anonymous":
try:
remote_addr = get_remote_addr(request)
except Exception:
remote_addr = (
request.client.host if hasattr(request, "client") else "unknown"
)
ua = request.headers.get("user-agent", "")
key_material = f"{remote_addr}|{ua}"
cache_key = hashlib.sha256(key_material.encode()).hexdigest()
_cleanup_first_load_seen()
now = time.time()
if cache_key not in _first_load_seen:
_first_load_seen[cache_key] = now + FIRST_LOAD_TTL_SECONDS
logger.info(f"Anonymous user access from {remote_addr} ua={ua[:200]}")
return response
@router.get(
"/logout",

View File

@ -1,6 +1,5 @@
"""Camera apis."""
import asyncio
import json
import logging
import re
@ -12,9 +11,7 @@ import httpx
import requests
from fastapi import APIRouter, Depends, Query, Request, Response
from fastapi.responses import JSONResponse
from filelock import FileLock, Timeout
from onvif import ONVIFCamera, ONVIFError
from ruamel.yaml import YAML
from zeep.exceptions import Fault, TransportError
from zeep.transports import AsyncTransport
@ -24,14 +21,8 @@ from frigate.api.auth import (
require_role,
)
from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig
from frigate.config.camera.updater import (
CameraConfigUpdateEnum,
CameraConfigUpdateTopic,
)
from frigate.config.config import FrigateConfig
from frigate.util.builtin import clean_camera_user_pass
from frigate.util.camera_cleanup import cleanup_camera_db, cleanup_camera_files
from frigate.util.config import find_config_file
from frigate.util.image import run_ffmpeg_snapshot
from frigate.util.services import ffprobe_stream
@ -1004,154 +995,3 @@ async def onvif_probe(
await onvif_camera.close()
except Exception as e:
logger.debug(f"Error closing ONVIF camera session: {e}")
@router.delete(
"/cameras/{camera_name}",
dependencies=[Depends(require_role(["admin"]))],
)
async def delete_camera(
request: Request,
camera_name: str,
delete_exports: bool = Query(default=False),
):
"""Delete a camera and all its associated data.
Removes the camera from config, stops processes, and cleans up
all database entries and media files.
Args:
camera_name: Name of the camera to delete
delete_exports: Whether to also delete exports for this camera
"""
frigate_config: FrigateConfig = request.app.frigate_config
if camera_name not in frigate_config.cameras:
return JSONResponse(
content={
"success": False,
"message": f"Camera {camera_name} not found",
},
status_code=404,
)
old_camera_config = frigate_config.cameras[camera_name]
config_file = find_config_file()
lock = FileLock(f"{config_file}.lock", timeout=5)
try:
with lock:
with open(config_file, "r") as f:
old_raw_config = f.read()
try:
yaml = YAML()
yaml.indent(mapping=2, sequence=4, offset=2)
with open(config_file, "r") as f:
data = yaml.load(f)
# Remove camera from config
if "cameras" in data and camera_name in data["cameras"]:
del data["cameras"][camera_name]
# Remove camera from auth roles
auth = data.get("auth", {})
if auth and "roles" in auth:
empty_roles = []
for role_name, cameras_list in auth["roles"].items():
if (
isinstance(cameras_list, list)
and camera_name in cameras_list
):
cameras_list.remove(camera_name)
# Custom roles can't be empty; mark for removal
if not cameras_list and role_name not in (
"admin",
"viewer",
):
empty_roles.append(role_name)
for role_name in empty_roles:
del auth["roles"][role_name]
with open(config_file, "w") as f:
yaml.dump(data, f)
with open(config_file, "r") as f:
new_raw_config = f.read()
try:
config = FrigateConfig.parse(new_raw_config)
except Exception:
with open(config_file, "w") as f:
f.write(old_raw_config)
logger.exception(
"Config error after removing camera %s",
camera_name,
)
return JSONResponse(
content={
"success": False,
"message": "Error parsing config after camera removal",
},
status_code=400,
)
except Exception as e:
logger.error(
"Error updating config to remove camera %s: %s", camera_name, e
)
return JSONResponse(
content={
"success": False,
"message": "Error updating config",
},
status_code=500,
)
# Update runtime config
request.app.frigate_config = config
request.app.genai_manager.update_config(config)
# Publish removal to stop ffmpeg processes and clean up runtime state
request.app.config_publisher.publish_update(
CameraConfigUpdateTopic(CameraConfigUpdateEnum.remove, camera_name),
old_camera_config,
)
except Timeout:
return JSONResponse(
content={
"success": False,
"message": "Another process is currently updating the config",
},
status_code=409,
)
# Clean up database entries
counts, export_paths = await asyncio.to_thread(
cleanup_camera_db, camera_name, delete_exports
)
# Clean up media files in background thread
await asyncio.to_thread(
cleanup_camera_files, camera_name, export_paths if delete_exports else None
)
# Best-effort go2rtc stream removal
try:
requests.delete(
"http://127.0.0.1:1984/api/streams",
params={"src": camera_name},
timeout=5,
)
except Exception:
logger.debug("Failed to remove go2rtc stream for %s", camera_name)
return JSONResponse(
content={
"success": True,
"message": f"Camera {camera_name} has been deleted",
"cleanup": counts,
},
status_code=200,
)

View File

@ -11,7 +11,6 @@ class Tags(Enum):
classification = "Classification"
logs = "Logs"
media = "Media"
motion_search = "Motion Search"
notifications = "Notifications"
preview = "Preview"
recordings = "Recordings"

View File

@ -22,7 +22,6 @@ from frigate.api import (
event,
export,
media,
motion_search,
notification,
preview,
record,
@ -138,7 +137,6 @@ def create_fastapi_app(
app.include_router(export.router)
app.include_router(event.router)
app.include_router(media.router)
app.include_router(motion_search.router)
app.include_router(record.router)
app.include_router(debug_replay.router)
# App Properties

View File

@ -24,7 +24,6 @@ from tzlocal import get_localzone_name
from frigate.api.auth import (
allow_any_authenticated,
require_camera_access,
require_role,
)
from frigate.api.defs.query.media_query_parameters import (
Extension,
@ -1006,23 +1005,6 @@ def grid_snapshot(
)
@router.delete(
"/{camera_name}/region_grid", dependencies=[Depends(require_role("admin"))]
)
def clear_region_grid(request: Request, camera_name: str):
"""Clear the region grid for a camera."""
if camera_name not in request.app.frigate_config.cameras:
return JSONResponse(
content={"success": False, "message": "Camera not found"},
status_code=404,
)
Regions.delete().where(Regions.camera == camera_name).execute()
return JSONResponse(
content={"success": True, "message": "Region grid cleared"},
)
@router.get(
"/events/{event_id}/snapshot-clean.webp",
dependencies=[Depends(require_camera_access)],
@ -1281,13 +1263,6 @@ def preview_gif(
else:
# need to generate from existing images
preview_dir = os.path.join(CACHE_DIR, "preview_frames")
if not os.path.isdir(preview_dir):
return JSONResponse(
content={"success": False, "message": "Preview not found"},
status_code=404,
)
file_start = f"preview_{camera_name}"
start_file = f"{file_start}-{start_ts}.{PREVIEW_FRAME_TYPE}"
end_file = f"{file_start}-{end_ts}.{PREVIEW_FRAME_TYPE}"
@ -1463,13 +1438,6 @@ def preview_mp4(
else:
# need to generate from existing images
preview_dir = os.path.join(CACHE_DIR, "preview_frames")
if not os.path.isdir(preview_dir):
return JSONResponse(
content={"success": False, "message": "Preview not found"},
status_code=404,
)
file_start = f"preview_{camera_name}"
start_file = f"{file_start}-{start_ts}.{PREVIEW_FRAME_TYPE}"
end_file = f"{file_start}-{end_ts}.{PREVIEW_FRAME_TYPE}"

View File

@ -1,292 +0,0 @@
"""Motion search API for detecting changes within a region of interest."""
import logging
from typing import Any, List, Optional
from fastapi import APIRouter, Depends, Request
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field
from frigate.api.auth import require_camera_access
from frigate.api.defs.tags import Tags
from frigate.jobs.motion_search import (
cancel_motion_search_job,
get_motion_search_job,
start_motion_search_job,
)
from frigate.types import JobStatusTypesEnum
logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.motion_search])
class MotionSearchRequest(BaseModel):
"""Request body for motion search."""
start_time: float = Field(description="Start timestamp for the search range")
end_time: float = Field(description="End timestamp for the search range")
polygon_points: List[List[float]] = Field(
description="List of [x, y] normalized coordinates (0-1) defining the ROI polygon"
)
threshold: int = Field(
default=30,
ge=1,
le=255,
description="Pixel difference threshold (1-255)",
)
min_area: float = Field(
default=5.0,
ge=0.1,
le=100.0,
description="Minimum change area as a percentage of the ROI",
)
frame_skip: int = Field(
default=5,
ge=1,
le=30,
description="Process every Nth frame (1=all frames, 5=every 5th frame)",
)
parallel: bool = Field(
default=False,
description="Enable parallel scanning across segments",
)
max_results: int = Field(
default=25,
ge=1,
le=200,
description="Maximum number of search results to return",
)
class MotionSearchResult(BaseModel):
"""A single search result with timestamp and change info."""
timestamp: float = Field(description="Timestamp where change was detected")
change_percentage: float = Field(description="Percentage of ROI area that changed")
class MotionSearchMetricsResponse(BaseModel):
"""Metrics collected during motion search execution."""
segments_scanned: int = 0
segments_processed: int = 0
metadata_inactive_segments: int = 0
heatmap_roi_skip_segments: int = 0
fallback_full_range_segments: int = 0
frames_decoded: int = 0
wall_time_seconds: float = 0.0
segments_with_errors: int = 0
class MotionSearchStartResponse(BaseModel):
"""Response when motion search job starts."""
success: bool
message: str
job_id: str
class MotionSearchStatusResponse(BaseModel):
"""Response containing job status and results."""
success: bool
message: str
status: str # "queued", "running", "success", "failed", or "cancelled"
results: Optional[List[MotionSearchResult]] = None
total_frames_processed: Optional[int] = None
error_message: Optional[str] = None
metrics: Optional[MotionSearchMetricsResponse] = None
@router.post(
"/{camera_name}/search/motion",
response_model=MotionSearchStartResponse,
dependencies=[Depends(require_camera_access)],
summary="Start motion search job",
description="""Starts an asynchronous search for significant motion changes within
a user-defined Region of Interest (ROI) over a specified time range. Returns a job_id
that can be used to poll for results.""",
)
async def start_motion_search(
request: Request,
camera_name: str,
body: MotionSearchRequest,
):
"""Start an async motion search job."""
config = request.app.frigate_config
if camera_name not in config.cameras:
return JSONResponse(
content={"success": False, "message": f"Camera {camera_name} not found"},
status_code=404,
)
# Validate polygon has at least 3 points
if len(body.polygon_points) < 3:
return JSONResponse(
content={
"success": False,
"message": "Polygon must have at least 3 points",
},
status_code=400,
)
# Validate time range
if body.start_time >= body.end_time:
return JSONResponse(
content={
"success": False,
"message": "Start time must be before end time",
},
status_code=400,
)
# Start the job using the jobs module
job_id = start_motion_search_job(
config=config,
camera_name=camera_name,
start_time=body.start_time,
end_time=body.end_time,
polygon_points=body.polygon_points,
threshold=body.threshold,
min_area=body.min_area,
frame_skip=body.frame_skip,
parallel=body.parallel,
max_results=body.max_results,
)
return JSONResponse(
content={
"success": True,
"message": "Search job started",
"job_id": job_id,
}
)
@router.get(
"/{camera_name}/search/motion/{job_id}",
response_model=MotionSearchStatusResponse,
dependencies=[Depends(require_camera_access)],
summary="Get motion search job status",
description="Returns the status and results (if complete) of a motion search job.",
)
async def get_motion_search_status_endpoint(
request: Request,
camera_name: str,
job_id: str,
):
"""Get the status of a motion search job."""
config = request.app.frigate_config
if camera_name not in config.cameras:
return JSONResponse(
content={"success": False, "message": f"Camera {camera_name} not found"},
status_code=404,
)
job = get_motion_search_job(job_id)
if not job:
return JSONResponse(
content={"success": False, "message": "Job not found"},
status_code=404,
)
api_status = job.status
# Build response content
response_content: dict[str, Any] = {
"success": api_status != JobStatusTypesEnum.failed,
"status": api_status,
}
if api_status == JobStatusTypesEnum.failed:
response_content["message"] = job.error_message or "Search failed"
response_content["error_message"] = job.error_message
elif api_status == JobStatusTypesEnum.cancelled:
response_content["message"] = "Search cancelled"
response_content["total_frames_processed"] = job.total_frames_processed
elif api_status == JobStatusTypesEnum.success:
response_content["message"] = "Search complete"
if job.results:
response_content["results"] = job.results.get("results", [])
response_content["total_frames_processed"] = job.results.get(
"total_frames_processed", job.total_frames_processed
)
else:
response_content["results"] = []
response_content["total_frames_processed"] = job.total_frames_processed
else:
response_content["message"] = "Job processing"
response_content["total_frames_processed"] = job.total_frames_processed
# Include partial results if available (streaming)
if job.results:
response_content["results"] = job.results.get("results", [])
response_content["total_frames_processed"] = job.results.get(
"total_frames_processed", job.total_frames_processed
)
# Include metrics if available
if job.metrics:
response_content["metrics"] = job.metrics.to_dict()
return JSONResponse(content=response_content)
@router.post(
"/{camera_name}/search/motion/{job_id}/cancel",
dependencies=[Depends(require_camera_access)],
summary="Cancel motion search job",
description="Cancels an active motion search job if it is still processing.",
)
async def cancel_motion_search_endpoint(
request: Request,
camera_name: str,
job_id: str,
):
"""Cancel an active motion search job."""
config = request.app.frigate_config
if camera_name not in config.cameras:
return JSONResponse(
content={"success": False, "message": f"Camera {camera_name} not found"},
status_code=404,
)
job = get_motion_search_job(job_id)
if not job:
return JSONResponse(
content={"success": False, "message": "Job not found"},
status_code=404,
)
# Check if already finished
api_status = job.status
if api_status not in (JobStatusTypesEnum.queued, JobStatusTypesEnum.running):
return JSONResponse(
content={
"success": True,
"message": "Job already finished",
"status": api_status,
}
)
# Request cancellation
cancelled = cancel_motion_search_job(job_id)
if cancelled:
return JSONResponse(
content={
"success": True,
"message": "Search cancelled",
"status": "cancelled",
}
)
return JSONResponse(
content={
"success": False,
"message": "Failed to cancel job",
},
status_code=500,
)

View File

@ -261,7 +261,6 @@ async def recordings(
Recordings.segment_size,
Recordings.motion,
Recordings.objects,
Recordings.motion_heatmap,
Recordings.duration,
)
.where(

View File

@ -52,7 +52,6 @@ from frigate.embeddings import EmbeddingProcess, EmbeddingsContext
from frigate.events.audio import AudioProcessor
from frigate.events.cleanup import EventCleanup
from frigate.events.maintainer import EventProcessor
from frigate.jobs.motion_search import stop_all_motion_search_jobs
from frigate.log import _stop_logging
from frigate.models import (
Event,
@ -617,9 +616,6 @@ class FrigateApp:
# used by the docker healthcheck
Path("/dev/shm/.frigate-is-stopping").touch()
# Cancel any running motion search jobs before setting stop_event
stop_all_motion_search_jobs()
self.stop_event.set()
# set an end_time on entries without an end_time before exiting

View File

@ -249,14 +249,6 @@ class CameraConfig(FrigateBaseModel):
def create_ffmpeg_cmds(self):
if "_ffmpeg_cmds" in self:
return
self._build_ffmpeg_cmds()
def recreate_ffmpeg_cmds(self):
"""Force regeneration of ffmpeg commands from current config."""
self._build_ffmpeg_cmds()
def _build_ffmpeg_cmds(self):
"""Build ffmpeg commands from the current ffmpeg config."""
ffmpeg_cmds = []
for ffmpeg_input in self.ffmpeg.inputs:
ffmpeg_cmd = self._get_ffmpeg_cmd(ffmpeg_input)

View File

@ -24,17 +24,10 @@ class MotionConfig(FrigateBaseModel):
lightning_threshold: float = Field(
default=0.8,
title="Lightning threshold",
description="Threshold to detect and ignore brief lighting spikes (lower is more sensitive, values between 0.3 and 1.0). This does not prevent motion detection entirely; it merely causes the detector to stop analyzing additional frames once the threshold is exceeded. Motion-based recordings are still created during these events.",
description="Threshold to detect and ignore brief lighting spikes (lower is more sensitive, values between 0.3 and 1.0).",
ge=0.3,
le=1.0,
)
skip_motion_threshold: Optional[float] = Field(
default=None,
title="Skip motion threshold",
description="If set to a value between 0.0 and 1.0, and more than this fraction of the image changes in a single frame, the detector will return no motion boxes and immediately recalibrate. This can save CPU and reduce false positives during lightning, storms, etc., but may miss real events such as a PTZ camera autotracking an object. The tradeoff is between dropping a few megabytes of recordings versus reviewing a couple short clips. Leave unset (None) to disable this feature.",
ge=0.0,
le=1.0,
)
improve_contrast: bool = Field(
default=True,
title="Improve contrast",

View File

@ -12,7 +12,6 @@ from .objects import ObjectConfig
from .record import RecordConfig
from .review import ReviewConfig
from .snapshots import SnapshotsConfig
from .zone import ZoneConfig
__all__ = ["CameraProfileConfig"]
@ -35,4 +34,3 @@ class CameraProfileConfig(FrigateBaseModel):
record: Optional[RecordConfig] = None
review: Optional[ReviewConfig] = None
snapshots: Optional[SnapshotsConfig] = None
zones: Optional[dict[str, ZoneConfig]] = None

View File

@ -17,7 +17,6 @@ class CameraConfigUpdateEnum(str, Enum):
birdseye = "birdseye"
detect = "detect"
enabled = "enabled"
ffmpeg = "ffmpeg"
motion = "motion" # includes motion and motion masks
notifications = "notifications"
objects = "objects"
@ -92,9 +91,6 @@ class CameraConfigUpdateSubscriber:
if update_type == CameraConfigUpdateEnum.audio:
config.audio = updated_config
elif update_type == CameraConfigUpdateEnum.ffmpeg:
config.ffmpeg = updated_config
config.recreate_ffmpeg_cmds()
elif update_type == CameraConfigUpdateEnum.audio_transcription:
config.audio_transcription = updated_config
elif update_type == CameraConfigUpdateEnum.birdseye:

View File

@ -1,5 +1,5 @@
from enum import Enum
from typing import Dict, List, Optional, Union
from typing import Dict, List, Optional
from pydantic import ConfigDict, Field
@ -173,10 +173,10 @@ class SemanticSearchConfig(FrigateBaseModel):
title="Reindex on startup",
description="Trigger a full reindex of historical tracked objects into the embeddings database.",
)
model: Optional[Union[SemanticSearchModelEnum, str]] = Field(
model: Optional[SemanticSearchModelEnum] = Field(
default=SemanticSearchModelEnum.jinav1,
title="Semantic search model or GenAI provider name",
description="The embeddings model to use for semantic search (for example 'jinav1'), or the name of a GenAI provider with the embeddings role.",
title="Semantic search model",
description="The embeddings model to use for semantic search (for example 'jinav1').",
)
model_size: str = Field(
default="small",

View File

@ -61,7 +61,6 @@ from .classification import (
FaceRecognitionConfig,
LicensePlateRecognitionConfig,
SemanticSearchConfig,
SemanticSearchModelEnum,
)
from .database import DatabaseConfig
from .env import EnvVars
@ -600,24 +599,6 @@ class FrigateConfig(FrigateBaseModel):
)
role_to_name[role] = name
# validate semantic_search.model when it is a GenAI provider name
if (
self.semantic_search.enabled
and isinstance(self.semantic_search.model, str)
and not isinstance(self.semantic_search.model, SemanticSearchModelEnum)
):
if self.semantic_search.model not in self.genai:
raise ValueError(
f"semantic_search.model '{self.semantic_search.model}' is not a "
"valid GenAI config key. Must match a key in genai config."
)
genai_cfg = self.genai[self.semantic_search.model]
if GenAIRoleEnum.embeddings not in genai_cfg.roles:
raise ValueError(
f"GenAI provider '{self.semantic_search.model}' must have "
"'embeddings' in its roles for semantic search."
)
# set default min_score for object attributes
for attribute in self.model.all_attributes:
if not self.objects.filters.get(attribute):

View File

@ -1,6 +1,5 @@
"""Profile manager for activating/deactivating named config profiles."""
import copy
import logging
from pathlib import Path
from typing import Optional
@ -10,7 +9,6 @@ from frigate.config.camera.updater import (
CameraConfigUpdatePublisher,
CameraConfigUpdateTopic,
)
from frigate.config.camera.zone import ZoneConfig
from frigate.const import CONFIG_DIR
from frigate.util.builtin import deep_merge
from frigate.util.config import apply_section_update
@ -46,15 +44,13 @@ class ProfileManager:
self.config_updater = config_updater
self._base_configs: dict[str, dict[str, dict]] = {}
self._base_enabled: dict[str, bool] = {}
self._base_zones: dict[str, dict[str, ZoneConfig]] = {}
self._snapshot_base_configs()
def _snapshot_base_configs(self) -> None:
"""Snapshot each camera's current section configs, enabled, and zones."""
"""Snapshot each camera's current section configs and enabled state."""
for cam_name, cam_config in self.config.cameras.items():
self._base_configs[cam_name] = {}
self._base_enabled[cam_name] = cam_config.enabled
self._base_zones[cam_name] = copy.deepcopy(cam_config.zones)
for section in PROFILE_SECTION_UPDATES:
section_config = getattr(cam_config, section, None)
if section_config is not None:
@ -109,12 +105,6 @@ class ProfileManager:
cam_config.enabled = base_enabled
changed.setdefault(cam_name, set()).add("enabled")
# Restore zones
base_zones = self._base_zones.get(cam_name)
if base_zones is not None and cam_config.zones != base_zones:
cam_config.zones = copy.deepcopy(base_zones)
changed.setdefault(cam_name, set()).add("zones")
# Restore section configs
base = self._base_configs.get(cam_name, {})
for section in PROFILE_SECTION_UPDATES:
@ -146,14 +136,6 @@ class ProfileManager:
cam_config.enabled = profile.enabled
changed.setdefault(cam_name, set()).add("enabled")
# Apply zones override — merge profile zones into base zones
if profile.zones is not None:
base_zones = self._base_zones.get(cam_name, {})
merged_zones = copy.deepcopy(base_zones)
merged_zones.update(profile.zones)
cam_config.zones = merged_zones
changed.setdefault(cam_name, set()).add("zones")
base = self._base_configs.get(cam_name, {})
for section in PROFILE_SECTION_UPDATES:
@ -193,15 +175,6 @@ class ProfileManager:
)
continue
if section == "zones":
self.config_updater.publish_update(
CameraConfigUpdateTopic(
CameraConfigUpdateEnum.zones, cam_name
),
cam_config.zones,
)
continue
update_enum = PROFILE_SECTION_UPDATES.get(section)
if update_enum is None:
continue

View File

@ -401,10 +401,35 @@ class LicensePlateProcessingMixin:
all_confidences.append(flat_confidences)
all_areas.append(combined_area)
# Step 3: Sort the combined plates
# Step 3: Filter and sort the combined plates
if all_license_plates:
filtered_data = []
for plate, conf_list, area in zip(
all_license_plates, all_confidences, all_areas
):
if len(plate) < self.lpr_config.min_plate_length:
logger.debug(
f"{camera}: Filtered out '{plate}' due to length ({len(plate)} < {self.lpr_config.min_plate_length})"
)
continue
if self.lpr_config.format:
try:
if not re.fullmatch(self.lpr_config.format, plate):
logger.debug(
f"{camera}: Filtered out '{plate}' due to format mismatch"
)
continue
except re.error:
# Skip format filtering if regex is invalid
logger.error(
f"{camera}: Invalid regex in LPR format configuration: {self.lpr_config.format}"
)
filtered_data.append((plate, conf_list, area))
sorted_data = sorted(
zip(all_license_plates, all_confidences, all_areas),
filtered_data,
key=lambda x: (x[2], len(x[0]), sum(x[1]) / len(x[1]) if x[1] else 0),
reverse=True,
)
@ -1225,8 +1250,6 @@ class LicensePlateProcessingMixin:
logger.debug(f"{camera}: License plate area below minimum threshold.")
return
plate_box = license_plate
license_plate_frame = rgb[
license_plate[1] : license_plate[3],
license_plate[0] : license_plate[2],
@ -1343,20 +1366,6 @@ class LicensePlateProcessingMixin:
logger.debug(f"{camera}: License plate is less than min_area")
return
# Scale back to original car coordinates and then to frame
plate_box_in_car = (
license_plate[0] // 2,
license_plate[1] // 2,
license_plate[2] // 2,
license_plate[3] // 2,
)
plate_box = (
left + plate_box_in_car[0],
top + plate_box_in_car[1],
left + plate_box_in_car[2],
top + plate_box_in_car[3],
)
license_plate_frame = car[
license_plate[1] : license_plate[3],
license_plate[0] : license_plate[2],
@ -1420,8 +1429,6 @@ class LicensePlateProcessingMixin:
0, [license_plate_frame.shape[1], license_plate_frame.shape[0]] * 2
)
plate_box = tuple(int(x) for x in expanded_box)
# Crop using the expanded box
license_plate_frame = license_plate_frame[
int(expanded_box[1]) : int(expanded_box[3]),
@ -1550,27 +1557,6 @@ class LicensePlateProcessingMixin:
f"{camera}: Clustering changed top plate '{top_plate}' (conf: {avg_confidence:.3f}) to rep '{rep_plate}' (conf: {rep_conf:.3f})"
)
# Apply length and format filters to the clustered representative
# rather than individual OCR readings, so noisy variants still
# contribute to clustering even when they don't pass on their own.
if len(rep_plate) < self.lpr_config.min_plate_length:
logger.debug(
f"{camera}: Filtered out clustered plate '{rep_plate}' due to length ({len(rep_plate)} < {self.lpr_config.min_plate_length})"
)
return
if self.lpr_config.format:
try:
if not re.fullmatch(self.lpr_config.format, rep_plate):
logger.debug(
f"{camera}: Filtered out clustered plate '{rep_plate}' due to format mismatch"
)
return
except re.error:
logger.error(
f"{camera}: Invalid regex in LPR format configuration: {self.lpr_config.format}"
)
# Update stored rep
self.detected_license_plates[id].update(
{
@ -1629,7 +1615,6 @@ class LicensePlateProcessingMixin:
"id": id,
"camera": camera,
"timestamp": start,
"plate_box": plate_box,
}
),
)

View File

@ -50,16 +50,3 @@ class PostProcessorApi(ABC):
None if request was not handled, otherwise return response.
"""
pass
def update_config(self, topic: str, payload: Any) -> None:
"""Handle a config change notification.
Called for every config update published under ``config/``.
Processors should override this to check the topic and act only
on changes relevant to them. Default is a no-op.
Args:
topic: The config topic that changed.
payload: The updated configuration object.
"""
pass

View File

@ -47,16 +47,6 @@ class LicensePlatePostProcessor(LicensePlateProcessingMixin, PostProcessorApi):
self.sub_label_publisher = sub_label_publisher
super().__init__(config, metrics, model_runner)
CONFIG_UPDATE_TOPIC = "config/lpr"
def update_config(self, topic: str, payload: Any) -> None:
"""Update LPR config at runtime."""
if topic != self.CONFIG_UPDATE_TOPIC:
return
self.lpr_config = payload
logger.debug("LPR post-processor config updated dynamically")
def process_data(
self, data: dict[str, Any], data_type: PostProcessDataEnum
) -> None:

View File

@ -61,16 +61,3 @@ class RealTimeProcessorApi(ABC):
None.
"""
pass
def update_config(self, topic: str, payload: Any) -> None:
"""Handle a config change notification.
Called for every config update published under ``config/``.
Processors should override this to check the topic and act only
on changes relevant to them. Default is a no-op.
Args:
topic: The config topic that changed.
payload: The updated configuration object.
"""
pass

View File

@ -169,16 +169,6 @@ class BirdRealTimeProcessor(RealTimeProcessorApi):
)
self.detected_birds[obj_data["id"]] = score
CONFIG_UPDATE_TOPIC = "config/classification"
def update_config(self, topic: str, payload: Any) -> None:
"""Update bird classification config at runtime."""
if topic != self.CONFIG_UPDATE_TOPIC:
return
self.config.classification = payload
logger.debug("Bird classification config updated dynamically")
def handle_request(self, topic, request_data):
return None

View File

@ -95,23 +95,6 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
self.recognizer.build()
CONFIG_UPDATE_TOPIC = "config/face_recognition"
def update_config(self, topic: str, payload: Any) -> None:
"""Update face recognition config at runtime."""
if topic != self.CONFIG_UPDATE_TOPIC:
return
previous_min_area = self.config.face_recognition.min_area
self.config.face_recognition = payload
self.face_config = payload
for camera_config in self.config.cameras.values():
if camera_config.face_recognition.min_area == previous_min_area:
camera_config.face_recognition.min_area = payload.min_area
logger.debug("Face recognition config updated dynamically")
def __download_models(self, path: str) -> None:
try:
file_name = os.path.basename(path)

View File

@ -40,23 +40,6 @@ class LicensePlateRealTimeProcessor(LicensePlateProcessingMixin, RealTimeProcess
self.camera_current_cars: dict[str, list[str]] = {}
super().__init__(config, metrics)
CONFIG_UPDATE_TOPIC = "config/lpr"
def update_config(self, topic: str, payload: Any) -> None:
"""Update LPR config at runtime."""
if topic != self.CONFIG_UPDATE_TOPIC:
return
previous_min_area = self.config.lpr.min_area
self.config.lpr = payload
self.lpr_config = payload
for camera_config in self.config.cameras.values():
if camera_config.lpr.min_area == previous_min_area:
camera_config.lpr.min_area = payload.min_area
logger.debug("LPR config updated dynamically")
def process_frame(
self,
obj_data: dict[str, Any],

View File

@ -21,8 +21,7 @@ from frigate.const import (
REPLAY_DIR,
THUMB_DIR,
)
from frigate.models import Recordings
from frigate.util.camera_cleanup import cleanup_camera_db, cleanup_camera_files
from frigate.models import Event, Recordings, ReviewSegment, Timeline
from frigate.util.config import find_config_file
logger = logging.getLogger(__name__)
@ -358,13 +357,43 @@ class DebugReplayManager:
def _cleanup_db(self, camera_name: str) -> None:
"""Defensively remove any database rows for the replay camera."""
cleanup_camera_db(camera_name)
try:
Event.delete().where(Event.camera == camera_name).execute()
except Exception as e:
logger.error("Failed to delete replay events: %s", e)
try:
Timeline.delete().where(Timeline.camera == camera_name).execute()
except Exception as e:
logger.error("Failed to delete replay timeline: %s", e)
try:
Recordings.delete().where(Recordings.camera == camera_name).execute()
except Exception as e:
logger.error("Failed to delete replay recordings: %s", e)
try:
ReviewSegment.delete().where(ReviewSegment.camera == camera_name).execute()
except Exception as e:
logger.error("Failed to delete replay review segments: %s", e)
def _cleanup_files(self, camera_name: str) -> None:
"""Remove filesystem artifacts for the replay camera."""
cleanup_camera_files(camera_name)
dirs_to_clean = [
os.path.join(RECORD_DIR, camera_name),
os.path.join(CLIPS_DIR, camera_name),
os.path.join(THUMB_DIR, camera_name),
]
# Remove replay-specific cache directory
for dir_path in dirs_to_clean:
if os.path.exists(dir_path):
try:
shutil.rmtree(dir_path)
logger.debug("Removed replay directory: %s", dir_path)
except Exception as e:
logger.error("Failed to remove %s: %s", dir_path, e)
# Remove replay clip and any related files
if os.path.exists(REPLAY_DIR):
try:
shutil.rmtree(REPLAY_DIR)

View File

@ -28,7 +28,6 @@ from frigate.types import ModelStatusTypesEnum
from frigate.util.builtin import EventsPerSecond, InferenceSpeed, serialize
from frigate.util.file import get_event_thumbnail_bytes
from .genai_embedding import GenAIEmbedding
from .onnx.jina_v1_embedding import JinaV1ImageEmbedding, JinaV1TextEmbedding
from .onnx.jina_v2_embedding import JinaV2Embedding
@ -74,7 +73,6 @@ class Embeddings:
config: FrigateConfig,
db: SqliteVecQueueDatabase,
metrics: DataProcessorMetrics,
genai_manager=None,
) -> None:
self.config = config
self.db = db
@ -106,27 +104,7 @@ class Embeddings:
},
)
model_cfg = self.config.semantic_search.model
if not isinstance(model_cfg, SemanticSearchModelEnum):
# GenAI provider
embeddings_client = (
genai_manager.embeddings_client if genai_manager else None
)
if not embeddings_client:
raise ValueError(
f"semantic_search.model is '{model_cfg}' (GenAI provider) but "
"no embeddings client is configured. Ensure the GenAI provider "
"has 'embeddings' in its roles."
)
self.embedding = GenAIEmbedding(embeddings_client)
self.text_embedding = lambda input_data: self.embedding(
input_data, embedding_type="text"
)
self.vision_embedding = lambda input_data: self.embedding(
input_data, embedding_type="vision"
)
elif model_cfg == SemanticSearchModelEnum.jinav2:
if self.config.semantic_search.model == SemanticSearchModelEnum.jinav2:
# Single JinaV2Embedding instance for both text and vision
self.embedding = JinaV2Embedding(
model_size=self.config.semantic_search.model_size,
@ -140,8 +118,7 @@ class Embeddings:
self.vision_embedding = lambda input_data: self.embedding(
input_data, embedding_type="vision"
)
else:
# Default to jinav1
else: # Default to jinav1
self.text_embedding = JinaV1TextEmbedding(
model_size=config.semantic_search.model_size,
requestor=self.requestor,
@ -159,11 +136,8 @@ class Embeddings:
self.metrics.text_embeddings_eps.value = self.text_eps.eps()
def get_model_definitions(self):
model_cfg = self.config.semantic_search.model
if not isinstance(model_cfg, SemanticSearchModelEnum):
# GenAI provider: no ONNX models to download
models = []
elif model_cfg == SemanticSearchModelEnum.jinav2:
# Version-specific models
if self.config.semantic_search.model == SemanticSearchModelEnum.jinav2:
models = [
"jinaai/jina-clip-v2-tokenizer",
"jinaai/jina-clip-v2-model_fp16.onnx"
@ -338,12 +312,11 @@ class Embeddings:
# Get total count of events to process
total_events = Event.select().count()
if not isinstance(self.config.semantic_search.model, SemanticSearchModelEnum):
batch_size = 1
elif self.config.semantic_search.model == SemanticSearchModelEnum.jinav2:
batch_size = 4
else:
batch_size = 32
batch_size = (
4
if self.config.semantic_search.model == SemanticSearchModelEnum.jinav2
else 32
)
current_page = 1
totals = {

View File

@ -1,89 +0,0 @@
"""GenAI-backed embeddings for semantic search."""
import io
import logging
from typing import TYPE_CHECKING
import numpy as np
from PIL import Image
if TYPE_CHECKING:
from frigate.genai import GenAIClient
logger = logging.getLogger(__name__)
EMBEDDING_DIM = 768
class GenAIEmbedding:
"""Embedding adapter that delegates to a GenAI provider's embed API.
Provides the same interface as JinaV2Embedding for semantic search:
__call__(inputs, embedding_type) -> list[np.ndarray]. Output embeddings are
normalized to 768 dimensions for Frigate's sqlite-vec schema.
"""
def __init__(self, client: "GenAIClient") -> None:
self.client = client
def __call__(
self,
inputs: list[str] | list[bytes] | list[Image.Image],
embedding_type: str = "text",
) -> list[np.ndarray]:
"""Generate embeddings for text or images.
Args:
inputs: List of strings (text) or bytes/PIL images (vision).
embedding_type: "text" or "vision".
Returns:
List of 768-dim numpy float32 arrays.
"""
if not inputs:
return []
if embedding_type == "text":
texts = [str(x) for x in inputs]
embeddings = self.client.embed(texts=texts)
elif embedding_type == "vision":
images: list[bytes] = []
for inp in inputs:
if isinstance(inp, bytes):
images.append(inp)
elif isinstance(inp, Image.Image):
buf = io.BytesIO()
inp.convert("RGB").save(buf, format="JPEG")
images.append(buf.getvalue())
else:
logger.warning(
"GenAIEmbedding: skipping unsupported vision input type %s",
type(inp).__name__,
)
if not images:
return []
embeddings = self.client.embed(images=images)
else:
raise ValueError(
f"Invalid embedding_type '{embedding_type}'. Must be 'text' or 'vision'."
)
result = []
for emb in embeddings:
arr = np.asarray(emb, dtype=np.float32)
if arr.ndim > 1:
# Some providers return token-level embeddings; pool to one vector.
arr = arr.mean(axis=0)
arr = arr.flatten()
if arr.size != EMBEDDING_DIM:
if arr.size > EMBEDDING_DIM:
arr = arr[:EMBEDDING_DIM]
else:
arr = np.pad(
arr,
(0, EMBEDDING_DIM - arr.size),
mode="constant",
constant_values=0,
)
result.append(arr)
return result
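A short usage sketch of the adapter defined above, relying on its imports (`np`) and an already-constructed provider client; the `client` variable and the `snapshot.jpg` path are placeholders, not part of this change:

```python
# client: any GenAI provider client exposing embed(); assumed to be configured elsewhere
embedder = GenAIEmbedding(client)

text_vecs = embedder(["a person walking a dog"], embedding_type="text")
image_vecs = embedder([open("snapshot.jpg", "rb").read()], embedding_type="vision")

# regardless of what the provider returns, each output vector is float32 and 768-dim
assert all(v.dtype == np.float32 and v.shape == (768,) for v in text_vecs + image_vecs)
```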

View File

@ -96,7 +96,9 @@ class EmbeddingMaintainer(threading.Thread):
CameraConfigUpdateEnum.semantic_search,
],
)
self.enrichment_config_subscriber = ConfigSubscriber("config/")
self.classification_config_subscriber = ConfigSubscriber(
"config/classification/custom/"
)
# Configure Frigate DB
db = SqliteVecQueueDatabase(
@ -114,10 +116,8 @@ class EmbeddingMaintainer(threading.Thread):
models = [Event, Recordings, ReviewSegment, Trigger]
db.bind(models)
self.genai_manager = GenAIClientManager(config)
if config.semantic_search.enabled:
self.embeddings = Embeddings(config, db, metrics, self.genai_manager)
self.embeddings = Embeddings(config, db, metrics)
# Check if we need to re-index events
if config.semantic_search.reindex:
@ -144,6 +144,7 @@ class EmbeddingMaintainer(threading.Thread):
self.frame_manager = SharedMemoryFrameManager()
self.detected_license_plates: dict[str, dict[str, Any]] = {}
self.genai_manager = GenAIClientManager(config)
# model runners to share between realtime and post processors
if self.config.lpr.enabled:
@ -271,7 +272,7 @@ class EmbeddingMaintainer(threading.Thread):
"""Maintain a SQLite-vec database for semantic search."""
while not self.stop_event.is_set():
self.config_updater.check_for_updates()
self._check_enrichment_config_updates()
self._check_classification_config_updates()
self._process_requests()
self._process_updates()
self._process_recordings_updates()
@ -282,7 +283,7 @@ class EmbeddingMaintainer(threading.Thread):
self._process_event_metadata()
self.config_updater.stop()
self.enrichment_config_subscriber.stop()
self.classification_config_subscriber.stop()
self.event_subscriber.stop()
self.event_end_subscriber.stop()
self.recordings_subscriber.stop()
@ -293,86 +294,67 @@ class EmbeddingMaintainer(threading.Thread):
self.requestor.stop()
logger.info("Exiting embeddings maintenance...")
def _check_enrichment_config_updates(self) -> None:
"""Check for enrichment config updates and delegate to processors."""
topic, payload = self.enrichment_config_subscriber.check_for_update()
def _check_classification_config_updates(self) -> None:
"""Check for classification config updates and add/remove processors."""
topic, model_config = self.classification_config_subscriber.check_for_update()
if topic is None:
return
if topic:
model_name = topic.split("/")[-1]
# Custom classification add/remove requires managing the processor list
if topic.startswith("config/classification/custom/"):
self._handle_custom_classification_update(topic, payload)
return
if model_config is None:
self.realtime_processors = [
processor
for processor in self.realtime_processors
if not (
isinstance(
processor,
(
CustomStateClassificationProcessor,
CustomObjectClassificationProcessor,
),
)
and processor.model_config.name == model_name
)
]
# Broadcast to all processors — each decides if the topic is relevant
for processor in self.realtime_processors:
processor.update_config(topic, payload)
logger.info(
f"Successfully removed classification processor for model: {model_name}"
)
else:
self.config.classification.custom[model_name] = model_config
for processor in self.post_processors:
processor.update_config(topic, payload)
def _handle_custom_classification_update(
self, topic: str, model_config: Any
) -> None:
"""Handle add/remove of custom classification processors."""
model_name = topic.split("/")[-1]
if model_config is None:
self.realtime_processors = [
processor
for processor in self.realtime_processors
if not (
isinstance(
# Check if processor already exists
for processor in self.realtime_processors:
if isinstance(
processor,
(
CustomStateClassificationProcessor,
CustomObjectClassificationProcessor,
),
):
if processor.model_config.name == model_name:
logger.debug(
f"Classification processor for model {model_name} already exists, skipping"
)
return
if model_config.state_config is not None:
processor = CustomStateClassificationProcessor(
self.config, model_config, self.requestor, self.metrics
)
and processor.model_config.name == model_name
else:
processor = CustomObjectClassificationProcessor(
self.config,
model_config,
self.event_metadata_publisher,
self.requestor,
self.metrics,
)
self.realtime_processors.append(processor)
logger.info(
f"Added classification processor for model: {model_name} (type: {type(processor).__name__})"
)
]
logger.info(
f"Successfully removed classification processor for model: {model_name}"
)
return
self.config.classification.custom[model_name] = model_config
# Check if processor already exists
for processor in self.realtime_processors:
if isinstance(
processor,
(
CustomStateClassificationProcessor,
CustomObjectClassificationProcessor,
),
):
if processor.model_config.name == model_name:
logger.debug(
f"Classification processor for model {model_name} already exists, skipping"
)
return
if model_config.state_config is not None:
processor = CustomStateClassificationProcessor(
self.config, model_config, self.requestor, self.metrics
)
else:
processor = CustomObjectClassificationProcessor(
self.config,
model_config,
self.event_metadata_publisher,
self.requestor,
self.metrics,
)
self.realtime_processors.append(processor)
logger.info(
f"Added classification processor for model: {model_name} (type: {type(processor).__name__})"
)
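The new handler above subscribes to the broad `config/` prefix and either manages the processor list (custom classification topics) or broadcasts the update to every processor, letting each decide whether the topic is relevant. A minimal sketch of that broadcast pattern; the class and function names are illustrative, and only the `config/lpr` topic comes from the LPR processor earlier in this diff:

```python
from typing import Any, Protocol


class ConfigAwareProcessor(Protocol):
    def update_config(self, topic: str, payload: Any) -> None: ...


class LprLikeProcessor:
    """Each processor owns its topic check, so the maintainer can broadcast blindly."""

    CONFIG_UPDATE_TOPIC = "config/lpr"

    def update_config(self, topic: str, payload: Any) -> None:
        if topic != self.CONFIG_UPDATE_TOPIC:
            return  # not relevant to this processor
        # ...apply the payload here...


def broadcast_config_update(
    processors: list[ConfigAwareProcessor], topic: str, payload: Any
) -> None:
    for processor in processors:
        processor.update_config(topic, payload)
```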
def _process_requests(self) -> None:
"""Process embeddings requests"""

View File

@ -7,7 +7,6 @@ import os
import re
from typing import Any, Optional
import numpy as np
from playhouse.shortcuts import model_to_dict
from frigate.config import CameraConfig, GenAIConfig, GenAIProviderEnum
@ -305,25 +304,6 @@ Guidelines:
"""Get the context window size for this provider in tokens."""
return 4096
def embed(
self,
texts: list[str] | None = None,
images: list[bytes] | None = None,
) -> list[np.ndarray]:
"""Generate embeddings for text and/or images.
Returns list of numpy arrays (one per input). Expected dimension is 768
for Frigate semantic search compatibility.
Providers that support embeddings should override this method.
"""
logger.warning(
"%s does not support embeddings. "
"This method should be overridden by the provider implementation.",
self.__class__.__name__,
)
return []
def chat_with_tools(
self,
messages: list[dict[str, Any]],

View File

@ -1,15 +1,12 @@
"""llama.cpp Provider for Frigate AI."""
import base64
import io
import json
import logging
from typing import Any, Optional
import httpx
import numpy as np
import requests
from PIL import Image
from frigate.config import GenAIProviderEnum
from frigate.genai import GenAIClient, register_genai_provider
@ -18,20 +15,6 @@ from frigate.genai.utils import parse_tool_calls_from_message
logger = logging.getLogger(__name__)
def _to_jpeg(img_bytes: bytes) -> bytes | None:
"""Convert image bytes to JPEG. llama.cpp/STB does not support WebP."""
try:
img = Image.open(io.BytesIO(img_bytes))
if img.mode != "RGB":
img = img.convert("RGB")
buf = io.BytesIO()
img.save(buf, format="JPEG", quality=85)
return buf.getvalue()
except Exception as e:
logger.warning("Failed to convert image to JPEG: %s", e)
return None
@register_genai_provider(GenAIProviderEnum.llamacpp)
class LlamaCppClient(GenAIClient):
"""Generative AI client for Frigate using llama.cpp server."""
@ -193,110 +176,6 @@ class LlamaCppClient(GenAIClient):
)
return result if result else None
def embed(
self,
texts: list[str] | None = None,
images: list[bytes] | None = None,
) -> list[np.ndarray]:
"""Generate embeddings via llama.cpp /embeddings endpoint.
Supports batch requests. Uses content format with prompt_string and
multimodal_data for images (PR #15108). Server must be started with
--embeddings and --mmproj for multimodal support.
"""
if self.provider is None:
logger.warning(
"llama.cpp provider has not been initialized. Check your llama.cpp configuration."
)
return []
texts = texts or []
images = images or []
if not texts and not images:
return []
EMBEDDING_DIM = 768
content = []
for text in texts:
content.append({"prompt_string": text})
for img in images:
# llama.cpp uses STB which does not support WebP; convert to JPEG
jpeg_bytes = _to_jpeg(img)
to_encode = jpeg_bytes if jpeg_bytes is not None else img
encoded = base64.b64encode(to_encode).decode("utf-8")
# prompt_string must contain <__media__> placeholder for image tokenization
content.append(
{
"prompt_string": "<__media__>\n",
"multimodal_data": [encoded],
}
)
try:
response = requests.post(
f"{self.provider}/embeddings",
json={"model": self.genai_config.model, "content": content},
timeout=self.timeout,
)
response.raise_for_status()
result = response.json()
items = result.get("data", result) if isinstance(result, dict) else result
if not isinstance(items, list):
logger.warning("llama.cpp embeddings returned unexpected format")
return []
embeddings = []
for item in items:
emb = item.get("embedding") if isinstance(item, dict) else None
if emb is None:
logger.warning("llama.cpp embeddings item missing embedding field")
continue
arr = np.array(emb, dtype=np.float32)
if arr.ndim > 1:
# llama.cpp can return token-level embeddings; pool per item
arr = arr.mean(axis=0)
arr = arr.flatten()
orig_dim = arr.size
if orig_dim != EMBEDDING_DIM:
if orig_dim > EMBEDDING_DIM:
arr = arr[:EMBEDDING_DIM]
logger.debug(
"Truncated llama.cpp embedding from %d to %d dimensions",
orig_dim,
EMBEDDING_DIM,
)
else:
arr = np.pad(
arr,
(0, EMBEDDING_DIM - orig_dim),
mode="constant",
constant_values=0,
)
logger.debug(
"Padded llama.cpp embedding from %d to %d dimensions",
orig_dim,
EMBEDDING_DIM,
)
embeddings.append(arr)
return embeddings
except requests.exceptions.Timeout:
logger.warning("llama.cpp embeddings request timed out")
return []
except requests.exceptions.RequestException as e:
error_detail = str(e)
if hasattr(e, "response") and e.response is not None:
try:
error_detail = f"{str(e)} - Response: {e.response.text[:500]}"
except Exception:
pass
logger.warning("llama.cpp embeddings error: %s", error_detail)
return []
except Exception as e:
logger.warning("Unexpected error in llama.cpp embeddings: %s", str(e))
return []
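The removed `embed()` above batches text and image inputs into a single request body. A stripped-down sketch of just the payload construction, mirroring the removed code (the helper name is illustrative, and the content format is taken from the code above rather than independently verified against llama.cpp):

```python
import base64


def build_embedding_content(texts: list[str], jpeg_images: list[bytes]) -> list[dict]:
    """Build the mixed text/image content list for a llama.cpp /embeddings request."""
    content: list[dict] = [{"prompt_string": text} for text in texts]
    for img in jpeg_images:
        encoded = base64.b64encode(img).decode("utf-8")
        # the prompt must contain the <__media__> placeholder so the image is tokenized
        content.append({"prompt_string": "<__media__>\n", "multimodal_data": [encoded]})
    return content
```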
def chat_with_tools(
self,
messages: list[dict[str, Any]],

View File

@ -1,6 +1,5 @@
"""Ollama Provider for Frigate AI."""
import json
import logging
from typing import Any, Optional
@ -109,22 +108,7 @@ class OllamaClient(GenAIClient):
if msg.get("name"):
msg_dict["name"] = msg["name"]
if msg.get("tool_calls"):
# Ollama requires tool call arguments as dicts, but the
# conversation format (OpenAI-style) stores them as JSON
# strings. Convert back to dicts for Ollama.
ollama_tool_calls = []
for tc in msg["tool_calls"]:
func = tc.get("function") or {}
args = func.get("arguments") or {}
if isinstance(args, str):
try:
args = json.loads(args)
except (json.JSONDecodeError, TypeError):
args = {}
ollama_tool_calls.append(
{"function": {"name": func.get("name", ""), "arguments": args}}
)
msg_dict["tool_calls"] = ollama_tool_calls
msg_dict["tool_calls"] = msg["tool_calls"]
request_messages.append(msg_dict)
request_params: dict[str, Any] = {
@ -136,27 +120,25 @@ class OllamaClient(GenAIClient):
request_params["stream"] = True
if tools:
request_params["tools"] = tools
if tool_choice:
request_params["tool_choice"] = (
"none"
if tool_choice == "none"
else "required"
if tool_choice == "required"
else "auto"
)
return request_params
def _message_from_response(self, response: dict[str, Any]) -> dict[str, Any]:
"""Parse Ollama chat response into {content, tool_calls, finish_reason}."""
if not response or "message" not in response:
logger.debug("Ollama response empty or missing 'message' key")
return {
"content": None,
"tool_calls": None,
"finish_reason": "error",
}
message = response["message"]
logger.debug(
"Ollama response message keys: %s, content_len=%s, thinking_len=%s, "
"tool_calls=%s, done=%s",
list(message.keys()) if hasattr(message, "keys") else "N/A",
len(message.get("content", "") or "") if message.get("content") else 0,
len(message.get("thinking", "") or "") if message.get("thinking") else 0,
bool(message.get("tool_calls")),
response.get("done"),
)
content = message.get("content", "").strip() if message.get("content") else None
tool_calls = parse_tool_calls_from_message(message)
finish_reason = "error"
@ -216,13 +198,7 @@ class OllamaClient(GenAIClient):
tools: Optional[list[dict[str, Any]]] = None,
tool_choice: Optional[str] = "auto",
):
"""Stream chat with tools; yields content deltas then final message.
When tools are provided, Ollama streaming does not include tool_calls
in the response chunks. To work around this, we use a non-streaming
call when tools are present to ensure tool calls are captured, then
emit the content as a single delta followed by the final message.
"""
"""Stream chat with tools; yields content deltas then final message."""
if self.provider is None:
logger.warning(
"Ollama provider has not been initialized. Check your Ollama configuration."
@ -237,27 +213,6 @@ class OllamaClient(GenAIClient):
)
return
try:
# Ollama does not return tool_calls in streaming mode, so fall
# back to a non-streaming call when tools are provided.
if tools:
logger.debug(
"Ollama: tools provided, using non-streaming call for tool support"
)
request_params = self._build_request_params(
messages, tools, tool_choice, stream=False
)
async_client = OllamaAsyncClient(
host=self.genai_config.base_url,
timeout=self.timeout,
)
response = await async_client.chat(**request_params)
result = self._message_from_response(response)
content = result.get("content")
if content:
yield ("content_delta", content)
yield ("message", result)
return
request_params = self._build_request_params(
messages, tools, tool_choice, stream=True
)
@ -278,10 +233,11 @@ class OllamaClient(GenAIClient):
yield ("content_delta", delta)
if chunk.get("done"):
full_content = "".join(content_parts).strip() or None
tool_calls = parse_tool_calls_from_message(msg)
final_message = {
"content": full_content,
"tool_calls": None,
"finish_reason": "stop",
"tool_calls": tool_calls,
"finish_reason": "tool_calls" if tool_calls else "stop",
}
break
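The removed block above converts OpenAI-style tool calls, whose arguments are stored as JSON strings, into the dict form Ollama expects. The same conversion as a standalone sketch (the function name is illustrative):

```python
import json
from typing import Any


def to_ollama_tool_calls(openai_tool_calls: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Convert OpenAI-style tool calls (JSON-string arguments) into Ollama's dict form."""
    converted = []
    for tc in openai_tool_calls:
        func = tc.get("function") or {}
        args = func.get("arguments") or {}
        if isinstance(args, str):
            try:
                args = json.loads(args)
            except (json.JSONDecodeError, TypeError):
                args = {}
        converted.append({"function": {"name": func.get("name", ""), "arguments": args}})
    return converted
```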

View File

@ -23,26 +23,21 @@ def parse_tool_calls_from_message(
if not raw or not isinstance(raw, list):
return None
result = []
for idx, tool_call in enumerate(raw):
for tool_call in raw:
function_data = tool_call.get("function") or {}
raw_arguments = function_data.get("arguments") or {}
if isinstance(raw_arguments, dict):
arguments = raw_arguments
elif isinstance(raw_arguments, str):
try:
arguments = json.loads(raw_arguments)
except (json.JSONDecodeError, KeyError, TypeError) as e:
logger.warning(
"Failed to parse tool call arguments: %s, tool: %s",
e,
function_data.get("name", "unknown"),
)
arguments = {}
else:
try:
arguments_str = function_data.get("arguments") or "{}"
arguments = json.loads(arguments_str)
except (json.JSONDecodeError, KeyError, TypeError) as e:
logger.warning(
"Failed to parse tool call arguments: %s, tool: %s",
e,
function_data.get("name", "unknown"),
)
arguments = {}
result.append(
{
"id": tool_call.get("id", "") or f"call_{idx}",
"id": tool_call.get("id", ""),
"name": function_data.get("name", ""),
"arguments": arguments,
}
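A small worked example of the normalization performed by the loop above; the input shape is an assumption of a typical provider message's `tool_calls` list:

```python
import json

raw = [{"id": "call_1", "function": {"name": "get_weather", "arguments": '{"city": "Austin"}'}}]

parsed = []
for tool_call in raw:
    function_data = tool_call.get("function") or {}
    raw_arguments = function_data.get("arguments") or {}
    # dict arguments pass through unchanged; string arguments are JSON-decoded
    arguments = raw_arguments if isinstance(raw_arguments, dict) else json.loads(raw_arguments)
    parsed.append(
        {
            "id": tool_call.get("id", ""),
            "name": function_data.get("name", ""),
            "arguments": arguments,
        }
    )

assert parsed == [{"id": "call_1", "name": "get_weather", "arguments": {"city": "Austin"}}]
```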

View File

@ -1,864 +0,0 @@
"""Motion search job management with background execution and parallel verification."""
import logging
import os
import threading
from concurrent.futures import Future, ThreadPoolExecutor, as_completed
from dataclasses import asdict, dataclass, field
from datetime import datetime
from typing import Any, Optional
import cv2
import numpy as np
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import FrigateConfig
from frigate.const import UPDATE_JOB_STATE
from frigate.jobs.job import Job
from frigate.jobs.manager import (
get_job_by_id,
set_current_job,
)
from frigate.models import Recordings
from frigate.types import JobStatusTypesEnum
logger = logging.getLogger(__name__)
# Constants
HEATMAP_GRID_SIZE = 16
@dataclass
class MotionSearchMetrics:
"""Metrics collected during motion search execution."""
segments_scanned: int = 0
segments_processed: int = 0
metadata_inactive_segments: int = 0
heatmap_roi_skip_segments: int = 0
fallback_full_range_segments: int = 0
frames_decoded: int = 0
wall_time_seconds: float = 0.0
segments_with_errors: int = 0
def to_dict(self) -> dict[str, Any]:
"""Convert to dictionary."""
return asdict(self)
@dataclass
class MotionSearchResult:
"""A single search result with timestamp and change info."""
timestamp: float
change_percentage: float
def to_dict(self) -> dict[str, Any]:
"""Convert to dictionary."""
return asdict(self)
@dataclass
class MotionSearchJob(Job):
"""Job state for motion search operations."""
job_type: str = "motion_search"
camera: str = ""
start_time_range: float = 0.0
end_time_range: float = 0.0
polygon_points: list[list[float]] = field(default_factory=list)
threshold: int = 30
min_area: float = 5.0
frame_skip: int = 5
parallel: bool = False
max_results: int = 25
# Track progress
total_frames_processed: int = 0
# Metrics for observability
metrics: Optional[MotionSearchMetrics] = None
def to_dict(self) -> dict[str, Any]:
"""Convert to dictionary for WebSocket transmission."""
d = asdict(self)
if self.metrics:
d["metrics"] = self.metrics.to_dict()
return d
def create_polygon_mask(
polygon_points: list[list[float]], frame_width: int, frame_height: int
) -> np.ndarray:
"""Create a binary mask from normalized polygon coordinates."""
motion_points = np.array(
[[int(p[0] * frame_width), int(p[1] * frame_height)] for p in polygon_points],
dtype=np.int32,
)
mask = np.zeros((frame_height, frame_width), dtype=np.uint8)
cv2.fillPoly(mask, [motion_points], 255)
return mask
def compute_roi_bbox_normalized(
polygon_points: list[list[float]],
) -> tuple[float, float, float, float]:
"""Compute the bounding box of the ROI in normalized coordinates (0-1).
Returns (x_min, y_min, x_max, y_max) in normalized coordinates.
"""
if not polygon_points:
return (0.0, 0.0, 1.0, 1.0)
x_coords = [p[0] for p in polygon_points]
y_coords = [p[1] for p in polygon_points]
return (min(x_coords), min(y_coords), max(x_coords), max(y_coords))
def heatmap_overlaps_roi(
heatmap: dict[str, int], roi_bbox: tuple[float, float, float, float]
) -> bool:
"""Check if a sparse motion heatmap has any overlap with the ROI bounding box.
Args:
heatmap: Sparse dict mapping cell index (str) to intensity (1-255).
roi_bbox: (x_min, y_min, x_max, y_max) in normalized coordinates (0-1).
Returns:
True if there is overlap (any active cell in the ROI region).
"""
if not isinstance(heatmap, dict):
# Invalid heatmap, assume overlap to be safe
return True
x_min, y_min, x_max, y_max = roi_bbox
# Convert normalized coordinates to grid cells (0-15)
grid_x_min = max(0, int(x_min * HEATMAP_GRID_SIZE))
grid_y_min = max(0, int(y_min * HEATMAP_GRID_SIZE))
grid_x_max = min(HEATMAP_GRID_SIZE - 1, int(x_max * HEATMAP_GRID_SIZE))
grid_y_max = min(HEATMAP_GRID_SIZE - 1, int(y_max * HEATMAP_GRID_SIZE))
# Check each cell in the ROI bbox
for y in range(grid_y_min, grid_y_max + 1):
for x in range(grid_x_min, grid_x_max + 1):
idx = str(y * HEATMAP_GRID_SIZE + x)
if idx in heatmap:
return True
return False
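A worked example of the gate above, using `heatmap_overlaps_roi` and `HEATMAP_GRID_SIZE = 16` as defined in this file; the heatmap values and ROI are illustrative:

```python
# Sparse heatmap: only cells with motion are stored, keyed by str(y * 16 + x).
heatmap = {"0": 12, "17": 40}        # cells (0, 0) and (1, 1) saw motion
roi_bbox = (0.0, 0.0, 0.25, 0.25)    # top-left quarter of the frame, normalized

# the ROI covers grid cells x in 0..4 and y in 0..4, so cell "17" overlaps
assert heatmap_overlaps_roi(heatmap, roi_bbox) is True
# cell "255" is the bottom-right corner (x=15, y=15), outside the ROI
assert heatmap_overlaps_roi({"255": 1}, roi_bbox) is False
```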
def segment_passes_activity_gate(recording: Recordings) -> bool:
"""Check if a segment passes the activity gate.
Returns True if any of motion, objects, or regions is non-zero/non-null.
Returns True if all are null (old segments without data).
"""
motion = recording.motion
objects = recording.objects
regions = recording.regions
# Old segments without metadata - pass through (conservative)
if motion is None and objects is None and regions is None:
return True
# Pass if any activity indicator is positive
return bool(motion) or bool(objects) or bool(regions)
def segment_passes_heatmap_gate(
recording: Recordings, roi_bbox: tuple[float, float, float, float]
) -> bool:
"""Check if a segment passes the heatmap overlap gate.
Returns True if:
- No heatmap is stored (old segments).
- The heatmap overlaps with the ROI bbox.
"""
heatmap = getattr(recording, "motion_heatmap", None)
if heatmap is None:
# No heatmap stored, fall back to activity gate
return True
return heatmap_overlaps_roi(heatmap, roi_bbox)
class MotionSearchRunner(threading.Thread):
"""Thread-based runner for motion search jobs with parallel verification."""
def __init__(
self,
job: MotionSearchJob,
config: FrigateConfig,
cancel_event: threading.Event,
) -> None:
super().__init__(daemon=True, name=f"motion_search_{job.id}")
self.job = job
self.config = config
self.cancel_event = cancel_event
self.internal_stop_event = threading.Event()
self.requestor = InterProcessRequestor()
self.metrics = MotionSearchMetrics()
self.job.metrics = self.metrics
# Worker cap: min(4, cpu_count)
cpu_count = os.cpu_count() or 1
self.max_workers = min(4, cpu_count)
def run(self) -> None:
"""Execute the motion search job."""
try:
self.job.status = JobStatusTypesEnum.running
self.job.start_time = datetime.now().timestamp()
self._broadcast_status()
results = self._execute_search()
if self.cancel_event.is_set():
self.job.status = JobStatusTypesEnum.cancelled
else:
self.job.status = JobStatusTypesEnum.success
self.job.results = {
"results": [r.to_dict() for r in results],
"total_frames_processed": self.job.total_frames_processed,
}
self.job.end_time = datetime.now().timestamp()
self.metrics.wall_time_seconds = self.job.end_time - self.job.start_time
self.job.metrics = self.metrics
logger.debug(
"Motion search job %s completed: status=%s, results=%d, frames=%d",
self.job.id,
self.job.status,
len(results),
self.job.total_frames_processed,
)
self._broadcast_status()
except Exception as e:
logger.exception("Motion search job %s failed: %s", self.job.id, e)
self.job.status = JobStatusTypesEnum.failed
self.job.error_message = str(e)
self.job.end_time = datetime.now().timestamp()
self.metrics.wall_time_seconds = self.job.end_time - (
self.job.start_time or 0
)
self.job.metrics = self.metrics
self._broadcast_status()
finally:
if self.requestor:
self.requestor.stop()
def _broadcast_status(self) -> None:
"""Broadcast job status update via IPC to WebSocket subscribers."""
if self.job.status == JobStatusTypesEnum.running and self.job.start_time:
self.metrics.wall_time_seconds = (
datetime.now().timestamp() - self.job.start_time
)
try:
self.requestor.send_data(UPDATE_JOB_STATE, self.job.to_dict())
except Exception as e:
logger.warning("Failed to broadcast motion search status: %s", e)
def _should_stop(self) -> bool:
"""Check if processing should stop due to cancellation or internal limits."""
return self.cancel_event.is_set() or self.internal_stop_event.is_set()
def _execute_search(self) -> list[MotionSearchResult]:
"""Main search execution logic."""
camera_name = self.job.camera
camera_config = self.config.cameras.get(camera_name)
if not camera_config:
raise ValueError(f"Camera {camera_name} not found")
frame_width = camera_config.detect.width
frame_height = camera_config.detect.height
# Create polygon mask
polygon_mask = create_polygon_mask(
self.job.polygon_points, frame_width, frame_height
)
if np.count_nonzero(polygon_mask) == 0:
logger.warning("Polygon mask is empty for job %s", self.job.id)
return []
# Compute ROI bbox in normalized coordinates for heatmap gate
roi_bbox = compute_roi_bbox_normalized(self.job.polygon_points)
# Query recordings
recordings = list(
Recordings.select()
.where(
(
Recordings.start_time.between(
self.job.start_time_range, self.job.end_time_range
)
)
| (
Recordings.end_time.between(
self.job.start_time_range, self.job.end_time_range
)
)
| (
(self.job.start_time_range > Recordings.start_time)
& (self.job.end_time_range < Recordings.end_time)
)
)
.where(Recordings.camera == camera_name)
.order_by(Recordings.start_time.asc())
)
if not recordings:
logger.debug("No recordings found for motion search job %s", self.job.id)
return []
logger.debug(
"Motion search job %s: queried %d recording segments for camera %s "
"(range %.1f - %.1f)",
self.job.id,
len(recordings),
camera_name,
self.job.start_time_range,
self.job.end_time_range,
)
self.metrics.segments_scanned = len(recordings)
# Apply activity and heatmap gates
filtered_recordings = []
for recording in recordings:
if not segment_passes_activity_gate(recording):
self.metrics.metadata_inactive_segments += 1
self.metrics.segments_processed += 1
logger.debug(
"Motion search job %s: segment %s skipped by activity gate "
"(motion=%s, objects=%s, regions=%s)",
self.job.id,
recording.id,
recording.motion,
recording.objects,
recording.regions,
)
continue
if not segment_passes_heatmap_gate(recording, roi_bbox):
self.metrics.heatmap_roi_skip_segments += 1
self.metrics.segments_processed += 1
logger.debug(
"Motion search job %s: segment %s skipped by heatmap gate "
"(heatmap present=%s, roi_bbox=%s)",
self.job.id,
recording.id,
recording.motion_heatmap is not None,
roi_bbox,
)
continue
filtered_recordings.append(recording)
self._broadcast_status()
# Fallback: if all segments were filtered out, scan all segments
# This allows motion search to find things the detector missed
if not filtered_recordings and recordings:
logger.info(
"All %d segments filtered by gates, falling back to full scan",
len(recordings),
)
self.metrics.fallback_full_range_segments = len(recordings)
filtered_recordings = recordings
logger.debug(
"Motion search job %s: %d/%d segments passed gates "
"(activity_skipped=%d, heatmap_skipped=%d)",
self.job.id,
len(filtered_recordings),
len(recordings),
self.metrics.metadata_inactive_segments,
self.metrics.heatmap_roi_skip_segments,
)
if self.job.parallel:
return self._search_motion_parallel(filtered_recordings, polygon_mask)
return self._search_motion_sequential(filtered_recordings, polygon_mask)
def _search_motion_parallel(
self,
recordings: list[Recordings],
polygon_mask: np.ndarray,
) -> list[MotionSearchResult]:
"""Search for motion in parallel across segments, streaming results."""
all_results: list[MotionSearchResult] = []
total_frames = 0
next_recording_idx_to_merge = 0
logger.debug(
"Motion search job %s: starting motion search with %d workers "
"across %d segments",
self.job.id,
self.max_workers,
len(recordings),
)
# Initialize partial results on the job so they stream to the frontend
self.job.results = {"results": [], "total_frames_processed": 0}
with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
futures: dict[Future, int] = {}
completed_segments: dict[int, tuple[list[MotionSearchResult], int]] = {}
for idx, recording in enumerate(recordings):
if self._should_stop():
break
future = executor.submit(
self._process_recording_for_motion,
recording.path,
recording.start_time,
recording.end_time,
self.job.start_time_range,
self.job.end_time_range,
polygon_mask,
self.job.threshold,
self.job.min_area,
self.job.frame_skip,
)
futures[future] = idx
for future in as_completed(futures):
if self._should_stop():
# Cancel remaining futures
for f in futures:
f.cancel()
break
recording_idx = futures[future]
recording = recordings[recording_idx]
try:
results, frames = future.result()
self.metrics.segments_processed += 1
completed_segments[recording_idx] = (results, frames)
while next_recording_idx_to_merge in completed_segments:
segment_results, segment_frames = completed_segments.pop(
next_recording_idx_to_merge
)
all_results.extend(segment_results)
total_frames += segment_frames
self.job.total_frames_processed = total_frames
self.metrics.frames_decoded = total_frames
if segment_results:
deduped = self._deduplicate_results(all_results)
self.job.results = {
"results": [
r.to_dict() for r in deduped[: self.job.max_results]
],
"total_frames_processed": total_frames,
}
self._broadcast_status()
if segment_results and len(deduped) >= self.job.max_results:
self.internal_stop_event.set()
for pending_future in futures:
pending_future.cancel()
break
next_recording_idx_to_merge += 1
if self.internal_stop_event.is_set():
break
except Exception as e:
self.metrics.segments_processed += 1
self.metrics.segments_with_errors += 1
self._broadcast_status()
logger.warning(
"Error processing segment %s: %s",
recording.path,
e,
)
self.job.total_frames_processed = total_frames
self.metrics.frames_decoded = total_frames
logger.debug(
"Motion search job %s: motion search complete, "
"found %d raw results, decoded %d frames, %d segment errors",
self.job.id,
len(all_results),
total_frames,
self.metrics.segments_with_errors,
)
# Sort and deduplicate results
all_results.sort(key=lambda x: x.timestamp)
return self._deduplicate_results(all_results)[: self.job.max_results]
def _search_motion_sequential(
self,
recordings: list[Recordings],
polygon_mask: np.ndarray,
) -> list[MotionSearchResult]:
"""Search for motion sequentially across segments, streaming results."""
all_results: list[MotionSearchResult] = []
total_frames = 0
logger.debug(
"Motion search job %s: starting sequential motion search across %d segments",
self.job.id,
len(recordings),
)
self.job.results = {"results": [], "total_frames_processed": 0}
for recording in recordings:
if self.cancel_event.is_set():
break
try:
results, frames = self._process_recording_for_motion(
recording.path,
recording.start_time,
recording.end_time,
self.job.start_time_range,
self.job.end_time_range,
polygon_mask,
self.job.threshold,
self.job.min_area,
self.job.frame_skip,
)
all_results.extend(results)
total_frames += frames
self.job.total_frames_processed = total_frames
self.metrics.frames_decoded = total_frames
self.metrics.segments_processed += 1
if results:
all_results.sort(key=lambda x: x.timestamp)
deduped = self._deduplicate_results(all_results)[
: self.job.max_results
]
self.job.results = {
"results": [r.to_dict() for r in deduped],
"total_frames_processed": total_frames,
}
self._broadcast_status()
if results and len(deduped) >= self.job.max_results:
break
except Exception as e:
self.metrics.segments_processed += 1
self.metrics.segments_with_errors += 1
self._broadcast_status()
logger.warning("Error processing segment %s: %s", recording.path, e)
self.job.total_frames_processed = total_frames
self.metrics.frames_decoded = total_frames
logger.debug(
"Motion search job %s: sequential motion search complete, "
"found %d raw results, decoded %d frames, %d segment errors",
self.job.id,
len(all_results),
total_frames,
self.metrics.segments_with_errors,
)
all_results.sort(key=lambda x: x.timestamp)
return self._deduplicate_results(all_results)[: self.job.max_results]
def _deduplicate_results(
self, results: list[MotionSearchResult], min_gap: float = 1.0
) -> list[MotionSearchResult]:
"""Deduplicate results that are too close together."""
if not results:
return results
deduplicated: list[MotionSearchResult] = []
last_timestamp = 0.0
for result in results:
if result.timestamp - last_timestamp >= min_gap:
deduplicated.append(result)
last_timestamp = result.timestamp
return deduplicated
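The deduplication above keeps the earliest hit in each burst of motion; anything within `min_gap` seconds of the last kept result is dropped. The same logic, reduced to bare timestamps, as a self-contained sketch:

```python
def dedupe_timestamps(timestamps: list[float], min_gap: float = 1.0) -> list[float]:
    """Keep only timestamps at least min_gap seconds apart (mirrors _deduplicate_results)."""
    kept: list[float] = []
    last = 0.0
    for ts in timestamps:
        if ts - last >= min_gap:
            kept.append(ts)
            last = ts
    return kept


# 10.4 and 11.9 fall within 1s of an already-kept result, so they are dropped
assert dedupe_timestamps([10.0, 10.4, 11.2, 11.9, 13.0]) == [10.0, 11.2, 13.0]
```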
def _process_recording_for_motion(
self,
recording_path: str,
recording_start: float,
recording_end: float,
search_start: float,
search_end: float,
polygon_mask: np.ndarray,
threshold: int,
min_area: float,
frame_skip: int,
) -> tuple[list[MotionSearchResult], int]:
"""Process a single recording file for motion detection.
This method is designed to be called from a thread pool.
Args:
min_area: Minimum change area as a percentage of the ROI (0-100).
"""
results: list[MotionSearchResult] = []
frames_processed = 0
if not os.path.exists(recording_path):
logger.warning("Recording file not found: %s", recording_path)
return results, frames_processed
cap = cv2.VideoCapture(recording_path)
if not cap.isOpened():
logger.error("Could not open recording: %s", recording_path)
return results, frames_processed
try:
fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
recording_duration = recording_end - recording_start
# Calculate frame range
start_offset = max(0, search_start - recording_start)
end_offset = min(recording_duration, search_end - recording_start)
start_frame = int(start_offset * fps)
end_frame = int(end_offset * fps)
start_frame = max(0, min(start_frame, total_frames - 1))
end_frame = max(0, min(end_frame, total_frames))
if start_frame >= end_frame:
return results, frames_processed
cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
# Get ROI bounding box
roi_bbox = cv2.boundingRect(polygon_mask)
roi_x, roi_y, roi_w, roi_h = roi_bbox
prev_frame_gray = None
frame_step = max(frame_skip, 1)
frame_idx = start_frame
while frame_idx < end_frame:
if self._should_stop():
break
ret, frame = cap.read()
if not ret:
frame_idx += 1
continue
if (frame_idx - start_frame) % frame_step != 0:
frame_idx += 1
continue
frames_processed += 1
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Handle frame dimension changes
if gray.shape != polygon_mask.shape:
resized_mask = cv2.resize(
polygon_mask, (gray.shape[1], gray.shape[0]), interpolation=cv2.INTER_NEAREST

)
current_bbox = cv2.boundingRect(resized_mask)
else:
resized_mask = polygon_mask
current_bbox = roi_bbox
roi_x, roi_y, roi_w, roi_h = current_bbox
cropped_gray = gray[roi_y : roi_y + roi_h, roi_x : roi_x + roi_w]
cropped_mask = resized_mask[
roi_y : roi_y + roi_h, roi_x : roi_x + roi_w
]
cropped_mask_area = np.count_nonzero(cropped_mask)
if cropped_mask_area == 0:
frame_idx += 1
continue
# Convert percentage to pixel count for this ROI
min_area_pixels = int((min_area / 100.0) * cropped_mask_area)
masked_gray = cv2.bitwise_and(
cropped_gray, cropped_gray, mask=cropped_mask
)
if prev_frame_gray is not None:
diff = cv2.absdiff(prev_frame_gray, masked_gray)
diff_blurred = cv2.GaussianBlur(diff, (3, 3), 0)
_, thresh = cv2.threshold(
diff_blurred, threshold, 255, cv2.THRESH_BINARY
)
thresh_dilated = cv2.dilate(thresh, None, iterations=1)
thresh_masked = cv2.bitwise_and(
thresh_dilated, thresh_dilated, mask=cropped_mask
)
change_pixels = cv2.countNonZero(thresh_masked)
if change_pixels > min_area_pixels:
contours, _ = cv2.findContours(
thresh_masked, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
)
total_change_area = sum(
cv2.contourArea(c)
for c in contours
if cv2.contourArea(c) >= min_area_pixels
)
if total_change_area > 0:
frame_time_offset = (frame_idx - start_frame) / fps
timestamp = (
recording_start + start_offset + frame_time_offset
)
change_percentage = (
total_change_area / cropped_mask_area
) * 100
results.append(
MotionSearchResult(
timestamp=timestamp,
change_percentage=round(change_percentage, 2),
)
)
prev_frame_gray = masked_gray
frame_idx += 1
finally:
cap.release()
logger.debug(
"Motion search segment complete: %s, %d frames processed, %d results found",
recording_path,
frames_processed,
len(results),
)
return results, frames_processed
# Module-level state for tracking motion search jobs by ID
_motion_search_jobs: dict[str, tuple[MotionSearchJob, threading.Event]] = {}
_jobs_lock = threading.Lock()
def stop_all_motion_search_jobs() -> None:
"""Cancel all running motion search jobs for clean shutdown."""
with _jobs_lock:
for job_id, (job, cancel_event) in _motion_search_jobs.items():
if job.status in (JobStatusTypesEnum.queued, JobStatusTypesEnum.running):
cancel_event.set()
logger.debug("Signalling motion search job %s to stop", job_id)
def start_motion_search_job(
config: FrigateConfig,
camera_name: str,
start_time: float,
end_time: float,
polygon_points: list[list[float]],
threshold: int = 30,
min_area: float = 5.0,
frame_skip: int = 5,
parallel: bool = False,
max_results: int = 25,
) -> str:
"""Start a new motion search job.
Returns the job ID.
"""
job = MotionSearchJob(
camera=camera_name,
start_time_range=start_time,
end_time_range=end_time,
polygon_points=polygon_points,
threshold=threshold,
min_area=min_area,
frame_skip=frame_skip,
parallel=parallel,
max_results=max_results,
)
cancel_event = threading.Event()
with _jobs_lock:
_motion_search_jobs[job.id] = (job, cancel_event)
set_current_job(job)
runner = MotionSearchRunner(job, config, cancel_event)
runner.start()
logger.debug(
"Started motion search job %s for camera %s: "
"time_range=%.1f-%.1f, threshold=%d, min_area=%.1f%%, "
"frame_skip=%d, parallel=%s, max_results=%d, polygon_points=%d vertices",
job.id,
camera_name,
start_time,
end_time,
threshold,
min_area,
frame_skip,
parallel,
max_results,
len(polygon_points),
)
return job.id
def get_motion_search_job(job_id: str) -> Optional[MotionSearchJob]:
"""Get a motion search job by ID."""
with _jobs_lock:
job_entry = _motion_search_jobs.get(job_id)
if job_entry:
return job_entry[0]
# Check completed jobs via manager
return get_job_by_id("motion_search", job_id)
def cancel_motion_search_job(job_id: str) -> bool:
"""Cancel a motion search job.
Returns True if cancellation was initiated, False if job not found.
"""
with _jobs_lock:
job_entry = _motion_search_jobs.get(job_id)
if not job_entry:
return False
job, cancel_event = job_entry
if job.status not in (JobStatusTypesEnum.queued, JobStatusTypesEnum.running):
# Already finished
return True
cancel_event.set()
job.status = JobStatusTypesEnum.cancelled
job_payload = job.to_dict()
logger.info("Cancelled motion search job %s", job_id)
requestor: Optional[InterProcessRequestor] = None
try:
requestor = InterProcessRequestor()
requestor.send_data(UPDATE_JOB_STATE, job_payload)
except Exception as e:
logger.warning(
"Failed to broadcast cancelled motion search job %s: %s", job_id, e
)
finally:
if requestor:
requestor.stop()
return True
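An illustrative usage of the module-level job API defined above; `config` is assumed to be an already-loaded `FrigateConfig`, and the camera name, time range, and polygon are placeholders:

```python
job_id = start_motion_search_job(
    config,
    camera_name="front_door",
    start_time=1700000000.0,
    end_time=1700003600.0,
    polygon_points=[[0.1, 0.1], [0.9, 0.1], [0.9, 0.9], [0.1, 0.9]],
    threshold=30,
    min_area=5.0,   # percentage of the ROI, not pixels
    frame_skip=5,
    parallel=True,
)

job = get_motion_search_job(job_id)
if job and job.status == JobStatusTypesEnum.running:
    cancel_motion_search_job(job_id)
```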

View File

@ -78,7 +78,6 @@ class Recordings(Model):
dBFS = IntegerField(null=True)
segment_size = FloatField(default=0) # this should be stored as MB
regions = IntegerField(null=True)
motion_heatmap = JSONField(null=True) # 16x16 grid, 256 values (0-255)
class ExportCase(Model):

View File

@ -176,32 +176,11 @@ class ImprovedMotionDetector(MotionDetector):
motion_boxes = []
pct_motion = 0
# skip motion entirely if the scene change percentage exceeds configured
# threshold. this is useful to ignore lighting storms, IR mode switches,
# etc. rather than registering them as brief motion and then recalibrating.
# note: skipping means the frame is dropped and **no recording will be
# created**, which could hide a legitimate object if the camera is actively
# autotracking. the alternative is to allow motion and accept a small
# recording that can be reviewed in the timeline. disabled by default (None).
if (
self.config.skip_motion_threshold is not None
and pct_motion > self.config.skip_motion_threshold
):
# force a recalibration so we transition to the new background
self.calibrating = True
return []
# once the motion is less than 5% and the number of contours is <= 4, assume it's calibrated
if pct_motion < 0.05 and len(motion_boxes) <= 4:
self.calibrating = False
# if calibrating or the motion contours are > 80% of the image area
# (lightning, ir, ptz) recalibrate. the lightning threshold does **not**
# stop motion detection entirely; it simply halts additional processing for
# the current frame once the percentage crosses the threshold. this helps
# reduce false positive object detections and CPU usage during high-motion
# events. recordings continue to be generated because users expect data
# while a PTZ camera is moving.
# if calibrating or the motion contours are > 80% of the image area (lightning, ir, ptz) recalibrate
if self.calibrating or pct_motion > self.config.lightning_threshold:
self.calibrating = True
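The removed comments above describe two separate behaviors: the optional skip gate, which drops the frame entirely and forces recalibration, and the lightning threshold, which only halts further processing for the current frame. A minimal standalone sketch of the skip gate (illustrative names, not the detector's actual API):

```python
def should_skip_frame(pct_motion: float, skip_threshold: float | None) -> bool:
    """Return True when the scene-change percentage exceeds the configured
    skip threshold; the caller then recalibrates and reports no motion boxes."""
    return skip_threshold is not None and pct_motion > skip_threshold


assert should_skip_frame(0.6, None) is False   # disabled by default: never skips
assert should_skip_frame(0.6, 0.4) is True     # 60% change exceeds a 0.4 threshold
assert should_skip_frame(0.2, 0.4) is False
```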

View File

@ -273,13 +273,17 @@ class BirdsEyeFrameManager:
stop_event: mp.Event,
):
self.config = config
self.mode = config.birdseye.mode
width, height = get_canvas_shape(config.birdseye.width, config.birdseye.height)
self.frame_shape = (height, width)
self.yuv_shape = (height * 3 // 2, width)
self.frame = np.ndarray(self.yuv_shape, dtype=np.uint8)
self.canvas = Canvas(width, height, config.birdseye.layout.scaling_factor)
self.stop_event = stop_event
self.last_refresh_time = 0
self.inactivity_threshold = config.birdseye.inactivity_threshold
if config.birdseye.layout.max_cameras:
self.last_refresh_time = 0
# initialize the frame as black and with the Frigate logo
self.blank_frame = np.zeros(self.yuv_shape, np.uint8)
@ -422,7 +426,7 @@ class BirdsEyeFrameManager:
and self.config.cameras[cam].enabled
and cam_data["last_active_frame"] > 0
and cam_data["current_frame_time"] - cam_data["last_active_frame"]
< self.config.birdseye.inactivity_threshold
< self.inactivity_threshold
]
)
logger.debug(f"Active cameras: {active_cameras}")

View File

@ -15,7 +15,6 @@ from ws4py.server.wsgirefserver import (
)
from ws4py.server.wsgiutils import WebSocketWSGIApplication
from frigate.comms.config_updater import ConfigSubscriber
from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum
from frigate.comms.ws import WebSocket
from frigate.config import FrigateConfig
@ -139,7 +138,6 @@ class OutputProcess(FrigateProcess):
CameraConfigUpdateEnum.record,
],
)
birdseye_config_subscriber = ConfigSubscriber("config/birdseye", exact=True)
jsmpeg_cameras: dict[str, JsmpegCamera] = {}
birdseye: Birdseye | None = None
@ -169,20 +167,6 @@ class OutputProcess(FrigateProcess):
websocket_thread.start()
while not self.stop_event.is_set():
update_topic, birdseye_config = (
birdseye_config_subscriber.check_for_update()
)
if update_topic is not None:
previous_global_mode = self.config.birdseye.mode
self.config.birdseye = birdseye_config
for camera_config in self.config.cameras.values():
if camera_config.birdseye.mode == previous_global_mode:
camera_config.birdseye.mode = birdseye_config.mode
logger.debug("Applied dynamic birdseye config update")
# check if there is an updated config
updates = config_subscriber.check_for_updates()
@ -313,7 +297,6 @@ class OutputProcess(FrigateProcess):
birdseye.stop()
config_subscriber.stop()
birdseye_config_subscriber.stop()
websocket_server.manager.close_all()
websocket_server.manager.stop()
websocket_server.manager.join()

View File

@ -50,13 +50,11 @@ class SegmentInfo:
active_object_count: int,
region_count: int,
average_dBFS: int,
motion_heatmap: dict[str, int] | None = None,
) -> None:
self.motion_count = motion_count
self.active_object_count = active_object_count
self.region_count = region_count
self.average_dBFS = average_dBFS
self.motion_heatmap = motion_heatmap
def should_discard_segment(self, retain_mode: RetainModeEnum) -> bool:
keep = False
@ -456,59 +454,6 @@ class RecordingMaintainer(threading.Thread):
if end_time < retain_cutoff:
self.drop_segment(cache_path)
def _compute_motion_heatmap(
self, camera: str, motion_boxes: list[tuple[int, int, int, int]]
) -> dict[str, int] | None:
"""Compute a 16x16 motion intensity heatmap from motion boxes.
Returns a sparse dict mapping cell index (as string) to intensity (1-255).
Only cells with motion are included.
Args:
camera: Camera name to get detect dimensions from.
motion_boxes: List of (x1, y1, x2, y2) pixel coordinates.
Returns:
Sparse dict like {"45": 3, "46": 5}, or None if no boxes.
"""
if not motion_boxes:
return None
camera_config = self.config.cameras.get(camera)
if not camera_config:
return None
frame_width = camera_config.detect.width
frame_height = camera_config.detect.height
if frame_width <= 0 or frame_height <= 0:
return None
GRID_SIZE = 16
counts: dict[int, int] = {}
for box in motion_boxes:
if len(box) < 4:
continue
x1, y1, x2, y2 = box
# Convert pixel coordinates to grid cells
grid_x1 = max(0, int((x1 / frame_width) * GRID_SIZE))
grid_y1 = max(0, int((y1 / frame_height) * GRID_SIZE))
grid_x2 = min(GRID_SIZE - 1, int((x2 / frame_width) * GRID_SIZE))
grid_y2 = min(GRID_SIZE - 1, int((y2 / frame_height) * GRID_SIZE))
for y in range(grid_y1, grid_y2 + 1):
for x in range(grid_x1, grid_x2 + 1):
idx = y * GRID_SIZE + x
counts[idx] = min(255, counts.get(idx, 0) + 1)
if not counts:
return None
# Convert to string keys for JSON storage
return {str(k): v for k, v in counts.items()}
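To make the sparse-grid encoding above concrete, here is a small worked example; the 1920x1080 detect resolution is illustrative:

```python
# One motion box (x1, y1, x2, y2) = (0, 0, 240, 135) on a 1920x1080 frame maps to:
#   grid_x1 = int((0 / 1920) * 16)   = 0,  grid_x2 = int((240 / 1920) * 16) = 2
#   grid_y1 = int((0 / 1080) * 16)   = 0,  grid_y2 = int((135 / 1080) * 16) = 2
# so cells with x in 0..2 and y in 0..2 are each incremented once:
expected = {str(y * 16 + x): 1 for y in range(3) for x in range(3)}
assert expected == {
    "0": 1, "1": 1, "2": 1,
    "16": 1, "17": 1, "18": 1,
    "32": 1, "33": 1, "34": 1,
}
```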
def segment_stats(
self, camera: str, start_time: datetime.datetime, end_time: datetime.datetime
) -> SegmentInfo:
@ -516,8 +461,6 @@ class RecordingMaintainer(threading.Thread):
active_count = 0
region_count = 0
motion_count = 0
all_motion_boxes: list[tuple[int, int, int, int]] = []
for frame in self.object_recordings_info[camera]:
# frame is after end time of segment
if frame[0] > end_time.timestamp():
@ -536,8 +479,6 @@ class RecordingMaintainer(threading.Thread):
)
motion_count += len(frame[2])
region_count += len(frame[3])
# Collect motion boxes for heatmap computation
all_motion_boxes.extend(frame[2])
audio_values = []
for frame in self.audio_recordings_info[camera]:
@ -557,14 +498,8 @@ class RecordingMaintainer(threading.Thread):
average_dBFS = 0 if not audio_values else np.average(audio_values)
motion_heatmap = self._compute_motion_heatmap(camera, all_motion_boxes)
return SegmentInfo(
motion_count,
active_count,
region_count,
round(average_dBFS),
motion_heatmap,
motion_count, active_count, region_count, round(average_dBFS)
)
async def move_segment(
@ -655,7 +590,6 @@ class RecordingMaintainer(threading.Thread):
Recordings.regions.name: segment_info.region_count,
Recordings.dBFS.name: segment_info.average_dBFS,
Recordings.segment_size.name: segment_size,
Recordings.motion_heatmap.name: segment_info.motion_heatmap,
}
except Exception as e:
logger.error(f"Unable to store recording segment {cache_path}")

View File

@ -1,261 +0,0 @@
"""Tests for the config_set endpoint's wildcard camera propagation."""
import os
import tempfile
import unittest
from unittest.mock import MagicMock, Mock, patch
import ruamel.yaml
from frigate.config import FrigateConfig
from frigate.config.camera.updater import (
CameraConfigUpdateEnum,
CameraConfigUpdatePublisher,
CameraConfigUpdateTopic,
)
from frigate.models import Event, Recordings, ReviewSegment
from frigate.test.http_api.base_http_test import AuthTestClient, BaseTestHttp
class TestConfigSetWildcardPropagation(BaseTestHttp):
"""Test that wildcard camera updates fan out to all cameras."""
def setUp(self):
super().setUp(models=[Event, Recordings, ReviewSegment])
self.minimal_config = {
"mqtt": {"host": "mqtt"},
"cameras": {
"front_door": {
"ffmpeg": {
"inputs": [
{"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
]
},
"detect": {
"height": 1080,
"width": 1920,
"fps": 5,
},
},
"back_yard": {
"ffmpeg": {
"inputs": [
{"path": "rtsp://10.0.0.2:554/video", "roles": ["detect"]}
]
},
"detect": {
"height": 720,
"width": 1280,
"fps": 10,
},
},
},
}
def _create_app_with_publisher(self):
"""Create app with a mocked config publisher."""
from fastapi import Request
from frigate.api.auth import get_allowed_cameras_for_filter, get_current_user
from frigate.api.fastapi_app import create_fastapi_app
mock_publisher = Mock(spec=CameraConfigUpdatePublisher)
mock_publisher.publisher = MagicMock()
app = create_fastapi_app(
FrigateConfig(**self.minimal_config),
self.db,
None,
None,
None,
None,
None,
None,
mock_publisher,
None,
enforce_default_admin=False,
)
async def mock_get_current_user(request: Request):
username = request.headers.get("remote-user")
role = request.headers.get("remote-role")
return {"username": username, "role": role}
async def mock_get_allowed_cameras_for_filter(request: Request):
return list(self.minimal_config.get("cameras", {}).keys())
app.dependency_overrides[get_current_user] = mock_get_current_user
app.dependency_overrides[get_allowed_cameras_for_filter] = (
mock_get_allowed_cameras_for_filter
)
return app, mock_publisher
def _write_config_file(self):
"""Write the minimal config to a temp YAML file and return the path."""
yaml = ruamel.yaml.YAML()
f = tempfile.NamedTemporaryFile(mode="w", suffix=".yml", delete=False)
yaml.dump(self.minimal_config, f)
f.close()
return f.name
@patch("frigate.api.app.find_config_file")
def test_wildcard_detect_update_fans_out_to_all_cameras(self, mock_find_config):
"""config/cameras/*/detect fans out to all cameras."""
config_path = self._write_config_file()
mock_find_config.return_value = config_path
try:
app, mock_publisher = self._create_app_with_publisher()
with AuthTestClient(app) as client:
resp = client.put(
"/config/set",
json={
"config_data": {"detect": {"fps": 15}},
"update_topic": "config/cameras/*/detect",
"requires_restart": 0,
},
)
self.assertEqual(resp.status_code, 200)
data = resp.json()
self.assertTrue(data["success"])
# Verify publish_update called for each camera
self.assertEqual(mock_publisher.publish_update.call_count, 2)
published_cameras = set()
for c in mock_publisher.publish_update.call_args_list:
topic = c[0][0]
self.assertIsInstance(topic, CameraConfigUpdateTopic)
self.assertEqual(topic.update_type, CameraConfigUpdateEnum.detect)
published_cameras.add(topic.camera)
self.assertEqual(published_cameras, {"front_door", "back_yard"})
# Global publisher should NOT be called for wildcard
mock_publisher.publisher.publish.assert_not_called()
finally:
os.unlink(config_path)
@patch("frigate.api.app.find_config_file")
def test_wildcard_motion_update_fans_out(self, mock_find_config):
"""config/cameras/*/motion fans out to all cameras."""
config_path = self._write_config_file()
mock_find_config.return_value = config_path
try:
app, mock_publisher = self._create_app_with_publisher()
with AuthTestClient(app) as client:
resp = client.put(
"/config/set",
json={
"config_data": {"motion": {"threshold": 30}},
"update_topic": "config/cameras/*/motion",
"requires_restart": 0,
},
)
self.assertEqual(resp.status_code, 200)
published_cameras = set()
for c in mock_publisher.publish_update.call_args_list:
topic = c[0][0]
self.assertEqual(topic.update_type, CameraConfigUpdateEnum.motion)
published_cameras.add(topic.camera)
self.assertEqual(published_cameras, {"front_door", "back_yard"})
finally:
os.unlink(config_path)
@patch("frigate.api.app.find_config_file")
def test_camera_specific_topic_only_updates_one_camera(self, mock_find_config):
"""config/cameras/front_door/detect only updates front_door."""
config_path = self._write_config_file()
mock_find_config.return_value = config_path
try:
app, mock_publisher = self._create_app_with_publisher()
with AuthTestClient(app) as client:
resp = client.put(
"/config/set",
json={
"config_data": {
"cameras": {"front_door": {"detect": {"fps": 20}}}
},
"update_topic": "config/cameras/front_door/detect",
"requires_restart": 0,
},
)
self.assertEqual(resp.status_code, 200)
# Only one camera updated
self.assertEqual(mock_publisher.publish_update.call_count, 1)
topic = mock_publisher.publish_update.call_args[0][0]
self.assertEqual(topic.camera, "front_door")
self.assertEqual(topic.update_type, CameraConfigUpdateEnum.detect)
# Global publisher should NOT be called
mock_publisher.publisher.publish.assert_not_called()
finally:
os.unlink(config_path)
@patch("frigate.api.app.find_config_file")
def test_wildcard_sends_merged_per_camera_config(self, mock_find_config):
"""Wildcard fan-out sends each camera's own merged config."""
config_path = self._write_config_file()
mock_find_config.return_value = config_path
try:
app, mock_publisher = self._create_app_with_publisher()
with AuthTestClient(app) as client:
resp = client.put(
"/config/set",
json={
"config_data": {"detect": {"fps": 15}},
"update_topic": "config/cameras/*/detect",
"requires_restart": 0,
},
)
self.assertEqual(resp.status_code, 200)
for c in mock_publisher.publish_update.call_args_list:
camera_detect_config = c[0][1]
self.assertIsNotNone(camera_detect_config)
self.assertTrue(hasattr(camera_detect_config, "fps"))
finally:
os.unlink(config_path)
@patch("frigate.api.app.find_config_file")
def test_non_camera_global_topic_uses_generic_publish(self, mock_find_config):
"""Non-camera topics (e.g. config/live) use the generic publisher."""
config_path = self._write_config_file()
mock_find_config.return_value = config_path
try:
app, mock_publisher = self._create_app_with_publisher()
with AuthTestClient(app) as client:
resp = client.put(
"/config/set",
json={
"config_data": {"live": {"height": 720}},
"update_topic": "config/live",
"requires_restart": 0,
},
)
self.assertEqual(resp.status_code, 200)
# Global topic publisher called
mock_publisher.publisher.publish.assert_called_once()
# Camera-level publish_update NOT called
mock_publisher.publish_update.assert_not_called()
finally:
os.unlink(config_path)
if __name__ == "__main__":
unittest.main()
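The tests above exercise the wildcard fan-out behavior of `config/set`: a topic like `config/cameras/*/detect` should publish one camera-level update per camera, while non-camera topics go through the generic publisher. The endpoint implementation is not part of this diff; the sketch below only illustrates the topic-expansion decision with assumed names:

```python
def expand_update_topic(update_topic: str, camera_names: list[str]) -> list[str]:
    """Expand a wildcard camera topic into per-camera topics; pass others through."""
    parts = update_topic.split("/")  # e.g. ["config", "cameras", "*", "detect"]
    if len(parts) == 4 and parts[1] == "cameras" and parts[2] == "*":
        return [f"config/cameras/{name}/{parts[3]}" for name in camera_names]
    return [update_topic]


assert expand_update_topic("config/cameras/*/detect", ["front_door", "back_yard"]) == [
    "config/cameras/front_door/detect",
    "config/cameras/back_yard/detect",
]
assert expand_update_topic("config/live", ["front_door"]) == ["config/live"]
```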

View File

@ -1,91 +0,0 @@
import unittest
import numpy as np
from frigate.config.camera.motion import MotionConfig
from frigate.motion.improved_motion import ImprovedMotionDetector
class TestImprovedMotionDetector(unittest.TestCase):
def setUp(self):
# small frame for testing; actual frames are grayscale
self.frame_shape = (100, 100) # height, width
self.config = MotionConfig()
# motion detector assumes a rasterized_mask attribute exists on config
# when update_mask() is called; add one manually by bypassing pydantic.
object.__setattr__(
self.config,
"rasterized_mask",
np.ones((self.frame_shape[0], self.frame_shape[1]), dtype=np.uint8),
)
# create minimal PTZ metrics stub to satisfy detector checks
class _Stub:
def __init__(self, value=False):
self.value = value
def is_set(self):
return bool(self.value)
class DummyPTZ:
def __init__(self):
self.autotracker_enabled = _Stub(False)
self.motor_stopped = _Stub(False)
self.stop_time = _Stub(0)
self.detector = ImprovedMotionDetector(
self.frame_shape, self.config, fps=30, ptz_metrics=DummyPTZ()
)
# establish a baseline frame (all zeros)
base_frame = np.zeros(
(self.frame_shape[0], self.frame_shape[1]), dtype=np.uint8
)
self.detector.detect(base_frame)
def _half_change_frame(self) -> np.ndarray:
"""Produce a frame where roughly half of the pixels are different."""
frame = np.zeros((self.frame_shape[0], self.frame_shape[1]), dtype=np.uint8)
# flip the top half to white
frame[: self.frame_shape[0] // 2, :] = 255
return frame
def test_skip_motion_threshold_default(self):
"""With the default (None) setting, motion should always be reported."""
frame = self._half_change_frame()
boxes = self.detector.detect(frame)
self.assertTrue(
boxes, "Expected motion boxes when skip threshold is unset (disabled)"
)
def test_skip_motion_threshold_applied(self):
"""Setting a low skip threshold should prevent any boxes from being returned."""
# change the config and update the detector reference
self.config.skip_motion_threshold = 0.4
self.detector.config = self.config
self.detector.update_mask()
frame = self._half_change_frame()
boxes = self.detector.detect(frame)
self.assertEqual(
boxes,
[],
"Motion boxes should be empty when scene change exceeds skip threshold",
)
def test_skip_motion_threshold_does_not_affect_calibration(self):
"""Even when skipping, the detector should go into calibrating state."""
self.config.skip_motion_threshold = 0.4
self.detector.config = self.config
self.detector.update_mask()
frame = self._half_change_frame()
_ = self.detector.detect(frame)
self.assertTrue(
self.detector.calibrating,
"Detector should be in calibrating state after skip event",
)
if __name__ == "__main__":
unittest.main()
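The tests above only assert behavior; for intuition, here is a rough sketch of the kind of scene-change check they exercise. The function name and pixel delta are illustrative, not taken from ImprovedMotionDetector, whose actual implementation may differ.

import numpy as np

def scene_change_fraction(prev: np.ndarray, curr: np.ndarray, pixel_delta: int = 25) -> float:
    """Return the fraction of pixels that changed by more than pixel_delta."""
    changed = np.abs(curr.astype(np.int16) - prev.astype(np.int16)) > pixel_delta
    return float(np.count_nonzero(changed)) / changed.size

# If this fraction exceeded skip_motion_threshold, the detector would return no
# motion boxes and enter its calibrating state, matching the assertions above.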

View File

@ -70,24 +70,6 @@ class TestCameraProfileConfig(unittest.TestCase):
profile = CameraProfileConfig()
assert profile.enabled is None
def test_zones_field(self):
"""Profile with zones override."""
profile = CameraProfileConfig(
zones={
"driveway": {
"coordinates": "0.1,0.1,0.9,0.1,0.9,0.9,0.1,0.9",
"objects": ["car"],
}
}
)
assert profile.zones is not None
assert "driveway" in profile.zones
def test_zones_default_none(self):
"""Zones defaults to None when not set."""
profile = CameraProfileConfig()
assert profile.zones is None
def test_none_sections_not_in_dump(self):
"""Sections left as None should not appear in exclude_unset dump."""
profile = CameraProfileConfig(detect={"enabled": False})
@ -398,81 +380,6 @@ class TestProfileManager(unittest.TestCase):
self.manager.activate_profile(None)
assert self.config.cameras["front"].enabled is True
@patch.object(ProfileManager, "_persist_active_profile")
def test_activate_profile_adds_zone(self, mock_persist):
"""Profile with zones adds/overrides zones on camera."""
from frigate.config.camera.profile import CameraProfileConfig
from frigate.config.camera.zone import ZoneConfig
self.config.cameras["front"].profiles["away"] = CameraProfileConfig(
zones={
"driveway": ZoneConfig(
coordinates="0.1,0.1,0.9,0.1,0.9,0.9,0.1,0.9",
objects=["car"],
)
}
)
self.manager = ProfileManager(self.config, self.mock_updater)
assert "driveway" not in self.config.cameras["front"].zones
err = self.manager.activate_profile("away")
assert err is None
assert "driveway" in self.config.cameras["front"].zones
@patch.object(ProfileManager, "_persist_active_profile")
def test_deactivate_restores_zones(self, mock_persist):
"""Deactivating a profile restores base zones."""
from frigate.config.camera.profile import CameraProfileConfig
from frigate.config.camera.zone import ZoneConfig
self.config.cameras["front"].profiles["away"] = CameraProfileConfig(
zones={
"driveway": ZoneConfig(
coordinates="0.1,0.1,0.9,0.1,0.9,0.9,0.1,0.9",
objects=["car"],
)
}
)
self.manager = ProfileManager(self.config, self.mock_updater)
self.manager.activate_profile("away")
assert "driveway" in self.config.cameras["front"].zones
self.manager.activate_profile(None)
assert "driveway" not in self.config.cameras["front"].zones
@patch.object(ProfileManager, "_persist_active_profile")
def test_zones_zmq_published(self, mock_persist):
"""ZMQ update is published for zones change."""
from frigate.config.camera.profile import CameraProfileConfig
from frigate.config.camera.updater import (
CameraConfigUpdateEnum,
CameraConfigUpdateTopic,
)
from frigate.config.camera.zone import ZoneConfig
self.config.cameras["front"].profiles["away"] = CameraProfileConfig(
zones={
"driveway": ZoneConfig(
coordinates="0.1,0.1,0.9,0.1,0.9,0.9,0.1,0.9",
objects=["car"],
)
}
)
self.manager = ProfileManager(self.config, self.mock_updater)
self.mock_updater.reset_mock()
self.manager.activate_profile("away")
zones_calls = [
call
for call in self.mock_updater.publish_update.call_args_list
if call[0][0]
== CameraConfigUpdateTopic(CameraConfigUpdateEnum.zones, "front")
]
assert len(zones_calls) == 1
@patch.object(ProfileManager, "_persist_active_profile")
def test_enabled_zmq_published(self, mock_persist):
"""ZMQ update is published for enabled state change."""

View File

@ -1,153 +0,0 @@
"""Utilities for cleaning up camera data from database and filesystem."""
import glob
import logging
import os
import shutil
from frigate.const import CLIPS_DIR, RECORD_DIR, THUMB_DIR
from frigate.models import (
Event,
Export,
Previews,
Recordings,
Regions,
ReviewSegment,
Timeline,
Trigger,
)
logger = logging.getLogger(__name__)
def cleanup_camera_db(
camera_name: str, delete_exports: bool = False
) -> tuple[dict[str, int], list[str]]:
"""Remove all database rows for a camera.
Args:
camera_name: The camera name to clean up
delete_exports: Whether to also delete export records
Returns:
Tuple of (deletion counts dict, list of export file paths to remove)
"""
counts: dict[str, int] = {}
export_paths: list[str] = []
try:
counts["events"] = Event.delete().where(Event.camera == camera_name).execute()
except Exception as e:
logger.error("Failed to delete events for camera %s: %s", camera_name, e)
try:
counts["timeline"] = (
Timeline.delete().where(Timeline.camera == camera_name).execute()
)
except Exception as e:
logger.error("Failed to delete timeline for camera %s: %s", camera_name, e)
try:
counts["recordings"] = (
Recordings.delete().where(Recordings.camera == camera_name).execute()
)
except Exception as e:
logger.error("Failed to delete recordings for camera %s: %s", camera_name, e)
try:
counts["review_segments"] = (
ReviewSegment.delete().where(ReviewSegment.camera == camera_name).execute()
)
except Exception as e:
logger.error(
"Failed to delete review segments for camera %s: %s", camera_name, e
)
try:
counts["previews"] = (
Previews.delete().where(Previews.camera == camera_name).execute()
)
except Exception as e:
logger.error("Failed to delete previews for camera %s: %s", camera_name, e)
try:
counts["regions"] = (
Regions.delete().where(Regions.camera == camera_name).execute()
)
except Exception as e:
logger.error("Failed to delete regions for camera %s: %s", camera_name, e)
try:
counts["triggers"] = (
Trigger.delete().where(Trigger.camera == camera_name).execute()
)
except Exception as e:
logger.error("Failed to delete triggers for camera %s: %s", camera_name, e)
if delete_exports:
try:
exports = Export.select(Export.video_path, Export.thumb_path).where(
Export.camera == camera_name
)
for export in exports:
export_paths.append(export.video_path)
export_paths.append(export.thumb_path)
counts["exports"] = (
Export.delete().where(Export.camera == camera_name).execute()
)
except Exception as e:
logger.error("Failed to delete exports for camera %s: %s", camera_name, e)
return counts, export_paths
def cleanup_camera_files(
camera_name: str, export_paths: list[str] | None = None
) -> None:
"""Remove filesystem artifacts for a camera.
Args:
camera_name: The camera name to clean up
export_paths: Optional list of export file paths to remove
"""
dirs_to_clean = [
os.path.join(RECORD_DIR, camera_name),
os.path.join(CLIPS_DIR, camera_name),
os.path.join(THUMB_DIR, camera_name),
os.path.join(CLIPS_DIR, "previews", camera_name),
]
for dir_path in dirs_to_clean:
if os.path.exists(dir_path):
try:
shutil.rmtree(dir_path)
logger.debug("Removed directory: %s", dir_path)
except Exception as e:
logger.error("Failed to remove %s: %s", dir_path, e)
# Remove event snapshot files
for snapshot in glob.glob(os.path.join(CLIPS_DIR, f"{camera_name}-*.jpg")):
try:
os.remove(snapshot)
except Exception as e:
logger.error("Failed to remove snapshot %s: %s", snapshot, e)
# Remove review thumbnail files
for thumb in glob.glob(
os.path.join(CLIPS_DIR, "review", f"thumb-{camera_name}-*.webp")
):
try:
os.remove(thumb)
except Exception as e:
logger.error("Failed to remove review thumbnail %s: %s", thumb, e)
# Remove export files if requested
if export_paths:
for path in export_paths:
if path and os.path.exists(path):
try:
os.remove(path)
logger.debug("Removed export file: %s", path)
except Exception as e:
logger.error("Failed to remove export file %s: %s", path, e)

View File

@ -151,9 +151,7 @@ def sync_recordings(
max_inserts = 1000
for batch in chunked(recordings_to_delete, max_inserts):
RecordingsToDelete.insert_many(
[{"id": r["id"]} for r in batch]
).execute()
RecordingsToDelete.insert_many(batch).execute()
try:
deleted = (

View File

@ -110,7 +110,6 @@ def ensure_torch_dependencies() -> bool:
"pip",
"install",
"--break-system-packages",
"setuptools<81",
"torch",
"torchvision",
],

View File

@ -214,11 +214,7 @@ class CameraWatchdog(threading.Thread):
self.config_subscriber = CameraConfigUpdateSubscriber(
None,
{config.name: config},
[
CameraConfigUpdateEnum.enabled,
CameraConfigUpdateEnum.ffmpeg,
CameraConfigUpdateEnum.record,
],
[CameraConfigUpdateEnum.enabled, CameraConfigUpdateEnum.record],
)
self.requestor = InterProcessRequestor()
self.was_enabled = self.config.enabled
@ -258,13 +254,9 @@ class CameraWatchdog(threading.Thread):
self._last_record_status = status
self._last_status_update_time = now
def _check_config_updates(self) -> dict[str, list[str]]:
"""Check for config updates and return the update dict."""
return self.config_subscriber.check_for_updates()
def _update_enabled_state(self) -> bool:
"""Fetch the latest config and update enabled state."""
self._check_config_updates()
self.config_subscriber.check_for_updates()
return self.config.enabled
def reset_capture_thread(
@ -325,24 +317,7 @@ class CameraWatchdog(threading.Thread):
# 1 second watchdog loop
while not self.stop_event.wait(1):
updates = self._check_config_updates()
# Handle ffmpeg config changes by restarting all ffmpeg processes
if "ffmpeg" in updates and self.config.enabled:
self.logger.debug(
"FFmpeg config updated for %s, restarting ffmpeg processes",
self.config.name,
)
self.stop_all_ffmpeg()
self.start_all_ffmpeg()
self.latest_valid_segment_time = 0
self.latest_invalid_segment_time = 0
self.latest_cache_segment_time = 0
self.record_enable_time = datetime.now().astimezone(timezone.utc)
last_restart_time = datetime.now().timestamp()
continue
enabled = self.config.enabled
enabled = self._update_enabled_state()
if enabled != self.was_enabled:
if enabled:
self.logger.debug(f"Enabling camera {self.config.name}")

View File

@ -1,34 +0,0 @@
"""Peewee migrations -- 035_add_motion_heatmap.py.
Some examples (model - class or model name)::
> Model = migrator.orm['model_name'] # Return model in current state by name
> migrator.sql(sql) # Run custom SQL
> migrator.python(func, *args, **kwargs) # Run python code
> migrator.create_model(Model) # Create a model (could be used as decorator)
> migrator.remove_model(model, cascade=True) # Remove a model
> migrator.add_fields(model, **fields) # Add fields to a model
> migrator.change_fields(model, **fields) # Change fields
> migrator.remove_fields(model, *field_names, cascade=True)
> migrator.rename_field(model, old_field_name, new_field_name)
> migrator.rename_table(model, new_table_name)
> migrator.add_index(model, *col_names, unique=False)
> migrator.drop_index(model, *col_names)
> migrator.add_not_null(model, *field_names)
> migrator.drop_not_null(model, *field_names)
> migrator.add_default(model, field_name, default)
"""
import peewee as pw
SQL = pw.SQL
def migrate(migrator, database, fake=False, **kwargs):
migrator.sql('ALTER TABLE "recordings" ADD COLUMN "motion_heatmap" TEXT NULL')
def rollback(migrator, database, fake=False, **kwargs):
pass
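The rollback above is intentionally a no-op. If a rollback were needed, a hedged sketch using the same migrator.sql helper could look like the following; it assumes a SQLite version (3.35+) that supports dropping columns.

def rollback(migrator, database, fake=False, **kwargs):
    # Assumption: the running SQLite supports ALTER TABLE ... DROP COLUMN.
    migrator.sql('ALTER TABLE "recordings" DROP COLUMN "motion_heatmap"')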

63
web/package-lock.json generated
View File

@ -22,7 +22,6 @@
"@radix-ui/react-hover-card": "^1.1.6",
"@radix-ui/react-label": "^2.1.2",
"@radix-ui/react-popover": "^1.1.6",
"@radix-ui/react-progress": "^1.1.8",
"@radix-ui/react-radio-group": "^1.2.3",
"@radix-ui/react-scroll-area": "^1.2.3",
"@radix-ui/react-select": "^2.1.6",
@ -2923,68 +2922,6 @@
}
}
},
"node_modules/@radix-ui/react-progress": {
"version": "1.1.8",
"resolved": "https://registry.npmjs.org/@radix-ui/react-progress/-/react-progress-1.1.8.tgz",
"integrity": "sha512-+gISHcSPUJ7ktBy9RnTqbdKW78bcGke3t6taawyZ71pio1JewwGSJizycs7rLhGTvMJYCQB1DBK4KQsxs7U8dA==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-context": "1.1.3",
"@radix-ui/react-primitive": "2.1.4"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-progress/node_modules/@radix-ui/react-context": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.3.tgz",
"integrity": "sha512-ieIFACdMpYfMEjF0rEf5KLvfVyIkOz6PDGyNnP+u+4xQ6jny3VCgA4OgXOwNx2aUkxn8zx9fiVcM8CfFYv9Lxw==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-progress/node_modules/@radix-ui/react-primitive": {
"version": "2.1.4",
"resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.4.tgz",
"integrity": "sha512-9hQc4+GNVtJAIEPEqlYqW5RiYdrr8ea5XQ0ZOnD6fgru+83kqT15mq2OCcbe8KnjRZl5vF3ks69AKz3kh1jrhg==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-slot": "1.2.4"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-radio-group": {
"version": "1.3.8",
"resolved": "https://registry.npmjs.org/@radix-ui/react-radio-group/-/react-radio-group-1.3.8.tgz",

View File

@ -28,7 +28,6 @@
"@radix-ui/react-hover-card": "^1.1.6",
"@radix-ui/react-label": "^2.1.2",
"@radix-ui/react-popover": "^1.1.6",
"@radix-ui/react-progress": "^1.1.8",
"@radix-ui/react-radio-group": "^1.2.3",
"@radix-ui/react-scroll-area": "^1.2.3",
"@radix-ui/react-select": "^2.1.6",

View File

@ -264,11 +264,7 @@
},
"lightning_threshold": {
"label": "Lightning threshold",
"description": "Threshold to detect and ignore brief lighting spikes (lower is more sensitive, values between 0.3 and 1.0). This does not prevent motion detection entirely; it merely causes the detector to stop analyzing additional frames once the threshold is exceeded. Motion-based recordings are still created during these events."
},
"skip_motion_threshold": {
"label": "Skip motion threshold",
"description": "If more than this fraction of the image changes in a single frame, the detector will return no motion boxes and immediately recalibrate. This can save CPU and reduce false positives during lightning, storms, etc., but may miss real events such as a PTZ camera autotracking an object. The tradeoff is between dropping a few megabytes of recordings versus reviewing a couple short clips. Range 0.0 to 1.0."
"description": "Threshold to detect and ignore brief lighting spikes (lower is more sensitive, values between 0.3 and 1.0)."
},
"improve_contrast": {
"label": "Improve contrast",
@ -868,8 +864,7 @@
"description": "A user-friendly name for the zone, displayed in the Frigate UI. If not set, a formatted version of the zone name will be used."
},
"enabled": {
"label": "Enabled",
"description": "Enable or disable this zone. Disabled zones are ignored at runtime."
"label": "Whether this zone is active. Disabled zones are ignored at runtime."
},
"enabled_in_config": {
"label": "Keep track of original state of zone."

View File

@ -1391,11 +1391,7 @@
},
"lightning_threshold": {
"label": "Lightning threshold",
"description": "Threshold to detect and ignore brief lighting spikes (lower is more sensitive, values between 0.3 and 1.0). This does not prevent motion detection entirely; it merely causes the detector to stop analyzing additional frames once the threshold is exceeded. Motion-based recordings are still created during these events."
},
"skip_motion_threshold": {
"label": "Skip motion threshold",
"description": "If more than this fraction of the image changes in a single frame, the detector will return no motion boxes and immediately recalibrate. This can save CPU and reduce false positives during lightning, storms, etc., but may miss real events such as a PTZ camera autotracking an object. The tradeoff is between dropping a few megabytes of recordings versus reviewing a couple short clips. Range 0.0 to 1.0."
"description": "Threshold to detect and ignore brief lighting spikes (lower is more sensitive, values between 0.3 and 1.0)."
},
"improve_contrast": {
"label": "Improve contrast",

View File

@ -61,25 +61,5 @@
"detected": "detected",
"normalActivity": "Normal",
"needsReview": "Needs review",
"securityConcern": "Security concern",
"motionSearch": {
"menuItem": "Motion search",
"openMenu": "Camera options"
},
"motionPreviews": {
"menuItem": "View motion previews",
"title": "Motion previews: {{camera}}",
"mobileSettingsTitle": "Motion Preview Settings",
"mobileSettingsDesc": "Adjust playback speed and dimming, and choose a date to review motion-only clips.",
"dim": "Dim",
"dimAria": "Adjust dimming intensity",
"dimDesc": "Increase dimming to increase motion area visibility.",
"speed": "Speed",
"speedAria": "Select preview playback speed",
"speedDesc": "Choose how quickly preview clips play.",
"back": "Back",
"empty": "No previews available",
"noPreview": "Preview unavailable",
"seekAria": "Seek {{camera}} player to {{time}}"
}
"securityConcern": "Security concern"
}

View File

@ -1,75 +0,0 @@
{
"documentTitle": "Motion Search - Frigate",
"title": "Motion Search",
"description": "Draw a polygon to define the region of interest, and specify a time range to search for motion changes within that region.",
"selectCamera": "Motion Search is loading",
"startSearch": "Start Search",
"searchStarted": "Search started",
"searchCancelled": "Search cancelled",
"cancelSearch": "Cancel",
"searching": "Search in progress.",
"searchComplete": "Search complete",
"noResultsYet": "Run a search to find motion changes in the selected region",
"noChangesFound": "No pixel changes detected in the selected region",
"changesFound_one": "Found {{count}} motion change",
"changesFound_other": "Found {{count}} motion changes",
"framesProcessed": "{{count}} frames processed",
"jumpToTime": "Jump to this time",
"results": "Results",
"showSegmentHeatmap": "Heatmap",
"newSearch": "New Search",
"clearResults": "Clear Results",
"clearROI": "Clear polygon",
"polygonControls": {
"points_one": "{{count}} point",
"points_other": "{{count}} points",
"undo": "Undo last point",
"reset": "Reset polygon"
},
"motionHeatmapLabel": "Motion Heatmap",
"dialog": {
"title": "Motion Search",
"cameraLabel": "Camera",
"previewAlt": "Camera preview for {{camera}}"
},
"timeRange": {
"title": "Search Range",
"start": "Start time",
"end": "End time"
},
"settings": {
"title": "Search Settings",
"parallelMode": "Parallel mode",
"parallelModeDesc": "Scan multiple recording segments at the same time (faster, but significantly more CPU intensive)",
"threshold": "Sensitivity Threshold",
"thresholdDesc": "Lower values detect smaller changes (1-255)",
"minArea": "Minimum Change Area",
"minAreaDesc": "Minimum percentage of the region of interest that must change to be considered significant",
"frameSkip": "Frame Skip",
"frameSkipDesc": "Process every Nth frame. Set this to your camera's frame rate to process one frame per second (e.g. 5 for a 5 FPS camera, 30 for a 30 FPS camera). Higher values will be faster, but may miss short motion events.",
"maxResults": "Maximum Results",
"maxResultsDesc": "Stop after this many matching timestamps"
},
"errors": {
"noCamera": "Please select a camera",
"noROI": "Please draw a region of interest",
"noTimeRange": "Please select a time range",
"invalidTimeRange": "End time must be after start time",
"searchFailed": "Search failed: {{message}}",
"polygonTooSmall": "Polygon must have at least 3 points",
"unknown": "Unknown error"
},
"changePercentage": "{{percentage}}% changed",
"metrics": {
"title": "Search Metrics",
"segmentsScanned": "Segments scanned",
"segmentsProcessed": "Processed",
"segmentsSkippedInactive": "Skipped (no activity)",
"segmentsSkippedHeatmap": "Skipped (no ROI overlap)",
"fallbackFullRange": "Fallback full-range scan",
"framesDecoded": "Frames decoded",
"wallTime": "Search time",
"segmentErrors": "Segment errors",
"seconds": "{{seconds}}s"
}
}

View File

@ -83,8 +83,7 @@
"triggers": "Triggers",
"debug": "Debug",
"frigateplus": "Frigate+",
"mediaSync": "Media sync",
"regionGrid": "Region grid"
"maintenance": "Maintenance"
},
"dialog": {
"unsavedChanges": {
@ -422,18 +421,6 @@
"cameraManagement": {
"title": "Manage Cameras",
"addCamera": "Add New Camera",
"deleteCamera": "Delete Camera",
"deleteCameraDialog": {
"title": "Delete Camera",
"description": "Deleting a camera will permanently remove all recordings, tracked objects, and configuration for that camera. Any go2rtc streams associated with this camera may still need to be manually removed.",
"selectPlaceholder": "Choose camera...",
"confirmTitle": "Are you sure?",
"confirmWarning": "Deleting <strong>{{cameraName}}</strong> cannot be undone.",
"deleteExports": "Also delete exports for this camera",
"confirmButton": "Delete Permanently",
"success": "Camera {{cameraName}} deleted successfully",
"error": "Failed to delete camera {{cameraName}}"
},
"editCamera": "Edit Camera:",
"selectCamera": "Select a Camera",
"backToSettings": "Back to Camera Settings",
@ -1245,16 +1232,6 @@
"previews": "Previews",
"exports": "Exports",
"recordings": "Recordings"
},
"regionGrid": {
"title": "Region Grid",
"desc": "The region grid is an optimization that learns where objects of different sizes typically appear in each camera's field of view. Frigate uses this data to efficiently size detection regions. The grid is automatically built over time from tracked object data.",
"clear": "Clear region grid",
"clearConfirmTitle": "Clear Region Grid",
"clearConfirmDesc": "Clearing the region grid is not recommended unless you have recently changed your detector model size or have changed your camera's physical position and are having object tracking issues. The grid will be automatically rebuilt over time as objects are tracked. A Frigate restart is required for changes to take effect.",
"clearSuccess": "Region grid cleared successfully",
"clearError": "Failed to clear region grid",
"restartRequired": "Restart required for region grid changes to take effect"
}
},
"configForm": {

View File

@ -11,6 +11,7 @@ import { Redirect } from "./components/navigation/Redirect";
import { cn } from "./lib/utils";
import { isPWA } from "./utils/isPWA";
import ProtectedRoute from "@/components/auth/ProtectedRoute";
import { AuthProvider } from "@/context/auth-context";
import useSWR from "swr";
import { FrigateConfig } from "./types/frigateConfig";
import ActivityIndicator from "@/components/indicators/activity-indicator";
@ -38,11 +39,13 @@ function App() {
return (
<Providers>
<BrowserRouter basename={window.baseUrl}>
<Wrapper>
{config?.safe_mode ? <SafeAppView /> : <DefaultAppView />}
</Wrapper>
</BrowserRouter>
<AuthProvider>
<BrowserRouter basename={window.baseUrl}>
<Wrapper>
{config?.safe_mode ? <SafeAppView /> : <DefaultAppView />}
</Wrapper>
</BrowserRouter>
</AuthProvider>
</Providers>
);
}
@ -82,13 +85,17 @@ function DefaultAppView() {
: "bottom-8 left-[52px]",
)}
>
<Suspense
fallback={
<ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />
}
>
<Suspense>
<Routes>
<Route element={<ProtectedRoute requiredRoles={mainRouteRoles} />}>
<Route
element={
mainRouteRoles ? (
<ProtectedRoute requiredRoles={mainRouteRoles} />
) : (
<ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />
)
}
>
<Route index element={<Live />} />
<Route path="/review" element={<Events />} />
<Route path="/explore" element={<Explore />} />

View File

@ -10,7 +10,7 @@ import {
export default function ProtectedRoute({
requiredRoles,
}: {
requiredRoles?: string[];
requiredRoles: string[];
}) {
const { auth } = useContext(AuthContext);
@ -36,13 +36,6 @@ export default function ProtectedRoute({
);
}
// Wait for config to provide required roles
if (!requiredRoles) {
return (
<ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />
);
}
if (auth.isLoading) {
return (
<ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />

View File

@ -25,7 +25,14 @@ const audio: SectionConfigOverrides = {
},
},
global: {
restartRequired: ["num_threads"],
restartRequired: [
"enabled",
"listen",
"filters",
"min_volume",
"max_not_heard",
"num_threads",
],
},
camera: {
restartRequired: ["num_threads"],

View File

@ -28,7 +28,10 @@ const birdseye: SectionConfigOverrides = {
"width",
"height",
"quality",
"mode",
"layout.scaling_factor",
"inactivity_threshold",
"layout.max_cameras",
"idle_heartbeat_fps",
],
uiSchema: {

View File

@ -3,7 +3,7 @@ import type { SectionConfigOverrides } from "./types";
const classification: SectionConfigOverrides = {
base: {
sectionDocs: "/configuration/custom_classification/object_classification",
restartRequired: ["bird.enabled"],
restartRequired: ["bird.enabled", "bird.threshold"],
hiddenFields: ["custom"],
advancedFields: [],
},

View File

@ -31,21 +31,18 @@ const detect: SectionConfigOverrides = {
},
global: {
restartRequired: [
"fps",
"enabled",
"width",
"height",
"fps",
"min_initialized",
"max_disappeared",
"annotation_offset",
"stationary",
],
},
camera: {
restartRequired: [
"fps",
"width",
"height",
"min_initialized",
"max_disappeared",
],
restartRequired: ["width", "height", "min_initialized", "max_disappeared"],
},
};

View File

@ -32,7 +32,18 @@ const faceRecognition: SectionConfigOverrides = {
"blur_confidence_filter",
"device",
],
restartRequired: ["enabled", "model_size", "device"],
restartRequired: [
"enabled",
"model_size",
"unknown_score",
"detection_threshold",
"recognition_threshold",
"min_area",
"min_faces",
"save_attempts",
"blur_confidence_filter",
"device",
],
},
};

View File

@ -116,7 +116,16 @@ const ffmpeg: SectionConfigOverrides = {
},
},
global: {
restartRequired: [],
restartRequired: [
"path",
"global_args",
"hwaccel_args",
"input_args",
"output_args",
"retry_interval",
"apple_compatibility",
"gpu",
],
fieldOrder: [
"hwaccel_args",
"path",
@ -153,7 +162,17 @@ const ffmpeg: SectionConfigOverrides = {
fieldGroups: {
cameraFfmpeg: ["input_args", "hwaccel_args", "output_args"],
},
restartRequired: [],
restartRequired: [
"inputs",
"path",
"global_args",
"hwaccel_args",
"input_args",
"output_args",
"retry_interval",
"apple_compatibility",
"gpu",
],
},
};

View File

@ -40,7 +40,21 @@ const lpr: SectionConfigOverrides = {
"device",
"replace_rules",
],
restartRequired: ["model_size", "enhancement", "device"],
restartRequired: [
"enabled",
"model_size",
"detection_threshold",
"min_area",
"recognition_threshold",
"min_plate_length",
"format",
"match_distance",
"known_plates",
"enhancement",
"debug_save_plates",
"device",
"replace_rules",
],
uiSchema: {
format: {
"ui:options": { size: "md" },

View File

@ -3,18 +3,11 @@ import type { SectionConfigOverrides } from "./types";
const motion: SectionConfigOverrides = {
base: {
sectionDocs: "/configuration/motion_detection",
fieldDocs: {
lightning_threshold:
"/configuration/motion_detection#lightning_threshold",
skip_motion_threshold:
"/configuration/motion_detection#skip_motion_on_large_scene_changes",
},
restartRequired: [],
fieldOrder: [
"enabled",
"threshold",
"lightning_threshold",
"skip_motion_threshold",
"improve_contrast",
"contour_area",
"delta_alpha",
@ -26,20 +19,9 @@ const motion: SectionConfigOverrides = {
sensitivity: ["enabled", "threshold", "contour_area"],
algorithm: ["improve_contrast", "delta_alpha", "frame_alpha"],
},
uiSchema: {
skip_motion_threshold: {
"ui:widget": "optionalField",
"ui:options": {
innerWidget: "range",
step: 0.05,
suppressMultiSchema: true,
},
},
},
hiddenFields: ["enabled_in_config", "mask", "raw_mask"],
advancedFields: [
"lightning_threshold",
"skip_motion_threshold",
"delta_alpha",
"frame_alpha",
"frame_height",
@ -47,7 +29,17 @@ const motion: SectionConfigOverrides = {
],
},
global: {
restartRequired: ["frame_height"],
restartRequired: [
"enabled",
"threshold",
"lightning_threshold",
"improve_contrast",
"contour_area",
"delta_alpha",
"frame_alpha",
"frame_height",
"mqtt_off_delay",
],
},
camera: {
restartRequired: ["frame_height"],
@ -74,7 +66,7 @@ const motion: SectionConfigOverrides = {
"frame_alpha",
"frame_height",
],
advancedFields: ["lightning_threshold", "skip_motion_threshold"],
advancedFields: ["lightning_threshold"],
},
};

View File

@ -83,7 +83,7 @@ const objects: SectionConfigOverrides = {
},
},
global: {
restartRequired: [],
restartRequired: ["track", "alert", "detect", "filters", "genai"],
hiddenFields: [
"enabled_in_config",
"mask",

View File

@ -29,7 +29,16 @@ const record: SectionConfigOverrides = {
},
},
global: {
restartRequired: [],
restartRequired: [
"enabled",
"expire_interval",
"continuous",
"motion",
"alerts",
"detections",
"preview",
"export",
],
},
camera: {
restartRequired: [],

View File

@ -44,7 +44,7 @@ const review: SectionConfigOverrides = {
},
},
global: {
restartRequired: [],
restartRequired: ["alerts", "detections", "genai"],
},
camera: {
restartRequired: [],

View File

@ -27,7 +27,14 @@ const snapshots: SectionConfigOverrides = {
},
},
global: {
restartRequired: [],
restartRequired: [
"enabled",
"bounding_box",
"crop",
"quality",
"timestamp",
"retain",
],
hiddenFields: ["enabled_in_config", "required_zones"],
},
camera: {

View File

@ -3,7 +3,14 @@ import type { SectionConfigOverrides } from "./types";
const telemetry: SectionConfigOverrides = {
base: {
sectionDocs: "/configuration/reference",
restartRequired: ["version_check"],
restartRequired: [
"network_interfaces",
"stats.amd_gpu_stats",
"stats.intel_gpu_stats",
"stats.intel_gpu_device",
"stats.network_bandwidth",
"version_check",
],
fieldOrder: ["network_interfaces", "stats", "version_check"],
advancedFields: [],
},

View File

@ -56,7 +56,6 @@ import ActivityIndicator from "@/components/indicators/activity-indicator";
import { StatusBarMessagesContext } from "@/context/statusbar-provider";
import {
cameraUpdateTopicMap,
globalCameraDefaultSections,
buildOverrides,
buildConfigDataForPath,
sanitizeSectionData as sharedSanitizeSectionData,
@ -235,10 +234,7 @@ export function ConfigSection({
? cameraUpdateTopicMap[sectionPath]
? `config/cameras/${cameraName}/${cameraUpdateTopicMap[sectionPath]}`
: undefined
: globalCameraDefaultSections.has(sectionPath) &&
cameraUpdateTopicMap[sectionPath]
? `config/cameras/*/${cameraUpdateTopicMap[sectionPath]}`
: `config/${sectionPath}`;
: `config/${sectionPath}`;
// Default: show title for camera level (since it might be collapsible), hide for global
const shouldShowTitle = showTitle ?? effectiveLevel === "camera";
@ -831,7 +827,7 @@ export function ConfigSection({
<div
className={cn(
"w-full border-t border-secondary bg-background pt-0",
"w-full border-t border-secondary bg-background pb-5 pt-0",
!noStickyButtons && "sticky bottom-0 z-50",
)}
>

View File

@ -26,7 +26,6 @@ import { FfmpegArgsWidget } from "./widgets/FfmpegArgsWidget";
import { InputRolesWidget } from "./widgets/InputRolesWidget";
import { TimezoneSelectWidget } from "./widgets/TimezoneSelectWidget";
import { CameraPathWidget } from "./widgets/CameraPathWidget";
import { OptionalFieldWidget } from "./widgets/OptionalFieldWidget";
import { FieldTemplate } from "./templates/FieldTemplate";
import { ObjectFieldTemplate } from "./templates/ObjectFieldTemplate";
@ -74,7 +73,6 @@ export const frigateTheme: FrigateTheme = {
audioLabels: AudioLabelSwitchesWidget,
zoneNames: ZoneSwitchesWidget,
timezoneSelect: TimezoneSelectWidget,
optionalField: OptionalFieldWidget,
},
templates: {
FieldTemplate: FieldTemplate as React.ComponentType<FieldTemplateProps>,

View File

@ -1,64 +0,0 @@
// Optional Field Widget - wraps any inner widget with an enable/disable switch
// Used for nullable fields where None means "disabled" (not the same as 0)
import type { WidgetProps } from "@rjsf/utils";
import { getWidget } from "@rjsf/utils";
import { Switch } from "@/components/ui/switch";
import { cn } from "@/lib/utils";
import { getNonNullSchema } from "../fields/nullableUtils";
export function OptionalFieldWidget(props: WidgetProps) {
const { id, value, disabled, readonly, onChange, schema, options, registry } =
props;
const innerWidgetName = (options.innerWidget as string) || undefined;
const isEnabled = value !== undefined && value !== null;
// Extract the non-null branch from anyOf [Type, null]
const innerSchema = getNonNullSchema(schema) ?? schema;
const InnerWidget = getWidget(innerSchema, innerWidgetName, registry.widgets);
const getDefaultValue = () => {
if (innerSchema.default !== undefined && innerSchema.default !== null) {
return innerSchema.default;
}
if (innerSchema.minimum !== undefined) {
return innerSchema.minimum;
}
if (innerSchema.type === "integer" || innerSchema.type === "number") {
return 0;
}
if (innerSchema.type === "string") {
return "";
}
return 0;
};
const handleToggle = (checked: boolean) => {
onChange(checked ? getDefaultValue() : undefined);
};
const innerProps: WidgetProps = {
...props,
schema: innerSchema,
disabled: disabled || readonly || !isEnabled,
value: isEnabled ? value : getDefaultValue(),
};
return (
<div className="flex items-center gap-3">
<Switch
id={`${id}-toggle`}
checked={isEnabled}
disabled={disabled || readonly}
onCheckedChange={handleToggle}
/>
<div
className={cn("flex-1", !isEnabled && "pointer-events-none opacity-40")}
>
<InnerWidget {...innerProps} />
</div>
</div>
);
}

View File

@ -20,7 +20,7 @@ export default function ActionsDropdown({
const { t } = useTranslation(["components/dialog", "views/replay", "common"]);
return (
<DropdownMenu modal={false}>
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button
className="flex items-center gap-2"

View File

@ -7,16 +7,10 @@ import axios from "axios";
import { useSWRConfig } from "swr";
import { toast } from "sonner";
import { Trans, useTranslation } from "react-i18next";
import { LuExternalLink, LuInfo, LuMinus, LuPlus } from "react-icons/lu";
import { LuInfo } from "react-icons/lu";
import { cn } from "@/lib/utils";
import { isMobile } from "react-device-detect";
import { useIsAdmin } from "@/hooks/use-is-admin";
import { useDocDomain } from "@/hooks/use-doc-domain";
import { Link } from "react-router-dom";
const OFFSET_MIN = -2500;
const OFFSET_MAX = 2500;
const OFFSET_STEP = 50;
type Props = {
className?: string;
@ -25,7 +19,6 @@ type Props = {
export default function AnnotationOffsetSlider({ className }: Props) {
const { annotationOffset, setAnnotationOffset, camera } = useDetailStream();
const isAdmin = useIsAdmin();
const { getLocaleDocUrl } = useDocDomain();
const { mutate } = useSWRConfig();
const { t } = useTranslation(["views/explore"]);
const [isSaving, setIsSaving] = useState(false);
@ -39,16 +32,6 @@ export default function AnnotationOffsetSlider({ className }: Props) {
[setAnnotationOffset],
);
const stepOffset = useCallback(
(delta: number) => {
setAnnotationOffset((prev) => {
const next = prev + delta;
return Math.max(OFFSET_MIN, Math.min(OFFSET_MAX, next));
});
},
[setAnnotationOffset],
);
const reset = useCallback(() => {
setAnnotationOffset(0);
}, [setAnnotationOffset]);
@ -89,18 +72,11 @@ export default function AnnotationOffsetSlider({ className }: Props) {
return (
<div
className={cn(
"flex flex-col gap-1.5",
"flex flex-col gap-0.5",
isMobile && "landscape:gap-3",
className,
)}
>
<div className="flex items-center gap-2 text-sm">
<span>{t("trackingDetails.annotationSettings.offset.label")}:</span>
<span className="font-mono tabular-nums text-primary-variant">
{annotationOffset > 0 ? "+" : ""}
{annotationOffset}ms
</span>
</div>
<div
className={cn(
"flex items-center gap-3",
@ -108,81 +84,57 @@ export default function AnnotationOffsetSlider({ className }: Props) {
"landscape:flex-col landscape:items-start landscape:gap-4",
)}
>
<Button
type="button"
variant="outline"
size="icon"
className="size-8 shrink-0"
aria-label="-50ms"
onClick={() => stepOffset(-OFFSET_STEP)}
disabled={annotationOffset <= OFFSET_MIN}
>
<LuMinus className="size-4" />
</Button>
<div className="flex max-w-28 flex-row items-center gap-2 text-sm md:max-w-48">
<span className="max-w-24 md:max-w-44">
{t("trackingDetails.annotationSettings.offset.label")}:
</span>
<span className="text-primary-variant">{annotationOffset}</span>
</div>
<div className="w-full flex-1 landscape:flex">
<Slider
value={[annotationOffset]}
min={OFFSET_MIN}
max={OFFSET_MAX}
step={OFFSET_STEP}
min={-2500}
max={2500}
step={50}
onValueChange={handleChange}
/>
</div>
<Button
type="button"
variant="outline"
size="icon"
className="size-8 shrink-0"
aria-label="+50ms"
onClick={() => stepOffset(OFFSET_STEP)}
disabled={annotationOffset >= OFFSET_MAX}
>
<LuPlus className="size-4" />
</Button>
<div className="flex items-center gap-2">
<Button size="sm" variant="ghost" onClick={reset}>
{t("button.reset", { ns: "common" })}
</Button>
{isAdmin && (
<Button size="sm" onClick={save} disabled={isSaving}>
{isSaving
? t("button.saving", { ns: "common" })
: t("button.save", { ns: "common" })}
</Button>
)}
</div>
</div>
<div className="flex items-start gap-1.5 text-xs text-muted-foreground">
<div
className={cn(
"flex items-center gap-2 text-xs text-muted-foreground",
isMobile && "landscape:flex-col landscape:items-start",
)}
>
<Trans ns="views/explore">
trackingDetails.annotationSettings.offset.millisecondsToOffset
</Trans>
<Popover>
<PopoverTrigger asChild>
<button
className="mt-px shrink-0 focus:outline-none"
className="focus:outline-none"
aria-label={t("trackingDetails.annotationSettings.offset.tips")}
>
<LuInfo className="size-3.5" />
<LuInfo className="size-4" />
</button>
</PopoverTrigger>
<PopoverContent className="w-80 text-sm">
{t("trackingDetails.annotationSettings.offset.tips")}
<div className="mt-2 flex items-center text-primary-variant">
<Link
to={getLocaleDocUrl(
"troubleshooting/dummy-camera#annotation-offset",
)}
target="_blank"
rel="noopener noreferrer"
className="inline"
>
{t("readTheDocumentation", { ns: "common" })}
<LuExternalLink className="ml-2 inline-flex size-3" />
</Link>
</div>
</PopoverContent>
</Popover>
</div>
<div className="flex items-center justify-end gap-2">
<Button size="sm" variant="ghost" onClick={reset}>
{t("button.reset", { ns: "common" })}
</Button>
{isAdmin && (
<Button size="sm" onClick={save} disabled={isSaving}>
{isSaving
? t("button.saving", { ns: "common" })
: t("button.save", { ns: "common" })}
</Button>
)}
</div>
</div>
);
}

View File

@ -1,23 +1,31 @@
import { Event } from "@/types/event";
import { FrigateConfig } from "@/types/frigateConfig";
import { zodResolver } from "@hookform/resolvers/zod";
import axios from "axios";
import { useCallback, useState } from "react";
import { LuExternalLink, LuMinus, LuPlus } from "react-icons/lu";
import { useForm } from "react-hook-form";
import { LuExternalLink } from "react-icons/lu";
import { Link } from "react-router-dom";
import { toast } from "sonner";
import useSWR from "swr";
import {
Form,
FormControl,
FormDescription,
FormField,
FormItem,
FormLabel,
FormMessage,
} from "@/components/ui/form";
import { z } from "zod";
import { Button } from "@/components/ui/button";
import ActivityIndicator from "@/components/indicators/activity-indicator";
import { Input } from "@/components/ui/input";
import { Separator } from "@/components/ui/separator";
import { Slider } from "@/components/ui/slider";
import { Trans, useTranslation } from "react-i18next";
import { useDocDomain } from "@/hooks/use-doc-domain";
import { useIsAdmin } from "@/hooks/use-is-admin";
const OFFSET_MIN = -2500;
const OFFSET_MAX = 2500;
const OFFSET_STEP = 50;
type AnnotationSettingsPaneProps = {
event: Event;
annotationOffset: number;
@ -37,69 +45,93 @@ export function AnnotationSettingsPane({
const [isLoading, setIsLoading] = useState(false);
const handleSliderChange = useCallback(
(values: number[]) => {
if (!values || values.length === 0) return;
setAnnotationOffset(values[0]);
const formSchema = z.object({
annotationOffset: z.coerce.number().optional().or(z.literal("")),
});
const form = useForm<z.infer<typeof formSchema>>({
resolver: zodResolver(formSchema),
mode: "onChange",
defaultValues: {
annotationOffset: annotationOffset,
},
[setAnnotationOffset],
);
});
const stepOffset = useCallback(
(delta: number) => {
setAnnotationOffset((prev) => {
const next = prev + delta;
return Math.max(OFFSET_MIN, Math.min(OFFSET_MAX, next));
});
},
[setAnnotationOffset],
);
const reset = useCallback(() => {
setAnnotationOffset(0);
}, [setAnnotationOffset]);
const saveToConfig = useCallback(async () => {
if (!config || !event) return;
setIsLoading(true);
try {
const res = await axios.put(
`config/set?cameras.${event.camera}.detect.annotation_offset=${annotationOffset}`,
{ requires_restart: 0 },
);
if (res.status === 200) {
toast.success(
t("trackingDetails.annotationSettings.offset.toast.success", {
camera: event.camera,
}),
{ position: "top-center" },
);
updateConfig();
} else {
toast.error(
t("toast.save.error.title", {
errorMessage: res.statusText,
ns: "common",
}),
{ position: "top-center" },
);
const saveToConfig = useCallback(
async (annotation_offset: number | string) => {
if (!config || !event) {
return;
}
} catch (error: unknown) {
const err = error as {
response?: { data?: { message?: string; detail?: string } };
};
const errorMessage =
err?.response?.data?.message ||
err?.response?.data?.detail ||
"Unknown error";
toast.error(t("toast.save.error.title", { errorMessage, ns: "common" }), {
position: "top-center",
});
} finally {
setIsLoading(false);
axios
.put(
`config/set?cameras.${event?.camera}.detect.annotation_offset=${annotation_offset}`,
{
requires_restart: 0,
},
)
.then((res) => {
if (res.status === 200) {
toast.success(
t("trackingDetails.annotationSettings.offset.toast.success", {
camera: event?.camera,
}),
{
position: "top-center",
},
);
updateConfig();
} else {
toast.error(
t("toast.save.error.title", {
errorMessage: res.statusText,
ns: "common",
}),
{
position: "top-center",
},
);
}
})
.catch((error) => {
const errorMessage =
error.response?.data?.message ||
error.response?.data?.detail ||
"Unknown error";
toast.error(
t("toast.save.error.title", { errorMessage, ns: "common" }),
{
position: "top-center",
},
);
})
.finally(() => {
setIsLoading(false);
});
},
[updateConfig, config, event, t],
);
function onSubmit(values: z.infer<typeof formSchema>) {
if (!values || values.annotationOffset == null || !config) {
return;
}
}, [annotationOffset, config, event, updateConfig, t]);
setIsLoading(true);
saveToConfig(values.annotationOffset);
}
function onApply(values: z.infer<typeof formSchema>) {
if (
!values ||
values.annotationOffset === null ||
values.annotationOffset === "" ||
!config
) {
return;
}
setAnnotationOffset(values.annotationOffset ?? 0);
}
return (
<div className="p-4">
@ -108,100 +140,91 @@ export function AnnotationSettingsPane({
</div>
<Separator className="mb-4 flex bg-secondary" />
<div className="flex flex-col gap-4">
<div className="flex flex-col gap-1">
<div className="text-sm font-medium">
{t("trackingDetails.annotationSettings.offset.label")}
</div>
<div className="text-sm text-muted-foreground">
<Trans ns="views/explore">
trackingDetails.annotationSettings.offset.millisecondsToOffset
</Trans>
</div>
</div>
<div className="flex items-center gap-3">
<Button
type="button"
variant="outline"
size="icon"
className="size-8 shrink-0"
aria-label="-50ms"
onClick={() => stepOffset(-OFFSET_STEP)}
disabled={annotationOffset <= OFFSET_MIN}
>
<LuMinus className="size-4" />
</Button>
<Slider
value={[annotationOffset]}
min={OFFSET_MIN}
max={OFFSET_MAX}
step={OFFSET_STEP}
onValueChange={handleSliderChange}
className="flex-1"
/>
<Button
type="button"
variant="outline"
size="icon"
className="size-8 shrink-0"
aria-label="+50ms"
onClick={() => stepOffset(OFFSET_STEP)}
disabled={annotationOffset >= OFFSET_MAX}
>
<LuPlus className="size-4" />
</Button>
</div>
<div className="flex items-center justify-between">
<span className="font-mono text-sm tabular-nums text-primary-variant">
{annotationOffset > 0 ? "+" : ""}
{annotationOffset}ms
</span>
<Button type="button" variant="ghost" size="sm" onClick={reset}>
{t("button.reset", { ns: "common" })}
</Button>
</div>
<div className="text-sm text-secondary-foreground">
{t("trackingDetails.annotationSettings.offset.tips")}
<div className="mt-2 flex items-center text-primary-variant">
<Link
to={getLocaleDocUrl(
"troubleshooting/dummy-camera#annotation-offset",
)}
target="_blank"
rel="noopener noreferrer"
className="inline"
>
{t("readTheDocumentation", { ns: "common" })}
<LuExternalLink className="ml-2 inline-flex size-3" />
</Link>
</div>
</div>
{isAdmin && (
<>
<Separator className="bg-secondary" />
<Button
variant="select"
aria-label={t("button.save", { ns: "common" })}
disabled={isLoading}
onClick={saveToConfig}
>
{isLoading ? (
<div className="flex flex-row items-center gap-2">
<ActivityIndicator />
<span>{t("button.saving", { ns: "common" })}</span>
<Form {...form}>
<form
onSubmit={form.handleSubmit(onSubmit)}
className="flex flex-1 flex-col space-y-3"
>
<FormField
control={form.control}
name="annotationOffset"
render={({ field }) => (
<>
<FormItem className="flex flex-row items-start justify-between space-x-2">
<div className="flex flex-col gap-1">
<FormLabel>
{t("trackingDetails.annotationSettings.offset.label")}
</FormLabel>
<FormDescription>
<Trans ns="views/explore">
trackingDetails.annotationSettings.offset.millisecondsToOffset
</Trans>
<FormMessage />
</FormDescription>
</div>
<div className="flex flex-col gap-3">
<div className="min-w-24">
<FormControl>
<Input
className="text-md w-full border border-input bg-background p-2 text-center hover:bg-accent hover:text-accent-foreground dark:[color-scheme:dark]"
placeholder="0"
{...field}
/>
</FormControl>
</div>
</div>
</FormItem>
<div className="mt-1 text-sm text-secondary-foreground">
{t("trackingDetails.annotationSettings.offset.tips")}
<div className="mt-2 flex items-center text-primary-variant">
<Link
to={getLocaleDocUrl("configuration/reference")}
target="_blank"
rel="noopener noreferrer"
className="inline"
>
{t("readTheDocumentation", { ns: "common" })}
<LuExternalLink className="ml-2 inline-flex size-3" />
</Link>
</div>
</div>
) : (
t("button.save", { ns: "common" })
</>
)}
/>
<div className="flex flex-1 flex-col justify-end">
<div className="flex flex-row gap-2 pt-5">
<Button
className="flex flex-1"
variant="default"
aria-label={t("button.apply", { ns: "common" })}
type="button"
onClick={form.handleSubmit(onApply)}
>
{t("button.apply", { ns: "common" })}
</Button>
{isAdmin && (
<Button
variant="select"
aria-label={t("button.save", { ns: "common" })}
disabled={isLoading}
className="flex flex-1"
type="submit"
>
{isLoading ? (
<div className="flex flex-row items-center gap-2">
<ActivityIndicator />
<span>{t("button.saving", { ns: "common" })}</span>
</div>
) : (
t("button.save", { ns: "common" })
)}
</Button>
)}
</Button>
</>
)}
</div>
</div>
</div>
</form>
</Form>
</div>
);
}

View File

@ -323,7 +323,6 @@ function DialogContentComponent({
<TrackingDetails
className={cn(isDesktop ? "size-full" : "flex flex-col gap-4")}
event={search as unknown as Event}
isAnnotationSettingsOpen={isPopoverOpen}
tabs={
isDesktop ? (
<TabsWithActions
@ -496,15 +495,6 @@ export default function SearchDetailDialog({
}
}, [search]);
useEffect(() => {
if (!isDesktop || !onPrevious || !onNext) {
setShowNavigationButtons(false);
return;
}
setShowNavigationButtons(isOpen);
}, [isOpen, onNext, onPrevious]);
// show/hide annotation settings is handled inside TabsWithActions
const searchTabs = useMemo(() => {

View File

@ -47,14 +47,12 @@ type TrackingDetailsProps = {
event: Event;
fullscreen?: boolean;
tabs?: React.ReactNode;
isAnnotationSettingsOpen?: boolean;
};
export function TrackingDetails({
className,
event,
tabs,
isAnnotationSettingsOpen = false,
}: TrackingDetailsProps) {
const videoRef = useRef<HTMLVideoElement | null>(null);
const { t } = useTranslation(["views/explore"]);
@ -71,14 +69,6 @@ export function TrackingDetails({
// user (eg, clicking a lifecycle row). When null we display `currentTime`.
const [manualOverride, setManualOverride] = useState<number | null>(null);
// Capture the annotation offset used for building the video source URL.
// This only updates when the event changes, NOT on every slider drag,
// so the HLS player doesn't reload while the user is adjusting the offset.
const sourceOffsetRef = useRef(annotationOffset);
useEffect(() => {
sourceOffsetRef.current = annotationOffset;
}, [event.id]); // eslint-disable-line react-hooks/exhaustive-deps
// event.start_time is detect time, convert to record, then subtract padding
const [currentTime, setCurrentTime] = useState(
(event.start_time ?? 0) + annotationOffset / 1000 - REVIEW_PADDING,
@ -100,19 +90,14 @@ export function TrackingDetails({
const { data: config } = useSWR<FrigateConfig>("config");
// Fetch recording segments for the event's time range to handle motion-only gaps.
// Use the source offset (stable per event) so recordings don't refetch on every
// slider drag while adjusting annotation offset.
// Fetch recording segments for the event's time range to handle motion-only gaps
const eventStartRecord = useMemo(
() => (event.start_time ?? 0) + sourceOffsetRef.current / 1000,
// eslint-disable-next-line react-hooks/exhaustive-deps
[event.start_time, event.id],
() => (event.start_time ?? 0) + annotationOffset / 1000,
[event.start_time, annotationOffset],
);
const eventEndRecord = useMemo(
() =>
(event.end_time ?? Date.now() / 1000) + sourceOffsetRef.current / 1000,
// eslint-disable-next-line react-hooks/exhaustive-deps
[event.end_time, event.id],
() => (event.end_time ?? Date.now() / 1000) + annotationOffset / 1000,
[event.end_time, annotationOffset],
);
const { data: recordings } = useSWR<Recording[]>(
@ -313,53 +298,6 @@ export function TrackingDetails({
setSelectedObjectIds([event.id]);
}, [event.id, setSelectedObjectIds]);
// When the annotation settings popover is open, pin the video to a specific
// lifecycle event (detect-stream timestamp). As the user drags the offset
// slider, the video re-seeks to show the recording frame at
// pinnedTimestamp + newOffset, while the bounding box stays fixed at the
// pinned detect timestamp. This lets the user visually align the box to
// the car in the video.
const pinnedDetectTimestampRef = useRef<number | null>(null);
const wasAnnotationOpenRef = useRef(false);
// On popover open: pause, pin first lifecycle item, and seek.
useEffect(() => {
if (isAnnotationSettingsOpen && !wasAnnotationOpenRef.current) {
if (videoRef.current && displaySource === "video") {
videoRef.current.pause();
}
if (eventSequence && eventSequence.length > 0) {
pinnedDetectTimestampRef.current = eventSequence[0].timestamp;
}
}
if (!isAnnotationSettingsOpen) {
pinnedDetectTimestampRef.current = null;
}
wasAnnotationOpenRef.current = isAnnotationSettingsOpen;
}, [isAnnotationSettingsOpen, displaySource, eventSequence]);
// When the pinned timestamp or offset changes, re-seek the video and
// explicitly update currentTime so the overlay shows the pinned event's box.
useEffect(() => {
const pinned = pinnedDetectTimestampRef.current;
if (!isAnnotationSettingsOpen || pinned == null) return;
if (!videoRef.current || displaySource !== "video") return;
const targetTimeRecord = pinned + annotationOffset / 1000;
const relativeTime = timestampToVideoTime(targetTimeRecord);
videoRef.current.currentTime = relativeTime;
// Explicitly update currentTime state so the overlay's effectiveCurrentTime
// resolves back to the pinned detect timestamp:
// effectiveCurrentTime = targetTimeRecord - annotationOffset/1000 = pinned
setCurrentTime(targetTimeRecord);
}, [
isAnnotationSettingsOpen,
annotationOffset,
displaySource,
timestampToVideoTime,
]);
const handleLifecycleClick = useCallback(
(item: TrackingDetailsSequence) => {
if (!videoRef.current && !imgRef.current) return;
@ -515,23 +453,19 @@ export function TrackingDetails({
const videoSource = useMemo(() => {
// event.start_time and event.end_time are in DETECT stream time
// Convert to record stream time, then create video clip with padding.
// Use sourceOffsetRef (stable per event) so the HLS player doesn't
// reload while the user is dragging the annotation offset slider.
const sourceOffset = sourceOffsetRef.current;
const eventStartRec = event.start_time + sourceOffset / 1000;
const eventEndRec =
(event.end_time ?? Date.now() / 1000) + sourceOffset / 1000;
const startTime = eventStartRec - REVIEW_PADDING;
const endTime = eventEndRec + REVIEW_PADDING;
// Convert to record stream time, then create video clip with padding
const eventStartRecord = event.start_time + annotationOffset / 1000;
const eventEndRecord =
(event.end_time ?? Date.now() / 1000) + annotationOffset / 1000;
const startTime = eventStartRecord - REVIEW_PADDING;
const endTime = eventEndRecord + REVIEW_PADDING;
const playlist = `${baseUrl}vod/clip/${event.camera}/start/${startTime}/end/${endTime}/index.m3u8`;
return {
playlist,
startPosition: 0,
};
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [event]);
}, [event, annotationOffset]);
// Determine camera aspect ratio category
const cameraAspect = useMemo(() => {

View File

@ -1,215 +0,0 @@
import { useCallback, useState } from "react";
import { useTranslation } from "react-i18next";
import { Trans } from "react-i18next";
import axios from "axios";
import { toast } from "sonner";
import {
Dialog,
DialogContent,
DialogDescription,
DialogFooter,
DialogHeader,
DialogTitle,
} from "@/components/ui/dialog";
import { Button } from "@/components/ui/button";
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from "@/components/ui/select";
import { Label } from "@/components/ui/label";
import ActivityIndicator from "@/components/indicators/activity-indicator";
import { Switch } from "@/components/ui/switch";
type DeleteCameraDialogProps = {
show: boolean;
cameras: string[];
onClose: () => void;
onDeleted: () => void;
};
export default function DeleteCameraDialog({
show,
cameras,
onClose,
onDeleted,
}: DeleteCameraDialogProps) {
const { t } = useTranslation(["views/settings", "common"]);
const [phase, setPhase] = useState<"select" | "confirm">("select");
const [selectedCamera, setSelectedCamera] = useState<string>("");
const [deleteExports, setDeleteExports] = useState(false);
const [isDeleting, setIsDeleting] = useState(false);
const handleClose = useCallback(() => {
if (isDeleting) return;
setPhase("select");
setSelectedCamera("");
setDeleteExports(false);
onClose();
}, [isDeleting, onClose]);
const handleDelete = useCallback(() => {
setPhase("confirm");
}, []);
const handleBack = useCallback(() => {
setPhase("select");
}, []);
const handleConfirmDelete = useCallback(async () => {
if (!selectedCamera || isDeleting) return;
setIsDeleting(true);
try {
await axios.delete(
`cameras/${selectedCamera}?delete_exports=${deleteExports}`,
);
toast.success(
t("cameraManagement.deleteCameraDialog.success", {
cameraName: selectedCamera,
}),
{ position: "top-center" },
);
setPhase("select");
setSelectedCamera("");
setDeleteExports(false);
onDeleted();
} catch (error) {
const errorMessage =
axios.isAxiosError(error) &&
(error.response?.data?.message || error.response?.data?.detail)
? error.response?.data?.message || error.response?.data?.detail
: t("cameraManagement.deleteCameraDialog.error", {
cameraName: selectedCamera,
});
toast.error(errorMessage, { position: "top-center" });
} finally {
setIsDeleting(false);
}
}, [selectedCamera, deleteExports, isDeleting, onDeleted, t]);
return (
<Dialog open={show} onOpenChange={handleClose}>
<DialogContent className="sm:max-w-[425px]">
{phase === "select" ? (
<>
<DialogHeader>
<DialogTitle>
{t("cameraManagement.deleteCameraDialog.title")}
</DialogTitle>
<DialogDescription>
{t("cameraManagement.deleteCameraDialog.description")}
</DialogDescription>
</DialogHeader>
<Select value={selectedCamera} onValueChange={setSelectedCamera}>
<SelectTrigger>
<SelectValue
placeholder={t(
"cameraManagement.deleteCameraDialog.selectPlaceholder",
)}
/>
</SelectTrigger>
<SelectContent>
{cameras.map((camera) => (
<SelectItem key={camera} value={camera}>
{camera}
</SelectItem>
))}
</SelectContent>
</Select>
<DialogFooter className="flex gap-3 sm:justify-end">
<div className="flex flex-1 flex-col justify-end">
<div className="flex flex-row gap-2 pt-5">
<Button
className="flex flex-1"
aria-label={t("button.cancel", { ns: "common" })}
onClick={handleClose}
type="button"
>
{t("button.cancel", { ns: "common" })}
</Button>
<Button
variant="destructive"
aria-label={t("button.delete", { ns: "common" })}
className="flex flex-1 text-white"
onClick={handleDelete}
disabled={!selectedCamera}
>
{t("button.delete", { ns: "common" })}
</Button>
</div>
</div>
</DialogFooter>
</>
) : (
<>
<DialogHeader>
<DialogTitle>
{t("cameraManagement.deleteCameraDialog.confirmTitle")}
</DialogTitle>
<DialogDescription>
<Trans
ns="views/settings"
values={{ cameraName: selectedCamera }}
components={{ strong: <span className="font-medium" /> }}
>
cameraManagement.deleteCameraDialog.confirmWarning
</Trans>
</DialogDescription>
</DialogHeader>
<div className="flex items-center space-x-2">
<Switch
id="delete-exports"
checked={deleteExports}
onCheckedChange={(checked) =>
setDeleteExports(checked === true)
}
/>
<Label htmlFor="delete-exports" className="cursor-pointer">
{t("cameraManagement.deleteCameraDialog.deleteExports")}
</Label>
</div>
<DialogFooter className="flex gap-3 sm:justify-end">
<div className="flex flex-1 flex-col justify-end">
<div className="flex flex-row gap-2 pt-5">
<Button
className="flex flex-1"
aria-label={t("button.back", { ns: "common" })}
onClick={handleBack}
type="button"
disabled={isDeleting}
>
{t("button.back", { ns: "common" })}
</Button>
<Button
variant="destructive"
className="flex flex-1 text-white"
onClick={handleConfirmDelete}
disabled={isDeleting}
>
{isDeleting ? (
<div className="flex flex-row items-center gap-2">
<ActivityIndicator />
<span>
{t(
"cameraManagement.deleteCameraDialog.confirmButton",
)}
</span>
</div>
) : (
t("cameraManagement.deleteCameraDialog.confirmButton")
)}
</Button>
</div>
</div>
</DialogFooter>
</>
)}
</DialogContent>
</Dialog>
);
}

View File

@ -1,6 +1,5 @@
import {
MutableRefObject,
ReactNode,
useCallback,
useEffect,
useRef,
@ -58,7 +57,6 @@ type HlsVideoPlayerProps = {
isDetailMode?: boolean;
camera?: string;
currentTimeOverride?: number;
transformedOverlay?: ReactNode;
};
export default function HlsVideoPlayer({
@ -83,7 +81,6 @@ export default function HlsVideoPlayer({
isDetailMode = false,
camera,
currentTimeOverride,
transformedOverlay,
}: HlsVideoPlayerProps) {
const { t } = useTranslation("components/player");
const { data: config } = useSWR<FrigateConfig>("config");
@ -353,162 +350,157 @@ export default function HlsVideoPlayer({
height: isMobile ? "100%" : undefined,
}}
>
<div className="relative size-full">
{transformedOverlay}
{isDetailMode &&
camera &&
currentTime &&
loadedMetadata &&
videoDimensions.width > 0 &&
videoDimensions.height > 0 && (
<div
className={cn(
"absolute inset-0 z-50",
isDesktop
? "size-full"
: "mx-auto flex items-center justify-center portrait:max-h-[50dvh]",
)}
style={{
aspectRatio: `${videoDimensions.width} / ${videoDimensions.height}`,
}}
>
<ObjectTrackOverlay
key={`overlay-${currentTime}`}
camera={camera}
showBoundingBoxes={!isPlaying}
currentTime={currentTime}
videoWidth={videoDimensions.width}
videoHeight={videoDimensions.height}
className="absolute inset-0 z-10"
onSeekToTime={(timestamp, play) => {
if (onSeekToTime) {
onSeekToTime(timestamp, play);
}
}}
/>
</div>
)}
<video
ref={videoRef}
className={`size-full rounded-lg bg-black md:rounded-2xl ${loadedMetadata ? "" : "invisible"} cursor-pointer`}
preload="auto"
autoPlay
controls={!frigateControls}
playsInline
muted={muted}
onClick={
isDesktop
? () => {
if (zoomScale == 1.0) onPlayPause(!isPlaying);
{isDetailMode &&
camera &&
currentTime &&
loadedMetadata &&
videoDimensions.width > 0 &&
videoDimensions.height > 0 && (
<div
className={cn(
"absolute inset-0 z-50",
isDesktop
? "size-full"
: "mx-auto flex items-center justify-center portrait:max-h-[50dvh]",
)}
style={{
aspectRatio: `${videoDimensions.width} / ${videoDimensions.height}`,
}}
>
<ObjectTrackOverlay
key={`overlay-${currentTime}`}
camera={camera}
showBoundingBoxes={!isPlaying}
currentTime={currentTime}
videoWidth={videoDimensions.width}
videoHeight={videoDimensions.height}
className="absolute inset-0 z-10"
onSeekToTime={(timestamp, play) => {
if (onSeekToTime) {
onSeekToTime(timestamp, play);
}
: undefined
}}
/>
</div>
)}
<video
ref={videoRef}
className={`size-full rounded-lg bg-black md:rounded-2xl ${loadedMetadata ? "" : "invisible"} cursor-pointer`}
preload="auto"
autoPlay
controls={!frigateControls}
playsInline
muted={muted}
onClick={
isDesktop
? () => {
if (zoomScale == 1.0) onPlayPause(!isPlaying);
}
: undefined
}
onVolumeChange={() => {
setVolume(videoRef.current?.volume ?? 1.0, true);
if (!frigateControls) {
setMuted(videoRef.current?.muted);
}
onVolumeChange={() => {
setVolume(videoRef.current?.volume ?? 1.0, true);
if (!frigateControls) {
setMuted(videoRef.current?.muted);
}
}}
onPlay={() => {
setIsPlaying(true);
}}
onPlay={() => {
setIsPlaying(true);
if (isMobile) {
setControls(true);
setMobileCtrlTimeout(
setTimeout(() => setControls(false), 4000),
);
}
}}
onPlaying={onPlaying}
onPause={() => {
setIsPlaying(false);
clearTimeout(bufferTimeout);
if (isMobile) {
setControls(true);
setMobileCtrlTimeout(setTimeout(() => setControls(false), 4000));
}
}}
onPlaying={onPlaying}
onPause={() => {
setIsPlaying(false);
clearTimeout(bufferTimeout);
if (isMobile && mobileCtrlTimeout) {
clearTimeout(mobileCtrlTimeout);
}
}}
onWaiting={() => {
if (onError != undefined) {
if (videoRef.current?.paused) {
return;
}
setBufferTimeout(
setTimeout(() => {
if (
document.visibilityState === "visible" &&
videoRef.current
) {
onError("stalled");
}
}, 3000),
);
}
}}
onProgress={() => {
if (onError != undefined) {
if (videoRef.current?.paused) {
return;
}
if (bufferTimeout) {
clearTimeout(bufferTimeout);
setBufferTimeout(undefined);
}
}
}}
onTimeUpdate={() => {
if (!onTimeUpdate) {
if (isMobile && mobileCtrlTimeout) {
clearTimeout(mobileCtrlTimeout);
}
}}
onWaiting={() => {
if (onError != undefined) {
if (videoRef.current?.paused) {
return;
}
const frameTime = getVideoTime();
if (frameTime) {
onTimeUpdate(frameTime);
setBufferTimeout(
setTimeout(() => {
if (
document.visibilityState === "visible" &&
videoRef.current
) {
onError("stalled");
}
}, 3000),
);
}
}}
onProgress={() => {
if (onError != undefined) {
if (videoRef.current?.paused) {
return;
}
}}
onLoadedData={() => {
onPlayerLoaded?.();
handleLoadedMetadata();
if (videoRef.current) {
if (playbackRate) {
videoRef.current.playbackRate = playbackRate;
}
if (bufferTimeout) {
clearTimeout(bufferTimeout);
setBufferTimeout(undefined);
}
}
}}
onTimeUpdate={() => {
if (!onTimeUpdate) {
return;
}
if (volume) {
videoRef.current.volume = volume;
}
const frameTime = getVideoTime();
if (frameTime) {
onTimeUpdate(frameTime);
}
}}
onLoadedData={() => {
onPlayerLoaded?.();
handleLoadedMetadata();
if (videoRef.current) {
if (playbackRate) {
videoRef.current.playbackRate = playbackRate;
}
}}
onEnded={() => {
if (onClipEnded) {
onClipEnded(getVideoTime() ?? 0);
if (volume) {
videoRef.current.volume = volume;
}
}}
onError={(e) => {
if (
!hlsRef.current &&
}
}}
onEnded={() => {
if (onClipEnded) {
onClipEnded(getVideoTime() ?? 0);
}
}}
onError={(e) => {
if (
!hlsRef.current &&
// @ts-expect-error code does exist
unsupportedErrorCodes.includes(e.target.error.code) &&
videoRef.current
) {
setLoadedMetadata(false);
setUseHlsCompat(true);
} else {
toast.error(
// @ts-expect-error code does exist
unsupportedErrorCodes.includes(e.target.error.code) &&
videoRef.current
) {
setLoadedMetadata(false);
setUseHlsCompat(true);
} else {
toast.error(
// @ts-expect-error code does exist
`Failed to play recordings (error ${e.target.error.code}): ${e.target.error.message}`,
{
position: "top-center",
},
);
}
}}
/>
</div>
`Failed to play recordings (error ${e.target.error.code}): ${e.target.error.message}`,
{
position: "top-center",
},
);
}
}}
/>
</TransformComponent>
</TransformWrapper>
);

View File

@ -1,11 +1,4 @@
import {
ReactNode,
useCallback,
useEffect,
useMemo,
useRef,
useState,
} from "react";
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { useApiHost } from "@/api";
import useSWR from "swr";
import { FrigateConfig } from "@/types/frigateConfig";
@ -47,7 +40,6 @@ type DynamicVideoPlayerProps = {
setFullResolution: React.Dispatch<React.SetStateAction<VideoResolutionType>>;
toggleFullscreen: () => void;
containerRef?: React.MutableRefObject<HTMLDivElement | null>;
transformedOverlay?: ReactNode;
};
export default function DynamicVideoPlayer({
className,
@ -66,7 +58,6 @@ export default function DynamicVideoPlayer({
setFullResolution,
toggleFullscreen,
containerRef,
transformedOverlay,
}: DynamicVideoPlayerProps) {
const { t } = useTranslation(["components/player"]);
const apiHost = useApiHost();
@ -321,7 +312,6 @@ export default function DynamicVideoPlayer({
isDetailMode={isDetailMode}
camera={contextCamera || camera}
currentTimeOverride={currentTime}
transformedOverlay={transformedOverlay}
/>
)}
<PreviewPlayer

View File

@ -1,4 +1,4 @@
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { useEffect, useMemo, useRef, useState } from "react";
import { TrackingDetailsSequence } from "@/types/timeline";
import { getLifecycleItemDescription } from "@/utils/lifecycleUtil";
import { useDetailStream } from "@/context/detail-stream-context";
@ -33,7 +33,6 @@ import { MdAutoAwesome } from "react-icons/md";
import { isPWA } from "@/utils/isPWA";
import { isInIframe } from "@/utils/isIFrame";
import { GenAISummaryDialog } from "../overlay/chip/GenAISummaryChip";
import { Separator } from "../ui/separator";
type DetailStreamProps = {
reviewItems?: ReviewSegment[];
@ -50,8 +49,7 @@ export default function DetailStream({
}: DetailStreamProps) {
const { data: config } = useSWR<FrigateConfig>("config");
const { t } = useTranslation("views/events");
const { annotationOffset, selectedObjectIds, setSelectedObjectIds } =
useDetailStream();
const { annotationOffset } = useDetailStream();
const scrollRef = useRef<HTMLDivElement>(null);
const [activeReviewId, setActiveReviewId] = useState<string | undefined>(
@ -69,69 +67,9 @@ export default function DetailStream({
true,
);
// When the settings panel opens, pin to the nearest review with detections
// so the user can visually align the bounding box using the offset slider
const pinnedDetectTimestampRef = useRef<number | null>(null);
const wasControlsExpandedRef = useRef(false);
const selectedBeforeExpandRef = useRef<string[]>([]);
const onSeekCheckPlaying = useCallback(
(timestamp: number) => {
onSeek(timestamp, isPlaying);
},
[onSeek, isPlaying],
);
useEffect(() => {
if (controlsExpanded && !wasControlsExpandedRef.current) {
selectedBeforeExpandRef.current = selectedObjectIds;
const items = (reviewItems ?? []).filter(
(r) => r.data?.detections?.length > 0,
);
if (items.length > 0) {
// Pick the nearest review to current effective time
let nearest = items[0];
let minDiff = Math.abs(effectiveTime - nearest.start_time);
for (const r of items) {
const diff = Math.abs(effectiveTime - r.start_time);
if (diff < minDiff) {
nearest = r;
minDiff = diff;
}
}
const nearestId = `review-${nearest.id ?? nearest.start_time ?? Math.floor(nearest.start_time ?? 0)}`;
setActiveReviewId(nearestId);
const detectionId = nearest.data.detections[0];
setSelectedObjectIds([detectionId]);
// Use the detection's actual start timestamp (parsed from its ID)
// rather than review.start_time, which can be >10ms away from any
// lifecycle event and would fail the bounding-box TOLERANCE check.
const detectTimestamp = parseFloat(detectionId);
pinnedDetectTimestampRef.current = detectTimestamp;
const recordTime = detectTimestamp + annotationOffset / 1000;
onSeek(recordTime, false);
}
}
if (!controlsExpanded && wasControlsExpandedRef.current) {
pinnedDetectTimestampRef.current = null;
setSelectedObjectIds(selectedBeforeExpandRef.current);
}
wasControlsExpandedRef.current = controlsExpanded;
// Only trigger on expand/collapse transition
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [controlsExpanded]);
// Re-seek on annotation offset change while settings panel is open
useEffect(() => {
const pinned = pinnedDetectTimestampRef.current;
if (!controlsExpanded || pinned == null) return;
const recordTime = pinned + annotationOffset / 1000;
onSeek(recordTime, false);
}, [controlsExpanded, annotationOffset, onSeek]);
const onSeekCheckPlaying = (timestamp: number) => {
onSeek(timestamp, isPlaying);
};
// Ensure we initialize the active review when reviewItems first arrive.
// This helps when the component mounts while the video is already
@ -276,12 +214,6 @@ export default function DetailStream({
/>
<div className="relative flex h-full flex-col">
{controlsExpanded && (
<div
className="absolute inset-0 z-20 cursor-pointer bg-black/50"
onClick={() => setControlsExpanded(false)}
/>
)}
<div
ref={scrollRef}
className="scrollbar-container flex-1 overflow-y-auto overflow-x-hidden pb-14"
@ -335,9 +267,8 @@ export default function DetailStream({
)}
</button>
{controlsExpanded && (
<div className="space-y-4 px-3 pb-5 pt-2">
<div className="space-y-3 px-3 pb-3">
<AnnotationOffsetSlider />
<Separator />
<div className="flex flex-col gap-1">
<div className="flex items-center justify-between">
<label className="text-sm font-medium">

View File

@ -25,7 +25,6 @@ export type MotionReviewTimelineProps = {
timestampSpread: number;
timelineStart: number;
timelineEnd: number;
scrollToTime?: number;
showHandlebar?: boolean;
handlebarTime?: number;
setHandlebarTime?: React.Dispatch<React.SetStateAction<number>>;
@ -59,7 +58,6 @@ export function MotionReviewTimeline({
timestampSpread,
timelineStart,
timelineEnd,
scrollToTime,
showHandlebar = false,
handlebarTime,
setHandlebarTime,
@ -178,15 +176,6 @@ export function MotionReviewTimeline({
[],
);
// allow callers to request that the timeline be centered on a specific time
useEffect(() => {
if (scrollToTime == undefined) return;
setTimeout(() => {
scrollToSegment(alignStartDateToTimeline(scrollToTime), true, "auto");
}, 0);
}, [scrollToTime, scrollToSegment, alignStartDateToTimeline]);
// keep handlebar centered when zooming
useEffect(() => {
setTimeout(() => {

View File

@ -343,12 +343,9 @@ export function ReviewTimeline({
useEffect(() => {
if (onHandlebarDraggingChange) {
// Keep existing callback name but treat it as a generic dragging signal.
// This allows consumers (e.g. export-handle timelines) to correctly
// enable preview scrubbing while dragging export handles.
onHandlebarDraggingChange(isDragging);
onHandlebarDraggingChange(isDraggingHandlebar);
}
}, [isDragging, onHandlebarDraggingChange]);
}, [isDraggingHandlebar, onHandlebarDraggingChange]);
const isHandlebarInNoRecordingPeriod = useMemo(() => {
if (!getRecordingAvailability || handlebarTime === undefined) return false;

View File

@ -1,26 +0,0 @@
import * as React from "react"
import * as ProgressPrimitive from "@radix-ui/react-progress"
import { cn } from "@/lib/utils"
const Progress = React.forwardRef<
React.ElementRef<typeof ProgressPrimitive.Root>,
React.ComponentPropsWithoutRef<typeof ProgressPrimitive.Root>
>(({ className, value, ...props }, ref) => (
<ProgressPrimitive.Root
ref={ref}
className={cn(
"relative h-4 w-full overflow-hidden rounded-full bg-secondary",
className
)}
{...props}
>
<ProgressPrimitive.Indicator
className="h-full w-full flex-1 bg-primary transition-all"
style={{ transform: `translateX(-${100 - (value || 0)}%)` }}
/>
</ProgressPrimitive.Root>
))
Progress.displayName = ProgressPrimitive.Root.displayName
export { Progress }

View File

@ -8,19 +8,14 @@ import {
import { CameraConfig, FrigateConfig } from "@/types/frigateConfig";
import { MotionData, ReviewSegment } from "@/types/review";
import { useCallback, useEffect, useMemo, useState } from "react";
import { AudioDetection, ObjectType } from "@/types/ws";
import { useTimelineUtils } from "./use-timeline-utils";
import { AudioDetection, ObjectType } from "@/types/ws";
import useDeepMemo from "./use-deep-memo";
import { isEqual } from "lodash";
import { useAutoFrigateStats } from "./use-stats";
import useSWR from "swr";
import { getAttributeLabels } from "@/utils/iconUtil";
export type MotionOnlyRange = {
start_time: number;
end_time: number;
};
type useCameraActivityReturn = {
enabled?: boolean;
activeTracking: boolean;
@ -209,9 +204,9 @@ export function useCameraMotionNextTimestamp(
return [];
}
const ranges: [number, number][] = [];
let currentSegmentStart: number | null = null;
let currentSegmentEnd: number | null = null;
const ranges = [];
let currentSegmentStart = null;
let currentSegmentEnd = null;
// align motion start to timeline start
const offset =
@ -220,19 +215,13 @@ export function useCameraMotionNextTimestamp(
segmentDuration;
const startIndex = Math.abs(Math.floor(offset / 15));
const now = Date.now() / 1000;
for (
let i = startIndex;
i < motionData.length;
i = i + segmentDuration / 15
) {
const motionStart = motionData[i]?.start_time;
if (motionStart == undefined) {
continue;
}
const motionStart = motionData[i].start_time;
const motionEnd = motionStart + segmentDuration;
const segmentMotion = motionData
@ -241,10 +230,10 @@ export function useCameraMotionNextTimestamp(
const overlappingReviewItems = reviewItems.some(
(item) =>
(item.start_time >= motionStart && item.start_time < motionEnd) ||
((item.end_time ?? now) > motionStart &&
(item.end_time ?? now) <= motionEnd) ||
((item.end_time ?? Date.now() / 1000) > motionStart &&
(item.end_time ?? Date.now() / 1000) <= motionEnd) ||
(item.start_time <= motionStart &&
(item.end_time ?? now) >= motionEnd),
(item.end_time ?? Date.now() / 1000) >= motionEnd),
);
if (!segmentMotion || overlappingReviewItems) {
@ -252,14 +241,16 @@ export function useCameraMotionNextTimestamp(
currentSegmentStart = motionStart;
}
currentSegmentEnd = motionEnd;
} else if (currentSegmentStart !== null && currentSegmentEnd !== null) {
ranges.push([currentSegmentStart, currentSegmentEnd]);
currentSegmentStart = null;
currentSegmentEnd = null;
} else {
if (currentSegmentStart !== null) {
ranges.push([currentSegmentStart, currentSegmentEnd]);
currentSegmentStart = null;
currentSegmentEnd = null;
}
}
}
if (currentSegmentStart !== null && currentSegmentEnd !== null) {
if (currentSegmentStart !== null) {
ranges.push([currentSegmentStart, currentSegmentEnd]);
}
@ -313,93 +304,3 @@ export function useCameraMotionNextTimestamp(
return nextTimestamp;
}
export function useCameraMotionOnlyRanges(
segmentDuration: number,
reviewItems: ReviewSegment[],
motionData: MotionData[],
) {
const motionOnlyRanges = useMemo(() => {
if (!motionData?.length || !reviewItems) {
return [];
}
const fallbackBucketDuration = Math.max(1, segmentDuration / 2);
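// Deduplicate motion entries by start_time, keeping the highest motion value recorded for each timestamp, sorted chronologically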
const normalizedMotionData = Array.from(
motionData
.reduce((accumulator, item) => {
const currentMotion = accumulator.get(item.start_time) ?? 0;
accumulator.set(
item.start_time,
Math.max(currentMotion, item.motion ?? 0),
);
return accumulator;
}, new Map<number, number>())
.entries(),
)
.map(([start_time, motion]) => ({ start_time, motion }))
.sort((left, right) => left.start_time - right.start_time);
const bucketRanges: MotionOnlyRange[] = [];
const now = Date.now() / 1000;
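// Collect fixed-length buckets that contain motion but do not overlap any review item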
for (let i = 0; i < normalizedMotionData.length; i++) {
const motionStart = normalizedMotionData[i].start_time;
const motionEnd = motionStart + fallbackBucketDuration;
const overlappingReviewItems = reviewItems.some(
(item) =>
(item.start_time >= motionStart && item.start_time < motionEnd) ||
((item.end_time ?? now) > motionStart &&
(item.end_time ?? now) <= motionEnd) ||
(item.start_time <= motionStart &&
(item.end_time ?? now) >= motionEnd),
);
const isMotionOnlySegment =
(normalizedMotionData[i].motion ?? 0) > 0 && !overlappingReviewItems;
if (!isMotionOnlySegment) {
continue;
}
bucketRanges.push({
start_time: motionStart,
end_time: motionEnd,
});
}
if (!bucketRanges.length) {
return [];
}
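// Merge adjacent buckets (within a 1 ms tolerance) into continuous motion-only ranges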
const mergedRanges = bucketRanges.reduce<MotionOnlyRange[]>(
(ranges, range) => {
if (!ranges.length) {
return [range];
}
const previousRange = ranges[ranges.length - 1];
const isContiguous =
range.start_time <= previousRange.end_time + 0.001 &&
range.start_time >= previousRange.end_time - 0.001;
if (isContiguous) {
previousRange.end_time = Math.max(
previousRange.end_time,
range.end_time,
);
return ranges;
}
ranges.push(range);
return ranges;
},
[],
);
return mergedRanges;
}, [motionData, reviewItems, segmentDuration]);
return motionOnlyRanges;
}

View File

@ -1,4 +1,4 @@
import { useCallback, useContext, useEffect, useMemo, useRef } from "react";
import { useCallback, useContext, useEffect, useMemo } from "react";
import { useLocation, useNavigate, useSearchParams } from "react-router-dom";
import { usePersistence } from "./use-persistence";
import { useUserPersistence } from "./use-user-persistence";
@ -12,28 +12,20 @@ export function useOverlayState<S>(
const location = useLocation();
const navigate = useNavigate();
const locationRef = useRef(location);
locationRef.current = location;
const currentLocationState = useMemo(() => location.state, [location]);
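// Persist the value in router location state so overlays participate in browser history (unless replace is set)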
const setOverlayStateValue = useCallback(
(value: S, replace: boolean = false) => {
const loc = locationRef.current;
const currentValue = loc.state?.[key] as S | undefined;
if (Object.is(currentValue, value)) {
return;
}
const newLocationState = { ...loc.state };
const newLocationState = { ...currentLocationState };
newLocationState[key] = value;
navigate(loc.pathname + (preserveSearch ? loc.search : ""), {
navigate(location.pathname + (preserveSearch ? location.search : ""), {
state: newLocationState,
replace,
});
},
// locationRef is stable so we don't need it in deps
// we know that these deps are correct
// eslint-disable-next-line react-hooks/exhaustive-deps
[key, navigate, preserveSearch],
[key, currentLocationState, navigate],
);
const overlayStateValue = useMemo<S | undefined>(
@ -55,9 +47,7 @@ export function usePersistedOverlayState<S extends string>(
] {
const location = useLocation();
const navigate = useNavigate();
const locationRef = useRef(location);
locationRef.current = location;
const currentLocationState = useMemo(() => location.state, [location]);
// currently selected value
@ -73,21 +63,14 @@ export function usePersistedOverlayState<S extends string>(
const setOverlayStateValue = useCallback(
(value: S | undefined, replace: boolean = false) => {
const loc = locationRef.current;
const currentValue = loc.state?.[key] as S | undefined;
if (Object.is(currentValue, value)) {
return;
}
setPersistedValue(value);
const newLocationState = { ...loc.state };
const newLocationState = { ...currentLocationState };
newLocationState[key] = value;
navigate(loc.pathname, { state: newLocationState, replace });
navigate(location.pathname, { state: newLocationState, replace });
},
// locationRef is stable so we don't need it in deps
// we know that these deps are correct
// eslint-disable-next-line react-hooks/exhaustive-deps
[key, navigate, setPersistedValue],
[key, currentLocationState, navigate],
);
return [
@ -115,9 +98,7 @@ export function useUserPersistedOverlayState<S extends string>(
const { auth } = useContext(AuthContext);
const location = useLocation();
const navigate = useNavigate();
const locationRef = useRef(location);
locationRef.current = location;
const currentLocationState = useMemo(() => location.state, [location]);
// currently selected value from URL state
const overlayStateValue = useMemo<S | undefined>(
@ -131,21 +112,14 @@ export function useUserPersistedOverlayState<S extends string>(
const setOverlayStateValue = useCallback(
(value: S | undefined, replace: boolean = false) => {
const loc = locationRef.current;
const currentValue = loc.state?.[key] as S | undefined;
if (Object.is(currentValue, value)) {
return;
}
setPersistedValue(value);
const newLocationState = { ...loc.state };
const newLocationState = { ...currentLocationState };
newLocationState[key] = value;
navigate(loc.pathname, { state: newLocationState, replace });
navigate(location.pathname, { state: newLocationState, replace });
},
// locationRef is stable so we don't need it in deps
// we know that these deps are correct
// eslint-disable-next-line react-hooks/exhaustive-deps
[key, navigate, setPersistedValue],
[key, currentLocationState, navigate, setPersistedValue],
);
// Don't return a value until auth has finished loading
@ -168,21 +142,17 @@ export function useHashState<S extends string>(): [
const location = useLocation();
const navigate = useNavigate();
const locationRef = useRef(location);
locationRef.current = location;
const setHash = useCallback(
(value: S | undefined) => {
const loc = locationRef.current;
if (!value) {
navigate(loc.pathname);
navigate(location.pathname);
} else {
navigate(`${loc.pathname}#${value}`, { state: loc.state });
navigate(`${location.pathname}#${value}`, { state: location.state });
}
},
// locationRef is stable so we don't need it in deps
// we know that these deps are correct
// eslint-disable-next-line react-hooks/exhaustive-deps
[navigate],
[location, navigate],
);
const hash = useMemo(

View File

@ -116,11 +116,6 @@ export function useUserPersistence<S>(
return;
}
// Skip reload if we're already loaded for this key
if (loadedKeyRef.current === namespacedKey) {
return;
}
// Reset state when key changes - this prevents stale writes
loadedKeyRef.current = null;
migrationAttemptedRef.current = false;

View File

@ -1,6 +1,3 @@
/** ONNX embedding models that require local model downloads. GenAI providers are not in this list. */
export const JINA_EMBEDDING_MODELS = ["jinav1", "jinav2"] as const;
export const supportedLanguageKeys = [
"en",
"es",

View File

@ -1,6 +1,5 @@
import ActivityIndicator from "@/components/indicators/activity-indicator";
import useApiFilter from "@/hooks/use-api-filter";
import { useAllowedCameras } from "@/hooks/use-allowed-cameras";
import { useCameraPreviews } from "@/hooks/use-camera-previews";
import { useTimezone } from "@/hooks/use-date-utils";
import { useOverlayState, useSearchEffect } from "@/hooks/use-overlay-state";
@ -22,7 +21,6 @@ import {
getEndOfDayTimestamp,
} from "@/utils/dateUtil";
import EventView from "@/views/events/EventView";
import MotionSearchView from "@/views/motion-search/MotionSearchView";
import { RecordingView } from "@/views/recording/RecordingView";
import axios from "axios";
import { useCallback, useEffect, useMemo, useState } from "react";
@ -36,7 +34,6 @@ export default function Events() {
revalidateOnFocus: false,
});
const timezone = useTimezone(config);
const allowedCameras = useAllowedCameras();
// recordings viewer
@ -55,74 +52,6 @@ export default function Events() {
undefined,
false,
);
const [motionPreviewsCamera, setMotionPreviewsCamera] = useOverlayState<
string | undefined
>("motionPreviewsCamera", undefined);
const [motionSearchCamera, setMotionSearchCamera] = useState<string | null>(
null,
);
const [motionSearchDay, setMotionSearchDay] = useState<Date | undefined>(
undefined,
);
const motionSearchCameras = useMemo(() => {
if (!config?.cameras) {
return [] as string[];
}
return Object.keys(config.cameras).filter((cam) =>
allowedCameras.includes(cam),
);
}, [allowedCameras, config?.cameras]);
const selectedMotionSearchCamera = useMemo(() => {
if (!motionSearchCamera) {
return null;
}
if (motionSearchCameras.includes(motionSearchCamera)) {
return motionSearchCamera;
}
return motionSearchCameras[0] ?? null;
}, [motionSearchCamera, motionSearchCameras]);
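// Limit motion search to the selected day, or fall back to the trailing 24 hours when no day is chosen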
const motionSearchTimeRange = useMemo(() => {
if (motionSearchDay) {
return {
after: getBeginningOfDayTimestamp(new Date(motionSearchDay)),
before: getEndOfDayTimestamp(new Date(motionSearchDay)),
};
}
const now = Date.now() / 1000;
return {
after: now - 86400,
before: now,
};
}, [motionSearchDay]);
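// Exiting motion search clears its camera/day selection and resets the review window to now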
const closeMotionSearch = useCallback(() => {
setMotionSearchCamera(null);
setMotionSearchDay(undefined);
setBeforeTs(Date.now() / 1000);
}, []);
const handleMotionSearchCameraSelect = useCallback((camera: string) => {
setMotionSearchCamera(camera);
}, []);
const handleMotionSearchDaySelect = useCallback((day: Date | undefined) => {
if (day == undefined) {
setMotionSearchDay(undefined);
return;
}
const normalizedDay = new Date(day);
normalizedDay.setHours(0, 0, 0, 0);
setMotionSearchDay(normalizedDay);
}, []);
const [notificationTab, setNotificationTab] =
useState<TimelineType>("timeline");
@ -579,24 +508,7 @@ export default function Events() {
);
}
} else {
return motionSearchCamera ? (
!config || !selectedMotionSearchCamera ? (
<ActivityIndicator />
) : (
<MotionSearchView
config={config}
cameras={motionSearchCameras}
selectedCamera={selectedMotionSearchCamera}
onCameraSelect={handleMotionSearchCameraSelect}
cameraLocked={true}
selectedDay={motionSearchDay}
onDaySelect={handleMotionSearchDaySelect}
timeRange={motionSearchTimeRange}
timezone={timezone}
onBack={closeMotionSearch}
/>
)
) : (
return (
<EventView
reviewItems={reviewItems}
currentReviewItems={currentItems}
@ -613,11 +525,6 @@ export default function Events() {
markItemAsReviewed={markItemAsReviewed}
markAllItemsAsReviewed={markAllItemsAsReviewed}
onOpenRecording={setRecording}
motionPreviewsCamera={motionPreviewsCamera ?? null}
setMotionPreviewsCamera={(camera) =>
setMotionPreviewsCamera(camera ?? undefined)
}
setMotionSearchCamera={setMotionSearchCamera}
pullLatestData={reloadData}
updateFilter={onUpdateFilter}
/>

View File

@ -23,7 +23,6 @@ import { toast } from "sonner";
import useSWR from "swr";
import useSWRInfinite from "swr/infinite";
import { useDocDomain } from "@/hooks/use-doc-domain";
import { JINA_EMBEDDING_MODELS } from "@/lib/const";
const API_LIMIT = 25;
@ -294,12 +293,7 @@ export default function Explore() {
const modelVersion = config?.semantic_search.model || "jinav1";
const modelSize = config?.semantic_search.model_size || "small";
// GenAI providers have no local models to download
const isGenaiEmbeddings =
typeof modelVersion === "string" &&
!(JINA_EMBEDDING_MODELS as readonly string[]).includes(modelVersion);
// Text model state (skipped for GenAI - no local models)
// Text model state
const { payload: textModelState } = useModelState(
modelVersion === "jinav1"
? "jinaai/jina-clip-v1-text_model_fp16.onnx"
@ -334,10 +328,6 @@ export default function Explore() {
);
const allModelsLoaded = useMemo(() => {
if (isGenaiEmbeddings) {
return true;
}
return (
textModelState === "downloaded" &&
textTokenizerState === "downloaded" &&
@ -345,7 +335,6 @@ export default function Explore() {
visionFeatureExtractorState === "downloaded"
);
}, [
isGenaiEmbeddings,
textModelState,
textTokenizerState,
visionModelState,
@ -369,11 +358,10 @@ export default function Explore() {
!defaultViewLoaded ||
(config?.semantic_search.enabled &&
(!reindexState ||
(!isGenaiEmbeddings &&
(!textModelState ||
!textTokenizerState ||
!visionModelState ||
!visionFeatureExtractorState))))
!textModelState ||
!textTokenizerState ||
!visionModelState ||
!visionFeatureExtractorState))
) {
return (
<ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />

View File

@ -1,112 +0,0 @@
import { useEffect, useMemo, useState, useCallback } from "react";
import { useTranslation } from "react-i18next";
import useSWR from "swr";
import { FrigateConfig } from "@/types/frigateConfig";
import { useTimezone } from "@/hooks/use-date-utils";
import MotionSearchView from "@/views/motion-search/MotionSearchView";
import {
getBeginningOfDayTimestamp,
getEndOfDayTimestamp,
} from "@/utils/dateUtil";
import { useAllowedCameras } from "@/hooks/use-allowed-cameras";
import { useSearchEffect } from "@/hooks/use-overlay-state";
import ActivityIndicator from "@/components/indicators/activity-indicator";
export default function MotionSearch() {
const { t } = useTranslation(["views/motionSearch"]);
const { data: config } = useSWR<FrigateConfig>("config", {
revalidateOnFocus: false,
});
const timezone = useTimezone(config);
useEffect(() => {
document.title = t("documentTitle");
}, [t]);
// Get allowed cameras
const allowedCameras = useAllowedCameras();
const cameras = useMemo(() => {
if (!config?.cameras) return [];
return Object.keys(config.cameras).filter((cam) =>
allowedCameras.includes(cam),
);
}, [config?.cameras, allowedCameras]);
// Selected camera state
const [selectedCamera, setSelectedCamera] = useState<string | null>(null);
const [cameraLocked, setCameraLocked] = useState(false);
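// When a ?camera= query param matches an allowed camera, select it and lock the camera picker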
useSearchEffect("camera", (camera: string) => {
if (cameras.length > 0 && cameras.includes(camera)) {
setSelectedCamera(camera);
setCameraLocked(true);
}
return false;
});
// Initialize with first camera when available (only if not set by camera param)
useEffect(() => {
if (cameras.length === 0) return;
if (!selectedCamera) {
setSelectedCamera(cameras[0]);
}
}, [cameras, selectedCamera]);
// Time range state - default to last 24 hours
const [selectedDay, setSelectedDay] = useState<Date | undefined>(undefined);
const timeRange = useMemo(() => {
if (selectedDay) {
return {
after: getBeginningOfDayTimestamp(new Date(selectedDay)),
before: getEndOfDayTimestamp(new Date(selectedDay)),
};
}
// Default to last 24 hours
const now = Date.now() / 1000;
return {
after: now - 86400,
before: now,
};
}, [selectedDay]);
const handleCameraSelect = useCallback((camera: string) => {
setSelectedCamera(camera);
}, []);
const handleDaySelect = useCallback((day: Date | undefined) => {
if (day == undefined) {
setSelectedDay(undefined);
return;
}
const normalizedDay = new Date(day);
normalizedDay.setHours(0, 0, 0, 0);
setSelectedDay(normalizedDay);
}, []);
if (!config || cameras.length === 0) {
return (
<div className="flex size-full items-center justify-center">
<ActivityIndicator />
</div>
);
}
return (
<MotionSearchView
config={config}
cameras={cameras}
selectedCamera={selectedCamera ?? null}
onCameraSelect={handleCameraSelect}
cameraLocked={cameraLocked}
selectedDay={selectedDay}
onDaySelect={handleDaySelect}
timeRange={timeRange}
timezone={timezone}
/>
);
}

View File

@ -40,8 +40,7 @@ import UsersView from "@/views/settings/UsersView";
import RolesView from "@/views/settings/RolesView";
import UiSettingsView from "@/views/settings/UiSettingsView";
import FrigatePlusSettingsView from "@/views/settings/FrigatePlusSettingsView";
import MediaSyncSettingsView from "@/views/settings/MediaSyncSettingsView";
import RegionGridSettingsView from "@/views/settings/RegionGridSettingsView";
import MaintenanceSettingsView from "@/views/settings/MaintenanceSettingsView";
import SystemDetectionModelSettingsView from "@/views/settings/SystemDetectionModelSettingsView";
import {
SingleSectionPage,
@ -155,8 +154,7 @@ const allSettingsViews = [
"roles",
"notifications",
"frigateplus",
"mediaSync",
"regionGrid",
"maintenance",
] as const;
type SettingsType = (typeof allSettingsViews)[number];
@ -446,10 +444,7 @@ const settingsGroups = [
},
{
label: "maintenance",
items: [
{ key: "mediaSync", component: MediaSyncSettingsView },
{ key: "regionGrid", component: RegionGridSettingsView },
],
items: [{ key: "maintenance", component: MaintenanceSettingsView }],
},
];
@ -476,10 +471,15 @@ const CAMERA_SELECT_BUTTON_PAGES = [
"masksAndZones",
"motionTuner",
"triggers",
"regionGrid",
];
const ALLOWED_VIEWS_FOR_VIEWER = ["profileSettings", "notifications"];
const ALLOWED_VIEWS_FOR_VIEWER = ["ui", "debug", "notifications"];
const LARGE_BOTTOM_MARGIN_PAGES = [
"masksAndZones",
"motionTuner",
"maintenance",
];
// keys for camera sections
const CAMERA_SECTION_MAPPING: Record<string, SettingsType> = {
@ -1355,9 +1355,9 @@ export default function Settings() {
)}
</div>
</div>
<SidebarProvider className="relative h-full min-h-0 flex-1">
<Sidebar variant="inset" className="absolute h-full pl-0 pt-0">
<SidebarContent className="scrollbar-container overflow-y-auto border-r-[1px] border-secondary bg-background py-2">
<SidebarProvider>
<Sidebar variant="inset" className="relative mb-8 pl-0 pt-0">
<SidebarContent className="scrollbar-container mb-24 overflow-y-auto border-r-[1px] border-secondary bg-background py-2">
<SidebarMenu>
{settingsGroups.map((group) => {
const filteredItems = group.items.filter((item) =>
@ -1445,7 +1445,8 @@ export default function Settings() {
<SidebarInset>
<div
className={cn(
"scrollbar-container flex-1 overflow-y-auto pl-2 pr-0 pt-2",
"scrollbar-container mb-16 flex-1 overflow-y-auto p-2 pr-0",
LARGE_BOTTOM_MARGIN_PAGES.includes(pageToggle) && "mb-24",
)}
>
{(() => {

View File

@ -106,7 +106,6 @@ export interface CameraConfig {
frame_height: number;
improve_contrast: boolean;
lightning_threshold: number;
skip_motion_threshold: number | null;
mask: {
[maskId: string]: {
friendly_name?: string;

View File

@ -1,46 +0,0 @@
/**
* Types for the Motion Search feature
*/
export interface MotionSearchResult {
timestamp: number;
change_percentage: number;
}
export interface MotionSearchRequest {
start_time: number;
end_time: number;
polygon_points: number[][];
parallel?: boolean;
threshold?: number;
min_area?: number;
frame_skip?: number;
max_results?: number;
}
export interface MotionSearchStartResponse {
success: boolean;
message: string;
job_id: string;
}
export interface MotionSearchMetrics {
segments_scanned: number;
segments_processed: number;
metadata_inactive_segments: number;
heatmap_roi_skip_segments: number;
fallback_full_range_segments: number;
frames_decoded: number;
wall_time_seconds: number;
segments_with_errors: number;
}
export interface MotionSearchStatusResponse {
success: boolean;
message: string;
status: "queued" | "running" | "success" | "failed" | "cancelled";
results?: MotionSearchResult[];
total_frames_processed?: number;
error_message?: string;
metrics?: MotionSearchMetrics;
}

View File

@ -11,7 +11,6 @@ export type Recording = {
duration: number;
motion: number;
objects: number;
motion_heatmap?: Record<string, number> | null;
dBFS: number;
};

Some files were not shown because too many files have changed in this diff