Compare commits


1 Commit

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| dependabot[bot] | b75d6f61c7 | Merge 5199e10086 into 2dcaeb6809 | 2026-04-22 13:12:23 +00:00 |
58 changed files with 213 additions and 2328 deletions

View File

@ -87,43 +87,43 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
# intel packages use zst compression so we need to update dpkg
apt-get install -y dpkg
# use intel apt repo for libmfx1 (legacy QSV, pre-Gen12)
# use intel apt repo for intel packages
wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list
apt-get -qq update
# intel-media-va-driver-non-free is built from source in the
# intel-media-driver Dockerfile stage for Battlemage (Xe2) support
apt-get -qq install --no-install-recommends --no-install-suggests -y \
libmfx1
rm -f /usr/share/keyrings/intel-graphics.gpg
rm -f /etc/apt/sources.list.d/intel-gpu-jammy.list
libmfx1 libmfxgen1 libvpl2
# upgrade libva2, oneVPL runtime, and libvpl2 from trixie for Battlemage support
echo "deb http://deb.debian.org/debian trixie main" > /etc/apt/sources.list.d/trixie.list
apt-get -qq update
apt-get -qq install -y -t trixie libva2 libva-drm2 libzstd1
apt-get -qq install -y -t trixie libmfx-gen1.2 libvpl2
rm -f /etc/apt/sources.list.d/trixie.list
apt-get -qq update
apt-get -qq install -y ocl-icd-libopencl1
# install libtbb12 for NPU support
apt-get -qq install -y libtbb12
# install legacy and standard intel compute packages
rm -f /usr/share/keyrings/intel-graphics.gpg
rm -f /etc/apt/sources.list.d/intel-gpu-jammy.list
# install legacy and standard intel icd and level-zero-gpu
# see https://github.com/intel/compute-runtime/blob/master/LEGACY_PLATFORMS.md for more info
# newer intel packages (gmmlib 22.9+, igc 2.32+) require libstdc++ >= 13.1 and libzstd >= 1.5.5
echo "deb http://deb.debian.org/debian trixie main" > /etc/apt/sources.list.d/trixie.list
apt-get -qq update
apt-get -qq install -y -t trixie libstdc++6 libzstd1
rm -f /etc/apt/sources.list.d/trixie.list
apt-get -qq update
# needed core package
wget https://github.com/intel/compute-runtime/releases/download/26.14.37833.4/libigdgmm12_22.9.0_amd64.deb
dpkg -i libigdgmm12_22.9.0_amd64.deb
rm libigdgmm12_22.9.0_amd64.deb
# legacy compute-runtime packages
# legacy packages
wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.36/intel-opencl-icd-legacy1_24.35.30872.36_amd64.deb
wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.36/intel-level-zero-gpu-legacy1_1.5.30872.36_amd64.deb
wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.24/intel-igc-opencl_1.0.17537.24_amd64.deb
wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.24/intel-igc-core_1.0.17537.24_amd64.deb
# standard compute-runtime packages
# standard packages
wget https://github.com/intel/compute-runtime/releases/download/26.14.37833.4/intel-opencl-icd_26.14.37833.4-0_amd64.deb
wget https://github.com/intel/compute-runtime/releases/download/26.14.37833.4/libze-intel-gpu1_26.14.37833.4-0_amd64.deb
wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.32.7/intel-igc-opencl-2_2.32.7+21184_amd64.deb
@ -137,10 +137,6 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
dpkg -i *.deb
rm *.deb
apt-get -qq install -f -y
# Battlemage uses the xe kernel driver, but the VA-API driver is still iHD.
# The oneVPL runtime may look for a driver named after the kernel module.
ln -sf /usr/lib/x86_64-linux-gnu/dri/iHD_drv_video.so /usr/lib/x86_64-linux-gnu/dri/xe_drv_video.so
fi
if [[ "${TARGETARCH}" == "arm64" ]]; then

View File

@ -11,7 +11,7 @@ joserfc == 1.2.*
cryptography == 44.0.*
pathvalidate == 3.3.*
markupsafe == 3.0.*
python-multipart == 0.0.26
python-multipart == 0.0.20
# Classification Model Training
tensorflow == 2.19.* ; platform_machine == 'aarch64'
tensorflow-cpu == 2.19.* ; platform_machine == 'x86_64'

View File

@ -39,10 +39,6 @@ This is a fork (with fixed errors and new features) of [original Double Take](ht
[Frigate telegram](https://github.com/OldTyT/frigate-telegram) makes it possible to send events from Frigate to Telegram. Events are sent as a message with a text description, video, and thumbnail.
## [kiosk-monitor](https://github.com/extremeshok/kiosk-monitor)
[kiosk-monitor](https://github.com/extremeshok/kiosk-monitor) is a Raspberry Pi watchdog that runs Chromium fullscreen on a Frigate dashboard (optionally with VLC on a second monitor for an RTSP camera stream), auto-restarts on frozen screens or unreachable URLs, and ships a Birdseye-aware Chromium helper that auto-sizes the grid to the display.
## [Periscope](https://github.com/maksz42/periscope)
[Periscope](https://github.com/maksz42/periscope) is a lightweight Android app that turns old devices into live viewers for Frigate. It works on Android 2.2 and above, including Android TV. It supports authentication and HTTPS.

View File

@ -111,16 +111,26 @@ TCP ensures that all data packets arrive in the correct order. This is crucial f
You can still configure Frigate to use UDP by using ffmpeg input args or the preset `preset-rtsp-udp`. See the [ffmpeg presets](/configuration/ffmpeg_presets) documentation.
### Frigate is slow to start up with a "probing detect stream" message in the logs
### Frigate hangs on startup with a "probing detect stream" message in the logs
When `detect.width` and `detect.height` are not set, Frigate probes each camera's detect stream on startup (and when saving the config) to auto-detect its resolution. For RTSP streams Frigate probes with ffprobe and automatically retries over TCP if UDP doesn't respond, with a 5 second timeout per attempt. A camera that cannot be reached over either transport will add up to ~10 seconds to startup before Frigate falls through with default dimensions, which may show up as width `0` and height `0` in Camera Probe Info under System Metrics.
On startup, Frigate probes each camera's detect stream with OpenCV to auto-detect its resolution. OpenCV's FFmpeg backend may attempt RTSP over UDP during this probe regardless of the `-rtsp_transport tcp` in your `input_args` or preset. For cameras that do not respond to UDP (common on some Reolink models and others behind firewalls that block UDP), the probe can hang indefinitely and block Frigate from finishing startup, or it can return zeroed-out dimensions that show up as width `0` and height `0` in Camera Probe Info under System Metrics.
To skip the probe entirely and make startup instant, set `detect.width` and `detect.height` explicitly in your camera config:
There are two ways to avoid this:
```yaml
cameras:
1. Set `detect.width` and `detect.height` explicitly in your camera config. When both are set, Frigate skips the auto-detect probe entirely:
```yaml
cameras:
my_camera:
detect:
width: 1280
height: 720
```
```
2. Force OpenCV's FFmpeg backend to use TCP for RTSP by setting the environment variable on your Frigate container:
```
OPENCV_FFMPEG_CAPTURE_OPTIONS=rtsp_transport;tcp
```
This is a process-wide setting and applies to all cameras. If you have any cameras that require `preset-rtsp-udp`, use option 1 instead.
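As a rough standalone illustration (outside Frigate, with a placeholder URL), OpenCV's FFmpeg backend picks this variable up when the capture is opened, so it must be in the environment beforehand:

```python
import os

# Must be set before the capture is constructed; the backend parses
# "key;value|key;value" pairs from this variable.
os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "rtsp_transport;tcp"

import cv2

# Placeholder RTSP URL for illustration only.
cap = cv2.VideoCapture("rtsp://camera.example:554/stream", cv2.CAP_FFMPEG)
print(cap.isOpened())
```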

View File

@ -10897,9 +10897,9 @@
"license": "MIT"
},
"node_modules/express/node_modules/path-to-regexp": {
"version": "0.1.13",
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.13.tgz",
"integrity": "sha512-A/AGNMFN3c8bOlvV9RreMdrv7jsmF9XIfDeCd87+I8RNg6s78BhJxMu69NEMHBSJFxKidViTEdruRwEk/WIKqA==",
"version": "0.1.12",
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz",
"integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==",
"license": "MIT"
},
"node_modules/express/node_modules/range-parser": {

View File

@ -36,7 +36,6 @@ from frigate.api.defs.response.chat_response import (
)
from frigate.api.defs.tags import Tags
from frigate.api.event import events
from frigate.config import FrigateConfig
from frigate.genai.utils import build_assistant_message_for_conversation
from frigate.jobs.vlm_watch import (
get_vlm_watch_job,
@ -402,38 +401,9 @@ def get_tools() -> JSONResponse:
return JSONResponse(content={"tools": tools})
def _resolve_zones(
zones: List[str],
config: FrigateConfig,
target_cameras: List[str],
) -> List[str]:
"""Map zone names to their canonical config keys, case-insensitively.
LLMs frequently echo a user's casing ("Front Yard") instead of the
configured key ("front_yard"). The downstream zone filter is a SQLite GLOB
over the JSON-encoded zones column, which is case-sensitive, so an
unnormalized name silently returns zero matches. Build a lookup over the
relevant cameras' configured zones and substitute when we find a match;
unknown names pass through so behavior matches what the model asked for.
"""
if not zones:
return zones
lookup: Dict[str, str] = {}
for camera_id in target_cameras:
camera_config = config.cameras.get(camera_id)
if camera_config is None:
continue
for zone_name in camera_config.zones.keys():
lookup.setdefault(zone_name.lower(), zone_name)
return [lookup.get(z.lower(), z) for z in zones]
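A minimal standalone illustration of the substitution this helper performed (the zone names are hypothetical):

```python
# Hypothetical configured zone keys for the relevant cameras.
configured_zones = ["front_yard", "porch"]
lookup = {z.lower(): z for z in configured_zones}

# Names as an LLM might echo them back.
requested = ["Front_Yard", "PORCH", "garage"]
print([lookup.get(z.lower(), z) for z in requested])
# ['front_yard', 'porch', 'garage'] -- unknown names pass through unchanged
```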
async def _execute_search_objects(
arguments: Dict[str, Any],
allowed_cameras: List[str],
config: FrigateConfig,
) -> JSONResponse:
"""
Execute the search_objects tool.
@ -467,11 +437,6 @@ async def _execute_search_objects(
# Convert zones array to comma-separated string if provided
zones = arguments.get("zones")
if isinstance(zones, list):
camera_arg = arguments.get("camera")
target_cameras = (
[camera_arg] if camera_arg and camera_arg != "all" else allowed_cameras
)
zones = _resolve_zones(zones, config, target_cameras)
zones = ",".join(zones)
elif zones is None:
zones = "all"
@ -563,11 +528,6 @@ async def _execute_find_similar_objects(
sub_labels = arguments.get("sub_labels")
zones = arguments.get("zones")
if zones:
zones = _resolve_zones(
zones, request.app.frigate_config, cameras or list(allowed_cameras)
)
similarity_mode = arguments.get("similarity_mode", "fused")
if similarity_mode not in ("visual", "semantic", "fused"):
similarity_mode = "fused"
@ -695,9 +655,7 @@ async def execute_tool(
logger.debug(f"Executing tool: {tool_name} with arguments: {arguments}")
if tool_name == "search_objects":
return await _execute_search_objects(
arguments, allowed_cameras, request.app.frigate_config
)
return await _execute_search_objects(arguments, allowed_cameras)
if tool_name == "find_similar_objects":
result = await _execute_find_similar_objects(
@ -877,9 +835,7 @@ async def _execute_tool_internal(
This is used by the chat completion endpoint to execute tools.
"""
if tool_name == "search_objects":
response = await _execute_search_objects(
arguments, allowed_cameras, request.app.frigate_config
)
response = await _execute_search_objects(arguments, allowed_cameras)
try:
if hasattr(response, "body"):
body_str = response.body.decode("utf-8")
@ -943,9 +899,6 @@ async def _execute_start_camera_watch(
await require_camera_access(camera, request=request)
if zones:
zones = _resolve_zones(zones, config, [camera])
genai_manager = request.app.genai_manager
chat_client = genai_manager.chat_client
if chat_client is None or not chat_client.supports_vision:

View File

@ -754,15 +754,6 @@ def events_search(
status_code=404,
)
if search_event.camera not in allowed_cameras:
return JSONResponse(
content={
"success": False,
"message": "Event not found",
},
status_code=404,
)
thumb_result = context.search_thumbnail(search_event)
thumb_ids = {result[0]: result[1] for result in thumb_result}
search_results = {

View File

@ -35,7 +35,7 @@ logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.recordings])
@router.get("/recordings/storage", dependencies=[Depends(require_role(["admin"]))])
@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
def get_recordings_storage_usage(request: Request):
recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
"storage"

View File

@ -549,14 +549,6 @@ class WebPushClient(Communicator):
logger.debug(f"Sending camera monitoring push notification for {camera_name}")
for user in self.web_pushers:
if not self._user_has_camera_access(user, camera):
logger.debug(
"Skipping notification for user %s - no access to camera %s",
user,
camera,
)
continue
self.send_push_notification(
user=user,
payload=payload,

View File

@ -133,61 +133,6 @@ class FaceRecognizer(ABC):
return 0.0
def build_class_mean(
embs: list[np.ndarray],
trim: float = 0.15,
outlier_threshold: float = 0.30,
min_keep_frac: float = 0.7,
max_iters: int = 3,
) -> np.ndarray:
"""Build a class-mean embedding with two-layer outlier protection.
Layer 1 (iterative, vector-wise): drop whole embeddings whose cosine
similarity to the current class mean is below ``outlier_threshold``.
Catches mislabeled or corrupted training samples (wrong face in the
folder, full-frame screenshots, extreme crops) that per-dimension
trimming cannot detect.
Layer 2 (per-dimension): ``scipy.stats.trim_mean`` on the retained set
to smooth per-component noise (lighting, expression, alignment jitter).
Collections with fewer than 5 images bypass outlier rejection: too few
samples to establish a reliable class center.
"""
arr = np.stack(embs, axis=0)
if len(arr) < 5:
return np.asarray(stats.trim_mean(arr, trim, axis=0))
keep = np.ones(len(arr), dtype=bool)
floor = max(5, int(np.ceil(min_keep_frac * len(arr))))
for _ in range(max_iters):
mean = stats.trim_mean(arr[keep], trim, axis=0)
m_norm = mean / (np.linalg.norm(mean) + 1e-9)
e_norms = arr / (np.linalg.norm(arr, axis=1, keepdims=True) + 1e-9)
cos = e_norms @ m_norm
new_keep = cos >= outlier_threshold
if new_keep.sum() < floor:
top = np.argsort(-cos)[:floor]
new_keep = np.zeros(len(arr), dtype=bool)
new_keep[top] = True
if np.array_equal(new_keep, keep):
break
keep = new_keep
dropped = int((~keep).sum())
if dropped:
logger.debug(
f"Vector-wise outlier filter dropped {dropped}/{len(arr)} embeddings"
)
return np.asarray(stats.trim_mean(arr[keep], trim, axis=0))
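A synthetic-data sketch of the removed function's layer-1 behavior, assuming `build_class_mean` and its `scipy`/`numpy` imports are in scope:

```python
import numpy as np

rng = np.random.default_rng(0)
center = rng.normal(size=128)
center /= np.linalg.norm(center)

# Nine embeddings near the class center plus one flipped "wrong face":
good = [center + rng.normal(scale=0.05, size=128) for _ in range(9)]
outlier = [-center]  # cosine similarity ~ -1 against the class mean

# The vector-wise filter drops the outlier (cos < 0.30) before trim_mean.
mean_emb = build_class_mean(good + outlier)
print(mean_emb.shape)  # (128,)
```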
def similarity_to_confidence(
cosine_similarity: float,
median: float = 0.3,
@ -284,7 +229,7 @@ class FaceNetRecognizer(FaceRecognizer):
for name, embs in face_embeddings_map.items():
if embs:
self.mean_embs[name] = build_class_mean(embs)
self.mean_embs[name] = stats.trim_mean(embs, 0.15)
logger.debug("Finished building ArcFace model")
@ -395,7 +340,7 @@ class ArcFaceRecognizer(FaceRecognizer):
for name, embs in face_embeddings_map.items():
if embs:
self.mean_embs[name] = build_class_mean(embs)
self.mean_embs[name] = stats.trim_mean(embs, 0.15)
logger.debug("Finished building ArcFace model")

View File

@ -39,8 +39,6 @@ logger = logging.getLogger(__name__)
RECORDING_BUFFER_EXTENSION_PERCENT = 0.10
MIN_RECORDING_DURATION = 10
MAX_IMAGE_TOKENS = 24000
MAX_FRAMES_PER_SECOND = 2
class ReviewDescriptionProcessor(PostProcessorApi):
@ -62,22 +60,14 @@ class ReviewDescriptionProcessor(PostProcessorApi):
def calculate_frame_count(
self,
camera: str,
duration: float,
image_source: ImageSourceEnum = ImageSourceEnum.preview,
height: int = 480,
) -> int:
"""Calculate optimal number of frames based on event duration, context size,
image source, and resolution.
"""Calculate optimal number of frames based on context size, image source, and resolution.
Per-image token cost is queried from the GenAI provider, so providers that
know their model's true cost (e.g. llama.cpp can probe the loaded mmproj)
can diverge from the default ~1-token-per-1250-pixels heuristic. The frame
budget is bounded by:
- remaining context window after prompt + response reservations
- a fixed MAX_IMAGE_TOKENS ceiling
- MAX_FRAMES_PER_SECOND x duration, to avoid drowning short events in
near-duplicate frames where the model latches onto the redundant middle
and skips the start/end action
Token usage varies by resolution: larger images (ultra-wide aspect ratios) use more tokens.
Estimates ~1 token per 1250 pixels. Targets 98% context utilization with safety margin.
Capped at 20 frames.
"""
client = self.genai_manager.description_client
@ -115,15 +105,14 @@ class ReviewDescriptionProcessor(PostProcessorApi):
width = target_width
height = int(target_width / aspect_ratio)
tokens_per_image = client.estimate_image_tokens(width, height)
pixels_per_image = width * height
tokens_per_image = pixels_per_image / 1250
prompt_tokens = 3800
response_tokens = 300
context_budget = context_size - prompt_tokens - response_tokens
image_token_budget = min(context_budget, MAX_IMAGE_TOKENS)
max_frames_by_tokens = int(image_token_budget / tokens_per_image)
max_frames_by_duration = int(duration * MAX_FRAMES_PER_SECOND)
max_frames = min(max_frames_by_tokens, max_frames_by_duration)
return max(max_frames, 3)
available_tokens = context_size - prompt_tokens - response_tokens
max_frames = int(available_tokens / tokens_per_image)
return min(max(max_frames, 3), 20)
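For concreteness, a worked instance of the removed budget math under assumed values (an 8192-token context, 640x480 preview frames, a 30 s event):

```python
context_size = 8192
tokens_per_image = (640 * 480) / 1250            # 245.76, default heuristic
context_budget = context_size - 3800 - 300       # prompt/response reservations
image_token_budget = min(context_budget, 24000)  # MAX_IMAGE_TOKENS ceiling

max_frames_by_tokens = int(image_token_budget / tokens_per_image)  # 16
max_frames_by_duration = int(30 * 2)  # duration x MAX_FRAMES_PER_SECOND = 60

print(max(min(max_frames_by_tokens, max_frames_by_duration), 3))   # 16
```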
def process_data(
self, data: dict[str, Any], data_type: PostProcessDataEnum
@ -387,9 +376,7 @@ class ReviewDescriptionProcessor(PostProcessorApi):
all_frames.append(os.path.join(preview_dir, file))
frame_count = len(all_frames)
desired_frame_count = self.calculate_frame_count(
camera, duration=end_time - start_time
)
desired_frame_count = self.calculate_frame_count(camera)
if frame_count <= desired_frame_count:
return all_frames
@ -413,7 +400,7 @@ class ReviewDescriptionProcessor(PostProcessorApi):
"""Get frames from recordings at specified timestamps."""
duration = end_time - start_time
desired_frame_count = self.calculate_frame_count(
camera, duration, ImageSourceEnum.recordings, height
camera, ImageSourceEnum.recordings, height
)
# Calculate evenly spaced timestamps throughout the duration

View File

@ -4,15 +4,11 @@ from pydantic import BaseModel, ConfigDict, Field
class ReviewMetadata(BaseModel):
model_config = ConfigDict(extra="ignore", protected_namespaces=())
observations: list[str] = Field(
default_factory=list,
description="Chronological list of significant observations from the frames, written before the scene narrative is composed.",
)
title: str = Field(
description="A short title characterizing what took place and where, under 10 words."
)
scene: str = Field(
description="A chronological narrative of what happens from start to finish.",
description="A chronological narrative of what happens from start to finish."
)
shortSummary: str = Field(
description="A brief 2-sentence summary of the scene, suitable for notifications."

View File

@ -310,10 +310,6 @@ class EmbeddingMaintainer(threading.Thread):
self._handle_custom_classification_update(topic, payload)
return
if topic == "config/genai":
self.config.genai = payload
self.genai_manager.update_config(self.config)
# Broadcast to all processors — each decides if the topic is relevant
for processor in self.realtime_processors:
processor.update_config(topic, payload)

View File

@ -151,50 +151,6 @@ Each line represents a detection state, not necessarily unique individuals. The
if "other_concerns" in schema.get("required", []):
schema["required"].remove("other_concerns")
# Length hints injected into the schema as suggestions to the model
# (enforced by grammar-based providers like llama.cpp) but kept off the
# Pydantic model so a non-compliant response does not fail validation.
length_hints = {
"scene": {"minLength": 120, "maxLength": 600},
"shortSummary": {"minLength": 70, "maxLength": 100},
}
for field, hints in length_hints.items():
prop = schema.get("properties", {}).get(field)
if prop is not None:
prop.update(hints)
# observations is a chain-of-thought-by-schema field: forcing the model
# to enumerate concrete facts before writing scene/title surfaces details
# the narrative would otherwise gloss past (e.g. brief vehicle arrivals
# overshadowed by a longer activity). The minItems floor scales with
# event duration so longer clips get more observations.
observations_prop = schema.get("properties", {}).get("observations")
if observations_prop is not None:
duration_seconds = float(review_data.get("duration") or 0)
min_observations = max(3, round(duration_seconds / 5))
max_observations = min_observations + 8
observations_prop["description"] = (
"Enumerate the significant observations across all frames, in "
"chronological order, BEFORE composing the scene narrative. "
"Include the very start of the activity — for example, a "
"vehicle entering the frame or pulling into the driveway — "
"even if it lasts only a few frames and the rest of the clip "
"is dominated by a longer activity. Include each arrival, "
"departure, motion event, object handled, and notable change "
"in position or state. Each item is a single concrete fact "
"written as a complete sentence (e.g., 'A blue sedan turns "
"from the street into the driveway', 'Nick exits the driver "
"side carrying a plant pot'). Do not summarize, interpret, or "
"assign meaning here — that belongs in the scene field."
)
observations_prop["minItems"] = min_observations
observations_prop["maxItems"] = max_observations
observations_prop["items"] = {"type": "string", "minLength": 20}
required = schema.setdefault("required", [])
if "observations" not in required:
required.append("observations")
# OpenAI strict mode requires additionalProperties: false on all objects
schema["additionalProperties"] = False
@ -388,14 +344,6 @@ Guidelines:
"""Get the context window size for this provider in tokens."""
return 4096
def estimate_image_tokens(self, width: int, height: int) -> float:
"""Estimate prompt tokens consumed by a single image of the given dimensions.
Default heuristic: ~1 token per 1250 pixels. Providers that can measure or
know their model's exact image-token cost should override.
"""
return (width * height) / 1250
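For scale, the default heuristic works out to roughly:

```python
print((1280 * 720) / 1250)  # 737.28 tokens per image
print((640 * 480) / 1250)   # 245.76 tokens per image
```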
def embed(
self,
texts: list[str] | None = None,

View File

@ -42,8 +42,6 @@ class LlamaCppClient(GenAIClient):
_supports_vision: bool
_supports_audio: bool
_supports_tools: bool
_image_token_cache: dict[tuple[int, int], int]
_text_baseline_tokens: int | None
def _init_provider(self) -> str | None:
"""Initialize the client and query model metadata from the server."""
@ -54,8 +52,6 @@ class LlamaCppClient(GenAIClient):
self._supports_vision = False
self._supports_audio = False
self._supports_tools = False
self._image_token_cache = {}
self._text_baseline_tokens = None
base_url = (
self.genai_config.base_url.rstrip("/")
@ -276,91 +272,6 @@ class LlamaCppClient(GenAIClient):
return self._context_size
return 4096
def estimate_image_tokens(self, width: int, height: int) -> float:
"""Probe the llama.cpp server to learn the model's image-token cost at the
requested dimensions.
llama.cpp's image tokenization is a deterministic function of dimensions and
the loaded mmproj, so the result is cached per (width, height) for the
lifetime of the process. Falls back to the base pixel heuristic if the
server is unreachable or the response is malformed.
"""
if self.provider is None:
return super().estimate_image_tokens(width, height)
cached = self._image_token_cache.get((width, height))
if cached is not None:
return cached
try:
baseline = self._probe_baseline_tokens()
with_image = self._probe_image_prompt_tokens(width, height)
tokens = max(1, with_image - baseline)
except Exception as e:
logger.debug(
"llama.cpp image-token probe failed for %dx%d (%s); using heuristic",
width,
height,
e,
)
return super().estimate_image_tokens(width, height)
self._image_token_cache[(width, height)] = tokens
logger.debug(
"llama.cpp model '%s' uses ~%d tokens for %dx%d images",
self.genai_config.model,
tokens,
width,
height,
)
return tokens
def _probe_baseline_tokens(self) -> int:
"""Return prompt_tokens for a minimal text-only request. Cached after first call."""
if self._text_baseline_tokens is not None:
return self._text_baseline_tokens
self._text_baseline_tokens = self._probe_prompt_tokens(
[{"type": "text", "text": "."}]
)
return self._text_baseline_tokens
def _probe_image_prompt_tokens(self, width: int, height: int) -> int:
"""Return prompt_tokens for a single synthetic image plus minimal text."""
img = Image.new("RGB", (width, height), (128, 128, 128))
buf = io.BytesIO()
img.save(buf, format="JPEG", quality=60)
encoded = base64.b64encode(buf.getvalue()).decode("utf-8")
return self._probe_prompt_tokens(
[
{"type": "text", "text": "."},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{encoded}"},
},
]
)
def _probe_prompt_tokens(self, content: list[dict[str, Any]]) -> int:
"""POST a 1-token chat completion and return reported prompt_tokens.
Uses a generous timeout to absorb a cold model load on the first probe
when the server lazily loads models on demand (e.g. llama-swap).
"""
payload = {
"model": self.genai_config.model,
"messages": [{"role": "user", "content": content}],
"max_tokens": 1,
}
response = requests.post(
f"{self.provider}/v1/chat/completions",
json=payload,
timeout=60,
)
response.raise_for_status()
return int(response.json()["usage"]["prompt_tokens"])
def _build_payload(
self,
messages: list[dict[str, Any]],

View File

@ -113,15 +113,6 @@ class OllamaClient(GenAIClient):
schema = response_format.get("json_schema", {}).get("schema")
if schema:
ollama_options["format"] = self._clean_schema_for_ollama(schema)
logger.debug(
"Ollama generate request: model=%s, prompt_len=%s, image_count=%s, "
"has_format=%s, options=%s",
self.genai_config.model,
len(prompt),
len(images) if images else 0,
"format" in ollama_options,
{k: v for k, v in ollama_options.items() if k != "format"},
)
result = self.provider.generate(
self.genai_config.model,
prompt,
@ -129,24 +120,9 @@ class OllamaClient(GenAIClient):
**ollama_options,
)
logger.debug(
"Ollama generate response: done=%s, done_reason=%s, eval_count=%s, "
"prompt_eval_count=%s, response_len=%s",
result.get("done"),
result.get("done_reason"),
result.get("eval_count"),
result.get("prompt_eval_count"),
len(result.get("response", "") or ""),
f"Ollama tokens used: eval_count={result.get('eval_count')}, prompt_eval_count={result.get('prompt_eval_count')}"
)
response_text = str(result["response"]).strip()
if not response_text:
logger.warning(
"Ollama returned a blank response for model %s (done_reason=%s, "
"eval_count=%s). Check model output and ensure thinking is disabled.",
self.genai_config.model,
result.get("done_reason"),
result.get("eval_count"),
)
return response_text
return str(result["response"]).strip()
except (
TimeoutException,
ResponseError,

View File

@ -80,23 +80,7 @@ class OpenAIClient(GenAIClient):
and hasattr(result, "choices")
and len(result.choices) > 0
):
message = result.choices[0].message
content = message.content
if not content:
# When reasoning is enabled for some OpenAI backends the actual response
# is incorrectly placed in reasoning_content instead of content.
# This is buggy/incorrect behavior — reasoning should not be
# enabled for these models.
reasoning_content = getattr(message, "reasoning_content", None)
if reasoning_content:
logger.warning(
"Response content was empty but reasoning_content was provided; "
"reasoning appears to be enabled and should be disabled for this model."
)
content = reasoning_content
return str(content.strip()) if content else None
return str(result.choices[0].message.content.strip())
return None
except (TimeoutException, Exception) as e:
logger.warning("OpenAI returned an error: %s", str(e))

View File

@ -19,7 +19,6 @@ import numpy as np
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import BirdseyeModeEnum, FfmpegConfig, FrigateConfig
from frigate.const import BASE_DIR, BIRDSEYE_PIPE, INSTALL_DIR, UPDATE_BIRDSEYE_LAYOUT
from frigate.output.ws_auth import ws_has_camera_access
from frigate.util.image import (
SharedMemoryFrameManager,
copy_yuv_to_position,
@ -237,14 +236,12 @@ class BroadcastThread(threading.Thread):
converter: FFMpegConverter,
websocket_server: Any,
stop_event: MpEvent,
config: FrigateConfig,
):
super().__init__()
self.camera = camera
self.converter = converter
self.websocket_server = websocket_server
self.stop_event = stop_event
self.config = config
def run(self) -> None:
while not self.stop_event.is_set():
@ -259,7 +256,6 @@ class BroadcastThread(threading.Thread):
if (
not ws.terminated
and ws.environ["PATH_INFO"] == f"/{self.camera}"
and ws_has_camera_access(ws, self.camera, self.config)
):
try:
ws.send(buf, binary=True)
@ -810,11 +806,7 @@ class Birdseye:
config.birdseye.restream,
)
self.broadcaster = BroadcastThread(
"birdseye",
self.converter,
websocket_server,
stop_event,
config,
"birdseye", self.converter, websocket_server, stop_event
)
self.birdseye_manager = BirdsEyeFrameManager(self.config, stop_event)
self.frame_manager = SharedMemoryFrameManager()

View File

@ -7,8 +7,7 @@ import threading
from multiprocessing.synchronize import Event as MpEvent
from typing import Any
from frigate.config import CameraConfig, FfmpegConfig, FrigateConfig
from frigate.output.ws_auth import ws_has_camera_access
from frigate.config import CameraConfig, FfmpegConfig
logger = logging.getLogger(__name__)
@ -103,14 +102,12 @@ class BroadcastThread(threading.Thread):
converter: FFMpegConverter,
websocket_server: Any,
stop_event: MpEvent,
config: FrigateConfig,
):
super().__init__()
self.camera = camera
self.converter = converter
self.websocket_server = websocket_server
self.stop_event = stop_event
self.config = config
def run(self) -> None:
while not self.stop_event.is_set():
@ -125,7 +122,6 @@ class BroadcastThread(threading.Thread):
if (
not ws.terminated
and ws.environ["PATH_INFO"] == f"/{self.camera}"
and ws_has_camera_access(ws, self.camera, self.config)
):
try:
ws.send(buf, binary=True)
@ -139,11 +135,7 @@ class BroadcastThread(threading.Thread):
class JsmpegCamera:
def __init__(
self,
config: CameraConfig,
frigate_config: FrigateConfig,
stop_event: MpEvent,
websocket_server: Any,
self, config: CameraConfig, stop_event: MpEvent, websocket_server: Any
) -> None:
self.config = config
self.input: queue.Queue[bytes] = queue.Queue(maxsize=config.detect.fps)
@ -162,11 +154,7 @@ class JsmpegCamera:
config.live.quality,
)
self.broadcaster = BroadcastThread(
config.name or "",
self.converter,
websocket_server,
stop_event,
frigate_config,
config.name or "", self.converter, websocket_server, stop_event
)
self.converter.start()

View File

@ -32,7 +32,6 @@ from frigate.const import (
from frigate.output.birdseye import Birdseye
from frigate.output.camera import JsmpegCamera
from frigate.output.preview import PreviewRecorder
from frigate.output.ws_auth import ws_has_camera_access
from frigate.util.image import SharedMemoryFrameManager, get_blank_yuv_frame
from frigate.util.process import FrigateProcess
@ -103,7 +102,7 @@ class OutputProcess(FrigateProcess):
) -> None:
camera_config = self.config.cameras[camera]
jsmpeg_cameras[camera] = JsmpegCamera(
camera_config, self.config, self.stop_event, websocket_server
camera_config, self.stop_event, websocket_server
)
preview_recorders[camera] = PreviewRecorder(camera_config)
preview_write_times[camera] = 0
@ -263,7 +262,6 @@ class OutputProcess(FrigateProcess):
# send camera frame to ffmpeg process if websockets are connected
if any(
ws.environ["PATH_INFO"].endswith(camera)
and ws_has_camera_access(ws, camera, self.config)
for ws in websocket_server.manager
):
# write to the converter for the camera if clients are listening to the specific camera
@ -277,7 +275,6 @@ class OutputProcess(FrigateProcess):
self.config.birdseye.restream
or any(
ws.environ["PATH_INFO"].endswith("birdseye")
and ws_has_camera_access(ws, "birdseye", self.config)
for ws in websocket_server.manager
)
)

View File

@ -1,43 +0,0 @@
"""Authorization helpers for JSMPEG websocket clients."""
from typing import Any
from frigate.config import FrigateConfig
from frigate.models import User
def _get_valid_ws_roles(ws: Any, config: FrigateConfig) -> list[str]:
role_header = ws.environ.get("HTTP_REMOTE_ROLE", "")
roles = [
role.strip()
for role in role_header.split(config.proxy.separator)
if role.strip()
]
return [role for role in roles if role in config.auth.roles]
def ws_has_camera_access(ws: Any, camera_name: str, config: FrigateConfig) -> bool:
"""Return True when a websocket client is authorized for the camera path."""
roles = _get_valid_ws_roles(ws, config)
if not roles:
return False
roles_dict = config.auth.roles
# Birdseye is a composite stream, so only users with unrestricted access
# should receive it.
if camera_name == "birdseye":
return any(role == "admin" or not roles_dict.get(role) for role in roles)
all_camera_names = set(config.cameras.keys())
for role in roles:
if role == "admin" or not roles_dict.get(role):
return True
allowed_cameras = User.get_allowed_cameras(role, roles_dict, all_camera_names)
if camera_name in allowed_cameras:
return True
return False

View File

@ -23,26 +23,6 @@ class TestHttpApp(BaseTestHttp):
response_json = response.json()
assert response_json == self.test_stats
def test_recordings_storage_requires_admin(self):
stats = Mock(spec=StatsEmitter)
stats.get_latest_stats.return_value = self.test_stats
app = super().create_app(stats)
app.storage_maintainer = Mock()
app.storage_maintainer.calculate_camera_usages.return_value = {
"front_door": {"usage": 2.0},
}
with AuthTestClient(app) as client:
response = client.get(
"/recordings/storage",
headers={"remote-user": "viewer", "remote-role": "viewer"},
)
assert response.status_code == 403
response = client.get("/recordings/storage")
assert response.status_code == 200
assert response.json()["front_door"]["usage_percent"] == 25.0
def test_config_set_in_memory_replaces_objects_track_list(self):
self.minimal_config["cameras"]["front_door"]["objects"] = {
"track": ["person", "car"],

View File

@ -219,25 +219,6 @@ class TestHttpApp(BaseTestHttp):
assert len(events) == 1
assert events[0]["id"] == event_id
def test_similarity_search_hides_unauthorized_anchor_event(self):
mock_embeddings = Mock()
self.app.frigate_config.semantic_search.enabled = True
self.app.embeddings = mock_embeddings
with AuthTestClient(self.app) as client:
super().insert_mock_event("hidden.anchor", camera="back_door")
response = client.get(
"/events/search",
params={
"search_type": "similarity",
"event_id": "hidden.anchor",
},
)
assert response.status_code == 404
assert response.json()["message"] == "Event not found"
mock_embeddings.search_thumbnail.assert_not_called()
def test_get_good_event(self):
id = "123456.random"

View File

@ -145,12 +145,9 @@ class TestExecuteFindSimilarObjects(unittest.TestCase):
embeddings=embeddings,
frigate_config=SimpleNamespace(
semantic_search=SimpleNamespace(enabled=semantic_enabled),
cameras={"driveway": object()},
auth=SimpleNamespace(roles={"admin": [], "viewer": ["driveway"]}),
proxy=SimpleNamespace(separator=","),
),
)
return SimpleNamespace(app=app, headers={})
return SimpleNamespace(app=app)
def test_semantic_search_disabled_returns_error(self):
req = self._make_request(semantic_enabled=False)
@ -183,7 +180,7 @@ class TestExecuteFindSimilarObjects(unittest.TestCase):
_execute_find_similar_objects(
req,
{"event_id": "anchor", "cameras": ["nonexistent_cam"]},
allowed_cameras=["driveway"],
allowed_cameras=["nonexistent_cam"],
)
)
self.assertEqual(result["results"], [])

View File

@ -1,57 +0,0 @@
"""Tests for JSMPEG websocket authorization."""
import unittest
from types import SimpleNamespace
from frigate.config import FrigateConfig
from frigate.output.ws_auth import ws_has_camera_access
class TestWsHasCameraAccess(unittest.TestCase):
def setUp(self):
self.config = FrigateConfig(
mqtt={"host": "mqtt"},
auth={"roles": {"limited_user": ["front_door"]}},
cameras={
"front_door": {
"ffmpeg": {
"inputs": [
{"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
]
},
"detect": {"height": 1080, "width": 1920, "fps": 5},
},
"back_door": {
"ffmpeg": {
"inputs": [
{"path": "rtsp://10.0.0.2:554/video", "roles": ["detect"]}
]
},
"detect": {"height": 1080, "width": 1920, "fps": 5},
},
},
)
def _make_ws(self, role: str):
return SimpleNamespace(environ={"HTTP_REMOTE_ROLE": role})
def test_restricted_role_only_gets_allowed_camera(self):
ws = self._make_ws("limited_user")
self.assertTrue(ws_has_camera_access(ws, "front_door", self.config))
self.assertFalse(ws_has_camera_access(ws, "back_door", self.config))
def test_unrestricted_role_can_access_any_camera(self):
ws = self._make_ws("viewer")
self.assertTrue(ws_has_camera_access(ws, "front_door", self.config))
self.assertTrue(ws_has_camera_access(ws, "back_door", self.config))
def test_birdseye_requires_unrestricted_access(self):
self.assertTrue(
ws_has_camera_access(self._make_ws("admin"), "birdseye", self.config)
)
self.assertTrue(
ws_has_camera_access(self._make_ws("viewer"), "birdseye", self.config)
)
self.assertFalse(
ws_has_camera_access(self._make_ws("limited_user"), "birdseye", self.config)
)

View File

@ -1,29 +0,0 @@
"""Tests for camera monitoring notification authorization."""
import unittest
from types import SimpleNamespace
from unittest.mock import MagicMock
from frigate.comms.webpush import WebPushClient
class TestCameraMonitoringNotifications(unittest.TestCase):
def test_send_camera_monitoring_filters_by_camera_access(self):
client = WebPushClient.__new__(WebPushClient)
client.config = SimpleNamespace(
cameras={"front_door": SimpleNamespace(friendly_name=None)}
)
client.web_pushers = {"allowed": [], "denied": []}
client.user_cameras = {"allowed": {"front_door"}, "denied": set()}
client.check_registrations = MagicMock()
client.cleanup_registrations = MagicMock()
client.send_push_notification = MagicMock()
client.send_camera_monitoring(
{"camera": "front_door", "message": "Monitoring condition met"}
)
self.assertEqual(client.send_push_notification.call_count, 1)
self.assertEqual(
client.send_push_notification.call_args.kwargs["user"], "allowed"
)

View File

@ -24,12 +24,8 @@ from frigate.log import redirect_output_to_logger, suppress_stderr_during
from frigate.models import Event, Recordings, ReviewSegment
from frigate.types import ModelStatusTypesEnum
from frigate.util.downloader import ModelDownloader
from frigate.util.file import get_event_thumbnail_bytes, load_event_snapshot_image
from frigate.util.image import (
calculate_region,
get_image_from_recording,
relative_box_to_absolute,
)
from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.image import get_image_from_recording
from frigate.util.process import FrigateProcess
BATCH_SIZE = 16
@ -717,7 +713,7 @@ def collect_object_classification_examples(
This function:
1. Queries events for the specified label
2. Selects 100 balanced events across different cameras and times
3. Crops each event's clean snapshot around the object bounding box
3. Retrieves thumbnails for selected events (with 33% center crop applied)
4. Selects 24 most visually distinct thumbnails
5. Saves to dataset directory
@ -836,80 +832,29 @@ def _select_balanced_events(
def _extract_event_thumbnails(events: list[Event], output_dir: str) -> list[str]:
"""
Extract a training image for each event.
Preferred path: load the full-frame clean snapshot and crop around the
stored bounding box with the same calculate_region(..., max(w, h), 1.0)
call the live ObjectClassificationProcessor uses, so wizard examples
are framed like inference-time inputs.
Fallback: if no clean snapshot exists (snapshots disabled, or only a
legacy annotated JPG is on disk), center-crop the stored thumbnail
using a step ladder sized from the box/region area ratio.
Extract thumbnails from events and save to disk.
Args:
events: List of Event objects
output_dir: Directory to save crops
output_dir: Directory to save thumbnails
Returns:
List of paths to successfully extracted images
List of paths to successfully extracted thumbnail images
"""
image_paths = []
thumbnail_paths = []
for idx, event in enumerate(events):
try:
img = _load_event_classification_crop(event)
if img is None:
continue
resized = cv2.resize(img, (224, 224))
output_path = os.path.join(output_dir, f"thumbnail_{idx:04d}.jpg")
cv2.imwrite(output_path, resized)
image_paths.append(output_path)
except Exception as e:
logger.debug(f"Failed to extract image for event {event.id}: {e}")
continue
return image_paths
def _load_event_classification_crop(event: Event) -> np.ndarray | None:
"""Prefer a snapshot-based object crop; fall back to a center-cropped thumbnail."""
if event.data and "box" in event.data:
snapshot, _ = load_event_snapshot_image(event, clean_only=True)
if snapshot is not None:
abs_box = relative_box_to_absolute(snapshot.shape, event.data["box"])
if abs_box is not None:
xmin, ymin, xmax, ymax = abs_box
box_w = xmax - xmin
box_h = ymax - ymin
if box_w > 0 and box_h > 0:
x1, y1, x2, y2 = calculate_region(
snapshot.shape,
xmin,
ymin,
xmax,
ymax,
max(box_w, box_h),
1.0,
)
cropped = snapshot[y1:y2, x1:x2]
if cropped.size > 0:
return cropped
thumbnail_bytes = get_event_thumbnail_bytes(event)
if not thumbnail_bytes:
return None
if thumbnail_bytes:
nparr = np.frombuffer(thumbnail_bytes, np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
if img is None or img.size == 0:
return None
if img is not None:
height, width = img.shape[:2]
crop_size = 1.0
crop_size = 1.0
if event.data and "box" in event.data and "region" in event.data:
box = event.data["box"]
region = event.data["region"]
@ -917,6 +862,7 @@ def _load_event_classification_crop(event: Event) -> np.ndarray | None:
if len(box) == 4 and len(region) == 4:
box_w, box_h = box[2], box[3]
region_w, region_h = region[2], region[3]
box_area = (box_w * box_h) / (region_w * region_h)
if box_area < 0.05:
@ -932,10 +878,20 @@ def _load_event_classification_crop(event: Event) -> np.ndarray | None:
crop_width = int(width * crop_size)
crop_height = int(height * crop_size)
x1 = (width - crop_width) // 2
y1 = (height - crop_height) // 2
cropped = img[y1 : y1 + crop_height, x1 : x1 + crop_width]
if cropped.size == 0:
return None
x2 = x1 + crop_width
y2 = y1 + crop_height
return cropped
cropped = img[y1:y2, x1:x2]
resized = cv2.resize(cropped, (224, 224))
output_path = os.path.join(output_dir, f"thumbnail_{idx:04d}.jpg")
cv2.imwrite(output_path, resized)
thumbnail_paths.append(output_path)
except Exception as e:
logger.debug(f"Failed to extract thumbnail for event {event.id}: {e}")
continue
return thumbnail_paths

View File

@ -711,11 +711,8 @@ def ffprobe_stream(ffmpeg, path: str, detailed: bool = False) -> sp.CompletedPro
else:
format_entries = None
def run(rtsp_transport: Optional[str] = None) -> sp.CompletedProcess:
cmd = [ffmpeg.ffprobe_path]
if rtsp_transport:
cmd += ["-rtsp_transport", rtsp_transport]
cmd += [
ffprobe_cmd = [
ffmpeg.ffprobe_path,
"-timeout",
"1000000",
"-print_format",
@ -723,32 +720,14 @@ def ffprobe_stream(ffmpeg, path: str, detailed: bool = False) -> sp.CompletedPro
"-show_entries",
f"stream={stream_entries}",
]
# Add format entries for detailed mode
if detailed and format_entries:
cmd.extend(["-show_entries", f"format={format_entries}"])
cmd.extend(["-loglevel", "error", clean_path])
try:
return sp.run(cmd, capture_output=True, timeout=6)
except sp.TimeoutExpired as e:
logger.info(
"ffprobe timed out while probing %s (transport=%s)",
clean_camera_user_pass(path),
rtsp_transport or "default",
)
return sp.CompletedProcess(
args=cmd,
returncode=1,
stdout=e.stdout or b"",
stderr=(e.stderr or b"") + b"\nffprobe timed out",
)
ffprobe_cmd.extend(["-show_entries", f"format={format_entries}"])
result = run()
ffprobe_cmd.extend(["-loglevel", "error", clean_path])
# For RTSP: retry with explicit TCP transport if the first attempt failed
# (default UDP may be blocked)
if result.returncode != 0 and clean_path.startswith("rtsp://"):
result = run(rtsp_transport="tcp")
return result
return sp.run(ffprobe_cmd, capture_output=True)
def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess:
@ -828,15 +807,10 @@ async def get_video_properties(
) -> dict[str, Any]:
async def probe_with_ffprobe(
url: str,
rtsp_transport: Optional[str] = None,
) -> tuple[bool, int, int, Optional[str], float]:
"""Fallback using ffprobe: returns (valid, width, height, codec, duration)."""
cmd = [ffmpeg.ffprobe_path]
if rtsp_transport:
cmd += ["-rtsp_transport", rtsp_transport]
cmd += [
"-rw_timeout",
"5000000",
cmd = [
ffmpeg.ffprobe_path,
"-v",
"quiet",
"-print_format",
@ -845,23 +819,11 @@ async def get_video_properties(
"-show_streams",
url,
]
proc = None
try:
proc = await asyncio.create_subprocess_exec(
*cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
try:
stdout, _ = await asyncio.wait_for(proc.communicate(), timeout=6)
except asyncio.TimeoutError:
logger.info(
"ffprobe timed out while probing %s (transport=%s)",
clean_camera_user_pass(url),
rtsp_transport or "default",
)
proc.kill()
await proc.wait()
return False, 0, 0, None, -1
stdout, _ = await proc.communicate()
if proc.returncode != 0:
return False, 0, 0, None, -1
@ -910,27 +872,13 @@ async def get_video_properties(
cap.release()
return valid, width, height, fourcc, duration
is_rtsp = url.startswith("rtsp://")
if is_rtsp:
# skip cv2 for RTSP: its FFmpeg backend has a hardcoded ~30s internal
# timeout that cannot be shortened per-call, and ffprobe bounded by
# -rw_timeout handles RTSP probing reliably
has_video, width, height, fourcc, duration = await probe_with_ffprobe(url)
else:
# try cv2 first for local files, HTTP, RTMP
# try cv2 first
has_video, width, height, fourcc, duration = probe_with_cv2(url)
# fallback to ffprobe if needed
if not has_video or (get_duration and duration < 0):
has_video, width, height, fourcc, duration = await probe_with_ffprobe(url)
# last resort for RTSP: try TCP transport, since default UDP may be blocked
if (not has_video or (get_duration and duration < 0)) and is_rtsp:
has_video, width, height, fourcc, duration = await probe_with_ffprobe(
url, rtsp_transport="tcp"
)
result: dict[str, Any] = {"has_valid_video": has_video}
if has_video:
result.update({"width": width, "height": height})

View File

@ -1,376 +0,0 @@
#!/usr/bin/env python3
"""Analyze keyframe and timestamp structure of Frigate recording segments.
This is a diagnostic tool for investigating seek precision / GOP behavior on
recorded segments. It does not modify anything.
ffprobe is only available inside the Frigate container, at
/usr/lib/ffmpeg/$DEFAULT_FFMPEG_VERSION/bin/ffprobe
This script auto-resolves that path from the DEFAULT_FFMPEG_VERSION env var
(or falls back to scanning /usr/lib/ffmpeg/*/bin/ffprobe). Pass --ffprobe to
override if needed.
All recording segments on the filesystem are in UTC. The --timestamp flag
expects a UTC Unix timestamp.
Typical use:
# Inside the Frigate container (or wherever recordings are mounted)
python3 analyze_recording_keyframes.py <camera_name>
# Analyze 10 most recent segments
python3 analyze_recording_keyframes.py <camera_name> --count 10
# Locate the segment that contains a specific UTC Unix timestamp and
# show it plus surrounding segments
python3 analyze_recording_keyframes.py <camera> --timestamp 1713471234.567
# Custom recordings directory
python3 analyze_recording_keyframes.py <camera> --recordings-dir /media/frigate/recordings
# Override the ffprobe path explicitly
python3 analyze_recording_keyframes.py <camera> --ffprobe /usr/lib/ffmpeg/7.0/bin/ffprobe
"""
import argparse
import datetime
import json
import os
import subprocess
import sys
from pathlib import Path
from statistics import mean, median, stdev
def resolve_ffprobe_path(override: str | None) -> str:
"""Resolve the ffprobe binary path.
Inside the Frigate container, ffprobe lives at
/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffprobe; the exact version
depends on the image build and is exposed as an env var.
"""
if override:
return override
version = os.environ.get("DEFAULT_FFMPEG_VERSION", "")
if version:
path = f"/usr/lib/ffmpeg/{version}/bin/ffprobe"
if Path(path).is_file():
return path
# Fall back to scanning the Frigate ffmpeg install root.
for candidate in sorted(Path("/usr/lib/ffmpeg").glob("*/bin/ffprobe")):
if candidate.is_file():
return str(candidate)
print(
"Could not locate ffprobe. Pass --ffprobe <path> or set "
"DEFAULT_FFMPEG_VERSION.",
file=sys.stderr,
)
sys.exit(1)
def find_recent_segments(recordings_dir: Path, camera: str, count: int) -> list[Path]:
"""Return the N most recent .mp4 segments for the given camera.
Expected layout: <recordings_dir>/<YYYY-MM-DD>/<HH>/<camera>/<MM>.<SS>.mp4
"""
pattern = f"*/*/{camera}/*.mp4"
segments = sorted(recordings_dir.glob(pattern))
return segments[-count:]
def find_segments_near_timestamp(
recordings_dir: Path, camera: str, target_ts: float, count: int
) -> tuple[list[Path], Path | None]:
"""Return `count` segments centered on the one containing `target_ts`.
Also returns the specific segment that should contain the timestamp, so
callers can highlight it in output.
"""
pattern = f"*/*/{camera}/*.mp4"
with_ts: list[tuple[float, Path]] = []
for seg in sorted(recordings_dir.glob(pattern)):
ts = filename_to_timestamp(seg)
if ts is not None:
with_ts.append((ts, seg))
if not with_ts:
return [], None
# Largest filename_ts that is <= target_ts — that's the segment that
# should contain the timestamp (Frigate catalogs segments by filename).
target_idx = -1
for i, (ts, _) in enumerate(with_ts):
if ts <= target_ts:
target_idx = i
else:
break
if target_idx < 0:
# target_ts is before the earliest segment we have — just return the
# first `count` segments so the user can see what's available.
window = with_ts[:count]
return [seg for _, seg in window], None
half = count // 2
start = max(0, target_idx - half)
end = min(len(with_ts), start + count)
start = max(0, end - count)
window = with_ts[start:end]
return [seg for _, seg in window], with_ts[target_idx][1]
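The double clamp keeps the window full near either end of the catalog; for example:

```python
# count=5 around target index 10 of 12 segments:
target_idx, count, n = 10, 5, 12
half = count // 2                    # 2
start = max(0, target_idx - half)    # 8
end = min(n, start + count)          # 12, clamped at the newest segment
start = max(0, end - count)          # 7 -> window covers 7..11, target inside
```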
def filename_to_timestamp(segment: Path) -> float | None:
"""Parse the wall-clock time from Frigate's segment path layout."""
try:
date = segment.parent.parent.parent.name # YYYY-MM-DD
hour = segment.parent.parent.name # HH
mm_ss = segment.stem # MM.SS
minute, second = mm_ss.split(".")
dt = datetime.datetime.strptime(
f"{date} {hour}:{minute}:{second}",
"%Y-%m-%d %H:%M:%S",
).replace(tzinfo=datetime.timezone.utc)
return dt.timestamp()
except (ValueError, IndexError):
return None
def run_ffprobe(ffprobe: str, args: list[str]) -> dict:
"""Run ffprobe and return parsed JSON, or empty dict on failure."""
result = subprocess.run(
[ffprobe, "-v", "error", *args, "-of", "json"],
capture_output=True,
text=True,
check=False,
)
if result.returncode != 0:
print(f" ffprobe error: {result.stderr.strip()}", file=sys.stderr)
return {}
try:
return json.loads(result.stdout)
except json.JSONDecodeError:
return {}
def get_format_info(ffprobe: str, segment: Path) -> tuple[dict, dict]:
"""Return (format_dict, stream_dict) for the first video stream."""
data = run_ffprobe(
ffprobe,
[
"-show_entries",
"format=duration,start_time",
"-show_entries",
"stream=codec_name,profile,r_frame_rate,width,height",
"-select_streams",
"v:0",
str(segment),
],
)
fmt = data.get("format", {})
streams = data.get("streams") or [{}]
return fmt, streams[0]
def get_video_packets(ffprobe: str, segment: Path) -> list[dict]:
"""Return video packets with pts_time and flags."""
data = run_ffprobe(
ffprobe,
[
"-select_streams",
"v",
"-show_entries",
"packet=pts_time,dts_time,flags",
str(segment),
],
)
return data.get("packets", [])
def analyze(ffprobe: str, segment: Path, highlight: bool = False) -> None:
marker = " <-- contains target timestamp" if highlight else ""
print(f"\n=== {segment} ==={marker}")
fmt, stream = get_format_info(ffprobe, segment)
duration = float(fmt.get("duration", 0) or 0)
start_time = float(fmt.get("start_time", 0) or 0)
codec = stream.get("codec_name", "?")
profile = stream.get("profile", "?")
width = stream.get("width", "?")
height = stream.get("height", "?")
fps = stream.get("r_frame_rate", "?/1")
filename_ts = filename_to_timestamp(segment)
filename_iso = (
datetime.datetime.fromtimestamp(
filename_ts, tz=datetime.timezone.utc
).isoformat()
if filename_ts is not None
else "?"
)
print(f" Codec: {codec} ({profile}) {width}x{height} {fps}")
print(f" Filename time: {filename_ts} ({filename_iso})")
print(f" Format duration: {duration:.3f}s")
print(f" Format start: {start_time:.3f}s (PTS offset of first packet)")
packets = get_video_packets(ffprobe, segment)
if not packets:
print(" (no video packets)")
return
keyframe_times: list[float] = []
first_pts: float | None = None
last_pts: float | None = None
for pkt in packets:
pts_str = pkt.get("pts_time")
if pts_str is None or pts_str == "N/A":
continue
pts = float(pts_str)
if first_pts is None:
first_pts = pts
last_pts = pts
if "K" in pkt.get("flags", ""):
keyframe_times.append(pts)
total_packets = len(packets)
kf_count = len(keyframe_times)
print(f" Video packets: {total_packets}")
print(f" Keyframes: {kf_count}")
if first_pts is not None and last_pts is not None:
print(
f" Packet PTS: first={first_pts:.3f}s last={last_pts:.3f}s "
f"span={last_pts - first_pts:.3f}s"
)
if keyframe_times:
print(
f" Keyframe PTS: first={keyframe_times[0]:.3f}s "
f"last={keyframe_times[-1]:.3f}s"
)
formatted = ", ".join(f"{t:.3f}" for t in keyframe_times)
print(f" Keyframe times: [{formatted}]")
if len(keyframe_times) >= 2:
gaps = [b - a for a, b in zip(keyframe_times, keyframe_times[1:])]
avg_fps_estimate = (
total_packets / (last_pts - first_pts)
if last_pts and first_pts is not None and last_pts > first_pts
else 0
)
print(
f" GOP gaps (s): min={min(gaps):.3f} max={max(gaps):.3f} "
f"mean={mean(gaps):.3f} median={median(gaps):.3f}"
)
if len(gaps) > 1:
print(f" stdev={stdev(gaps):.3f}")
print(
f" Est. mean GOP: ~{mean(gaps) * avg_fps_estimate:.1f} frames"
if avg_fps_estimate
else ""
)
if max(gaps) > 5:
print(
" !! Max GOP > 5s — consistent with adaptive/smart codec "
"(even if 'Smart Codec' is off in the UI, some cameras still "
"produce irregular GOPs under specific encoder profiles)"
)
elif kf_count == 1:
print(" !! Only one keyframe in segment — very long GOP")
# Report how well filename time aligns with first-packet PTS.
# (Filename time is what Frigate uses as recording.start_time in the DB.)
if filename_ts is not None and first_pts is not None:
print(
f" Notes: first packet PTS is {first_pts:.3f}s into the file; "
f"Frigate treats filename time as PTS=0 for seek math."
)
def main() -> None:
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("camera", help="Camera name (matches the recordings subfolder)")
parser.add_argument(
"--count",
type=int,
default=5,
help="Number of most recent segments to analyze (default: 5)",
)
parser.add_argument(
"--recordings-dir",
default="/media/frigate/recordings",
help="Path to the recordings directory (default: /media/frigate/recordings)",
)
parser.add_argument(
"--ffprobe",
default=None,
help=(
"Full path to the ffprobe binary. Defaults to the Frigate-bundled "
"binary at /usr/lib/ffmpeg/$DEFAULT_FFMPEG_VERSION/bin/ffprobe."
),
)
parser.add_argument(
"--timestamp",
type=float,
default=None,
help=(
"Unix timestamp (UTC seconds, decimals allowed) to locate. The "
"script finds the segment that should contain this time and "
"analyzes it plus surrounding segments (count controls the "
"window). All on-disk segments are stored in UTC, so pass a UTC "
"Unix timestamp."
),
)
args = parser.parse_args()
ffprobe = resolve_ffprobe_path(args.ffprobe)
recordings_dir = Path(args.recordings_dir)
if not recordings_dir.is_dir():
print(
f"Recordings directory not found: {recordings_dir}",
file=sys.stderr,
)
sys.exit(1)
target_segment: Path | None = None
if args.timestamp is not None:
segments, target_segment = find_segments_near_timestamp(
recordings_dir, args.camera, args.timestamp, args.count
)
target_iso = datetime.datetime.fromtimestamp(
args.timestamp, tz=datetime.timezone.utc
).isoformat()
mode = f"around timestamp {args.timestamp} ({target_iso})"
else:
segments = find_recent_segments(recordings_dir, args.camera, args.count)
mode = "most recent"
if not segments:
print(
f"No segments found for camera '{args.camera}' under {recordings_dir}",
file=sys.stderr,
)
sys.exit(1)
if args.timestamp is not None and target_segment is None:
print(
f"!! Target timestamp {args.timestamp} is before the earliest "
f"segment on disk; showing the earliest available segments instead.",
file=sys.stderr,
)
print(
f"Analyzing {len(segments)} {mode} segment(s) for camera "
f"'{args.camera}' under {recordings_dir} (ffprobe: {ffprobe})"
)
for segment in segments:
analyze(ffprobe, segment, highlight=(segment == target_segment))
if __name__ == "__main__":
main()

View File

@ -1,783 +0,0 @@
"""
Face recognition investigation script.
Standalone replica of Frigate's ArcFace pipeline (see
frigate/data_processing/common/face/model.py and
frigate/embeddings/onnx/face_embedding.py) for analyzing a face collection
outside the running service. Useful for:
- Diagnosing why a person's collection produces false positives
- Finding outlier/contaminating training images
- Inspecting the effect of the shipped vector-wise outlier filter
Layout:
- Core pipeline: LandmarkAligner, ArcFaceEmbedder, arcface_preprocess,
similarity_to_confidence, blur_reduction all mirroring the production
code exactly
- Default run: summarize positive and negative sets against a baseline
trim_mean class representation
- Optional diagnostics (flags): vector-outlier filter behavior, degenerate
"tiny crop" embedding clustering, and multi-identity contamination
Usage:
python3 face_investigate.py \\
--positive <positive_folder> \\
--negative <negative_folder> \\
[--model-cache /path/to/model_cache] \\
[--vector-outlier] [--degenerate] [--contamination]
The positive folder should contain training images for a single identity
(same layout as FACE_DIR/<name>/*.webp). The negative folder should contain
runtime crops to test against a mix of true matches and misfires.
"""
from __future__ import annotations
import argparse
import os
import sys
from dataclasses import dataclass
from typing import Iterable
import cv2
import numpy as np
import onnxruntime as ort
from PIL import Image
from scipy import stats
ARCFACE_INPUT_SIZE = 112
# ---------------------------------------------------------------------------
# Replicated Frigate pipeline
# ---------------------------------------------------------------------------
def _process_image_frigate(image: np.ndarray) -> Image.Image:
"""Mirror BaseEmbedding._process_image for an ndarray input.
NOTE: Frigate passes the output of `cv2.imread` (BGR) directly in. PIL's
`Image.fromarray` does NOT reorder channels, so the embedder effectively
receives a BGR-ordered tensor. We replicate that faithfully here. (Tested
swapping to RGB produces near-identical embeddings; this model is
robust to channel order.)
"""
return Image.fromarray(image)
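# Concretely: cv2.imread returns an H x W x 3 uint8 array in BGR order, and
# Image.fromarray keeps those bytes as-is while labeling them RGB, so every
# later resize/pad step operates on BGR data, matching production.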
def arcface_preprocess(image_bgr: np.ndarray) -> np.ndarray:
"""Mirror ArcfaceEmbedding._preprocess_inputs."""
pil = _process_image_frigate(image_bgr)
width, height = pil.size
if width != ARCFACE_INPUT_SIZE or height != ARCFACE_INPUT_SIZE:
if width > height:
new_height = int(((height / width) * ARCFACE_INPUT_SIZE) // 4 * 4)
pil = pil.resize((ARCFACE_INPUT_SIZE, new_height))
else:
new_width = int(((width / height) * ARCFACE_INPUT_SIZE) // 4 * 4)
pil = pil.resize((new_width, ARCFACE_INPUT_SIZE))
og = np.array(pil).astype(np.float32)
og_h, og_w, channels = og.shape
frame = np.zeros(
(ARCFACE_INPUT_SIZE, ARCFACE_INPUT_SIZE, channels), dtype=np.float32
)
x_center = (ARCFACE_INPUT_SIZE - og_w) // 2
y_center = (ARCFACE_INPUT_SIZE - og_h) // 2
frame[y_center : y_center + og_h, x_center : x_center + og_w] = og
frame = (frame / 127.5) - 1.0
frame = np.transpose(frame, (2, 0, 1))
frame = np.expand_dims(frame, axis=0)
return frame
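# Worked example of the letterboxing above (plain arithmetic): a 160x100 crop
# has width > height, so new_height = int(((100 / 160) * 112) // 4 * 4) = 68;
# the crop is resized to 112x68 and pasted at y_center = (112 - 68) // 2 = 22
# in the zero (black) canvas before the [-1, 1] scaling and NCHW transpose.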
class LandmarkAligner:
"""Mirror FaceRecognizer.align_face."""
def __init__(self, landmark_model_path: str):
if not os.path.exists(landmark_model_path):
raise FileNotFoundError(landmark_model_path)
self.detector = cv2.face.createFacemarkLBF()
self.detector.loadModel(landmark_model_path)
def align(
self, image: np.ndarray, out_w: int, out_h: int
) -> tuple[np.ndarray, dict]:
land_image = (
cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) if image.ndim == 3 else image
)
_, lands = self.detector.fit(
land_image, np.array([(0, 0, land_image.shape[1], land_image.shape[0])])
)
landmarks = lands[0][0]
leftEyePts = landmarks[42:48]
rightEyePts = landmarks[36:42]
leftEyeCenter = leftEyePts.mean(axis=0).astype("int")
rightEyeCenter = rightEyePts.mean(axis=0).astype("int")
dY = rightEyeCenter[1] - leftEyeCenter[1]
dX = rightEyeCenter[0] - leftEyeCenter[0]
angle = np.degrees(np.arctan2(dY, dX)) - 180
dist = float(np.sqrt((dX**2) + (dY**2)))
desiredRightEyeX = 1.0 - 0.35
desiredDist = (desiredRightEyeX - 0.35) * out_w
scale = desiredDist / dist if dist > 0 else 1.0
eyesCenter = (
int((leftEyeCenter[0] + rightEyeCenter[0]) // 2),
int((leftEyeCenter[1] + rightEyeCenter[1]) // 2),
)
M = cv2.getRotationMatrix2D(eyesCenter, angle, scale)
tX = out_w * 0.5
tY = out_h * 0.35
M[0, 2] += tX - eyesCenter[0]
M[1, 2] += tY - eyesCenter[1]
aligned = cv2.warpAffine(
image, M, (out_w, out_h), flags=cv2.INTER_CUBIC
)
info = dict(
angle=float(angle),
eye_dist_px=dist,
scale=float(scale),
landmarks=landmarks,
)
return aligned, info
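# e.g. for out_w = 112 the target inter-eye distance is
# (0.65 - 0.35) * 112 = 33.6 px, so eyes detected 48 px apart give
# scale = 33.6 / 48 = 0.7, with the eye midpoint mapped to
# (0.5 * out_w, 0.35 * out_h).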
class ArcFaceEmbedder:
def __init__(self, model_path: str):
self.session = ort.InferenceSession(
model_path, providers=["CPUExecutionProvider"]
)
self.input_name = self.session.get_inputs()[0].name
def embed(self, image_bgr: np.ndarray) -> np.ndarray:
tensor = arcface_preprocess(image_bgr)
out = self.session.run(None, {self.input_name: tensor})[0]
return out.squeeze()
def similarity_to_confidence(
cos_sim: float,
median: float = 0.3,
range_width: float = 0.6,
slope_factor: float = 12,
) -> float:
slope = slope_factor / range_width
return float(1.0 / (1.0 + np.exp(-slope * (cos_sim - median))))
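# A quick sanity check of this mapping (illustrative): with the defaults,
# slope = 12 / 0.6 = 20 and the curve is centered at cos_sim = 0.30.
assert round(similarity_to_confidence(0.15), 3) == 0.047
assert round(similarity_to_confidence(0.30), 3) == 0.5
assert round(similarity_to_confidence(0.45), 3) == 0.953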
def laplacian_variance(image: np.ndarray) -> float:
return float(cv2.Laplacian(image, cv2.CV_64F).var())
def blur_reduction(variance: float) -> float:
if variance < 120:
return 0.06
elif variance < 160:
return 0.04
elif variance < 200:
return 0.02
elif variance < 250:
return 0.01
return 0.0
def cosine(a: np.ndarray, b: np.ndarray) -> float:
denom = np.linalg.norm(a) * np.linalg.norm(b)
if denom == 0:
return 0.0
return float(np.dot(a, b) / denom)
def l2(v: np.ndarray) -> np.ndarray:
return v / (np.linalg.norm(v) + 1e-9)
# ---------------------------------------------------------------------------
# Sample loading
# ---------------------------------------------------------------------------
@dataclass
class FaceSample:
path: str
shape: tuple[int, int]
embedding: np.ndarray
blur_var: float
align_info: dict
def load_folder(
folder: str, aligner: LandmarkAligner, embedder: ArcFaceEmbedder
) -> list[FaceSample]:
samples: list[FaceSample] = []
names = sorted(os.listdir(folder))
for name in names:
if name.startswith("."):
continue
path = os.path.join(folder, name)
if not os.path.isfile(path):
continue
img = cv2.imread(path)
if img is None:
print(f" [skip unreadable] {name}")
continue
aligned, info = aligner.align(img, img.shape[1], img.shape[0])
emb = embedder.embed(aligned)
samples.append(
FaceSample(
path=path,
shape=(img.shape[1], img.shape[0]),
embedding=emb,
blur_var=laplacian_variance(img),
align_info=info,
)
)
return samples
def trimmed_mean(embs: Iterable[np.ndarray], trim: float = 0.15) -> np.ndarray:
arr = np.stack(list(embs), axis=0)
return stats.trim_mean(arr, trim, axis=0)
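# Illustrative check of the trim behavior: with 20 vectors, trim=0.15 drops
# int(0.15 * 20) = 3 values from each end of every dimension independently,
# so one wild embedding cannot pull any coordinate of the class mean.
_demo = np.ones((20, 4))
_demo[0] = 100.0
assert np.allclose(trimmed_mean(list(_demo)), 1.0)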
# ---------------------------------------------------------------------------
# Baseline analyses (always run)
# ---------------------------------------------------------------------------
def summarize_positive(samples: list[FaceSample], mean_emb: np.ndarray) -> None:
"""Summary of training set: per-sample cos to class mean, intra-class stats.
Outliers with cos far below the rest are likely degrading the mean;
they'd be the first candidates the shipped vector-outlier filter drops.
"""
print("\n" + "=" * 78)
print(f"POSITIVE SET ANALYSIS ({len(samples)} images)")
print("=" * 78)
rows = []
for s in samples:
cs = cosine(s.embedding, mean_emb)
conf = similarity_to_confidence(cs)
red = blur_reduction(s.blur_var)
rows.append(
dict(
name=os.path.basename(s.path),
shape=f"{s.shape[0]}x{s.shape[1]}",
eye_px=s.align_info["eye_dist_px"],
angle=s.align_info["angle"] + 180,
blur=s.blur_var,
cos=cs,
conf=conf,
red=red,
adj_conf=max(0.0, conf - red),
)
)
rows.sort(key=lambda r: r["cos"])
sims = np.array([r["cos"] for r in rows])
print(
f"\nCosine-to-trimmed-mean: mean={sims.mean():.3f} std={sims.std():.3f} "
f"min={sims.min():.3f} max={sims.max():.3f}"
)
print("\n-- Worst matches (bottom 10, most likely hurting the mean) --")
print(
f"{'cos':>6} {'conf':>6} {'blur':>7} {'eyes':>6} "
f"{'angle':>6} {'shape':>9} name"
)
for r in rows[:10]:
print(
f"{r['cos']:6.3f} {r['conf']:6.3f} {r['blur']:7.1f} "
f"{r['eye_px']:6.1f} {r['angle']:6.1f} {r['shape']:>9} {r['name']}"
)
print("\n-- Best matches (top 5) --")
for r in rows[-5:][::-1]:
print(
f"{r['cos']:6.3f} {r['conf']:6.3f} {r['blur']:7.1f} "
f"{r['eye_px']:6.1f} {r['angle']:6.1f} {r['shape']:>9} {r['name']}"
)
# Pairwise analysis — flags embeddings poorly correlated with the rest
print("\n-- Pairwise intra-class similarity (mean cos vs. other positives) --")
embs = np.stack([s.embedding for s in samples], axis=0)
norms = embs / (np.linalg.norm(embs, axis=1, keepdims=True) + 1e-9)
sim_matrix = norms @ norms.T
np.fill_diagonal(sim_matrix, np.nan)
mean_pairwise = np.nanmean(sim_matrix, axis=1)
names = [os.path.basename(s.path) for s in samples]
ordered = sorted(zip(names, mean_pairwise), key=lambda t: t[1])
print(f"{'mean_cos':>9} name")
for nm, mp in ordered[:10]:
print(f"{mp:9.3f} {nm}")
print(f"\n overall mean pairwise cos: {np.nanmean(sim_matrix):.3f}")
print(f" median pairwise cos: {np.nanmedian(sim_matrix):.3f}")
def summarize_negative(
neg_samples: list[FaceSample],
mean_emb: np.ndarray,
pos_samples: list[FaceSample],
) -> None:
"""Score each negative against the class mean, then show its top-3
nearest positives. High-scoring negatives that match specific outlier
positives hint at training-set contamination.
"""
print("\n" + "=" * 78)
print(f"NEGATIVE SET ANALYSIS ({len(neg_samples)} images)")
print("=" * 78)
print(
f"\n{'cos':>6} {'conf':>6} {'red':>5} {'adj':>5} "
f"{'blur':>7} {'eyes':>6} {'shape':>9} name"
)
for s in neg_samples:
cs = cosine(s.embedding, mean_emb)
conf = similarity_to_confidence(cs)
red = blur_reduction(s.blur_var)
print(
f"{cs:6.3f} {conf:6.3f} {red:5.2f} {max(0, conf - red):5.2f} "
f"{s.blur_var:7.1f} {s.align_info['eye_dist_px']:6.1f} "
f"{s.shape[0]}x{s.shape[1]:<5} {os.path.basename(s.path)}"
)
print("\n-- For each negative, top-3 most similar positives --")
pos_embs = np.stack([p.embedding for p in pos_samples])
pos_norm = pos_embs / (np.linalg.norm(pos_embs, axis=1, keepdims=True) + 1e-9)
for s in neg_samples:
v = s.embedding / (np.linalg.norm(s.embedding) + 1e-9)
sims = pos_norm @ v
idx = np.argsort(-sims)[:3]
print(f"\n {os.path.basename(s.path)}:")
for i in idx:
print(
f" {sims[i]:6.3f} {os.path.basename(pos_samples[i].path)} "
f"blur={pos_samples[i].blur_var:.1f} "
f"eyes={pos_samples[i].align_info['eye_dist_px']:.1f}"
)
# ---------------------------------------------------------------------------
# Optional diagnostics
# ---------------------------------------------------------------------------
def vector_outlier_test(
pos: list[FaceSample], neg: list[FaceSample], base_trim: float = 0.15
) -> None:
"""Measure the shipped vector-wise outlier filter at various thresholds.
The production filter at `build_class_mean` in
frigate/data_processing/common/face/model.py uses T=0.30. This test
sweeps T so you can see which images would be dropped on a new collection
and how that affects the negative scores.
Algorithm: iteratively recompute trim_mean on the kept set, drop any
embedding with cos < T to that mean, repeat until converged. Floor at
50% of the collection to avoid collapse.
"""
print("\n" + "=" * 78)
print("VECTOR-WISE OUTLIER PRE-FILTER — layered on trim_mean(0.15)")
print("=" * 78)
all_embs = np.stack([s.embedding for s in pos])
def iterative_mean(
embs: np.ndarray,
threshold: float,
iters: int = 3,
min_keep_frac: float = 0.5,
) -> tuple[np.ndarray, np.ndarray]:
keep = np.ones(len(embs), dtype=bool)
floor = max(5, int(np.ceil(min_keep_frac * len(embs))))
for _ in range(iters):
m = stats.trim_mean(embs[keep], base_trim, axis=0)
m_norm = m / (np.linalg.norm(m) + 1e-9)
e_norms = embs / (np.linalg.norm(embs, axis=1, keepdims=True) + 1e-9)
cos_to_mean = e_norms @ m_norm
new_keep = cos_to_mean >= threshold
if new_keep.sum() < floor:
top_idx = np.argsort(-cos_to_mean)[:floor]
new_keep = np.zeros_like(new_keep)
new_keep[top_idx] = True
if np.array_equal(new_keep, keep):
break
keep = new_keep
final = stats.trim_mean(embs[keep], base_trim, axis=0)
return final, keep
provisional = stats.trim_mean(all_embs, base_trim, axis=0)
p_norm = provisional / (np.linalg.norm(provisional) + 1e-9)
e_norms_all = all_embs / (np.linalg.norm(all_embs, axis=1, keepdims=True) + 1e-9)
cos_to_prov = e_norms_all @ p_norm
print("\nDistribution of cos(positive, provisional trim_mean):")
print(
f" min={cos_to_prov.min():.3f} p10={np.percentile(cos_to_prov, 10):.3f} "
f"p25={np.percentile(cos_to_prov, 25):.3f} "
f"median={np.median(cos_to_prov):.3f} "
f"p75={np.percentile(cos_to_prov, 75):.3f} max={cos_to_prov.max():.3f}"
)
baseline_mean = stats.trim_mean(all_embs, base_trim, axis=0)
baseline_pos = np.array([cosine(p.embedding, baseline_mean) for p in pos])
baseline_neg = (
np.array([cosine(n.embedding, baseline_mean) for n in neg])
if neg
else np.array([])
)
baseline_conf_neg = np.array(
[similarity_to_confidence(c) for c in baseline_neg]
)
print(
f"\nBaseline (trim_mean only, {len(pos)} images):"
f"\n pos cos min={baseline_pos.min():.3f} "
f"mean={baseline_pos.mean():.3f} max={baseline_pos.max():.3f}"
)
if len(neg):
print(
f" neg cos min={baseline_neg.min():.3f} "
f"mean={baseline_neg.mean():.3f} max={baseline_neg.max():.3f}"
)
print(
f" neg conf min={baseline_conf_neg.min():.3f} "
f"mean={baseline_conf_neg.mean():.3f} max={baseline_conf_neg.max():.3f}"
)
print(
f" margin (pos.min - neg.max): "
f"{baseline_pos.min() - baseline_neg.max():+.3f}"
)
print("\nIterative (refine mean → drop vectors with cos<T → repeat):")
print(
f"\n{'T':>5} {'kept':>6} {'pos min':>7} {'pos mean':>8} "
f"{'neg max':>7} {'neg mean':>8} {'neg conf.max':>12} {'margin':>7}"
)
for T in [0.15, 0.20, 0.25, 0.28, 0.30, 0.33, 0.36, 0.40]:
mean, keep = iterative_mean(all_embs, T)
pos_sims = np.array([cosine(p.embedding, mean) for p in pos])
neg_sims = (
np.array([cosine(n.embedding, mean) for n in neg])
if neg
else np.array([])
)
neg_conf = np.array([similarity_to_confidence(c) for c in neg_sims])
margin = pos_sims.min() - (neg_sims.max() if len(neg_sims) else 0)
print(
f"{T:5.2f} {int(keep.sum()):>3}/{len(pos):<2} "
f"{pos_sims.min():7.3f} {pos_sims.mean():8.3f} "
f"{neg_sims.max() if len(neg_sims) else float('nan'):7.3f} "
f"{neg_sims.mean() if len(neg_sims) else float('nan'):8.3f} "
f"{neg_conf.max() if len(neg_conf) else float('nan'):12.3f} "
f"{margin:+7.3f}"
)
# Show which images get dropped at the shipped threshold + neighbors
for T_show in (0.25, 0.30, 0.33):
_, keep = iterative_mean(all_embs, T_show)
print(
f"\nAt T={T_show}, the {int((~keep).sum())} dropped positives are:"
)
final_mean = stats.trim_mean(all_embs[keep], base_trim, axis=0)
m_n = final_mean / (np.linalg.norm(final_mean) + 1e-9)
for i, (p, k) in enumerate(zip(pos, keep)):
if not k:
e_n = p.embedding / (np.linalg.norm(p.embedding) + 1e-9)
cos_final = float(e_n @ m_n)
print(
f" cos_to_clean_mean={cos_final:6.3f} "
f"shape={p.shape[0]}x{p.shape[1]} "
f"eyes={p.align_info['eye_dist_px']:6.1f} "
f"blur={p.blur_var:7.1f} "
f"{os.path.basename(p.path)}"
)
def degenerate_embedding_test(
pos: list[FaceSample], neg: list[FaceSample]
) -> None:
"""Detect whether negatives and low-quality positives share a degenerate
'tiny/noisy face' region of the embedding space.
Signal: if neg-to-neg cos is higher than pos-to-pos cos, the negatives
aren't really per-identity embeddings — they're dominated by upsample /
low-resolution artifacts that all map to a similar corner of embedding
space regardless of who the face belongs to.
Also rebuilds the mean using only high-intra-similarity positives to
show whether a cleaner training set separates the negatives.
"""
print("\n" + "=" * 78)
print("DEGENERATE-EMBEDDING TEST")
print("=" * 78)
pos_embs = np.stack([l2(s.embedding) for s in pos])
neg_embs = np.stack([l2(s.embedding) for s in neg])
nn = neg_embs @ neg_embs.T
np.fill_diagonal(nn, np.nan)
pp = pos_embs @ pos_embs.T
np.fill_diagonal(pp, np.nan)
pn = pos_embs @ neg_embs.T
print(
f"\n neg<->neg mean cos : {np.nanmean(nn):.3f} "
f"(how tightly negatives cluster together)"
)
print(
f" pos<->pos mean cos : {np.nanmean(pp):.3f} "
f"(how tightly positives cluster)"
)
print(
f" pos<->neg mean cos : {pn.mean():.3f} "
f"(cross-class — should be low for a clean class)"
)
if np.nanmean(nn) > np.nanmean(pp):
print(
"\n >> neg<->neg > pos<->pos: negatives cluster more tightly than\n"
" positives. This is the degenerate-embedding signature —\n"
" upsampled tiny crops share a common 'face-like blob' region\n"
" regardless of identity."
)
mean_intra = np.nanmean(pp, axis=1)
for thresh in (0.30, 0.33, 0.36):
keep = mean_intra >= thresh
if keep.sum() < 5:
continue
clean_embs = [pos[i].embedding for i in range(len(pos)) if keep[i]]
clean_mean = stats.trim_mean(np.stack(clean_embs), 0.15, axis=0)
neg_scores = np.array([cosine(n.embedding, clean_mean) for n in neg])
neg_confs = np.array([similarity_to_confidence(c) for c in neg_scores])
pos_scores = np.array(
[
cosine(pos[i].embedding, clean_mean)
for i in range(len(pos))
if keep[i]
]
)
print(
f"\n mean_intra >= {thresh}: keeping {int(keep.sum())}/{len(pos)} positives"
)
print(
f" pos cos vs mean : min={pos_scores.min():.3f} "
f"mean={pos_scores.mean():.3f} max={pos_scores.max():.3f}"
)
print(
f" neg cos vs mean : min={neg_scores.min():.3f} "
f"mean={neg_scores.mean():.3f} max={neg_scores.max():.3f}"
)
print(
f" neg conf : min={neg_confs.min():.3f} "
f"mean={neg_confs.mean():.3f} max={neg_confs.max():.3f}"
)
print(
f" margin (pos.min - neg.max): "
f"{pos_scores.min() - neg_scores.max():+.3f}"
)
def contamination_analysis(
pos: list[FaceSample], neg: list[FaceSample]
) -> None:
"""Check whether the positive collection contains a second identity.
Two signals:
(a) Per-positive: if an image is closer to at least one negative than
to the rest of the positive class, it's likely a mislabeled face.
(b) 2-means split of the positive embeddings: if one cluster center
lands close to the negative mean, that cluster is a contaminating
sub-identity that's pulling the class mean toward the negatives.
"""
print("\n" + "=" * 78)
print("CONTAMINATION ANALYSIS")
print("=" * 78)
pos_embs = np.stack([l2(s.embedding) for s in pos])
neg_embs = np.stack([l2(s.embedding) for s in neg])
pos_names = [os.path.basename(s.path) for s in pos]
pos_pos = pos_embs @ pos_embs.T
np.fill_diagonal(pos_pos, np.nan)
pos_neg = pos_embs @ neg_embs.T
mean_intra = np.nanmean(pos_pos, axis=1)
max_to_neg = pos_neg.max(axis=1)
mean_to_neg = pos_neg.mean(axis=1)
print(
"\nPositives closer to a negative than to their own class avg"
"\n(these are candidates for mislabeled images):"
)
print(
f"\n{'max_neg':>7} {'mean_neg':>8} {'mean_intra':>10} "
f"{'delta':>6} name"
)
rows = list(zip(pos_names, max_to_neg, mean_to_neg, mean_intra))
rows.sort(key=lambda r: -(r[1] - r[3]))
for nm, mxn, mnn, mi in rows[:15]:
delta = mxn - mi
marker = " <<" if delta > 0 else ""
print(f"{mxn:7.3f} {mnn:8.3f} {mi:10.3f} {delta:6.3f} {nm}{marker}")
# 2-means in cosine space (no sklearn dependency).
print("\n2-means split of positive embeddings (cosine space):")
rng = np.random.default_rng(0)
best = None
for _ in range(5):
idx = rng.choice(len(pos_embs), 2, replace=False)
centers = pos_embs[idx].copy()
for _ in range(50):
sims = pos_embs @ centers.T
labels = np.argmax(sims, axis=1)
new_centers = np.stack(
[
l2(pos_embs[labels == k].mean(axis=0))
if np.any(labels == k)
else centers[k]
for k in range(2)
]
)
if np.allclose(new_centers, centers):
break
centers = new_centers
tight = float(np.mean([sims[i, labels[i]] for i in range(len(labels))]))
if best is None or tight > best[0]:
best = (tight, labels.copy(), centers.copy())
_, labels, centers = best
sizes = [int((labels == k).sum()) for k in range(2)]
neg_mean = l2(neg_embs.mean(axis=0))
print(
f" cluster 0: size={sizes[0]:>2} "
f"center<->other_center_cos={float(centers[0] @ centers[1]):.3f} "
f"center<->neg_mean_cos={float(centers[0] @ neg_mean):.3f}"
)
print(
f" cluster 1: size={sizes[1]:>2} "
f"center<->neg_mean_cos={float(centers[1] @ neg_mean):.3f}"
)
neg_aligned = 0 if centers[0] @ neg_mean > centers[1] @ neg_mean else 1
print(
f"\n cluster {neg_aligned} is more similar to the negatives — "
f"its members are the contamination candidates:"
)
for i, lbl in enumerate(labels):
if lbl == neg_aligned:
print(
f" max_to_neg={max_to_neg[i]:.3f} "
f"mean_intra={mean_intra[i]:.3f} {pos_names[i]}"
)
keep_mask = labels != neg_aligned
if keep_mask.sum() >= 3:
clean_embs = [pos[i].embedding for i in range(len(pos)) if keep_mask[i]]
clean_mean = stats.trim_mean(np.stack(clean_embs), 0.15, axis=0)
print(
f"\n Rebuilding class mean from the OTHER cluster "
f"({keep_mask.sum()} images):"
)
print(f" {'cos':>6} {'conf':>6} name")
for n in neg:
cs = cosine(n.embedding, clean_mean)
cf = similarity_to_confidence(cs)
print(f" {cs:6.3f} {cf:6.3f} {os.path.basename(n.path)}")
# ---------------------------------------------------------------------------
# main
# ---------------------------------------------------------------------------
def main() -> int:
ap = argparse.ArgumentParser(
description="Analyze a face recognition collection outside Frigate.",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=__doc__,
)
ap.add_argument("--positive", required=True, help="Training folder for one identity")
ap.add_argument(
"--negative",
default=None,
help="Runtime-crop folder to score against (optional)",
)
ap.add_argument(
"--model-cache",
default="/config/model_cache",
help="Directory containing facedet/arcface.onnx and facedet/landmarkdet.yaml",
)
ap.add_argument(
"--trim",
type=float,
default=0.15,
help="trim_mean proportion (Frigate uses 0.15)",
)
ap.add_argument(
"--vector-outlier",
action="store_true",
help="Sweep the vector-wise outlier filter threshold",
)
ap.add_argument(
"--degenerate",
action="store_true",
help="Test whether negatives share a degenerate embedding region",
)
ap.add_argument(
"--contamination",
action="store_true",
help="Check whether the positive folder contains a second identity",
)
args = ap.parse_args()
arcface_path = os.path.join(args.model_cache, "facedet", "arcface.onnx")
landmark_path = os.path.join(args.model_cache, "facedet", "landmarkdet.yaml")
for p in (arcface_path, landmark_path):
if not os.path.exists(p):
print(f"ERROR: model file not found: {p}")
return 1
print(f"Loading ArcFace from {arcface_path}")
embedder = ArcFaceEmbedder(arcface_path)
print(f"Loading landmark model from {landmark_path}")
aligner = LandmarkAligner(landmark_path)
print(f"\nLoading positives from {args.positive} ...")
pos = load_folder(args.positive, aligner, embedder)
print(f" {len(pos)} positives loaded")
neg: list[FaceSample] = []
if args.negative:
print(f"\nLoading negatives from {args.negative} ...")
neg = load_folder(args.negative, aligner, embedder)
print(f" {len(neg)} negatives loaded")
if not pos:
print("no positive samples — aborting")
return 1
mean_emb = trimmed_mean([s.embedding for s in pos], trim=args.trim)
summarize_positive(pos, mean_emb)
if neg:
summarize_negative(neg, mean_emb, pos)
if args.vector_outlier:
vector_outlier_test(pos, neg, args.trim)
if args.degenerate and neg:
degenerate_embedding_test(pos, neg)
if args.contamination and neg:
contamination_analysis(pos, neg)
return 0
if __name__ == "__main__":
sys.exit(main())

View File

@ -1,114 +1,4 @@
import { test, expect } from "../fixtures/frigate-test";
import {
expectBodyInteractive,
waitForBodyInteractive,
} from "../helpers/overlay-interaction";
test.describe("Export Page - Delete race @high", () => {
// Empirical guard for radix-ui/primitives#3445: when a modal DropdownMenu
// opens an AlertDialog and the AlertDialog's confirm action causes the
// parent's optimistic cache update to unmount the card, we want to know
// whether the deduped react-dismissable-layer (1.1.11) handles the
// pointer-events stack cleanup or whether `modal={false}` is still
// required on the DropdownMenu. This is the canonical pattern, distinct
// from the FaceSelectionDialog auto-unmount race already covered by
// face-library.spec.ts.
test("deleting an export via dropdown→alert→confirm leaves body interactive", async ({
frigateApp,
}) => {
if (frigateApp.isMobile) {
test.skip();
return;
}
const initialExports = [
{
id: "export-race-001",
camera: "front_door",
name: "Race - Test Export",
date: 1775490731.3863528,
video_path: "/exports/export-race-001.mp4",
thumb_path: "/exports/export-race-001-thumb.jpg",
in_progress: false,
export_case_id: null,
},
];
let deleted = false;
await frigateApp.installDefaults({
exports: initialExports,
});
// Flip /api/export to empty after the delete POST is observed so the
// page's SWR mutate sees the export gone.
await frigateApp.page.route("**/api/export**", async (route) => {
const payload = deleted ? [] : initialExports;
await route.fulfill({ json: payload });
});
await frigateApp.page.route("**/api/exports/delete", async (route) => {
deleted = true;
const delayMs = Number(
(globalThis as { process?: { env?: Record<string, string> } }).process
?.env?.DELETE_DELAY_MS ?? "100",
);
if (delayMs > 0) {
await new Promise((resolve) => setTimeout(resolve, delayMs));
}
await route.fulfill({ json: { success: true } });
});
await frigateApp.goto("/export");
await expect(frigateApp.page.getByText("Race - Test Export")).toBeVisible({
timeout: 5_000,
});
// Open the kebab menu on the export card. The kebab uses the
// (misleading) aria-label "Edit name" from ExportCard's source — it
// wraps the FiMoreVertical icon. There is exactly one such button on
// the page once we have a single export rendered.
const kebab = frigateApp.page
.getByRole("button", { name: /edit name/i })
.first();
await expect(kebab).toBeVisible({ timeout: 5_000 });
await kebab.click();
const menu = frigateApp.page
.locator('[role="menu"], [data-radix-menu-content]')
.first();
await expect(menu).toBeVisible({ timeout: 3_000 });
// Delete Export
await menu
.getByRole("menuitem", { name: /delete export/i })
.first()
.click();
// AlertDialog at page level. The confirm button's accessible name is
// "Delete Export" (its aria-label), the visible text is just "Delete".
const confirm = frigateApp.page.getByRole("alertdialog");
await expect(confirm).toBeVisible({ timeout: 3_000 });
await confirm
.getByRole("button", { name: /^delete export$/i })
.first()
.click();
// The card optimistically disappears, the dialog closes, and body
// pointer-events must come unstuck.
await expect(
frigateApp.page.getByText("Race - Test Export"),
).not.toBeVisible({ timeout: 5_000 });
await waitForBodyInteractive(frigateApp.page, 5_000);
await expectBodyInteractive(frigateApp.page);
// Sanity: another page-level button still responds.
const newCase = frigateApp.page.getByRole("button", { name: /new case/i });
await expect(newCase).toBeVisible({ timeout: 3_000 });
await newCase.click();
await expect(
frigateApp.page.getByRole("dialog").filter({ hasText: /create case/i }),
).toBeVisible({ timeout: 3_000 });
});
});
test.describe("Export Page - Overview @high", () => {
test("renders uncategorized exports and case cards from mock data", async ({

View File

@ -358,158 +358,6 @@ test.describe("FaceSelectionDialog @high", () => {
await frigateApp.page.keyboard.press("Escape");
await expect(menu).not.toBeVisible({ timeout: 3_000 });
});
test("classifying the last image in a group leaves body interactive", async ({
frigateApp,
}) => {
// Regression guard for the stuck body pointer-events bug when the
// last image in a grouped-recognition detail Dialog is classified.
// Tracked upstream at radix-ui/primitives#3445.
//
// Root cause: when the user clicks a FaceSelectionDialog menu item,
// the modal DropdownMenu enters its exit animation (Radix's Presence
// keeps it in the DOM with data-state="closed" until animationend).
// While that is in flight the classify axios resolves, SWR removes
// the image from /api/faces, the parent's map no longer renders the
// grouped card, and React unmounts the subtree — including the still-
// animating DropdownMenu's Presence container. DismissableLayer's
// shared modal-layer stack can't reconcile the interrupted exit, so
// the `body { pointer-events: none }` entry it put on mount is never
// popped and the rest of the UI becomes unclickable.
//
// The fix is `modal={false}` on the FaceSelectionDialog's
// DropdownMenu (desktop path only). With modal=false the DropdownMenu
// never puts an entry on DismissableLayer's body-pointer-events stack
// in the first place, so there's nothing to leak when its Presence is
// torn down mid-animation. This is the Radix-community-documented
// workaround for #3445.
//
// The bug only reproduces when the mock resolves fast enough that
// the parent unmounts before the dropdown's exit animation finishes.
// Measured window via a 3x sweep on the pre-fix build: 0-200 ms
// triggers it; 300 ms+ no longer reproduces. Production LAN networks
// sit comfortably inside the bad window, while `npm run dev` seems
// to mask it via React StrictMode's double-effect scheduling.
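// A minimal sketch of that desktop-only workaround (names follow the
// FaceSelectionDialog source referenced above; markup abbreviated and
// illustrative):
//   <Selector {...(isDesktop ? { modal: false } : {})}>
//     <SelectorTrigger asChild>{children}</SelectorTrigger>
//     ...menu items...
//   </Selector>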
const EVENT_ID = "1775487131.3863528-race";
const initialFaces = withGroupedTrainingAttempt(basicFacesMock(), {
eventId: EVENT_ID,
attempts: [
{ timestamp: 1775487131.3863528, label: "unknown", score: 0.95 },
],
});
let classified = false;
await frigateApp.installDefaults({
faces: initialFaces,
events: [
{
id: EVENT_ID,
label: "person",
sub_label: null,
camera: "front_door",
start_time: 1775487131.3863528,
end_time: 1775487161.3863528,
false_positive: false,
zones: ["front_yard"],
thumbnail: null,
has_clip: true,
has_snapshot: true,
retain_indefinitely: false,
plus_id: null,
model_hash: "abc123",
detector_type: "cpu",
model_type: "ssd",
data: {
top_score: 0.92,
score: 0.92,
region: [0.1, 0.1, 0.5, 0.8],
box: [0.2, 0.15, 0.45, 0.75],
area: 0.18,
ratio: 0.6,
type: "object",
path_data: [],
},
},
],
});
// Re-route /api/faces to flip to the "train empty" payload once the
// classify POST has been received. Registered AFTER installDefaults so
// Playwright's LIFO route matching hits this handler first.
await frigateApp.page.route("**/api/faces", async (route) => {
const payload = classified ? basicFacesMock() : initialFaces;
await route.fulfill({ json: payload });
});
// Hold the classify POST briefly. The race opens when the parent
// unmounts before the dropdown's exit animation finishes (~200ms
// in Radix). 100ms keeps us comfortably inside that window and
// reliably triggered the bug in a 3x sweep across 0/50/100/200ms
// on the pre-fix build. CLASSIFY_DELAY_MS overrides for local sweeps.
const delayMs = Number(
(globalThis as { process?: { env?: Record<string, string> } }).process
?.env?.CLASSIFY_DELAY_MS ?? "100",
);
await frigateApp.page.route(
"**/api/faces/train/*/classify",
async (route) => {
classified = true;
if (delayMs > 0) {
await new Promise((resolve) => setTimeout(resolve, delayMs));
}
await route.fulfill({ json: { success: true } });
},
);
await frigateApp.goto("/faces");
// Open the grouped detail Dialog.
const groupedImage = frigateApp.page
.locator('img[src*="clips/faces/train/"]')
.first();
await expect(groupedImage).toBeVisible({ timeout: 5_000 });
await groupedImage.locator("xpath=..").click();
const dialog = frigateApp.page
.getByRole("dialog")
.filter({
has: frigateApp.page.locator('img[src*="clips/faces/train/"]'),
})
.first();
await expect(dialog).toBeVisible({ timeout: 5_000 });
// Single attempt → single `+` trigger.
const triggers = dialog.locator('[aria-haspopup="menu"]');
await expect(triggers).toHaveCount(1);
await triggers.first().click();
const menu = frigateApp.page
.locator('[role="menu"], [data-radix-menu-content]')
.first();
await expect(menu).toBeVisible({ timeout: 5_000 });
await menu.getByRole("menuitem", { name: /^alice$/i }).click();
// The Dialog must leave the tree cleanly, and body must recover.
await expect(dialog).not.toBeVisible({ timeout: 5_000 });
// Give Radix's exit animation + cleanup a comfortable margin on top of
// the simulated classify delay (100 ms by default).
await waitForBodyInteractive(frigateApp.page, 5_000);
await expectBodyInteractive(frigateApp.page);
// User-visible confirmation: click something outside the dialog
// and assert it actually responds.
const librarySelector = frigateApp.page
.getByRole("button")
.filter({ hasText: /\(\d+\)/ })
.first();
await librarySelector.click();
await expect(
frigateApp.page
.locator('[role="menu"], [data-radix-menu-content]')
.first(),
).toBeVisible({ timeout: 3_000 });
});
});
test.describe("Face Library — mobile @high @mobile", () => {

14
web/package-lock.json generated
View File

@ -54,7 +54,7 @@
"immer": "^10.1.1",
"js-yaml": "^4.1.1",
"konva": "^10.2.3",
"lodash": "^4.18.1",
"lodash": "^4.17.23",
"lucide-react": "^0.577.0",
"monaco-yaml": "^5.4.1",
"next-themes": "^0.4.6",
@ -9636,15 +9636,15 @@
}
},
"node_modules/lodash": {
"version": "4.18.1",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.18.1.tgz",
"integrity": "sha512-dMInicTPVE8d1e5otfwmmjlxkZoUpiVLwyeTdUsi/Caj/gfzzblBcCE5sRHV/AsjuCmxWrte2TNGSYuCeCq+0Q==",
"version": "4.17.23",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz",
"integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==",
"license": "MIT"
},
"node_modules/lodash-es": {
"version": "4.18.1",
"resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.18.1.tgz",
"integrity": "sha512-J8xewKD/Gk22OZbhpOVSwcs60zhd95ESDwezOFuA3/099925PdHJ7OFHNTGtajL3AlZkykD32HykiMo+BIBI8A==",
"version": "4.17.23",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz",
"integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==",
"license": "MIT"
},
"node_modules/lodash.merge": {

View File

@ -68,7 +68,7 @@
"immer": "^10.1.1",
"js-yaml": "^4.1.1",
"konva": "^10.2.3",
"lodash": "^4.18.1",
"lodash": "^4.17.23",
"lucide-react": "^0.577.0",
"monaco-yaml": "^5.4.1",
"next-themes": "^0.4.6",

View File

@ -257,7 +257,6 @@
"export": "Export",
"actions": "Actions",
"uiPlayground": "UI Playground",
"features": "Features",
"faceLibrary": "Face Library",
"classification": "Classification",
"chat": "Chat",

View File

@ -415,7 +415,6 @@
"audioCodecGood": "Audio codec is {{codec}}.",
"resolutionHigh": "A resolution of {{resolution}} may cause increased resource usage.",
"resolutionLow": "A resolution of {{resolution}} may be too low for reliable detection of small objects.",
"resolutionUnknown": "The resolution of this stream could not be probed. You should manually set the detect resolution in Settings or your config.",
"noAudioWarning": "No audio detected for this stream, recordings will not have audio.",
"audioCodecRecordError": "The AAC audio codec is required to support audio in recordings.",
"audioCodecRequired": "An audio stream is required to support audio detection.",

View File

@ -17,9 +17,6 @@ import { useUserPersistence } from "@/hooks/use-user-persistence";
import { Skeleton } from "../ui/skeleton";
import { Button } from "../ui/button";
import { FaCircleCheck } from "react-icons/fa6";
import { FaExclamationTriangle } from "react-icons/fa";
import { MdOutlinePersonSearch } from "react-icons/md";
import { ThreatLevel } from "@/types/review";
import { cn } from "@/lib/utils";
import { useTranslation } from "react-i18next";
import { getTranslatedLabel } from "@/utils/i18n";
@ -130,11 +127,6 @@ export function AnimatedEventCard({
true,
);
const threatLevel = useMemo<ThreatLevel>(
() => (event.data.metadata?.potential_threat_level ?? 0) as ThreatLevel,
[event],
);
const aspectRatio = useMemo(() => {
if (
!config ||
@ -160,15 +152,7 @@ export function AnimatedEventCard({
<Tooltip>
<TooltipTrigger asChild>
<Button
className={cn(
"absolute left-2 top-1 z-40 bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500 transition-opacity",
threatLevel === ThreatLevel.SECURITY_CONCERN &&
"pointer-events-auto opacity-100",
threatLevel === ThreatLevel.NEEDS_REVIEW &&
"pointer-events-auto opacity-100",
threatLevel === ThreatLevel.NORMAL &&
"pointer-events-none opacity-0 group-hover:pointer-events-auto group-hover:opacity-100",
)}
className="pointer-events-none absolute left-2 top-1 z-40 bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500 opacity-0 transition-opacity group-hover:pointer-events-auto group-hover:opacity-100"
size="xs"
aria-label={t("markAsReviewed")}
onClick={async () => {
@ -176,13 +160,7 @@ export function AnimatedEventCard({
updateEvents();
}}
>
{threatLevel === ThreatLevel.SECURITY_CONCERN ? (
<FaExclamationTriangle className="size-3 text-white" />
) : threatLevel === ThreatLevel.NEEDS_REVIEW ? (
<MdOutlinePersonSearch className="size-3 text-white" />
) : (
<FaCircleCheck className="size-3 text-white" />
)}
</Button>
</TooltipTrigger>
<TooltipContent>{t("markAsReviewed")}</TooltipContent>

View File

@ -155,40 +155,14 @@ export function MessageBubble({
) : (
<div
className={cn(
"[&>*:last-child]:inline",
!isComplete &&
"[&>p:last-child]:inline after:ml-0.5 after:inline-block after:h-4 after:w-2 after:animate-cursor-blink after:rounded-sm after:bg-foreground after:align-middle after:content-['']",
"after:ml-0.5 after:inline-block after:h-4 after:w-2 after:animate-cursor-blink after:rounded-sm after:bg-foreground after:align-middle after:content-['']",
)}
>
<ReactMarkdown
remarkPlugins={[remarkGfm]}
components={{
p: ({ node: _n, ...props }) => (
<p className="my-2 first:mt-0 last:mb-0" {...props} />
),
ul: ({ node: _n, ...props }) => (
<ul
className="my-2 list-disc space-y-1 pl-6 first:mt-0 last:mb-0"
{...props}
/>
),
ol: ({ node: _n, ...props }) => (
<ol
className="my-2 list-decimal space-y-1 pl-6 first:mt-0 last:mb-0"
{...props}
/>
),
li: ({ node: _n, ...props }) => (
<li className="pl-1" {...props} />
),
code: ({ node: _n, className, ...props }) => (
<code
className={cn(
"rounded bg-foreground/10 px-1 py-0.5 font-mono text-sm",
className,
)}
{...props}
/>
),
table: ({ node: _n, ...props }) => (
<table
className="my-2 w-full border-collapse border border-border"

View File

@ -14,6 +14,7 @@ import Step3ChooseExamples, {
Step3FormData,
} from "./wizard/Step3ChooseExamples";
import { cn } from "@/lib/utils";
import { isDesktop } from "react-device-detect";
import axios from "axios";
const OBJECT_STEPS = [
@ -152,9 +153,13 @@ export default function ClassificationModelWizardDialog({
>
<DialogContent
className={cn(
"scrollbar-container max-h-[90%] overflow-y-auto",
wizardState.currentStep == 0 && "xl:max-h-[80%]",
wizardState.currentStep > 0 && "md:max-w-[70%] xl:max-h-[80%]",
"",
isDesktop &&
wizardState.currentStep == 0 &&
"max-h-[90%] overflow-y-auto xl:max-h-[80%]",
isDesktop &&
wizardState.currentStep > 0 &&
"max-h-[90%] max-w-[70%] overflow-y-auto xl:max-h-[80%]",
)}
onInteractOutside={(e) => {
e.preventDefault();

View File

@ -218,7 +218,7 @@ export default function CameraReviewClassification({
<Label
className={cn(
"flex flex-row items-center text-base",
alertsZonesModified && "text-unsaved",
alertsZonesModified && "text-danger",
)}
>
<Trans ns="views/settings">cameraReview.review.alerts</Trans>
@ -286,7 +286,7 @@ export default function CameraReviewClassification({
<Label
className={cn(
"flex flex-row items-center text-base",
detectionsZonesModified && "text-unsaved",
detectionsZonesModified && "text-danger",
)}
>
<Trans ns="views/settings">

View File

@ -1012,7 +1012,7 @@ export function ConfigSection({
>
{hasChanges && (
<div className="flex items-center gap-2">
<span className="text-sm text-unsaved">
<span className="text-sm text-danger">
{t("unsavedChanges", {
ns: "views/settings",
defaultValue: "You have unsaved changes",
@ -1299,7 +1299,7 @@ export function ConfigSection({
{hasChanges && (
<Badge
variant="secondary"
className="cursor-default bg-unsaved text-xs text-black hover:bg-unsaved"
className="cursor-default bg-danger text-xs text-white hover:bg-danger"
>
{t("button.modified", {
ns: "common",

View File

@ -154,7 +154,7 @@ export function KnownPlatesField(props: FieldProps) {
<div className="flex items-center justify-between">
<div>
<CardTitle
className={cn("text-sm", isModified && "text-unsaved")}
className={cn("text-sm", isModified && "text-danger")}
>
{title}
</CardTitle>

View File

@ -142,7 +142,7 @@ export function ReplaceRulesField(props: FieldProps) {
<div className="flex items-center justify-between">
<div>
<CardTitle
className={cn("text-sm", isModified && "text-unsaved")}
className={cn("text-sm", isModified && "text-danger")}
>
{title}
</CardTitle>

View File

@ -497,7 +497,7 @@ export function FieldTemplate(props: FieldTemplateProps) {
htmlFor={id}
className={cn(
"text-sm font-medium",
isModified && "text-unsaved",
isModified && "text-danger",
hasFieldErrors && "text-destructive",
)}
>
@ -516,7 +516,7 @@ export function FieldTemplate(props: FieldTemplateProps) {
return (
<Label
htmlFor={id}
className={cn("text-sm font-medium", isModified && "text-unsaved")}
className={cn("text-sm font-medium", isModified && "text-danger")}
>
{finalLabel}
{required && <span className="ml-1 text-destructive">*</span>}
@ -535,7 +535,7 @@ export function FieldTemplate(props: FieldTemplateProps) {
htmlFor={id}
className={cn(
"text-sm font-medium",
isModified && "text-unsaved",
isModified && "text-danger",
hasFieldErrors && "text-destructive",
)}
>

View File

@ -467,7 +467,7 @@ export function ObjectFieldTemplate(props: ObjectFieldTemplateProps) {
<CardTitle
className={cn(
"flex items-center text-sm",
hasModifiedDescendants && "text-unsaved",
hasModifiedDescendants && "text-danger",
)}
>
{inferredLabel}

View File

@ -6,7 +6,6 @@ import {
LuLifeBuoy,
LuList,
LuLogOut,
LuMessageSquare,
LuMoon,
LuSquarePen,
LuScanFace,
@ -483,15 +482,8 @@ export default function GeneralSettings({ className }: GeneralSettingsProps) {
</Link>
</>
)}
</DropdownMenuGroup>
{isMobile && isAdmin && (
{isAdmin && isMobile && config?.face_recognition.enabled && (
<>
<DropdownMenuLabel className="mt-1">
{t("menu.features")}
</DropdownMenuLabel>
<DropdownMenuSeparator />
<DropdownMenuGroup className="flex flex-col">
{config?.face_recognition.enabled && (
<Link to="/faces">
<MenuItem
className="flex w-full items-center p-2 text-sm"
@ -501,7 +493,10 @@ export default function GeneralSettings({ className }: GeneralSettingsProps) {
<span>{t("menu.faceLibrary")}</span>
</MenuItem>
</Link>
</>
)}
{isAdmin && isMobile && (
<>
<Link to="/classification">
<MenuItem
className="flex w-full items-center p-2 text-sm"
@ -511,20 +506,9 @@ export default function GeneralSettings({ className }: GeneralSettingsProps) {
<span>{t("menu.classification")}</span>
</MenuItem>
</Link>
{config?.genai?.model !== "none" && (
<Link to="/chat">
<MenuItem
className="flex w-full items-center p-2 text-sm"
aria-label={t("menu.chat")}
>
<LuMessageSquare className="mr-2 size-4" />
<span>{t("menu.chat")}</span>
</MenuItem>
</Link>
)}
</DropdownMenuGroup>
</>
)}
</DropdownMenuGroup>
<DropdownMenuLabel className={isDesktop ? "mt-3" : "mt-1"}>
{t("menu.appearance")}
</DropdownMenuLabel>

View File

@ -124,7 +124,7 @@ export default function ClassificationSelectionDialog({
/>
<Tooltip>
<Selector {...(isDesktop ? { modal: false } : {})}>
<Selector>
<SelectorTrigger asChild>
<TooltipTrigger asChild={isChildButton}>{children}</TooltipTrigger>
</SelectorTrigger>

View File

@ -85,7 +85,7 @@ export default function FaceSelectionDialog({
)}
<Tooltip>
<Selector {...(isDesktop ? { modal: false } : {})}>
<Selector>
<SelectorTrigger asChild>
<TooltipTrigger asChild={isChildButton}>{children}</TooltipTrigger>
</SelectorTrigger>

View File

@ -607,38 +607,23 @@ function StreamIssues({
}
}
if (stream.roles.includes("detect") && stream.testResult) {
const probedResolution = stream.testResult.resolution;
let probedWidth = 0;
let probedHeight = 0;
if (probedResolution) {
const [w, h] = probedResolution.split("x").map(Number);
if (!isNaN(w) && !isNaN(h)) {
probedWidth = w;
probedHeight = h;
}
}
if (probedWidth <= 0 || probedHeight <= 0) {
result.push({
type: "error",
message: t("cameraWizard.step4.issues.resolutionUnknown"),
});
} else {
const minDimension = Math.min(probedWidth, probedHeight);
const maxDimension = Math.max(probedWidth, probedHeight);
if (stream.roles.includes("detect") && stream.resolution) {
const [width, height] = stream.resolution.split("x").map(Number);
if (!isNaN(width) && !isNaN(height) && width > 0 && height > 0) {
const minDimension = Math.min(width, height);
const maxDimension = Math.max(width, height);
if (minDimension > 1080) {
result.push({
type: "warning",
message: t("cameraWizard.step4.issues.resolutionHigh", {
resolution: probedResolution,
resolution: stream.resolution,
}),
});
} else if (maxDimension < 640) {
result.push({
type: "error",
message: t("cameraWizard.step4.issues.resolutionLow", {
resolution: probedResolution,
resolution: stream.resolution,
}),
});
}

View File

@ -1435,7 +1435,7 @@ export default function Settings() {
/>
)}
{showUnsavedDot && (
<span className="inline-block size-2 rounded-full bg-unsaved" />
<span className="inline-block size-2 rounded-full bg-danger" />
)}
</div>
)}
@ -1516,7 +1516,7 @@ export default function Settings() {
<div className="sticky bottom-0 z-50 mt-2 bg-background p-4">
<div className="flex flex-col items-center gap-2">
<div className="flex items-center gap-2">
<span className="text-sm text-unsaved">
<span className="text-sm text-danger">
{t("unsavedChanges", {
ns: "views/settings",
defaultValue: "You have unsaved changes",

View File

@ -79,11 +79,11 @@ const PROFILE_COLORS: ProfileColor[] = [
bgMuted: "bg-green-400/20",
},
{
bg: "bg-fuchsia-500",
text: "text-fuchsia-500",
dot: "bg-fuchsia-500",
border: "border-fuchsia-500",
bgMuted: "bg-fuchsia-500/20",
bg: "bg-amber-400",
text: "text-amber-400",
dot: "bg-amber-400",
border: "border-amber-400",
bgMuted: "bg-amber-400/20",
},
{
bg: "bg-slate-400",
@ -93,11 +93,11 @@ const PROFILE_COLORS: ProfileColor[] = [
bgMuted: "bg-slate-400/20",
},
{
bg: "bg-stone-500",
text: "text-stone-500",
dot: "bg-stone-500",
border: "border-stone-500",
bgMuted: "bg-stone-500/20",
bg: "bg-orange-300",
text: "text-orange-300",
dot: "bg-orange-300",
border: "border-orange-300",
bgMuted: "bg-orange-300/20",
},
{
bg: "bg-blue-300",

View File

@ -389,7 +389,7 @@ export default function LiveCameraView({
return "mse";
}, [lowBandwidth, mic, webRTC, isRestreamed]);
useKeyboardListener(["m", "Escape"], (key, modifiers) => {
useKeyboardListener(["m"], (key, modifiers) => {
if (!modifiers.down) {
return true;
}
@ -407,12 +407,6 @@ export default function LiveCameraView({
return true;
}
break;
case "Escape":
if (!fullscreen) {
navigate(-1);
return true;
}
break;
}
return false;

View File

@ -380,9 +380,7 @@ export default function Go2RtcStreamsSettingsView({
>
{hasChanges && (
<div className="flex items-center gap-2">
<span className="text-sm text-unsaved">
{t("unsavedChanges")}
</span>
<span className="text-sm text-danger">{t("unsavedChanges")}</span>
</div>
)}
<div className="flex w-full items-center gap-2 md:w-auto">

View File

@ -212,7 +212,7 @@ export function SingleSectionPage({
{sectionStatus.hasChanges && (
<Badge
variant="secondary"
className="cursor-default bg-unsaved text-xs text-black hover:bg-unsaved"
className="cursor-default bg-danger text-xs text-white hover:bg-danger"
>
{t("button.modified", {
ns: "common",
@ -250,7 +250,7 @@ export function SingleSectionPage({
{sectionStatus.hasChanges && (
<Badge
variant="secondary"
className="cursor-default bg-unsaved text-xs text-black hover:bg-unsaved"
className="cursor-default bg-danger text-xs text-white hover:bg-danger"
>
{t("button.modified", { ns: "common", defaultValue: "Modified" })}
</Badge>

View File

@ -65,7 +65,6 @@ module.exports = {
ring: "hsl(var(--ring))",
danger: "#ef4444",
success: "#22c55e",
unsaved: "#f59e0b",
background: "hsl(var(--background))",
background_alt: "hsl(var(--background-alt))",
foreground: "hsl(var(--foreground))",