mirror of
https://github.com/blakeblackshear/frigate.git
synced 2026-05-09 15:05:26 +03:00
Compare commits
10 Commits
54d73bbb8d
...
8446936262
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
8446936262 | ||
|
|
4171efcd79 | ||
|
|
0ea8924727 | ||
|
|
1a1994ca17 | ||
|
|
819e8de172 | ||
|
|
ea246384bf | ||
|
|
d8f70b7fed | ||
|
|
434ef358a2 | ||
|
|
fe269b77b8 | ||
|
|
77831304a7 |
@ -36,6 +36,7 @@ from frigate.api.defs.response.chat_response import (
|
|||||||
)
|
)
|
||||||
from frigate.api.defs.tags import Tags
|
from frigate.api.defs.tags import Tags
|
||||||
from frigate.api.event import events
|
from frigate.api.event import events
|
||||||
|
from frigate.config import FrigateConfig
|
||||||
from frigate.genai.utils import build_assistant_message_for_conversation
|
from frigate.genai.utils import build_assistant_message_for_conversation
|
||||||
from frigate.jobs.vlm_watch import (
|
from frigate.jobs.vlm_watch import (
|
||||||
get_vlm_watch_job,
|
get_vlm_watch_job,
|
||||||
@ -401,9 +402,38 @@ def get_tools() -> JSONResponse:
|
|||||||
return JSONResponse(content={"tools": tools})
|
return JSONResponse(content={"tools": tools})
|
||||||
|
|
||||||
|
|
||||||
|
def _resolve_zones(
|
||||||
|
zones: List[str],
|
||||||
|
config: FrigateConfig,
|
||||||
|
target_cameras: List[str],
|
||||||
|
) -> List[str]:
|
||||||
|
"""Map zone names to their canonical config keys, case-insensitively.
|
||||||
|
|
||||||
|
LLMs frequently echo a user's casing ("Front Yard") instead of the
|
||||||
|
configured key ("front_yard"). The downstream zone filter is a SQLite GLOB
|
||||||
|
over the JSON-encoded zones column, which is case-sensitive — so an
|
||||||
|
unnormalized name silently returns zero matches. Build a lookup over the
|
||||||
|
relevant cameras' configured zones and substitute when we find a match;
|
||||||
|
unknown names pass through so behavior matches what the model asked for.
|
||||||
|
"""
|
||||||
|
if not zones:
|
||||||
|
return zones
|
||||||
|
|
||||||
|
lookup: Dict[str, str] = {}
|
||||||
|
for camera_id in target_cameras:
|
||||||
|
camera_config = config.cameras.get(camera_id)
|
||||||
|
if camera_config is None:
|
||||||
|
continue
|
||||||
|
for zone_name in camera_config.zones.keys():
|
||||||
|
lookup.setdefault(zone_name.lower(), zone_name)
|
||||||
|
|
||||||
|
return [lookup.get(z.lower(), z) for z in zones]
|
||||||
|
|
||||||
|
|
||||||
async def _execute_search_objects(
|
async def _execute_search_objects(
|
||||||
arguments: Dict[str, Any],
|
arguments: Dict[str, Any],
|
||||||
allowed_cameras: List[str],
|
allowed_cameras: List[str],
|
||||||
|
config: FrigateConfig,
|
||||||
) -> JSONResponse:
|
) -> JSONResponse:
|
||||||
"""
|
"""
|
||||||
Execute the search_objects tool.
|
Execute the search_objects tool.
|
||||||
@ -437,6 +467,11 @@ async def _execute_search_objects(
|
|||||||
# Convert zones array to comma-separated string if provided
|
# Convert zones array to comma-separated string if provided
|
||||||
zones = arguments.get("zones")
|
zones = arguments.get("zones")
|
||||||
if isinstance(zones, list):
|
if isinstance(zones, list):
|
||||||
|
camera_arg = arguments.get("camera")
|
||||||
|
target_cameras = (
|
||||||
|
[camera_arg] if camera_arg and camera_arg != "all" else allowed_cameras
|
||||||
|
)
|
||||||
|
zones = _resolve_zones(zones, config, target_cameras)
|
||||||
zones = ",".join(zones)
|
zones = ",".join(zones)
|
||||||
elif zones is None:
|
elif zones is None:
|
||||||
zones = "all"
|
zones = "all"
|
||||||
@ -528,6 +563,11 @@ async def _execute_find_similar_objects(
|
|||||||
sub_labels = arguments.get("sub_labels")
|
sub_labels = arguments.get("sub_labels")
|
||||||
zones = arguments.get("zones")
|
zones = arguments.get("zones")
|
||||||
|
|
||||||
|
if zones:
|
||||||
|
zones = _resolve_zones(
|
||||||
|
zones, request.app.frigate_config, cameras or list(allowed_cameras)
|
||||||
|
)
|
||||||
|
|
||||||
similarity_mode = arguments.get("similarity_mode", "fused")
|
similarity_mode = arguments.get("similarity_mode", "fused")
|
||||||
if similarity_mode not in ("visual", "semantic", "fused"):
|
if similarity_mode not in ("visual", "semantic", "fused"):
|
||||||
similarity_mode = "fused"
|
similarity_mode = "fused"
|
||||||
@ -655,7 +695,9 @@ async def execute_tool(
|
|||||||
logger.debug(f"Executing tool: {tool_name} with arguments: {arguments}")
|
logger.debug(f"Executing tool: {tool_name} with arguments: {arguments}")
|
||||||
|
|
||||||
if tool_name == "search_objects":
|
if tool_name == "search_objects":
|
||||||
return await _execute_search_objects(arguments, allowed_cameras)
|
return await _execute_search_objects(
|
||||||
|
arguments, allowed_cameras, request.app.frigate_config
|
||||||
|
)
|
||||||
|
|
||||||
if tool_name == "find_similar_objects":
|
if tool_name == "find_similar_objects":
|
||||||
result = await _execute_find_similar_objects(
|
result = await _execute_find_similar_objects(
|
||||||
@ -835,7 +877,9 @@ async def _execute_tool_internal(
|
|||||||
This is used by the chat completion endpoint to execute tools.
|
This is used by the chat completion endpoint to execute tools.
|
||||||
"""
|
"""
|
||||||
if tool_name == "search_objects":
|
if tool_name == "search_objects":
|
||||||
response = await _execute_search_objects(arguments, allowed_cameras)
|
response = await _execute_search_objects(
|
||||||
|
arguments, allowed_cameras, request.app.frigate_config
|
||||||
|
)
|
||||||
try:
|
try:
|
||||||
if hasattr(response, "body"):
|
if hasattr(response, "body"):
|
||||||
body_str = response.body.decode("utf-8")
|
body_str = response.body.decode("utf-8")
|
||||||
@ -899,6 +943,9 @@ async def _execute_start_camera_watch(
|
|||||||
|
|
||||||
await require_camera_access(camera, request=request)
|
await require_camera_access(camera, request=request)
|
||||||
|
|
||||||
|
if zones:
|
||||||
|
zones = _resolve_zones(zones, config, [camera])
|
||||||
|
|
||||||
genai_manager = request.app.genai_manager
|
genai_manager = request.app.genai_manager
|
||||||
chat_client = genai_manager.chat_client
|
chat_client = genai_manager.chat_client
|
||||||
if chat_client is None or not chat_client.supports_vision:
|
if chat_client is None or not chat_client.supports_vision:
|
||||||
|
|||||||
@ -754,6 +754,15 @@ def events_search(
|
|||||||
status_code=404,
|
status_code=404,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if search_event.camera not in allowed_cameras:
|
||||||
|
return JSONResponse(
|
||||||
|
content={
|
||||||
|
"success": False,
|
||||||
|
"message": "Event not found",
|
||||||
|
},
|
||||||
|
status_code=404,
|
||||||
|
)
|
||||||
|
|
||||||
thumb_result = context.search_thumbnail(search_event)
|
thumb_result = context.search_thumbnail(search_event)
|
||||||
thumb_ids = {result[0]: result[1] for result in thumb_result}
|
thumb_ids = {result[0]: result[1] for result in thumb_result}
|
||||||
search_results = {
|
search_results = {
|
||||||
|
|||||||
@ -35,7 +35,7 @@ logger = logging.getLogger(__name__)
|
|||||||
router = APIRouter(tags=[Tags.recordings])
|
router = APIRouter(tags=[Tags.recordings])
|
||||||
|
|
||||||
|
|
||||||
@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
|
@router.get("/recordings/storage", dependencies=[Depends(require_role(["admin"]))])
|
||||||
def get_recordings_storage_usage(request: Request):
|
def get_recordings_storage_usage(request: Request):
|
||||||
recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
|
recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
|
||||||
"storage"
|
"storage"
|
||||||
|
|||||||
@ -549,6 +549,14 @@ class WebPushClient(Communicator):
|
|||||||
logger.debug(f"Sending camera monitoring push notification for {camera_name}")
|
logger.debug(f"Sending camera monitoring push notification for {camera_name}")
|
||||||
|
|
||||||
for user in self.web_pushers:
|
for user in self.web_pushers:
|
||||||
|
if not self._user_has_camera_access(user, camera):
|
||||||
|
logger.debug(
|
||||||
|
"Skipping notification for user %s - no access to camera %s",
|
||||||
|
user,
|
||||||
|
camera,
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
self.send_push_notification(
|
self.send_push_notification(
|
||||||
user=user,
|
user=user,
|
||||||
payload=payload,
|
payload=payload,
|
||||||
|
|||||||
@ -133,6 +133,61 @@ class FaceRecognizer(ABC):
|
|||||||
return 0.0
|
return 0.0
|
||||||
|
|
||||||
|
|
||||||
|
def build_class_mean(
|
||||||
|
embs: list[np.ndarray],
|
||||||
|
trim: float = 0.15,
|
||||||
|
outlier_threshold: float = 0.30,
|
||||||
|
min_keep_frac: float = 0.7,
|
||||||
|
max_iters: int = 3,
|
||||||
|
) -> np.ndarray:
|
||||||
|
"""Build a class-mean embedding with two-layer outlier protection.
|
||||||
|
|
||||||
|
Layer 1 (iterative, vector-wise): drop whole embeddings whose cosine
|
||||||
|
similarity to the current class mean is below ``outlier_threshold``.
|
||||||
|
Catches mislabeled or corrupted training samples (wrong face in the
|
||||||
|
folder, full-frame screenshots, extreme crops) that per-dimension
|
||||||
|
trimming cannot detect.
|
||||||
|
|
||||||
|
Layer 2 (per-dimension): ``scipy.stats.trim_mean`` on the retained set
|
||||||
|
to smooth per-component noise (lighting, expression, alignment jitter).
|
||||||
|
|
||||||
|
Collections with fewer than 5 images bypass outlier rejection — too few
|
||||||
|
samples to establish a reliable class center.
|
||||||
|
"""
|
||||||
|
arr = np.stack(embs, axis=0)
|
||||||
|
|
||||||
|
if len(arr) < 5:
|
||||||
|
return np.asarray(stats.trim_mean(arr, trim, axis=0))
|
||||||
|
|
||||||
|
keep = np.ones(len(arr), dtype=bool)
|
||||||
|
floor = max(5, int(np.ceil(min_keep_frac * len(arr))))
|
||||||
|
|
||||||
|
for _ in range(max_iters):
|
||||||
|
mean = stats.trim_mean(arr[keep], trim, axis=0)
|
||||||
|
m_norm = mean / (np.linalg.norm(mean) + 1e-9)
|
||||||
|
e_norms = arr / (np.linalg.norm(arr, axis=1, keepdims=True) + 1e-9)
|
||||||
|
cos = e_norms @ m_norm
|
||||||
|
new_keep = cos >= outlier_threshold
|
||||||
|
|
||||||
|
if new_keep.sum() < floor:
|
||||||
|
top = np.argsort(-cos)[:floor]
|
||||||
|
new_keep = np.zeros(len(arr), dtype=bool)
|
||||||
|
new_keep[top] = True
|
||||||
|
|
||||||
|
if np.array_equal(new_keep, keep):
|
||||||
|
break
|
||||||
|
keep = new_keep
|
||||||
|
|
||||||
|
dropped = int((~keep).sum())
|
||||||
|
|
||||||
|
if dropped:
|
||||||
|
logger.debug(
|
||||||
|
f"Vector-wise outlier filter dropped {dropped}/{len(arr)} embeddings"
|
||||||
|
)
|
||||||
|
|
||||||
|
return np.asarray(stats.trim_mean(arr[keep], trim, axis=0))
|
||||||
|
|
||||||
|
|
||||||
def similarity_to_confidence(
|
def similarity_to_confidence(
|
||||||
cosine_similarity: float,
|
cosine_similarity: float,
|
||||||
median: float = 0.3,
|
median: float = 0.3,
|
||||||
@ -229,7 +284,7 @@ class FaceNetRecognizer(FaceRecognizer):
|
|||||||
|
|
||||||
for name, embs in face_embeddings_map.items():
|
for name, embs in face_embeddings_map.items():
|
||||||
if embs:
|
if embs:
|
||||||
self.mean_embs[name] = stats.trim_mean(embs, 0.15)
|
self.mean_embs[name] = build_class_mean(embs)
|
||||||
|
|
||||||
logger.debug("Finished building ArcFace model")
|
logger.debug("Finished building ArcFace model")
|
||||||
|
|
||||||
@ -340,7 +395,7 @@ class ArcFaceRecognizer(FaceRecognizer):
|
|||||||
|
|
||||||
for name, embs in face_embeddings_map.items():
|
for name, embs in face_embeddings_map.items():
|
||||||
if embs:
|
if embs:
|
||||||
self.mean_embs[name] = stats.trim_mean(embs, 0.15)
|
self.mean_embs[name] = build_class_mean(embs)
|
||||||
|
|
||||||
logger.debug("Finished building ArcFace model")
|
logger.debug("Finished building ArcFace model")
|
||||||
|
|
||||||
|
|||||||
@ -39,6 +39,8 @@ logger = logging.getLogger(__name__)
|
|||||||
|
|
||||||
RECORDING_BUFFER_EXTENSION_PERCENT = 0.10
|
RECORDING_BUFFER_EXTENSION_PERCENT = 0.10
|
||||||
MIN_RECORDING_DURATION = 10
|
MIN_RECORDING_DURATION = 10
|
||||||
|
MAX_IMAGE_TOKENS = 24000
|
||||||
|
MAX_FRAMES_PER_SECOND = 1
|
||||||
|
|
||||||
|
|
||||||
class ReviewDescriptionProcessor(PostProcessorApi):
|
class ReviewDescriptionProcessor(PostProcessorApi):
|
||||||
@ -60,14 +62,22 @@ class ReviewDescriptionProcessor(PostProcessorApi):
|
|||||||
def calculate_frame_count(
|
def calculate_frame_count(
|
||||||
self,
|
self,
|
||||||
camera: str,
|
camera: str,
|
||||||
|
duration: float,
|
||||||
image_source: ImageSourceEnum = ImageSourceEnum.preview,
|
image_source: ImageSourceEnum = ImageSourceEnum.preview,
|
||||||
height: int = 480,
|
height: int = 480,
|
||||||
) -> int:
|
) -> int:
|
||||||
"""Calculate optimal number of frames based on context size, image source, and resolution.
|
"""Calculate optimal number of frames based on event duration, context size,
|
||||||
|
image source, and resolution.
|
||||||
|
|
||||||
Token usage varies by resolution: larger images (ultra-wide aspect ratios) use more tokens.
|
Per-image token cost is asked of the GenAI provider so providers that know
|
||||||
Estimates ~1 token per 1250 pixels. Targets 98% context utilization with safety margin.
|
their model's true cost (e.g. llama.cpp can probe the loaded mmproj) can
|
||||||
Capped at 20 frames.
|
diverge from the default ~1-token-per-1250-pixels heuristic. The frame
|
||||||
|
budget is bounded by:
|
||||||
|
- remaining context window after prompt + response reservations
|
||||||
|
- a fixed MAX_IMAGE_TOKENS ceiling
|
||||||
|
- MAX_FRAMES_PER_SECOND x duration, to avoid drowning short events in
|
||||||
|
near-duplicate frames where the model latches onto the redundant middle
|
||||||
|
and skips the start/end action
|
||||||
"""
|
"""
|
||||||
client = self.genai_manager.description_client
|
client = self.genai_manager.description_client
|
||||||
|
|
||||||
@ -105,14 +115,15 @@ class ReviewDescriptionProcessor(PostProcessorApi):
|
|||||||
width = target_width
|
width = target_width
|
||||||
height = int(target_width / aspect_ratio)
|
height = int(target_width / aspect_ratio)
|
||||||
|
|
||||||
pixels_per_image = width * height
|
tokens_per_image = client.estimate_image_tokens(width, height)
|
||||||
tokens_per_image = pixels_per_image / 1250
|
|
||||||
prompt_tokens = 3800
|
prompt_tokens = 3800
|
||||||
response_tokens = 300
|
response_tokens = 300
|
||||||
available_tokens = context_size - prompt_tokens - response_tokens
|
context_budget = context_size - prompt_tokens - response_tokens
|
||||||
max_frames = int(available_tokens / tokens_per_image)
|
image_token_budget = min(context_budget, MAX_IMAGE_TOKENS)
|
||||||
|
max_frames_by_tokens = int(image_token_budget / tokens_per_image)
|
||||||
return min(max(max_frames, 3), 20)
|
max_frames_by_duration = int(duration * MAX_FRAMES_PER_SECOND)
|
||||||
|
max_frames = min(max_frames_by_tokens, max_frames_by_duration)
|
||||||
|
return max(max_frames, 3)
|
||||||
|
|
||||||
def process_data(
|
def process_data(
|
||||||
self, data: dict[str, Any], data_type: PostProcessDataEnum
|
self, data: dict[str, Any], data_type: PostProcessDataEnum
|
||||||
@ -376,7 +387,9 @@ class ReviewDescriptionProcessor(PostProcessorApi):
|
|||||||
all_frames.append(os.path.join(preview_dir, file))
|
all_frames.append(os.path.join(preview_dir, file))
|
||||||
|
|
||||||
frame_count = len(all_frames)
|
frame_count = len(all_frames)
|
||||||
desired_frame_count = self.calculate_frame_count(camera)
|
desired_frame_count = self.calculate_frame_count(
|
||||||
|
camera, duration=end_time - start_time
|
||||||
|
)
|
||||||
|
|
||||||
if frame_count <= desired_frame_count:
|
if frame_count <= desired_frame_count:
|
||||||
return all_frames
|
return all_frames
|
||||||
@ -400,7 +413,7 @@ class ReviewDescriptionProcessor(PostProcessorApi):
|
|||||||
"""Get frames from recordings at specified timestamps."""
|
"""Get frames from recordings at specified timestamps."""
|
||||||
duration = end_time - start_time
|
duration = end_time - start_time
|
||||||
desired_frame_count = self.calculate_frame_count(
|
desired_frame_count = self.calculate_frame_count(
|
||||||
camera, ImageSourceEnum.recordings, height
|
camera, duration, ImageSourceEnum.recordings, height
|
||||||
)
|
)
|
||||||
|
|
||||||
# Calculate evenly spaced timestamps throughout the duration
|
# Calculate evenly spaced timestamps throughout the duration
|
||||||
|
|||||||
@ -1,21 +1,48 @@
|
|||||||
from pydantic import BaseModel, ConfigDict, Field
|
from typing import Annotated
|
||||||
|
|
||||||
|
from pydantic import BaseModel, ConfigDict, Field, StringConstraints
|
||||||
|
|
||||||
|
ObservationItem = Annotated[str, StringConstraints(min_length=20, max_length=160)]
|
||||||
|
|
||||||
|
|
||||||
class ReviewMetadata(BaseModel):
|
class ReviewMetadata(BaseModel):
|
||||||
model_config = ConfigDict(extra="ignore", protected_namespaces=())
|
model_config = ConfigDict(extra="ignore", protected_namespaces=())
|
||||||
|
|
||||||
|
observations: list[ObservationItem] = Field(
|
||||||
|
...,
|
||||||
|
min_length=3,
|
||||||
|
max_length=15,
|
||||||
|
description=(
|
||||||
|
"Enumerate the significant observations across all frames, in "
|
||||||
|
"chronological order, BEFORE composing the scene narrative. "
|
||||||
|
"Include the very start of the activity — for example, a vehicle "
|
||||||
|
"entering the frame or pulling into the driveway — even if it "
|
||||||
|
"lasts only a few frames and the rest of the clip is dominated "
|
||||||
|
"by a longer activity. Include each arrival, departure, motion "
|
||||||
|
"event, object handled, and notable change in position or state. "
|
||||||
|
"Each item is a single concrete fact written as a complete "
|
||||||
|
"sentence. Do not summarize, interpret, or assign meaning here — "
|
||||||
|
"that belongs in the scene field."
|
||||||
|
),
|
||||||
|
)
|
||||||
title: str = Field(
|
title: str = Field(
|
||||||
description="A short title characterizing what took place and where, under 10 words."
|
max_length=80,
|
||||||
|
description="A short title characterizing what took place and where, under 10 words.",
|
||||||
)
|
)
|
||||||
scene: str = Field(
|
scene: str = Field(
|
||||||
description="A chronological narrative of what happens from start to finish."
|
min_length=150,
|
||||||
|
max_length=600,
|
||||||
|
description="A chronological narrative of what happens from start to finish, drawing directly from the items in observations.",
|
||||||
)
|
)
|
||||||
shortSummary: str = Field(
|
shortSummary: str = Field(
|
||||||
description="A brief 2-sentence summary of the scene, suitable for notifications."
|
min_length=70,
|
||||||
|
max_length=100,
|
||||||
|
description="A brief 2-sentence summary of the scene, suitable for notifications.",
|
||||||
)
|
)
|
||||||
confidence: float = Field(
|
confidence: float = Field(
|
||||||
ge=0.0,
|
ge=0.0,
|
||||||
description="Confidence in the analysis, from 0 to 1.",
|
le=1.0,
|
||||||
|
description="Confidence in the analysis as a decimal between 0.0 and 1.0, where 0.0 means no confidence and 1.0 means complete confidence. Express ONLY as a decimal.",
|
||||||
)
|
)
|
||||||
potential_threat_level: int = Field(
|
potential_threat_level: int = Field(
|
||||||
ge=0,
|
ge=0,
|
||||||
|
|||||||
@ -2,6 +2,7 @@
|
|||||||
|
|
||||||
import datetime
|
import datetime
|
||||||
import importlib
|
import importlib
|
||||||
|
import json
|
||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
@ -9,6 +10,7 @@ from typing import Any, Callable, Optional
|
|||||||
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
from playhouse.shortcuts import model_to_dict
|
from playhouse.shortcuts import model_to_dict
|
||||||
|
from pydantic import ValidationError
|
||||||
|
|
||||||
from frigate.config import CameraConfig, GenAIConfig, GenAIProviderEnum
|
from frigate.config import CameraConfig, GenAIConfig, GenAIProviderEnum
|
||||||
from frigate.const import CLIPS_DIR
|
from frigate.const import CLIPS_DIR
|
||||||
@ -181,7 +183,35 @@ Each line represents a detection state, not necessarily unique individuals. The
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
metadata = ReviewMetadata.model_validate_json(clean_json)
|
metadata = ReviewMetadata.model_validate_json(clean_json)
|
||||||
|
except ValidationError as ve:
|
||||||
|
# Constraint violations (length, item count, ranges) are logged
|
||||||
|
# at debug and the response is kept anyway — a slightly
|
||||||
|
# off-spec answer is still usable, and dropping the whole
|
||||||
|
# response loses the narrative content the model produced.
|
||||||
|
for err in ve.errors():
|
||||||
|
loc = ".".join(str(p) for p in err["loc"]) or "<root>"
|
||||||
|
logger.debug(
|
||||||
|
"Review metadata soft validation: %s — %s (input: %r)",
|
||||||
|
loc,
|
||||||
|
err["msg"],
|
||||||
|
err.get("input"),
|
||||||
|
)
|
||||||
|
try:
|
||||||
|
raw = json.loads(clean_json)
|
||||||
|
except json.JSONDecodeError as je:
|
||||||
|
logger.error("Failed to parse review description JSON: %s", je)
|
||||||
|
return None
|
||||||
|
# observations is required on the model; fill an empty default
|
||||||
|
# if the response omitted it so attribute access stays safe.
|
||||||
|
raw.setdefault("observations", [])
|
||||||
|
metadata = ReviewMetadata.model_construct(**raw)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(
|
||||||
|
f"Failed to parse review description as the response did not match expected format. {e}"
|
||||||
|
)
|
||||||
|
return None
|
||||||
|
|
||||||
|
try:
|
||||||
# Normalize confidence if model returned a percentage (e.g. 85 instead of 0.85)
|
# Normalize confidence if model returned a percentage (e.g. 85 instead of 0.85)
|
||||||
if metadata.confidence > 1.0:
|
if metadata.confidence > 1.0:
|
||||||
metadata.confidence = min(metadata.confidence / 100.0, 1.0)
|
metadata.confidence = min(metadata.confidence / 100.0, 1.0)
|
||||||
@ -194,10 +224,7 @@ Each line represents a detection state, not necessarily unique individuals. The
|
|||||||
metadata.time = review_data["start"]
|
metadata.time = review_data["start"]
|
||||||
return metadata
|
return metadata
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
# rarely LLMs can fail to follow directions on output format
|
logger.error(f"Failed to post-process review metadata: {e}")
|
||||||
logger.warning(
|
|
||||||
f"Failed to parse review description as the response did not match expected format. {e}"
|
|
||||||
)
|
|
||||||
return None
|
return None
|
||||||
else:
|
else:
|
||||||
logger.debug(
|
logger.debug(
|
||||||
@ -344,6 +371,14 @@ Guidelines:
|
|||||||
"""Get the context window size for this provider in tokens."""
|
"""Get the context window size for this provider in tokens."""
|
||||||
return 4096
|
return 4096
|
||||||
|
|
||||||
|
def estimate_image_tokens(self, width: int, height: int) -> float:
|
||||||
|
"""Estimate prompt tokens consumed by a single image of the given dimensions.
|
||||||
|
|
||||||
|
Default heuristic: ~1 token per 1250 pixels. Providers that can measure or
|
||||||
|
know their model's exact image-token cost should override.
|
||||||
|
"""
|
||||||
|
return (width * height) / 1250
|
||||||
|
|
||||||
def embed(
|
def embed(
|
||||||
self,
|
self,
|
||||||
texts: list[str] | None = None,
|
texts: list[str] | None = None,
|
||||||
|
|||||||
@ -42,6 +42,8 @@ class LlamaCppClient(GenAIClient):
|
|||||||
_supports_vision: bool
|
_supports_vision: bool
|
||||||
_supports_audio: bool
|
_supports_audio: bool
|
||||||
_supports_tools: bool
|
_supports_tools: bool
|
||||||
|
_image_token_cache: dict[tuple[int, int], int]
|
||||||
|
_text_baseline_tokens: int | None
|
||||||
|
|
||||||
def _init_provider(self) -> str | None:
|
def _init_provider(self) -> str | None:
|
||||||
"""Initialize the client and query model metadata from the server."""
|
"""Initialize the client and query model metadata from the server."""
|
||||||
@ -52,6 +54,8 @@ class LlamaCppClient(GenAIClient):
|
|||||||
self._supports_vision = False
|
self._supports_vision = False
|
||||||
self._supports_audio = False
|
self._supports_audio = False
|
||||||
self._supports_tools = False
|
self._supports_tools = False
|
||||||
|
self._image_token_cache = {}
|
||||||
|
self._text_baseline_tokens = None
|
||||||
|
|
||||||
base_url = (
|
base_url = (
|
||||||
self.genai_config.base_url.rstrip("/")
|
self.genai_config.base_url.rstrip("/")
|
||||||
@ -272,6 +276,91 @@ class LlamaCppClient(GenAIClient):
|
|||||||
return self._context_size
|
return self._context_size
|
||||||
return 4096
|
return 4096
|
||||||
|
|
||||||
|
def estimate_image_tokens(self, width: int, height: int) -> float:
|
||||||
|
"""Probe the llama.cpp server to learn the model's image-token cost at the
|
||||||
|
requested dimensions.
|
||||||
|
|
||||||
|
llama.cpp's image tokenization is a deterministic function of dimensions and
|
||||||
|
the loaded mmproj, so the result is cached per (width, height) for the
|
||||||
|
lifetime of the process. Falls back to the base pixel heuristic if the
|
||||||
|
server is unreachable or the response is malformed.
|
||||||
|
"""
|
||||||
|
if self.provider is None:
|
||||||
|
return super().estimate_image_tokens(width, height)
|
||||||
|
|
||||||
|
cached = self._image_token_cache.get((width, height))
|
||||||
|
|
||||||
|
if cached is not None:
|
||||||
|
return cached
|
||||||
|
|
||||||
|
try:
|
||||||
|
baseline = self._probe_baseline_tokens()
|
||||||
|
with_image = self._probe_image_prompt_tokens(width, height)
|
||||||
|
tokens = max(1, with_image - baseline)
|
||||||
|
except Exception as e:
|
||||||
|
logger.debug(
|
||||||
|
"llama.cpp image-token probe failed for %dx%d (%s); using heuristic",
|
||||||
|
width,
|
||||||
|
height,
|
||||||
|
e,
|
||||||
|
)
|
||||||
|
return super().estimate_image_tokens(width, height)
|
||||||
|
|
||||||
|
self._image_token_cache[(width, height)] = tokens
|
||||||
|
logger.debug(
|
||||||
|
"llama.cpp model '%s' uses ~%d tokens for %dx%d images",
|
||||||
|
self.genai_config.model,
|
||||||
|
tokens,
|
||||||
|
width,
|
||||||
|
height,
|
||||||
|
)
|
||||||
|
return tokens
|
||||||
|
|
||||||
|
def _probe_baseline_tokens(self) -> int:
|
||||||
|
"""Return prompt_tokens for a minimal text-only request. Cached after first call."""
|
||||||
|
if self._text_baseline_tokens is not None:
|
||||||
|
return self._text_baseline_tokens
|
||||||
|
|
||||||
|
self._text_baseline_tokens = self._probe_prompt_tokens(
|
||||||
|
[{"type": "text", "text": "."}]
|
||||||
|
)
|
||||||
|
return self._text_baseline_tokens
|
||||||
|
|
||||||
|
def _probe_image_prompt_tokens(self, width: int, height: int) -> int:
|
||||||
|
"""Return prompt_tokens for a single synthetic image plus minimal text."""
|
||||||
|
img = Image.new("RGB", (width, height), (128, 128, 128))
|
||||||
|
buf = io.BytesIO()
|
||||||
|
img.save(buf, format="JPEG", quality=60)
|
||||||
|
encoded = base64.b64encode(buf.getvalue()).decode("utf-8")
|
||||||
|
return self._probe_prompt_tokens(
|
||||||
|
[
|
||||||
|
{"type": "text", "text": "."},
|
||||||
|
{
|
||||||
|
"type": "image_url",
|
||||||
|
"image_url": {"url": f"data:image/jpeg;base64,{encoded}"},
|
||||||
|
},
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
|
def _probe_prompt_tokens(self, content: list[dict[str, Any]]) -> int:
|
||||||
|
"""POST a 1-token chat completion and return reported prompt_tokens.
|
||||||
|
|
||||||
|
Uses a generous timeout to absorb a cold model load on the first probe
|
||||||
|
when the server lazily loads models on demand (e.g. llama-swap).
|
||||||
|
"""
|
||||||
|
payload = {
|
||||||
|
"model": self.genai_config.model,
|
||||||
|
"messages": [{"role": "user", "content": content}],
|
||||||
|
"max_tokens": 1,
|
||||||
|
}
|
||||||
|
response = requests.post(
|
||||||
|
f"{self.provider}/v1/chat/completions",
|
||||||
|
json=payload,
|
||||||
|
timeout=60,
|
||||||
|
)
|
||||||
|
response.raise_for_status()
|
||||||
|
return int(response.json()["usage"]["prompt_tokens"])
|
||||||
|
|
||||||
def _build_payload(
|
def _build_payload(
|
||||||
self,
|
self,
|
||||||
messages: list[dict[str, Any]],
|
messages: list[dict[str, Any]],
|
||||||
|
|||||||
@ -19,6 +19,7 @@ import numpy as np
|
|||||||
from frigate.comms.inter_process import InterProcessRequestor
|
from frigate.comms.inter_process import InterProcessRequestor
|
||||||
from frigate.config import BirdseyeModeEnum, FfmpegConfig, FrigateConfig
|
from frigate.config import BirdseyeModeEnum, FfmpegConfig, FrigateConfig
|
||||||
from frigate.const import BASE_DIR, BIRDSEYE_PIPE, INSTALL_DIR, UPDATE_BIRDSEYE_LAYOUT
|
from frigate.const import BASE_DIR, BIRDSEYE_PIPE, INSTALL_DIR, UPDATE_BIRDSEYE_LAYOUT
|
||||||
|
from frigate.output.ws_auth import ws_has_camera_access
|
||||||
from frigate.util.image import (
|
from frigate.util.image import (
|
||||||
SharedMemoryFrameManager,
|
SharedMemoryFrameManager,
|
||||||
copy_yuv_to_position,
|
copy_yuv_to_position,
|
||||||
@ -236,12 +237,14 @@ class BroadcastThread(threading.Thread):
|
|||||||
converter: FFMpegConverter,
|
converter: FFMpegConverter,
|
||||||
websocket_server: Any,
|
websocket_server: Any,
|
||||||
stop_event: MpEvent,
|
stop_event: MpEvent,
|
||||||
|
config: FrigateConfig,
|
||||||
):
|
):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
self.camera = camera
|
self.camera = camera
|
||||||
self.converter = converter
|
self.converter = converter
|
||||||
self.websocket_server = websocket_server
|
self.websocket_server = websocket_server
|
||||||
self.stop_event = stop_event
|
self.stop_event = stop_event
|
||||||
|
self.config = config
|
||||||
|
|
||||||
def run(self) -> None:
|
def run(self) -> None:
|
||||||
while not self.stop_event.is_set():
|
while not self.stop_event.is_set():
|
||||||
@ -256,6 +259,7 @@ class BroadcastThread(threading.Thread):
|
|||||||
if (
|
if (
|
||||||
not ws.terminated
|
not ws.terminated
|
||||||
and ws.environ["PATH_INFO"] == f"/{self.camera}"
|
and ws.environ["PATH_INFO"] == f"/{self.camera}"
|
||||||
|
and ws_has_camera_access(ws, self.camera, self.config)
|
||||||
):
|
):
|
||||||
try:
|
try:
|
||||||
ws.send(buf, binary=True)
|
ws.send(buf, binary=True)
|
||||||
@ -806,7 +810,11 @@ class Birdseye:
|
|||||||
config.birdseye.restream,
|
config.birdseye.restream,
|
||||||
)
|
)
|
||||||
self.broadcaster = BroadcastThread(
|
self.broadcaster = BroadcastThread(
|
||||||
"birdseye", self.converter, websocket_server, stop_event
|
"birdseye",
|
||||||
|
self.converter,
|
||||||
|
websocket_server,
|
||||||
|
stop_event,
|
||||||
|
config,
|
||||||
)
|
)
|
||||||
self.birdseye_manager = BirdsEyeFrameManager(self.config, stop_event)
|
self.birdseye_manager = BirdsEyeFrameManager(self.config, stop_event)
|
||||||
self.frame_manager = SharedMemoryFrameManager()
|
self.frame_manager = SharedMemoryFrameManager()
|
||||||
|
|||||||
@ -7,7 +7,8 @@ import threading
|
|||||||
from multiprocessing.synchronize import Event as MpEvent
|
from multiprocessing.synchronize import Event as MpEvent
|
||||||
from typing import Any
|
from typing import Any
|
||||||
|
|
||||||
from frigate.config import CameraConfig, FfmpegConfig
|
from frigate.config import CameraConfig, FfmpegConfig, FrigateConfig
|
||||||
|
from frigate.output.ws_auth import ws_has_camera_access
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -102,12 +103,14 @@ class BroadcastThread(threading.Thread):
|
|||||||
converter: FFMpegConverter,
|
converter: FFMpegConverter,
|
||||||
websocket_server: Any,
|
websocket_server: Any,
|
||||||
stop_event: MpEvent,
|
stop_event: MpEvent,
|
||||||
|
config: FrigateConfig,
|
||||||
):
|
):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
self.camera = camera
|
self.camera = camera
|
||||||
self.converter = converter
|
self.converter = converter
|
||||||
self.websocket_server = websocket_server
|
self.websocket_server = websocket_server
|
||||||
self.stop_event = stop_event
|
self.stop_event = stop_event
|
||||||
|
self.config = config
|
||||||
|
|
||||||
def run(self) -> None:
|
def run(self) -> None:
|
||||||
while not self.stop_event.is_set():
|
while not self.stop_event.is_set():
|
||||||
@ -122,6 +125,7 @@ class BroadcastThread(threading.Thread):
|
|||||||
if (
|
if (
|
||||||
not ws.terminated
|
not ws.terminated
|
||||||
and ws.environ["PATH_INFO"] == f"/{self.camera}"
|
and ws.environ["PATH_INFO"] == f"/{self.camera}"
|
||||||
|
and ws_has_camera_access(ws, self.camera, self.config)
|
||||||
):
|
):
|
||||||
try:
|
try:
|
||||||
ws.send(buf, binary=True)
|
ws.send(buf, binary=True)
|
||||||
@ -135,7 +139,11 @@ class BroadcastThread(threading.Thread):
|
|||||||
|
|
||||||
class JsmpegCamera:
|
class JsmpegCamera:
|
||||||
def __init__(
|
def __init__(
|
||||||
self, config: CameraConfig, stop_event: MpEvent, websocket_server: Any
|
self,
|
||||||
|
config: CameraConfig,
|
||||||
|
frigate_config: FrigateConfig,
|
||||||
|
stop_event: MpEvent,
|
||||||
|
websocket_server: Any,
|
||||||
) -> None:
|
) -> None:
|
||||||
self.config = config
|
self.config = config
|
||||||
self.input: queue.Queue[bytes] = queue.Queue(maxsize=config.detect.fps)
|
self.input: queue.Queue[bytes] = queue.Queue(maxsize=config.detect.fps)
|
||||||
@ -154,7 +162,11 @@ class JsmpegCamera:
|
|||||||
config.live.quality,
|
config.live.quality,
|
||||||
)
|
)
|
||||||
self.broadcaster = BroadcastThread(
|
self.broadcaster = BroadcastThread(
|
||||||
config.name or "", self.converter, websocket_server, stop_event
|
config.name or "",
|
||||||
|
self.converter,
|
||||||
|
websocket_server,
|
||||||
|
stop_event,
|
||||||
|
frigate_config,
|
||||||
)
|
)
|
||||||
|
|
||||||
self.converter.start()
|
self.converter.start()
|
||||||
|
|||||||
@ -32,6 +32,7 @@ from frigate.const import (
|
|||||||
from frigate.output.birdseye import Birdseye
|
from frigate.output.birdseye import Birdseye
|
||||||
from frigate.output.camera import JsmpegCamera
|
from frigate.output.camera import JsmpegCamera
|
||||||
from frigate.output.preview import PreviewRecorder
|
from frigate.output.preview import PreviewRecorder
|
||||||
|
from frigate.output.ws_auth import ws_has_camera_access
|
||||||
from frigate.util.image import SharedMemoryFrameManager, get_blank_yuv_frame
|
from frigate.util.image import SharedMemoryFrameManager, get_blank_yuv_frame
|
||||||
from frigate.util.process import FrigateProcess
|
from frigate.util.process import FrigateProcess
|
||||||
|
|
||||||
@ -102,7 +103,7 @@ class OutputProcess(FrigateProcess):
|
|||||||
) -> None:
|
) -> None:
|
||||||
camera_config = self.config.cameras[camera]
|
camera_config = self.config.cameras[camera]
|
||||||
jsmpeg_cameras[camera] = JsmpegCamera(
|
jsmpeg_cameras[camera] = JsmpegCamera(
|
||||||
camera_config, self.stop_event, websocket_server
|
camera_config, self.config, self.stop_event, websocket_server
|
||||||
)
|
)
|
||||||
preview_recorders[camera] = PreviewRecorder(camera_config)
|
preview_recorders[camera] = PreviewRecorder(camera_config)
|
||||||
preview_write_times[camera] = 0
|
preview_write_times[camera] = 0
|
||||||
@ -262,6 +263,7 @@ class OutputProcess(FrigateProcess):
|
|||||||
# send camera frame to ffmpeg process if websockets are connected
|
# send camera frame to ffmpeg process if websockets are connected
|
||||||
if any(
|
if any(
|
||||||
ws.environ["PATH_INFO"].endswith(camera)
|
ws.environ["PATH_INFO"].endswith(camera)
|
||||||
|
and ws_has_camera_access(ws, camera, self.config)
|
||||||
for ws in websocket_server.manager
|
for ws in websocket_server.manager
|
||||||
):
|
):
|
||||||
# write to the converter for the camera if clients are listening to the specific camera
|
# write to the converter for the camera if clients are listening to the specific camera
|
||||||
@ -275,6 +277,7 @@ class OutputProcess(FrigateProcess):
|
|||||||
self.config.birdseye.restream
|
self.config.birdseye.restream
|
||||||
or any(
|
or any(
|
||||||
ws.environ["PATH_INFO"].endswith("birdseye")
|
ws.environ["PATH_INFO"].endswith("birdseye")
|
||||||
|
and ws_has_camera_access(ws, "birdseye", self.config)
|
||||||
for ws in websocket_server.manager
|
for ws in websocket_server.manager
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|||||||
43
frigate/output/ws_auth.py
Normal file
43
frigate/output/ws_auth.py
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
"""Authorization helpers for JSMPEG websocket clients."""
|
||||||
|
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
from frigate.config import FrigateConfig
|
||||||
|
from frigate.models import User
|
||||||
|
|
||||||
|
|
||||||
|
def _get_valid_ws_roles(ws: Any, config: FrigateConfig) -> list[str]:
|
||||||
|
role_header = ws.environ.get("HTTP_REMOTE_ROLE", "")
|
||||||
|
roles = [
|
||||||
|
role.strip()
|
||||||
|
for role in role_header.split(config.proxy.separator)
|
||||||
|
if role.strip()
|
||||||
|
]
|
||||||
|
return [role for role in roles if role in config.auth.roles]
|
||||||
|
|
||||||
|
|
||||||
|
def ws_has_camera_access(ws: Any, camera_name: str, config: FrigateConfig) -> bool:
|
||||||
|
"""Return True when a websocket client is authorized for the camera path."""
|
||||||
|
roles = _get_valid_ws_roles(ws, config)
|
||||||
|
|
||||||
|
if not roles:
|
||||||
|
return False
|
||||||
|
|
||||||
|
roles_dict = config.auth.roles
|
||||||
|
|
||||||
|
# Birdseye is a composite stream, so only users with unrestricted access
|
||||||
|
# should receive it.
|
||||||
|
if camera_name == "birdseye":
|
||||||
|
return any(role == "admin" or not roles_dict.get(role) for role in roles)
|
||||||
|
|
||||||
|
all_camera_names = set(config.cameras.keys())
|
||||||
|
|
||||||
|
for role in roles:
|
||||||
|
if role == "admin" or not roles_dict.get(role):
|
||||||
|
return True
|
||||||
|
|
||||||
|
allowed_cameras = User.get_allowed_cameras(role, roles_dict, all_camera_names)
|
||||||
|
if camera_name in allowed_cameras:
|
||||||
|
return True
|
||||||
|
|
||||||
|
return False
|
||||||
@ -23,6 +23,26 @@ class TestHttpApp(BaseTestHttp):
|
|||||||
response_json = response.json()
|
response_json = response.json()
|
||||||
assert response_json == self.test_stats
|
assert response_json == self.test_stats
|
||||||
|
|
||||||
|
def test_recordings_storage_requires_admin(self):
|
||||||
|
stats = Mock(spec=StatsEmitter)
|
||||||
|
stats.get_latest_stats.return_value = self.test_stats
|
||||||
|
app = super().create_app(stats)
|
||||||
|
app.storage_maintainer = Mock()
|
||||||
|
app.storage_maintainer.calculate_camera_usages.return_value = {
|
||||||
|
"front_door": {"usage": 2.0},
|
||||||
|
}
|
||||||
|
|
||||||
|
with AuthTestClient(app) as client:
|
||||||
|
response = client.get(
|
||||||
|
"/recordings/storage",
|
||||||
|
headers={"remote-user": "viewer", "remote-role": "viewer"},
|
||||||
|
)
|
||||||
|
assert response.status_code == 403
|
||||||
|
|
||||||
|
response = client.get("/recordings/storage")
|
||||||
|
assert response.status_code == 200
|
||||||
|
assert response.json()["front_door"]["usage_percent"] == 25.0
|
||||||
|
|
||||||
def test_config_set_in_memory_replaces_objects_track_list(self):
|
def test_config_set_in_memory_replaces_objects_track_list(self):
|
||||||
self.minimal_config["cameras"]["front_door"]["objects"] = {
|
self.minimal_config["cameras"]["front_door"]["objects"] = {
|
||||||
"track": ["person", "car"],
|
"track": ["person", "car"],
|
||||||
|
|||||||
@ -219,6 +219,25 @@ class TestHttpApp(BaseTestHttp):
|
|||||||
assert len(events) == 1
|
assert len(events) == 1
|
||||||
assert events[0]["id"] == event_id
|
assert events[0]["id"] == event_id
|
||||||
|
|
||||||
|
def test_similarity_search_hides_unauthorized_anchor_event(self):
|
||||||
|
mock_embeddings = Mock()
|
||||||
|
self.app.frigate_config.semantic_search.enabled = True
|
||||||
|
self.app.embeddings = mock_embeddings
|
||||||
|
|
||||||
|
with AuthTestClient(self.app) as client:
|
||||||
|
super().insert_mock_event("hidden.anchor", camera="back_door")
|
||||||
|
response = client.get(
|
||||||
|
"/events/search",
|
||||||
|
params={
|
||||||
|
"search_type": "similarity",
|
||||||
|
"event_id": "hidden.anchor",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert response.status_code == 404
|
||||||
|
assert response.json()["message"] == "Event not found"
|
||||||
|
mock_embeddings.search_thumbnail.assert_not_called()
|
||||||
|
|
||||||
def test_get_good_event(self):
|
def test_get_good_event(self):
|
||||||
id = "123456.random"
|
id = "123456.random"
|
||||||
|
|
||||||
|
|||||||
@ -145,9 +145,12 @@ class TestExecuteFindSimilarObjects(unittest.TestCase):
|
|||||||
embeddings=embeddings,
|
embeddings=embeddings,
|
||||||
frigate_config=SimpleNamespace(
|
frigate_config=SimpleNamespace(
|
||||||
semantic_search=SimpleNamespace(enabled=semantic_enabled),
|
semantic_search=SimpleNamespace(enabled=semantic_enabled),
|
||||||
|
cameras={"driveway": object()},
|
||||||
|
auth=SimpleNamespace(roles={"admin": [], "viewer": ["driveway"]}),
|
||||||
|
proxy=SimpleNamespace(separator=","),
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
return SimpleNamespace(app=app)
|
return SimpleNamespace(app=app, headers={})
|
||||||
|
|
||||||
def test_semantic_search_disabled_returns_error(self):
|
def test_semantic_search_disabled_returns_error(self):
|
||||||
req = self._make_request(semantic_enabled=False)
|
req = self._make_request(semantic_enabled=False)
|
||||||
@ -180,7 +183,7 @@ class TestExecuteFindSimilarObjects(unittest.TestCase):
|
|||||||
_execute_find_similar_objects(
|
_execute_find_similar_objects(
|
||||||
req,
|
req,
|
||||||
{"event_id": "anchor", "cameras": ["nonexistent_cam"]},
|
{"event_id": "anchor", "cameras": ["nonexistent_cam"]},
|
||||||
allowed_cameras=["nonexistent_cam"],
|
allowed_cameras=["driveway"],
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
self.assertEqual(result["results"], [])
|
self.assertEqual(result["results"], [])
|
||||||
|
|||||||
57
frigate/test/test_output_ws_auth.py
Normal file
57
frigate/test/test_output_ws_auth.py
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
"""Tests for JSMPEG websocket authorization."""
|
||||||
|
|
||||||
|
import unittest
|
||||||
|
from types import SimpleNamespace
|
||||||
|
|
||||||
|
from frigate.config import FrigateConfig
|
||||||
|
from frigate.output.ws_auth import ws_has_camera_access
|
||||||
|
|
||||||
|
|
||||||
|
class TestWsHasCameraAccess(unittest.TestCase):
|
||||||
|
def setUp(self):
|
||||||
|
self.config = FrigateConfig(
|
||||||
|
mqtt={"host": "mqtt"},
|
||||||
|
auth={"roles": {"limited_user": ["front_door"]}},
|
||||||
|
cameras={
|
||||||
|
"front_door": {
|
||||||
|
"ffmpeg": {
|
||||||
|
"inputs": [
|
||||||
|
{"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"detect": {"height": 1080, "width": 1920, "fps": 5},
|
||||||
|
},
|
||||||
|
"back_door": {
|
||||||
|
"ffmpeg": {
|
||||||
|
"inputs": [
|
||||||
|
{"path": "rtsp://10.0.0.2:554/video", "roles": ["detect"]}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"detect": {"height": 1080, "width": 1920, "fps": 5},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
def _make_ws(self, role: str):
|
||||||
|
return SimpleNamespace(environ={"HTTP_REMOTE_ROLE": role})
|
||||||
|
|
||||||
|
def test_restricted_role_only_gets_allowed_camera(self):
|
||||||
|
ws = self._make_ws("limited_user")
|
||||||
|
self.assertTrue(ws_has_camera_access(ws, "front_door", self.config))
|
||||||
|
self.assertFalse(ws_has_camera_access(ws, "back_door", self.config))
|
||||||
|
|
||||||
|
def test_unrestricted_role_can_access_any_camera(self):
|
||||||
|
ws = self._make_ws("viewer")
|
||||||
|
self.assertTrue(ws_has_camera_access(ws, "front_door", self.config))
|
||||||
|
self.assertTrue(ws_has_camera_access(ws, "back_door", self.config))
|
||||||
|
|
||||||
|
def test_birdseye_requires_unrestricted_access(self):
|
||||||
|
self.assertTrue(
|
||||||
|
ws_has_camera_access(self._make_ws("admin"), "birdseye", self.config)
|
||||||
|
)
|
||||||
|
self.assertTrue(
|
||||||
|
ws_has_camera_access(self._make_ws("viewer"), "birdseye", self.config)
|
||||||
|
)
|
||||||
|
self.assertFalse(
|
||||||
|
ws_has_camera_access(self._make_ws("limited_user"), "birdseye", self.config)
|
||||||
|
)
|
||||||
29
frigate/test/test_webpush_camera_monitoring.py
Normal file
29
frigate/test/test_webpush_camera_monitoring.py
Normal file
@ -0,0 +1,29 @@
|
|||||||
|
"""Tests for camera monitoring notification authorization."""
|
||||||
|
|
||||||
|
import unittest
|
||||||
|
from types import SimpleNamespace
|
||||||
|
from unittest.mock import MagicMock
|
||||||
|
|
||||||
|
from frigate.comms.webpush import WebPushClient
|
||||||
|
|
||||||
|
|
||||||
|
class TestCameraMonitoringNotifications(unittest.TestCase):
|
||||||
|
def test_send_camera_monitoring_filters_by_camera_access(self):
|
||||||
|
client = WebPushClient.__new__(WebPushClient)
|
||||||
|
client.config = SimpleNamespace(
|
||||||
|
cameras={"front_door": SimpleNamespace(friendly_name=None)}
|
||||||
|
)
|
||||||
|
client.web_pushers = {"allowed": [], "denied": []}
|
||||||
|
client.user_cameras = {"allowed": {"front_door"}, "denied": set()}
|
||||||
|
client.check_registrations = MagicMock()
|
||||||
|
client.cleanup_registrations = MagicMock()
|
||||||
|
client.send_push_notification = MagicMock()
|
||||||
|
|
||||||
|
client.send_camera_monitoring(
|
||||||
|
{"camera": "front_door", "message": "Monitoring condition met"}
|
||||||
|
)
|
||||||
|
|
||||||
|
self.assertEqual(client.send_push_notification.call_count, 1)
|
||||||
|
self.assertEqual(
|
||||||
|
client.send_push_notification.call_args.kwargs["user"], "allowed"
|
||||||
|
)
|
||||||
376
testing-scripts/analyze_recording_keyframes.py
Normal file
376
testing-scripts/analyze_recording_keyframes.py
Normal file
@ -0,0 +1,376 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""Analyze keyframe and timestamp structure of Frigate recording segments.
|
||||||
|
|
||||||
|
This is a diagnostic tool for investigating seek precision / GOP behavior on
|
||||||
|
recorded segments. It does not modify anything.
|
||||||
|
|
||||||
|
ffprobe is only available inside the Frigate container, at
|
||||||
|
/usr/lib/ffmpeg/$DEFAULT_FFMPEG_VERSION/bin/ffprobe
|
||||||
|
This script auto-resolves that path from the DEFAULT_FFMPEG_VERSION env var
|
||||||
|
(or falls back to scanning /usr/lib/ffmpeg/*/bin/ffprobe). Pass --ffprobe to
|
||||||
|
override if needed.
|
||||||
|
|
||||||
|
All recording segments on the filesystem are in UTC. The --timestamp flag
|
||||||
|
expects a UTC Unix timestamp.
|
||||||
|
|
||||||
|
Typical use:
|
||||||
|
# Inside the Frigate container (or wherever recordings are mounted)
|
||||||
|
python3 analyze_recording_keyframes.py <camera_name>
|
||||||
|
|
||||||
|
# Analyze 10 most recent segments
|
||||||
|
python3 analyze_recording_keyframes.py <camera_name> --count 10
|
||||||
|
|
||||||
|
# Locate the segment that contains a specific UTC Unix timestamp and
|
||||||
|
# show it plus surrounding segments
|
||||||
|
python3 analyze_recording_keyframes.py <camera> --timestamp 1713471234.567
|
||||||
|
|
||||||
|
# Custom recordings directory
|
||||||
|
python3 analyze_recording_keyframes.py <camera> --recordings-dir /media/frigate/recordings
|
||||||
|
|
||||||
|
# Override the ffprobe path explicitly
|
||||||
|
python3 analyze_recording_keyframes.py <camera> --ffprobe /usr/lib/ffmpeg/7.0/bin/ffprobe
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import datetime
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
from statistics import mean, median, stdev
|
||||||
|
|
||||||
|
|
||||||
|
def resolve_ffprobe_path(override: str | None) -> str:
|
||||||
|
"""Resolve the ffprobe binary path.
|
||||||
|
|
||||||
|
Inside the Frigate container, ffprobe lives at
|
||||||
|
/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffprobe — the exact version
|
||||||
|
depends on the image build and is exposed as an env var.
|
||||||
|
"""
|
||||||
|
if override:
|
||||||
|
return override
|
||||||
|
version = os.environ.get("DEFAULT_FFMPEG_VERSION", "")
|
||||||
|
if version:
|
||||||
|
path = f"/usr/lib/ffmpeg/{version}/bin/ffprobe"
|
||||||
|
if Path(path).is_file():
|
||||||
|
return path
|
||||||
|
# Fall back to scanning the Frigate ffmpeg install root.
|
||||||
|
for candidate in sorted(Path("/usr/lib/ffmpeg").glob("*/bin/ffprobe")):
|
||||||
|
if candidate.is_file():
|
||||||
|
return str(candidate)
|
||||||
|
print(
|
||||||
|
"Could not locate ffprobe. Pass --ffprobe <path> or set "
|
||||||
|
"DEFAULT_FFMPEG_VERSION.",
|
||||||
|
file=sys.stderr,
|
||||||
|
)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
def find_recent_segments(recordings_dir: Path, camera: str, count: int) -> list[Path]:
|
||||||
|
"""Return the N most recent .mp4 segments for the given camera.
|
||||||
|
|
||||||
|
Expected layout: <recordings_dir>/<YYYY-MM-DD>/<HH>/<camera>/<MM>.<SS>.mp4
|
||||||
|
"""
|
||||||
|
pattern = f"*/*/{camera}/*.mp4"
|
||||||
|
segments = sorted(recordings_dir.glob(pattern))
|
||||||
|
return segments[-count:]
|
||||||
|
|
||||||
|
|
||||||
|
def find_segments_near_timestamp(
|
||||||
|
recordings_dir: Path, camera: str, target_ts: float, count: int
|
||||||
|
) -> tuple[list[Path], Path | None]:
|
||||||
|
"""Return `count` segments centered on the one containing `target_ts`.
|
||||||
|
|
||||||
|
Also returns the specific segment that should contain the timestamp, so
|
||||||
|
callers can highlight it in output.
|
||||||
|
"""
|
||||||
|
pattern = f"*/*/{camera}/*.mp4"
|
||||||
|
with_ts: list[tuple[float, Path]] = []
|
||||||
|
for seg in sorted(recordings_dir.glob(pattern)):
|
||||||
|
ts = filename_to_timestamp(seg)
|
||||||
|
if ts is not None:
|
||||||
|
with_ts.append((ts, seg))
|
||||||
|
|
||||||
|
if not with_ts:
|
||||||
|
return [], None
|
||||||
|
|
||||||
|
# Largest filename_ts that is <= target_ts — that's the segment that
|
||||||
|
# should contain the timestamp (Frigate catalogs segments by filename).
|
||||||
|
target_idx = -1
|
||||||
|
for i, (ts, _) in enumerate(with_ts):
|
||||||
|
if ts <= target_ts:
|
||||||
|
target_idx = i
|
||||||
|
else:
|
||||||
|
break
|
||||||
|
|
||||||
|
if target_idx < 0:
|
||||||
|
# target_ts is before the earliest segment we have — just return the
|
||||||
|
# first `count` segments so the user can see what's available.
|
||||||
|
window = with_ts[:count]
|
||||||
|
return [seg for _, seg in window], None
|
||||||
|
|
||||||
|
half = count // 2
|
||||||
|
start = max(0, target_idx - half)
|
||||||
|
end = min(len(with_ts), start + count)
|
||||||
|
start = max(0, end - count)
|
||||||
|
|
||||||
|
window = with_ts[start:end]
|
||||||
|
return [seg for _, seg in window], with_ts[target_idx][1]
|
||||||
|
|
||||||
|
|
||||||
|
def filename_to_timestamp(segment: Path) -> float | None:
|
||||||
|
"""Parse the wall-clock time from Frigate's segment path layout."""
|
||||||
|
try:
|
||||||
|
date = segment.parent.parent.parent.name # YYYY-MM-DD
|
||||||
|
hour = segment.parent.parent.name # HH
|
||||||
|
mm_ss = segment.stem # MM.SS
|
||||||
|
minute, second = mm_ss.split(".")
|
||||||
|
dt = datetime.datetime.strptime(
|
||||||
|
f"{date} {hour}:{minute}:{second}",
|
||||||
|
"%Y-%m-%d %H:%M:%S",
|
||||||
|
).replace(tzinfo=datetime.timezone.utc)
|
||||||
|
return dt.timestamp()
|
||||||
|
except (ValueError, IndexError):
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def run_ffprobe(ffprobe: str, args: list[str]) -> dict:
|
||||||
|
"""Run ffprobe and return parsed JSON, or empty dict on failure."""
|
||||||
|
result = subprocess.run(
|
||||||
|
[ffprobe, "-v", "error", *args, "-of", "json"],
|
||||||
|
capture_output=True,
|
||||||
|
text=True,
|
||||||
|
check=False,
|
||||||
|
)
|
||||||
|
if result.returncode != 0:
|
||||||
|
print(f" ffprobe error: {result.stderr.strip()}", file=sys.stderr)
|
||||||
|
return {}
|
||||||
|
try:
|
||||||
|
return json.loads(result.stdout)
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
|
def get_format_info(ffprobe: str, segment: Path) -> tuple[dict, dict]:
|
||||||
|
"""Return (format_dict, stream_dict) for the first video stream."""
|
||||||
|
data = run_ffprobe(
|
||||||
|
ffprobe,
|
||||||
|
[
|
||||||
|
"-show_entries",
|
||||||
|
"format=duration,start_time",
|
||||||
|
"-show_entries",
|
||||||
|
"stream=codec_name,profile,r_frame_rate,width,height",
|
||||||
|
"-select_streams",
|
||||||
|
"v:0",
|
||||||
|
str(segment),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
fmt = data.get("format", {})
|
||||||
|
streams = data.get("streams") or [{}]
|
||||||
|
return fmt, streams[0]
|
||||||
|
|
||||||
|
|
||||||
|
def get_video_packets(ffprobe: str, segment: Path) -> list[dict]:
|
||||||
|
"""Return video packets with pts_time and flags."""
|
||||||
|
data = run_ffprobe(
|
||||||
|
ffprobe,
|
||||||
|
[
|
||||||
|
"-select_streams",
|
||||||
|
"v",
|
||||||
|
"-show_entries",
|
||||||
|
"packet=pts_time,dts_time,flags",
|
||||||
|
str(segment),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
return data.get("packets", [])
|
||||||
|
|
||||||
|
|
||||||
|
def analyze(ffprobe: str, segment: Path, highlight: bool = False) -> None:
|
||||||
|
marker = " <-- contains target timestamp" if highlight else ""
|
||||||
|
print(f"\n=== {segment} ==={marker}")
|
||||||
|
|
||||||
|
fmt, stream = get_format_info(ffprobe, segment)
|
||||||
|
duration = float(fmt.get("duration", 0) or 0)
|
||||||
|
start_time = float(fmt.get("start_time", 0) or 0)
|
||||||
|
codec = stream.get("codec_name", "?")
|
||||||
|
profile = stream.get("profile", "?")
|
||||||
|
width = stream.get("width", "?")
|
||||||
|
height = stream.get("height", "?")
|
||||||
|
fps = stream.get("r_frame_rate", "?/1")
|
||||||
|
|
||||||
|
filename_ts = filename_to_timestamp(segment)
|
||||||
|
filename_iso = (
|
||||||
|
datetime.datetime.fromtimestamp(
|
||||||
|
filename_ts, tz=datetime.timezone.utc
|
||||||
|
).isoformat()
|
||||||
|
if filename_ts is not None
|
||||||
|
else "?"
|
||||||
|
)
|
||||||
|
|
||||||
|
print(f" Codec: {codec} ({profile}) {width}x{height} {fps}")
|
||||||
|
print(f" Filename time: {filename_ts} ({filename_iso})")
|
||||||
|
print(f" Format duration: {duration:.3f}s")
|
||||||
|
print(f" Format start: {start_time:.3f}s (PTS offset of first packet)")
|
||||||
|
|
||||||
|
packets = get_video_packets(ffprobe, segment)
|
||||||
|
if not packets:
|
||||||
|
print(" (no video packets)")
|
||||||
|
return
|
||||||
|
|
||||||
|
keyframe_times: list[float] = []
|
||||||
|
first_pts: float | None = None
|
||||||
|
last_pts: float | None = None
|
||||||
|
|
||||||
|
for pkt in packets:
|
||||||
|
pts_str = pkt.get("pts_time")
|
||||||
|
if pts_str is None or pts_str == "N/A":
|
||||||
|
continue
|
||||||
|
pts = float(pts_str)
|
||||||
|
if first_pts is None:
|
||||||
|
first_pts = pts
|
||||||
|
last_pts = pts
|
||||||
|
if "K" in pkt.get("flags", ""):
|
||||||
|
keyframe_times.append(pts)
|
||||||
|
|
||||||
|
total_packets = len(packets)
|
||||||
|
kf_count = len(keyframe_times)
|
||||||
|
|
||||||
|
print(f" Video packets: {total_packets}")
|
||||||
|
print(f" Keyframes: {kf_count}")
|
||||||
|
if first_pts is not None and last_pts is not None:
|
||||||
|
print(
|
||||||
|
f" Packet PTS: first={first_pts:.3f}s last={last_pts:.3f}s "
|
||||||
|
f"span={last_pts - first_pts:.3f}s"
|
||||||
|
)
|
||||||
|
|
||||||
|
if keyframe_times:
|
||||||
|
print(
|
||||||
|
f" Keyframe PTS: first={keyframe_times[0]:.3f}s "
|
||||||
|
f"last={keyframe_times[-1]:.3f}s"
|
||||||
|
)
|
||||||
|
formatted = ", ".join(f"{t:.3f}" for t in keyframe_times)
|
||||||
|
print(f" Keyframe times: [{formatted}]")
|
||||||
|
|
||||||
|
if len(keyframe_times) >= 2:
|
||||||
|
gaps = [b - a for a, b in zip(keyframe_times, keyframe_times[1:])]
|
||||||
|
avg_fps_estimate = (
|
||||||
|
total_packets / (last_pts - first_pts)
|
||||||
|
if last_pts and first_pts is not None and last_pts > first_pts
|
||||||
|
else 0
|
||||||
|
)
|
||||||
|
print(
|
||||||
|
f" GOP gaps (s): min={min(gaps):.3f} max={max(gaps):.3f} "
|
||||||
|
f"mean={mean(gaps):.3f} median={median(gaps):.3f}"
|
||||||
|
)
|
||||||
|
if len(gaps) > 1:
|
||||||
|
print(f" stdev={stdev(gaps):.3f}")
|
||||||
|
print(
|
||||||
|
f" Est. mean GOP: ~{mean(gaps) * avg_fps_estimate:.1f} frames"
|
||||||
|
if avg_fps_estimate
|
||||||
|
else ""
|
||||||
|
)
|
||||||
|
if max(gaps) > 5:
|
||||||
|
print(
|
||||||
|
" !! Max GOP > 5s — consistent with adaptive/smart codec "
|
||||||
|
"(even if 'Smart Codec' is off in the UI, some cameras still "
|
||||||
|
"produce irregular GOPs under specific encoder profiles)"
|
||||||
|
)
|
||||||
|
elif kf_count == 1:
|
||||||
|
print(" !! Only one keyframe in segment — very long GOP")
|
||||||
|
|
||||||
|
# Report how well filename time aligns with first-packet PTS.
|
||||||
|
# (Filename time is what Frigate uses as recording.start_time in the DB.)
|
||||||
|
if filename_ts is not None and first_pts is not None:
|
||||||
|
print(
|
||||||
|
f" Notes: first packet PTS is {first_pts:.3f}s into the file; "
|
||||||
|
f"Frigate treats filename time as PTS=0 for seek math."
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> None:
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description=__doc__,
|
||||||
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||||
|
)
|
||||||
|
parser.add_argument("camera", help="Camera name (matches the recordings subfolder)")
|
||||||
|
parser.add_argument(
|
||||||
|
"--count",
|
||||||
|
type=int,
|
||||||
|
default=5,
|
||||||
|
help="Number of most recent segments to analyze (default: 5)",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--recordings-dir",
|
||||||
|
default="/media/frigate/recordings",
|
||||||
|
help="Path to the recordings directory (default: /media/frigate/recordings)",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--ffprobe",
|
||||||
|
default=None,
|
||||||
|
help=(
|
||||||
|
"Full path to the ffprobe binary. Defaults to the Frigate-bundled "
|
||||||
|
"binary at /usr/lib/ffmpeg/$DEFAULT_FFMPEG_VERSION/bin/ffprobe."
|
||||||
|
),
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--timestamp",
|
||||||
|
type=float,
|
||||||
|
default=None,
|
||||||
|
help=(
|
||||||
|
"Unix timestamp (UTC seconds, decimals allowed) to locate. The "
|
||||||
|
"script finds the segment that should contain this time and "
|
||||||
|
"analyzes it plus surrounding segments (count controls the "
|
||||||
|
"window). All on-disk segments are stored in UTC, so pass a UTC "
|
||||||
|
"Unix timestamp."
|
||||||
|
),
|
||||||
|
)
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
ffprobe = resolve_ffprobe_path(args.ffprobe)
|
||||||
|
|
||||||
|
recordings_dir = Path(args.recordings_dir)
|
||||||
|
if not recordings_dir.is_dir():
|
||||||
|
print(
|
||||||
|
f"Recordings directory not found: {recordings_dir}",
|
||||||
|
file=sys.stderr,
|
||||||
|
)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
target_segment: Path | None = None
|
||||||
|
if args.timestamp is not None:
|
||||||
|
segments, target_segment = find_segments_near_timestamp(
|
||||||
|
recordings_dir, args.camera, args.timestamp, args.count
|
||||||
|
)
|
||||||
|
target_iso = datetime.datetime.fromtimestamp(
|
||||||
|
args.timestamp, tz=datetime.timezone.utc
|
||||||
|
).isoformat()
|
||||||
|
mode = f"around timestamp {args.timestamp} ({target_iso})"
|
||||||
|
else:
|
||||||
|
segments = find_recent_segments(recordings_dir, args.camera, args.count)
|
||||||
|
mode = "most recent"
|
||||||
|
|
||||||
|
if not segments:
|
||||||
|
print(
|
||||||
|
f"No segments found for camera '{args.camera}' under {recordings_dir}",
|
||||||
|
file=sys.stderr,
|
||||||
|
)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
if args.timestamp is not None and target_segment is None:
|
||||||
|
print(
|
||||||
|
f"!! Target timestamp {args.timestamp} is before the earliest "
|
||||||
|
f"segment on disk; showing the earliest available segments instead.",
|
||||||
|
file=sys.stderr,
|
||||||
|
)
|
||||||
|
|
||||||
|
print(
|
||||||
|
f"Analyzing {len(segments)} {mode} segment(s) for camera "
|
||||||
|
f"'{args.camera}' under {recordings_dir} (ffprobe: {ffprobe})"
|
||||||
|
)
|
||||||
|
for segment in segments:
|
||||||
|
analyze(ffprobe, segment, highlight=(segment == target_segment))
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
783
testing-scripts/face_dataset.py
Normal file
783
testing-scripts/face_dataset.py
Normal file
@ -0,0 +1,783 @@
|
|||||||
|
"""
|
||||||
|
Face recognition investigation script.
|
||||||
|
|
||||||
|
Standalone replica of Frigate's ArcFace pipeline (see
|
||||||
|
frigate/data_processing/common/face/model.py and
|
||||||
|
frigate/embeddings/onnx/face_embedding.py) for analyzing a face collection
|
||||||
|
outside the running service. Useful for:
|
||||||
|
|
||||||
|
- Diagnosing why a person's collection produces false positives
|
||||||
|
- Finding outlier/contaminating training images
|
||||||
|
- Inspecting the effect of the shipped vector-wise outlier filter
|
||||||
|
|
||||||
|
Layout:
|
||||||
|
- Core pipeline: LandmarkAligner, ArcFaceEmbedder, arcface_preprocess,
|
||||||
|
similarity_to_confidence, blur_reduction — all mirroring the production
|
||||||
|
code exactly
|
||||||
|
- Default run: summarize positive and negative sets against a baseline
|
||||||
|
trim_mean class representation
|
||||||
|
- Optional diagnostics (flags): vector-outlier filter behavior, degenerate
|
||||||
|
"tiny crop" embedding clustering, and multi-identity contamination
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
python3 face_investigate.py \\
|
||||||
|
--positive <positive_folder> \\
|
||||||
|
--negative <negative_folder> \\
|
||||||
|
[--model-cache /path/to/model_cache] \\
|
||||||
|
[--vector-outlier] [--degenerate] [--contamination]
|
||||||
|
|
||||||
|
The positive folder should contain training images for a single identity
|
||||||
|
(same layout as FACE_DIR/<name>/*.webp). The negative folder should contain
|
||||||
|
runtime crops to test against — a mix of true matches and misfires.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from typing import Iterable
|
||||||
|
|
||||||
|
import cv2
|
||||||
|
import numpy as np
|
||||||
|
import onnxruntime as ort
|
||||||
|
from PIL import Image
|
||||||
|
from scipy import stats
|
||||||
|
|
||||||
|
ARCFACE_INPUT_SIZE = 112
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Replicated Frigate pipeline
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def _process_image_frigate(image: np.ndarray) -> Image.Image:
|
||||||
|
"""Mirror BaseEmbedding._process_image for an ndarray input.
|
||||||
|
|
||||||
|
NOTE: Frigate passes the output of `cv2.imread` (BGR) directly in. PIL's
|
||||||
|
`Image.fromarray` does NOT reorder channels, so the embedder effectively
|
||||||
|
receives a BGR-ordered tensor. We replicate that faithfully here. (Tested
|
||||||
|
— swapping to RGB produces near-identical embeddings; this model is
|
||||||
|
robust to channel order.)
|
||||||
|
"""
|
||||||
|
return Image.fromarray(image)
|
||||||
|
|
||||||
|
|
||||||
|
def arcface_preprocess(image_bgr: np.ndarray) -> np.ndarray:
    """Mirror ArcfaceEmbedding._preprocess_inputs.

    Steps replicated from production: aspect-preserving resize so the long
    side is 112 (short side snapped down to a multiple of 4), center-paste
    onto a black 112x112 canvas, scale to [-1, 1], then HWC -> NCHW.
    NOTE(review): assumes a 3-channel input; a grayscale (2-D) array would
    fail the `og.shape` unpack below — confirm callers always pass BGR.
    """
    pil = _process_image_frigate(image_bgr)

    width, height = pil.size
    if width != ARCFACE_INPUT_SIZE or height != ARCFACE_INPUT_SIZE:
        if width > height:
            # Long side becomes 112; `// 4 * 4` floors the short side to a
            # multiple of 4, matching the production resize exactly.
            new_height = int(((height / width) * ARCFACE_INPUT_SIZE) // 4 * 4)
            pil = pil.resize((ARCFACE_INPUT_SIZE, new_height))
        else:
            new_width = int(((width / height) * ARCFACE_INPUT_SIZE) // 4 * 4)
            pil = pil.resize((new_width, ARCFACE_INPUT_SIZE))

    og = np.array(pil).astype(np.float32)
    og_h, og_w, channels = og.shape

    # Letterbox: paste the resized crop centered on a zero (black) canvas.
    frame = np.zeros(
        (ARCFACE_INPUT_SIZE, ARCFACE_INPUT_SIZE, channels), dtype=np.float32
    )
    x_center = (ARCFACE_INPUT_SIZE - og_w) // 2
    y_center = (ARCFACE_INPUT_SIZE - og_h) // 2
    frame[y_center : y_center + og_h, x_center : x_center + og_w] = og

    # Normalize [0, 255] -> [-1, 1], then HWC -> CHW and add batch dim.
    frame = (frame / 127.5) - 1.0
    frame = np.transpose(frame, (2, 0, 1))
    frame = np.expand_dims(frame, axis=0)
    return frame
|
||||||
|
|
||||||
|
|
||||||
|
class LandmarkAligner:
    """Mirror FaceRecognizer.align_face.

    Uses OpenCV's Facemark LBF model to locate 68-point landmarks, then
    rotates/scales the crop so the eyes land at fixed positions. The math
    below intentionally matches production, including the `- 180` angle
    convention.
    """

    def __init__(self, landmark_model_path: str):
        # Fail fast with a clear path rather than an opaque cv2 load error.
        if not os.path.exists(landmark_model_path):
            raise FileNotFoundError(landmark_model_path)
        self.detector = cv2.face.createFacemarkLBF()
        self.detector.loadModel(landmark_model_path)

    def align(
        self, image: np.ndarray, out_w: int, out_h: int
    ) -> tuple[np.ndarray, dict]:
        """Return (aligned crop, diagnostics dict) for one face image.

        The whole image is treated as the face ROI (the fit box spans the
        full frame). Diagnostics carry the rotation angle, inter-eye pixel
        distance, warp scale, and the raw landmarks.
        """
        # Facemark wants grayscale; pass through if already single-channel.
        land_image = (
            cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) if image.ndim == 3 else image
        )
        _, lands = self.detector.fit(
            land_image, np.array([(0, 0, land_image.shape[1], land_image.shape[0])])
        )
        landmarks = lands[0][0]

        # 68-point convention: 36-41 right eye, 42-47 left eye.
        leftEyePts = landmarks[42:48]
        rightEyePts = landmarks[36:42]
        leftEyeCenter = leftEyePts.mean(axis=0).astype("int")
        rightEyeCenter = rightEyePts.mean(axis=0).astype("int")

        dY = rightEyeCenter[1] - leftEyeCenter[1]
        dX = rightEyeCenter[0] - leftEyeCenter[0]
        # `- 180` matches the production convention (right-to-left vector).
        angle = np.degrees(np.arctan2(dY, dX)) - 180
        dist = float(np.sqrt((dX**2) + (dY**2)))

        # Target: eyes at 35% / 65% of the output width.
        desiredRightEyeX = 1.0 - 0.35
        desiredDist = (desiredRightEyeX - 0.35) * out_w
        scale = desiredDist / dist if dist > 0 else 1.0

        eyesCenter = (
            int((leftEyeCenter[0] + rightEyeCenter[0]) // 2),
            int((leftEyeCenter[1] + rightEyeCenter[1]) // 2),
        )
        M = cv2.getRotationMatrix2D(eyesCenter, angle, scale)
        # Translate so the eye midpoint ends up at (50% width, 35% height).
        tX = out_w * 0.5
        tY = out_h * 0.35
        M[0, 2] += tX - eyesCenter[0]
        M[1, 2] += tY - eyesCenter[1]

        aligned = cv2.warpAffine(
            image, M, (out_w, out_h), flags=cv2.INTER_CUBIC
        )
        info = dict(
            angle=float(angle),
            eye_dist_px=dist,
            scale=float(scale),
            landmarks=landmarks,
        )
        return aligned, info
|
||||||
|
|
||||||
|
|
||||||
|
class ArcFaceEmbedder:
    """CPU-only ONNX Runtime wrapper around the ArcFace embedding model."""

    def __init__(self, model_path: str):
        """Load the ONNX model and cache its input tensor name."""
        self.session = ort.InferenceSession(
            model_path, providers=["CPUExecutionProvider"]
        )
        self.input_name = self.session.get_inputs()[0].name

    def embed(self, image_bgr: np.ndarray) -> np.ndarray:
        """Preprocess one BGR crop and return its embedding vector."""
        blob = arcface_preprocess(image_bgr)
        outputs = self.session.run(None, {self.input_name: blob})
        return outputs[0].squeeze()
|
||||||
|
|
||||||
|
|
||||||
|
def similarity_to_confidence(
    cos_sim: float,
    median: float = 0.3,
    range_width: float = 0.6,
    slope_factor: float = 12,
) -> float:
    """Map a cosine similarity onto (0, 1) with a logistic curve.

    Confidence is exactly 0.5 at *median*; the curve's steepness is
    slope_factor / range_width (defaults give a slope of 20).
    """
    steepness = slope_factor / range_width
    shifted = cos_sim - median
    return float(1.0 / (1.0 + np.exp(-steepness * shifted)))
|
||||||
|
|
||||||
|
|
||||||
|
def laplacian_variance(image: np.ndarray) -> float:
    """Variance of the Laplacian — the usual sharpness/blur proxy."""
    lap = cv2.Laplacian(image, cv2.CV_64F)
    return float(lap.var())
|
||||||
|
|
||||||
|
|
||||||
|
def blur_reduction(variance: float) -> float:
    """Confidence penalty for a blurry crop (lower variance = blurrier).

    Stepped table: crops under a Laplacian variance of 250 are penalized
    progressively; sharp crops (>= 250) get no reduction.
    """
    # (exclusive upper bound, penalty), checked in ascending order.
    steps = ((120, 0.06), (160, 0.04), (200, 0.02), (250, 0.01))
    for bound, penalty in steps:
        if variance < bound:
            return penalty
    return 0.0
|
||||||
|
|
||||||
|
|
||||||
|
def cosine(a: np.ndarray, b: np.ndarray) -> float:
    """Cosine similarity of two vectors; 0.0 when either has zero norm."""
    norm_product = np.linalg.norm(a) * np.linalg.norm(b)
    return float(np.dot(a, b) / norm_product) if norm_product != 0 else 0.0
|
||||||
|
|
||||||
|
|
||||||
|
def l2(v: np.ndarray) -> np.ndarray:
    """Scale *v* to (approximately) unit length.

    The 1e-9 epsilon keeps an all-zero vector from dividing by zero (it
    simply maps to the zero vector).
    """
    return np.divide(v, np.linalg.norm(v) + 1e-9)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Sample loading
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class FaceSample:
    """One face crop plus everything derived from it while loading."""

    # Path to the source image file.
    path: str
    # (width, height) of the original crop in pixels.
    shape: tuple[int, int]
    # Raw (un-normalized) ArcFace embedding of the aligned crop.
    embedding: np.ndarray
    # Laplacian variance of the ORIGINAL (pre-alignment) crop — sharpness proxy.
    blur_var: float
    # Alignment diagnostics from LandmarkAligner.align
    # (keys: angle, eye_dist_px, scale, landmarks).
    align_info: dict
|
||||||
|
|
||||||
|
|
||||||
|
def load_folder(
    folder: str, aligner: LandmarkAligner, embedder: ArcFaceEmbedder
) -> list[FaceSample]:
    """Align and embed every readable image in *folder*.

    Hidden files (dotfiles) and non-files are skipped; unreadable images
    are reported and skipped. Entries are processed in sorted name order.
    """
    loaded: list[FaceSample] = []
    for entry in sorted(os.listdir(folder)):
        if entry.startswith("."):
            continue
        full_path = os.path.join(folder, entry)
        if not os.path.isfile(full_path):
            continue
        img = cv2.imread(full_path)
        if img is None:
            print(f" [skip unreadable] {entry}")
            continue
        # Align within the crop's own dimensions, then embed the result.
        aligned, info = aligner.align(img, img.shape[1], img.shape[0])
        loaded.append(
            FaceSample(
                path=full_path,
                shape=(img.shape[1], img.shape[0]),
                embedding=embedder.embed(aligned),
                blur_var=laplacian_variance(img),
                align_info=info,
            )
        )
    return loaded
|
||||||
|
|
||||||
|
|
||||||
|
def trimmed_mean(embs: Iterable[np.ndarray], trim: float = 0.15) -> np.ndarray:
    """Element-wise trimmed mean of a collection of embedding vectors."""
    stacked = np.stack(tuple(embs), axis=0)
    return stats.trim_mean(stacked, trim, axis=0)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Baseline analyses (always run)
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def summarize_positive(samples: list[FaceSample], mean_emb: np.ndarray) -> None:
    """Summary of training set: per-sample cos to class mean, intra-class stats.

    Outliers with cos far below the rest are likely degrading the mean —
    they'd be the first candidates the shipped vector-outlier filter drops.
    """
    print("\n" + "=" * 78)
    print(f"POSITIVE SET ANALYSIS ({len(samples)} images)")
    print("=" * 78)

    # One row per sample: similarity/confidence plus quality diagnostics.
    rows = []
    for s in samples:
        cs = cosine(s.embedding, mean_emb)
        conf = similarity_to_confidence(cs)
        red = blur_reduction(s.blur_var)
        rows.append(
            dict(
                name=os.path.basename(s.path),
                shape=f"{s.shape[0]}x{s.shape[1]}",
                eye_px=s.align_info["eye_dist_px"],
                # align() subtracts 180 from the eye angle; +180 recovers
                # the raw value for display — TODO confirm intent.
                angle=s.align_info["angle"] + 180,
                blur=s.blur_var,
                cos=cs,
                conf=conf,
                red=red,
                # Confidence after the runtime blur penalty, floored at 0.
                adj_conf=max(0.0, conf - red),
            )
        )

    # Ascending by similarity so the worst matches come first.
    rows.sort(key=lambda r: r["cos"])
    sims = np.array([r["cos"] for r in rows])
    print(
        f"\nCosine-to-trimmed-mean: mean={sims.mean():.3f} std={sims.std():.3f} "
        f"min={sims.min():.3f} max={sims.max():.3f}"
    )

    print("\n-- Worst matches (bottom 10, most likely hurting the mean) --")
    print(
        f"{'cos':>6} {'conf':>6} {'blur':>7} {'eyes':>6} "
        f"{'angle':>6} {'shape':>9} name"
    )
    for r in rows[:10]:
        print(
            f"{r['cos']:6.3f} {r['conf']:6.3f} {r['blur']:7.1f} "
            f"{r['eye_px']:6.1f} {r['angle']:6.1f} {r['shape']:>9} {r['name']}"
        )

    print("\n-- Best matches (top 5) --")
    for r in rows[-5:][::-1]:
        print(
            f"{r['cos']:6.3f} {r['conf']:6.3f} {r['blur']:7.1f} "
            f"{r['eye_px']:6.1f} {r['angle']:6.1f} {r['shape']:>9} {r['name']}"
        )

    # Pairwise analysis — flags embeddings poorly correlated with the rest
    print("\n-- Pairwise intra-class similarity (mean cos vs. other positives) --")
    embs = np.stack([s.embedding for s in samples], axis=0)
    norms = embs / (np.linalg.norm(embs, axis=1, keepdims=True) + 1e-9)
    sim_matrix = norms @ norms.T
    # NaN the diagonal so self-similarity doesn't inflate the means.
    np.fill_diagonal(sim_matrix, np.nan)
    mean_pairwise = np.nanmean(sim_matrix, axis=1)
    names = [os.path.basename(s.path) for s in samples]
    ordered = sorted(zip(names, mean_pairwise), key=lambda t: t[1])
    print(f"{'mean_cos':>9} name")
    for nm, mp in ordered[:10]:
        print(f"{mp:9.3f} {nm}")
    print(f"\n overall mean pairwise cos: {np.nanmean(sim_matrix):.3f}")
    print(f" median pairwise cos: {np.nanmedian(sim_matrix):.3f}")
|
||||||
|
|
||||||
|
|
||||||
|
def summarize_negative(
    neg_samples: list[FaceSample],
    mean_emb: np.ndarray,
    pos_samples: list[FaceSample],
) -> None:
    """Score each negative against the class mean, then show its top-3
    nearest positives. High-scoring negatives that match specific outlier
    positives hint at training-set contamination.
    """
    print("\n" + "=" * 78)
    print(f"NEGATIVE SET ANALYSIS ({len(neg_samples)} images)")
    print("=" * 78)
    print(
        f"\n{'cos':>6} {'conf':>6} {'red':>5} {'adj':>5} "
        f"{'blur':>7} {'eyes':>6} {'shape':>9} name"
    )
    for s in neg_samples:
        cs = cosine(s.embedding, mean_emb)
        conf = similarity_to_confidence(cs)
        # Same blur penalty the runtime pipeline would subtract.
        red = blur_reduction(s.blur_var)
        print(
            f"{cs:6.3f} {conf:6.3f} {red:5.2f} {max(0, conf - red):5.2f} "
            f"{s.blur_var:7.1f} {s.align_info['eye_dist_px']:6.1f} "
            f"{s.shape[0]}x{s.shape[1]:<5} {os.path.basename(s.path)}"
        )

    print("\n-- For each negative, top-3 most similar positives --")
    # L2-normalize once so each negative only needs a matrix-vector product.
    pos_embs = np.stack([p.embedding for p in pos_samples])
    pos_norm = pos_embs / (np.linalg.norm(pos_embs, axis=1, keepdims=True) + 1e-9)
    for s in neg_samples:
        v = s.embedding / (np.linalg.norm(s.embedding) + 1e-9)
        sims = pos_norm @ v
        # argsort of negated sims = descending similarity.
        idx = np.argsort(-sims)[:3]
        print(f"\n {os.path.basename(s.path)}:")
        for i in idx:
            print(
                f" {sims[i]:6.3f} {os.path.basename(pos_samples[i].path)} "
                f"blur={pos_samples[i].blur_var:.1f} "
                f"eyes={pos_samples[i].align_info['eye_dist_px']:.1f}"
            )
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Optional diagnostics
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def vector_outlier_test(
    pos: list[FaceSample], neg: list[FaceSample], base_trim: float = 0.15
) -> None:
    """Measure the shipped vector-wise outlier filter at various thresholds.

    The production filter at `build_class_mean` in
    frigate/data_processing/common/face/model.py uses T=0.30. This test
    sweeps T so you can see which images would be dropped on a new collection
    and how that affects the negative scores.

    Algorithm: iteratively recompute trim_mean on the kept set, drop any
    embedding with cos < T to that mean, repeat until converged. Floor at
    50% of the collection to avoid collapse.
    """
    print("\n" + "=" * 78)
    print("VECTOR-WISE OUTLIER PRE-FILTER — layered on trim_mean(0.15)")
    print("=" * 78)

    all_embs = np.stack([s.embedding for s in pos])

    def iterative_mean(
        embs: np.ndarray,
        threshold: float,
        iters: int = 3,
        min_keep_frac: float = 0.5,
    ) -> tuple[np.ndarray, np.ndarray]:
        # Returns (final trimmed mean, boolean keep-mask over embs).
        keep = np.ones(len(embs), dtype=bool)
        # Never keep fewer than 5 images or min_keep_frac of the set.
        floor = max(5, int(np.ceil(min_keep_frac * len(embs))))
        for _ in range(iters):
            m = stats.trim_mean(embs[keep], base_trim, axis=0)
            m_norm = m / (np.linalg.norm(m) + 1e-9)
            e_norms = embs / (np.linalg.norm(embs, axis=1, keepdims=True) + 1e-9)
            cos_to_mean = e_norms @ m_norm
            new_keep = cos_to_mean >= threshold
            if new_keep.sum() < floor:
                # Threshold too aggressive: fall back to the top-`floor`
                # most similar embeddings instead of collapsing.
                top_idx = np.argsort(-cos_to_mean)[:floor]
                new_keep = np.zeros_like(new_keep)
                new_keep[top_idx] = True
            if np.array_equal(new_keep, keep):
                break
            keep = new_keep
        final = stats.trim_mean(embs[keep], base_trim, axis=0)
        return final, keep

    # Distribution of similarity to the unfiltered provisional mean.
    provisional = stats.trim_mean(all_embs, base_trim, axis=0)
    p_norm = provisional / (np.linalg.norm(provisional) + 1e-9)
    e_norms_all = all_embs / (np.linalg.norm(all_embs, axis=1, keepdims=True) + 1e-9)
    cos_to_prov = e_norms_all @ p_norm
    print("\nDistribution of cos(positive, provisional trim_mean):")
    print(
        f" min={cos_to_prov.min():.3f} p10={np.percentile(cos_to_prov, 10):.3f} "
        f"p25={np.percentile(cos_to_prov, 25):.3f} "
        f"median={np.median(cos_to_prov):.3f} "
        f"p75={np.percentile(cos_to_prov, 75):.3f} max={cos_to_prov.max():.3f}"
    )

    # Baseline: plain trim_mean with no outlier filtering.
    baseline_mean = stats.trim_mean(all_embs, base_trim, axis=0)
    baseline_pos = np.array([cosine(p.embedding, baseline_mean) for p in pos])
    baseline_neg = (
        np.array([cosine(n.embedding, baseline_mean) for n in neg])
        if neg
        else np.array([])
    )
    baseline_conf_neg = np.array(
        [similarity_to_confidence(c) for c in baseline_neg]
    )

    print(
        f"\nBaseline (trim_mean only, {len(pos)} images):"
        f"\n pos cos min={baseline_pos.min():.3f} "
        f"mean={baseline_pos.mean():.3f} max={baseline_pos.max():.3f}"
    )
    if len(neg):
        print(
            f" neg cos min={baseline_neg.min():.3f} "
            f"mean={baseline_neg.mean():.3f} max={baseline_neg.max():.3f}"
        )
        print(
            f" neg conf min={baseline_conf_neg.min():.3f} "
            f"mean={baseline_conf_neg.mean():.3f} max={baseline_conf_neg.max():.3f}"
        )
        # Positive margin = the classes are separable at some threshold.
        print(
            f" margin (pos.min - neg.max): "
            f"{baseline_pos.min() - baseline_neg.max():+.3f}"
        )

    print("\nIterative (refine mean → drop vectors with cos<T → repeat):")
    print(
        f"\n{'T':>5} {'kept':>6} {'pos min':>7} {'pos mean':>8} "
        f"{'neg max':>7} {'neg mean':>8} {'neg conf.max':>12} {'margin':>7}"
    )
    # Sweep around the shipped T=0.30.
    for T in [0.15, 0.20, 0.25, 0.28, 0.30, 0.33, 0.36, 0.40]:
        mean, keep = iterative_mean(all_embs, T)
        pos_sims = np.array([cosine(p.embedding, mean) for p in pos])
        neg_sims = (
            np.array([cosine(n.embedding, mean) for n in neg])
            if neg
            else np.array([])
        )
        neg_conf = np.array([similarity_to_confidence(c) for c in neg_sims])
        margin = pos_sims.min() - (neg_sims.max() if len(neg_sims) else 0)
        print(
            f"{T:5.2f} {int(keep.sum()):>3}/{len(pos):<2} "
            f"{pos_sims.min():7.3f} {pos_sims.mean():8.3f} "
            f"{neg_sims.max() if len(neg_sims) else float('nan'):7.3f} "
            f"{neg_sims.mean() if len(neg_sims) else float('nan'):8.3f} "
            f"{neg_conf.max() if len(neg_conf) else float('nan'):12.3f} "
            f"{margin:+7.3f}"
        )

    # Show which images get dropped at the shipped threshold + neighbors
    for T_show in (0.25, 0.30, 0.33):
        _, keep = iterative_mean(all_embs, T_show)
        print(
            f"\nAt T={T_show}, the {int((~keep).sum())} dropped positives are:"
        )
        # Score each dropped image against the mean rebuilt WITHOUT it.
        final_mean = stats.trim_mean(all_embs[keep], base_trim, axis=0)
        m_n = final_mean / (np.linalg.norm(final_mean) + 1e-9)
        for i, (p, k) in enumerate(zip(pos, keep)):
            if not k:
                e_n = p.embedding / (np.linalg.norm(p.embedding) + 1e-9)
                cos_final = float(e_n @ m_n)
                print(
                    f" cos_to_clean_mean={cos_final:6.3f} "
                    f"shape={p.shape[0]}x{p.shape[1]} "
                    f"eyes={p.align_info['eye_dist_px']:6.1f} "
                    f"blur={p.blur_var:7.1f} "
                    f"{os.path.basename(p.path)}"
                )
|
||||||
|
|
||||||
|
|
||||||
|
def degenerate_embedding_test(
    pos: list[FaceSample], neg: list[FaceSample]
) -> None:
    """Detect whether negatives and low-quality positives share a degenerate
    'tiny/noisy face' region of the embedding space.

    Signal: if neg-to-neg cos is higher than pos-to-pos cos, the negatives
    aren't really per-identity embeddings — they're dominated by upsample /
    low-resolution artifacts that all map to a similar corner of embedding
    space regardless of who the face belongs to.

    Also rebuilds the mean using only high-intra-similarity positives to
    show whether a cleaner training set separates the negatives.
    """
    print("\n" + "=" * 78)
    print("DEGENERATE-EMBEDDING TEST")
    print("=" * 78)

    # Normalize once; dot products below are then cosine similarities.
    pos_embs = np.stack([l2(s.embedding) for s in pos])
    neg_embs = np.stack([l2(s.embedding) for s in neg])

    nn = neg_embs @ neg_embs.T
    np.fill_diagonal(nn, np.nan)  # ignore self-similarity
    pp = pos_embs @ pos_embs.T
    np.fill_diagonal(pp, np.nan)
    pn = pos_embs @ neg_embs.T

    print(
        f"\n neg<->neg mean cos : {np.nanmean(nn):.3f} "
        f"(how tightly negatives cluster together)"
    )
    print(
        f" pos<->pos mean cos : {np.nanmean(pp):.3f} "
        f"(how tightly positives cluster)"
    )
    print(
        f" pos<->neg mean cos : {pn.mean():.3f} "
        f"(cross-class — should be low for a clean class)"
    )
    if np.nanmean(nn) > np.nanmean(pp):
        print(
            "\n >> neg<->neg > pos<->pos: negatives cluster more tightly than\n"
            " positives. This is the degenerate-embedding signature —\n"
            " upsampled tiny crops share a common 'face-like blob' region\n"
            " regardless of identity."
        )

    # Rebuild the class mean from progressively stricter subsets of
    # positives (by mean intra-class similarity) and re-score the negatives.
    mean_intra = np.nanmean(pp, axis=1)
    for thresh in (0.30, 0.33, 0.36):
        keep = mean_intra >= thresh
        if keep.sum() < 5:
            # Too few survivors for a meaningful mean at this threshold.
            continue
        clean_embs = [pos[i].embedding for i in range(len(pos)) if keep[i]]
        clean_mean = stats.trim_mean(np.stack(clean_embs), 0.15, axis=0)
        neg_scores = np.array([cosine(n.embedding, clean_mean) for n in neg])
        neg_confs = np.array([similarity_to_confidence(c) for c in neg_scores])
        pos_scores = np.array(
            [
                cosine(pos[i].embedding, clean_mean)
                for i in range(len(pos))
                if keep[i]
            ]
        )
        print(
            f"\n mean_intra >= {thresh}: keeping {int(keep.sum())}/{len(pos)} positives"
        )
        print(
            f" pos cos vs mean : min={pos_scores.min():.3f} "
            f"mean={pos_scores.mean():.3f} max={pos_scores.max():.3f}"
        )
        print(
            f" neg cos vs mean : min={neg_scores.min():.3f} "
            f"mean={neg_scores.mean():.3f} max={neg_scores.max():.3f}"
        )
        print(
            f" neg conf : min={neg_confs.min():.3f} "
            f"mean={neg_confs.mean():.3f} max={neg_confs.max():.3f}"
        )
        print(
            f" margin (pos.min - neg.max): "
            f"{pos_scores.min() - neg_scores.max():+.3f}"
        )
|
||||||
|
|
||||||
|
|
||||||
|
def contamination_analysis(
    pos: list[FaceSample], neg: list[FaceSample]
) -> None:
    """Check whether the positive collection contains a second identity.

    Two signals:
    (a) Per-positive: if an image is closer to at least one negative than
        to the rest of the positive class, it's likely a mislabeled face.
    (b) 2-means split of the positive embeddings: if one cluster center
        lands close to the negative mean, that cluster is a contaminating
        sub-identity that's pulling the class mean toward the negatives.
    """
    print("\n" + "=" * 78)
    print("CONTAMINATION ANALYSIS")
    print("=" * 78)

    # Normalize once; dot products below are cosine similarities.
    pos_embs = np.stack([l2(s.embedding) for s in pos])
    neg_embs = np.stack([l2(s.embedding) for s in neg])
    pos_names = [os.path.basename(s.path) for s in pos]

    pos_pos = pos_embs @ pos_embs.T
    np.fill_diagonal(pos_pos, np.nan)  # ignore self-similarity
    pos_neg = pos_embs @ neg_embs.T

    mean_intra = np.nanmean(pos_pos, axis=1)
    max_to_neg = pos_neg.max(axis=1)
    mean_to_neg = pos_neg.mean(axis=1)

    print(
        "\nPositives closer to a negative than to their own class avg"
        "\n(these are candidates for mislabeled images):"
    )
    print(
        f"\n{'max_neg':>7} {'mean_neg':>8} {'mean_intra':>10} "
        f"{'delta':>6} name"
    )
    rows = list(zip(pos_names, max_to_neg, mean_to_neg, mean_intra))
    # Sort by (max-to-negative − mean-intra), descending: the most
    # suspicious positives first.
    rows.sort(key=lambda r: -(r[1] - r[3]))
    for nm, mxn, mnn, mi in rows[:15]:
        delta = mxn - mi
        marker = " <<" if delta > 0 else ""  # flag "closer to a negative"
        print(f"{mxn:7.3f} {mnn:8.3f} {mi:10.3f} {delta:6.3f} {nm}{marker}")

    # 2-means in cosine space (no sklearn dependency).
    print("\n2-means split of positive embeddings (cosine space):")
    rng = np.random.default_rng(0)  # fixed seed for reproducible output
    best = None
    # 5 random restarts; keep the tightest clustering.
    for _ in range(5):
        idx = rng.choice(len(pos_embs), 2, replace=False)
        centers = pos_embs[idx].copy()
        for _ in range(50):
            sims = pos_embs @ centers.T
            labels = np.argmax(sims, axis=1)
            new_centers = np.stack(
                [
                    l2(pos_embs[labels == k].mean(axis=0))
                    if np.any(labels == k)
                    else centers[k]
                    for k in range(2)
                ]
            )
            if np.allclose(new_centers, centers):
                break
            centers = new_centers
        # Tightness = mean similarity of each point to its own center.
        tight = float(np.mean([sims[i, labels[i]] for i in range(len(labels))]))
        if best is None or tight > best[0]:
            best = (tight, labels.copy(), centers.copy())

    _, labels, centers = best
    sizes = [int((labels == k).sum()) for k in range(2)]
    neg_mean = l2(neg_embs.mean(axis=0))
    print(
        f" cluster 0: size={sizes[0]:>2} "
        f"center<->other_center_cos={float(centers[0] @ centers[1]):.3f} "
        f"center<->neg_mean_cos={float(centers[0] @ neg_mean):.3f}"
    )
    print(
        f" cluster 1: size={sizes[1]:>2} "
        f"center<->neg_mean_cos={float(centers[1] @ neg_mean):.3f}"
    )

    # Whichever cluster center sits closer to the negative mean is the
    # suspected contaminating sub-identity.
    neg_aligned = 0 if centers[0] @ neg_mean > centers[1] @ neg_mean else 1
    print(
        f"\n cluster {neg_aligned} is more similar to the negatives — "
        f"its members are the contamination candidates:"
    )
    for i, lbl in enumerate(labels):
        if lbl == neg_aligned:
            print(
                f" max_to_neg={max_to_neg[i]:.3f} "
                f"mean_intra={mean_intra[i]:.3f} {pos_names[i]}"
            )

    # Re-score the negatives against a mean built only from the
    # non-suspect cluster (needs at least 3 images to be meaningful).
    keep_mask = labels != neg_aligned
    if keep_mask.sum() >= 3:
        clean_embs = [pos[i].embedding for i in range(len(pos)) if keep_mask[i]]
        clean_mean = stats.trim_mean(np.stack(clean_embs), 0.15, axis=0)
        print(
            f"\n Rebuilding class mean from the OTHER cluster "
            f"({keep_mask.sum()} images):"
        )
        print(f" {'cos':>6} {'conf':>6} name")
        for n in neg:
            cs = cosine(n.embedding, clean_mean)
            cf = similarity_to_confidence(cs)
            print(f" {cs:6.3f} {cf:6.3f} {os.path.basename(n.path)}")
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# main
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> int:
    """CLI entry point.

    Loads the models from --model-cache, embeds the positive (and optional
    negative) folders, always prints the baseline analyses, and runs the
    optional diagnostics behind their flags. Returns a process exit code
    (0 on success, 1 on missing models or an empty positive set).
    """
    ap = argparse.ArgumentParser(
        description="Analyze a face recognition collection outside Frigate.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        # Reuse the module docstring (usage + folder layout) as the epilog.
        epilog=__doc__,
    )
    ap.add_argument("--positive", required=True, help="Training folder for one identity")
    ap.add_argument(
        "--negative",
        default=None,
        help="Runtime-crop folder to score against (optional)",
    )
    ap.add_argument(
        "--model-cache",
        default="/config/model_cache",
        help="Directory containing facedet/arcface.onnx and facedet/landmarkdet.yaml",
    )
    ap.add_argument(
        "--trim",
        type=float,
        default=0.15,
        help="trim_mean proportion (Frigate uses 0.15)",
    )
    ap.add_argument(
        "--vector-outlier",
        action="store_true",
        help="Sweep the vector-wise outlier filter threshold",
    )
    ap.add_argument(
        "--degenerate",
        action="store_true",
        help="Test whether negatives share a degenerate embedding region",
    )
    ap.add_argument(
        "--contamination",
        action="store_true",
        help="Check whether the positive folder contains a second identity",
    )
    args = ap.parse_args()

    # Both model files must exist before any heavy loading starts.
    arcface_path = os.path.join(args.model_cache, "facedet", "arcface.onnx")
    landmark_path = os.path.join(args.model_cache, "facedet", "landmarkdet.yaml")
    for p in (arcface_path, landmark_path):
        if not os.path.exists(p):
            print(f"ERROR: model file not found: {p}")
            return 1

    print(f"Loading ArcFace from {arcface_path}")
    embedder = ArcFaceEmbedder(arcface_path)
    print(f"Loading landmark model from {landmark_path}")
    aligner = LandmarkAligner(landmark_path)

    print(f"\nLoading positives from {args.positive} ...")
    pos = load_folder(args.positive, aligner, embedder)
    print(f" {len(pos)} positives loaded")

    neg: list[FaceSample] = []
    if args.negative:
        print(f"\nLoading negatives from {args.negative} ...")
        neg = load_folder(args.negative, aligner, embedder)
        print(f" {len(neg)} negatives loaded")

    if not pos:
        print("no positive samples — aborting")
        return 1

    # Baseline analyses always run; class mean matches production trim.
    mean_emb = trimmed_mean([s.embedding for s in pos], trim=args.trim)
    summarize_positive(pos, mean_emb)
    if neg:
        summarize_negative(neg, mean_emb, pos)

    # Optional diagnostics — the last two need negatives to be meaningful.
    if args.vector_outlier:
        vector_outlier_test(pos, neg, args.trim)
    if args.degenerate and neg:
        degenerate_embedding_test(pos, neg)
    if args.contamination and neg:
        contamination_analysis(pos, neg)

    return 0
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit code.
    sys.exit(main())
|
||||||
@ -1,4 +1,114 @@
|
|||||||
import { test, expect } from "../fixtures/frigate-test";
|
import { test, expect } from "../fixtures/frigate-test";
|
||||||
|
import {
|
||||||
|
expectBodyInteractive,
|
||||||
|
waitForBodyInteractive,
|
||||||
|
} from "../helpers/overlay-interaction";
|
||||||
|
|
||||||
|
test.describe("Export Page - Delete race @high", () => {
|
||||||
|
// Empirical guard for radix-ui/primitives#3445: when a modal DropdownMenu
|
||||||
|
// opens an AlertDialog and the AlertDialog's confirm action causes the
|
||||||
|
// parent's optimistic cache update to unmount the card, we want to know
|
||||||
|
// whether the deduped react-dismissable-layer (1.1.11) handles the
|
||||||
|
// pointer-events stack cleanup or whether `modal={false}` is still
|
||||||
|
// required on the DropdownMenu. The classic "canonical" pattern, distinct
|
||||||
|
// from the FaceSelectionDialog auto-unmount race already covered by
|
||||||
|
// face-library.spec.ts.
|
||||||
|
test("deleting an export via dropdown→alert→confirm leaves body interactive", async ({
|
||||||
|
frigateApp,
|
||||||
|
}) => {
|
||||||
|
if (frigateApp.isMobile) {
|
||||||
|
test.skip();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const initialExports = [
|
||||||
|
{
|
||||||
|
id: "export-race-001",
|
||||||
|
camera: "front_door",
|
||||||
|
name: "Race - Test Export",
|
||||||
|
date: 1775490731.3863528,
|
||||||
|
video_path: "/exports/export-race-001.mp4",
|
||||||
|
thumb_path: "/exports/export-race-001-thumb.jpg",
|
||||||
|
in_progress: false,
|
||||||
|
export_case_id: null,
|
||||||
|
},
|
||||||
|
];
|
||||||
|
let deleted = false;
|
||||||
|
|
||||||
|
await frigateApp.installDefaults({
|
||||||
|
exports: initialExports,
|
||||||
|
});
|
||||||
|
|
||||||
|
// Flip /api/export to empty after the delete POST is observed so the
|
||||||
|
// page's SWR mutate sees the export gone.
|
||||||
|
await frigateApp.page.route("**/api/export**", async (route) => {
|
||||||
|
const payload = deleted ? [] : initialExports;
|
||||||
|
await route.fulfill({ json: payload });
|
||||||
|
});
|
||||||
|
await frigateApp.page.route("**/api/exports/delete", async (route) => {
|
||||||
|
deleted = true;
|
||||||
|
const delayMs = Number(
|
||||||
|
(globalThis as { process?: { env?: Record<string, string> } }).process
|
||||||
|
?.env?.DELETE_DELAY_MS ?? "100",
|
||||||
|
);
|
||||||
|
if (delayMs > 0) {
|
||||||
|
await new Promise((resolve) => setTimeout(resolve, delayMs));
|
||||||
|
}
|
||||||
|
await route.fulfill({ json: { success: true } });
|
||||||
|
});
|
||||||
|
|
||||||
|
await frigateApp.goto("/export");
|
||||||
|
await expect(frigateApp.page.getByText("Race - Test Export")).toBeVisible({
|
||||||
|
timeout: 5_000,
|
||||||
|
});
|
||||||
|
|
||||||
|
// Open the kebab menu on the export card. The kebab uses the
|
||||||
|
// (misleading) aria-label "Edit name" from ExportCard's source — it
|
||||||
|
// wraps the FiMoreVertical icon. There is exactly one such button on
|
||||||
|
// the page once we have a single export rendered.
|
||||||
|
const kebab = frigateApp.page
|
||||||
|
.getByRole("button", { name: /edit name/i })
|
||||||
|
.first();
|
||||||
|
await expect(kebab).toBeVisible({ timeout: 5_000 });
|
||||||
|
await kebab.click();
|
||||||
|
|
||||||
|
const menu = frigateApp.page
|
||||||
|
.locator('[role="menu"], [data-radix-menu-content]')
|
||||||
|
.first();
|
||||||
|
await expect(menu).toBeVisible({ timeout: 3_000 });
|
||||||
|
|
||||||
|
// Delete Export
|
||||||
|
await menu
|
||||||
|
.getByRole("menuitem", { name: /delete export/i })
|
||||||
|
.first()
|
||||||
|
.click();
|
||||||
|
|
||||||
|
// AlertDialog at page level. The confirm button's accessible name is
|
||||||
|
// "Delete Export" (its aria-label), the visible text is just "Delete".
|
||||||
|
const confirm = frigateApp.page.getByRole("alertdialog");
|
||||||
|
await expect(confirm).toBeVisible({ timeout: 3_000 });
|
||||||
|
await confirm
|
||||||
|
.getByRole("button", { name: /^delete export$/i })
|
||||||
|
.first()
|
||||||
|
.click();
|
||||||
|
|
||||||
|
// The card optimistically disappears, the dialog closes, and body
|
||||||
|
// pointer-events must come unstuck.
|
||||||
|
await expect(
|
||||||
|
frigateApp.page.getByText("Race - Test Export"),
|
||||||
|
).not.toBeVisible({ timeout: 5_000 });
|
||||||
|
await waitForBodyInteractive(frigateApp.page, 5_000);
|
||||||
|
await expectBodyInteractive(frigateApp.page);
|
||||||
|
|
||||||
|
// Sanity: another page-level button still responds.
|
||||||
|
const newCase = frigateApp.page.getByRole("button", { name: /new case/i });
|
||||||
|
await expect(newCase).toBeVisible({ timeout: 3_000 });
|
||||||
|
await newCase.click();
|
||||||
|
await expect(
|
||||||
|
frigateApp.page.getByRole("dialog").filter({ hasText: /create case/i }),
|
||||||
|
).toBeVisible({ timeout: 3_000 });
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
test.describe("Export Page - Overview @high", () => {
|
test.describe("Export Page - Overview @high", () => {
|
||||||
test("renders uncategorized exports and case cards from mock data", async ({
|
test("renders uncategorized exports and case cards from mock data", async ({
|
||||||
|
|||||||
@ -358,6 +358,158 @@ test.describe("FaceSelectionDialog @high", () => {
|
|||||||
await frigateApp.page.keyboard.press("Escape");
|
await frigateApp.page.keyboard.press("Escape");
|
||||||
await expect(menu).not.toBeVisible({ timeout: 3_000 });
|
await expect(menu).not.toBeVisible({ timeout: 3_000 });
|
||||||
});
|
});
|
||||||
|
|
||||||
|
test("classifying the last image in a group leaves body interactive", async ({
|
||||||
|
frigateApp,
|
||||||
|
}) => {
|
||||||
|
// Regression guard for the stuck body pointer-events bug when the
|
||||||
|
// last image in a grouped-recognition detail Dialog is classified.
|
||||||
|
// Tracked upstream at radix-ui/primitives#3445.
|
||||||
|
//
|
||||||
|
// Root cause: when the user clicks a FaceSelectionDialog menu item,
|
||||||
|
// the modal DropdownMenu enters its exit animation (Radix's Presence
|
||||||
|
// keeps it in the DOM with data-state="closed" until animationend).
|
||||||
|
// While that is in flight the classify axios resolves, SWR removes
|
||||||
|
// the image from /api/faces, the parent's map no longer renders the
|
||||||
|
// grouped card, and React unmounts the subtree — including the still-
|
||||||
|
// animating DropdownMenu's Presence container. DismissableLayer's
|
||||||
|
// shared modal-layer stack can't reconcile the interrupted exit, so
|
||||||
|
// the `body { pointer-events: none }` entry it put on mount is never
|
||||||
|
// popped and the rest of the UI becomes unclickable.
|
||||||
|
//
|
||||||
|
// The fix is `modal={false}` on the FaceSelectionDialog's
|
||||||
|
// DropdownMenu (desktop path only). With modal=false the DropdownMenu
|
||||||
|
// never puts an entry on DismissableLayer's body-pointer-events stack
|
||||||
|
// in the first place, so there's nothing to leak when its Presence is
|
||||||
|
// torn down mid-animation. The Radix-community-documented workaround
|
||||||
|
// for #3445.
|
||||||
|
//
|
||||||
|
// The bug only reproduces when the mock resolves fast enough that
|
||||||
|
// the parent unmounts before the dropdown's exit animation finishes.
|
||||||
|
// Measured window via a 3x sweep on the pre-fix build: 0–200 ms
|
||||||
|
// triggers it; 300 ms+ no longer reproduces. Production LAN networks
|
||||||
|
// sit comfortably inside the bad window, while `npm run dev` seems
|
||||||
|
// to mask it via React StrictMode's double-effect scheduling.
|
||||||
|
const EVENT_ID = "1775487131.3863528-race";
|
||||||
|
const initialFaces = withGroupedTrainingAttempt(basicFacesMock(), {
|
||||||
|
eventId: EVENT_ID,
|
||||||
|
attempts: [
|
||||||
|
{ timestamp: 1775487131.3863528, label: "unknown", score: 0.95 },
|
||||||
|
],
|
||||||
|
});
|
||||||
|
|
||||||
|
let classified = false;
|
||||||
|
|
||||||
|
await frigateApp.installDefaults({
|
||||||
|
faces: initialFaces,
|
||||||
|
events: [
|
||||||
|
{
|
||||||
|
id: EVENT_ID,
|
||||||
|
label: "person",
|
||||||
|
sub_label: null,
|
||||||
|
camera: "front_door",
|
||||||
|
start_time: 1775487131.3863528,
|
||||||
|
end_time: 1775487161.3863528,
|
||||||
|
false_positive: false,
|
||||||
|
zones: ["front_yard"],
|
||||||
|
thumbnail: null,
|
||||||
|
has_clip: true,
|
||||||
|
has_snapshot: true,
|
||||||
|
retain_indefinitely: false,
|
||||||
|
plus_id: null,
|
||||||
|
model_hash: "abc123",
|
||||||
|
detector_type: "cpu",
|
||||||
|
model_type: "ssd",
|
||||||
|
data: {
|
||||||
|
top_score: 0.92,
|
||||||
|
score: 0.92,
|
||||||
|
region: [0.1, 0.1, 0.5, 0.8],
|
||||||
|
box: [0.2, 0.15, 0.45, 0.75],
|
||||||
|
area: 0.18,
|
||||||
|
ratio: 0.6,
|
||||||
|
type: "object",
|
||||||
|
path_data: [],
|
||||||
|
},
|
||||||
|
},
|
||||||
|
],
|
||||||
|
});
|
||||||
|
|
||||||
|
// Re-route /api/faces to flip to the "train empty" payload once the
|
||||||
|
// classify POST has been received. Registered AFTER installDefaults so
|
||||||
|
// Playwright's LIFO route matching hits this handler first.
|
||||||
|
await frigateApp.page.route("**/api/faces", async (route) => {
|
||||||
|
const payload = classified ? basicFacesMock() : initialFaces;
|
||||||
|
await route.fulfill({ json: payload });
|
||||||
|
});
|
||||||
|
|
||||||
|
// Hold the classify POST briefly. The race opens when the parent
|
||||||
|
// unmounts before the dropdown's exit animation finishes (~200ms
|
||||||
|
// in Radix). 100ms keeps us comfortably inside that window and
|
||||||
|
// reliably triggered the bug in a 3x sweep across 0/50/100/200ms
|
||||||
|
// on the pre-fix build. CLASSIFY_DELAY_MS overrides for local sweeps.
|
||||||
|
const delayMs = Number(
|
||||||
|
(globalThis as { process?: { env?: Record<string, string> } }).process
|
||||||
|
?.env?.CLASSIFY_DELAY_MS ?? "100",
|
||||||
|
);
|
||||||
|
await frigateApp.page.route(
|
||||||
|
"**/api/faces/train/*/classify",
|
||||||
|
async (route) => {
|
||||||
|
classified = true;
|
||||||
|
if (delayMs > 0) {
|
||||||
|
await new Promise((resolve) => setTimeout(resolve, delayMs));
|
||||||
|
}
|
||||||
|
await route.fulfill({ json: { success: true } });
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
await frigateApp.goto("/faces");
|
||||||
|
|
||||||
|
// Open the grouped detail Dialog.
|
||||||
|
const groupedImage = frigateApp.page
|
||||||
|
.locator('img[src*="clips/faces/train/"]')
|
||||||
|
.first();
|
||||||
|
await expect(groupedImage).toBeVisible({ timeout: 5_000 });
|
||||||
|
await groupedImage.locator("xpath=..").click();
|
||||||
|
const dialog = frigateApp.page
|
||||||
|
.getByRole("dialog")
|
||||||
|
.filter({
|
||||||
|
has: frigateApp.page.locator('img[src*="clips/faces/train/"]'),
|
||||||
|
})
|
||||||
|
.first();
|
||||||
|
await expect(dialog).toBeVisible({ timeout: 5_000 });
|
||||||
|
|
||||||
|
// Single attempt → single `+` trigger.
|
||||||
|
const triggers = dialog.locator('[aria-haspopup="menu"]');
|
||||||
|
await expect(triggers).toHaveCount(1);
|
||||||
|
await triggers.first().click();
|
||||||
|
|
||||||
|
const menu = frigateApp.page
|
||||||
|
.locator('[role="menu"], [data-radix-menu-content]')
|
||||||
|
.first();
|
||||||
|
await expect(menu).toBeVisible({ timeout: 5_000 });
|
||||||
|
await menu.getByRole("menuitem", { name: /^alice$/i }).click();
|
||||||
|
|
||||||
|
// The Dialog must leave the tree cleanly, and body must recover.
|
||||||
|
await expect(dialog).not.toBeVisible({ timeout: 5_000 });
|
||||||
|
|
||||||
|
// Give Radix's exit animation + cleanup a comfortable margin on top of
|
||||||
|
// the ~300ms simulated network delay.
|
||||||
|
await waitForBodyInteractive(frigateApp.page, 5_000);
|
||||||
|
await expectBodyInteractive(frigateApp.page);
|
||||||
|
|
||||||
|
// User-visible confirmation: click something outside the dialog
|
||||||
|
// and assert it actually responds.
|
||||||
|
const librarySelector = frigateApp.page
|
||||||
|
.getByRole("button")
|
||||||
|
.filter({ hasText: /\(\d+\)/ })
|
||||||
|
.first();
|
||||||
|
await librarySelector.click();
|
||||||
|
await expect(
|
||||||
|
frigateApp.page
|
||||||
|
.locator('[role="menu"], [data-radix-menu-content]')
|
||||||
|
.first(),
|
||||||
|
).toBeVisible({ timeout: 3_000 });
|
||||||
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
test.describe("Face Library — mobile @high @mobile", () => {
|
test.describe("Face Library — mobile @high @mobile", () => {
|
||||||
|
|||||||
@ -257,6 +257,7 @@
|
|||||||
"export": "Export",
|
"export": "Export",
|
||||||
"actions": "Actions",
|
"actions": "Actions",
|
||||||
"uiPlayground": "UI Playground",
|
"uiPlayground": "UI Playground",
|
||||||
|
"features": "Features",
|
||||||
"faceLibrary": "Face Library",
|
"faceLibrary": "Face Library",
|
||||||
"classification": "Classification",
|
"classification": "Classification",
|
||||||
"chat": "Chat",
|
"chat": "Chat",
|
||||||
|
|||||||
@ -457,7 +457,13 @@
|
|||||||
"enableDesc": "Temporarily disable an enabled camera until Frigate restarts. Disabling a camera completely stops Frigate's processing of this camera's streams. Detection, recording, and debugging will be unavailable.<br /> <em>Note: This does not disable go2rtc restreams.</em>",
|
"enableDesc": "Temporarily disable an enabled camera until Frigate restarts. Disabling a camera completely stops Frigate's processing of this camera's streams. Detection, recording, and debugging will be unavailable.<br /> <em>Note: This does not disable go2rtc restreams.</em>",
|
||||||
"disableLabel": "Disabled cameras",
|
"disableLabel": "Disabled cameras",
|
||||||
"disableDesc": "Enable a camera that is currently not visible in the UI and disabled in the configuration. A restart of Frigate is required after enabling.",
|
"disableDesc": "Enable a camera that is currently not visible in the UI and disabled in the configuration. A restart of Frigate is required after enabling.",
|
||||||
"enableSuccess": "Enabled {{cameraName}} in configuration. Restart Frigate to apply the changes."
|
"enableSuccess": "Enabled {{cameraName}} in configuration. Restart Frigate to apply the changes.",
|
||||||
|
"friendlyName": {
|
||||||
|
"edit": "Edit camera display name",
|
||||||
|
"title": "Edit Display Name",
|
||||||
|
"description": "Set the friendly name shown for this camera throughout the Frigate UI. Leave blank to use the camera ID.",
|
||||||
|
"rename": "Rename"
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"cameraConfig": {
|
"cameraConfig": {
|
||||||
"add": "Add Camera",
|
"add": "Add Camera",
|
||||||
|
|||||||
@ -161,13 +161,13 @@ export function AnimatedEventCard({
|
|||||||
<TooltipTrigger asChild>
|
<TooltipTrigger asChild>
|
||||||
<Button
|
<Button
|
||||||
className={cn(
|
className={cn(
|
||||||
"absolute left-2 top-1 z-40 transition-opacity",
|
"absolute left-2 top-1 z-40 bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500 transition-opacity",
|
||||||
threatLevel === ThreatLevel.SECURITY_CONCERN &&
|
threatLevel === ThreatLevel.SECURITY_CONCERN &&
|
||||||
"pointer-events-auto bg-severity_alert opacity-100 hover:bg-severity_alert",
|
"pointer-events-auto opacity-100",
|
||||||
threatLevel === ThreatLevel.NEEDS_REVIEW &&
|
threatLevel === ThreatLevel.NEEDS_REVIEW &&
|
||||||
"pointer-events-auto bg-severity_detection opacity-100 hover:bg-severity_detection",
|
"pointer-events-auto opacity-100",
|
||||||
threatLevel === ThreatLevel.NORMAL &&
|
threatLevel === ThreatLevel.NORMAL &&
|
||||||
"pointer-events-none bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500 opacity-0 group-hover:pointer-events-auto group-hover:opacity-100",
|
"pointer-events-none opacity-0 group-hover:pointer-events-auto group-hover:opacity-100",
|
||||||
)}
|
)}
|
||||||
size="xs"
|
size="xs"
|
||||||
aria-label={t("markAsReviewed")}
|
aria-label={t("markAsReviewed")}
|
||||||
|
|||||||
@ -155,14 +155,40 @@ export function MessageBubble({
|
|||||||
) : (
|
) : (
|
||||||
<div
|
<div
|
||||||
className={cn(
|
className={cn(
|
||||||
"[&>*:last-child]:inline",
|
|
||||||
!isComplete &&
|
!isComplete &&
|
||||||
"after:ml-0.5 after:inline-block after:h-4 after:w-2 after:animate-cursor-blink after:rounded-sm after:bg-foreground after:align-middle after:content-['']",
|
"[&>p:last-child]:inline after:ml-0.5 after:inline-block after:h-4 after:w-2 after:animate-cursor-blink after:rounded-sm after:bg-foreground after:align-middle after:content-['']",
|
||||||
)}
|
)}
|
||||||
>
|
>
|
||||||
<ReactMarkdown
|
<ReactMarkdown
|
||||||
remarkPlugins={[remarkGfm]}
|
remarkPlugins={[remarkGfm]}
|
||||||
components={{
|
components={{
|
||||||
|
p: ({ node: _n, ...props }) => (
|
||||||
|
<p className="my-2 first:mt-0 last:mb-0" {...props} />
|
||||||
|
),
|
||||||
|
ul: ({ node: _n, ...props }) => (
|
||||||
|
<ul
|
||||||
|
className="my-2 list-disc space-y-1 pl-6 first:mt-0 last:mb-0"
|
||||||
|
{...props}
|
||||||
|
/>
|
||||||
|
),
|
||||||
|
ol: ({ node: _n, ...props }) => (
|
||||||
|
<ol
|
||||||
|
className="my-2 list-decimal space-y-1 pl-6 first:mt-0 last:mb-0"
|
||||||
|
{...props}
|
||||||
|
/>
|
||||||
|
),
|
||||||
|
li: ({ node: _n, ...props }) => (
|
||||||
|
<li className="pl-1" {...props} />
|
||||||
|
),
|
||||||
|
code: ({ node: _n, className, ...props }) => (
|
||||||
|
<code
|
||||||
|
className={cn(
|
||||||
|
"rounded bg-foreground/10 px-1 py-0.5 font-mono text-sm",
|
||||||
|
className,
|
||||||
|
)}
|
||||||
|
{...props}
|
||||||
|
/>
|
||||||
|
),
|
||||||
table: ({ node: _n, ...props }) => (
|
table: ({ node: _n, ...props }) => (
|
||||||
<table
|
<table
|
||||||
className="my-2 w-full border-collapse border border-border"
|
className="my-2 w-full border-collapse border border-border"
|
||||||
|
|||||||
@ -14,7 +14,6 @@ import Step3ChooseExamples, {
|
|||||||
Step3FormData,
|
Step3FormData,
|
||||||
} from "./wizard/Step3ChooseExamples";
|
} from "./wizard/Step3ChooseExamples";
|
||||||
import { cn } from "@/lib/utils";
|
import { cn } from "@/lib/utils";
|
||||||
import { isDesktop } from "react-device-detect";
|
|
||||||
import axios from "axios";
|
import axios from "axios";
|
||||||
|
|
||||||
const OBJECT_STEPS = [
|
const OBJECT_STEPS = [
|
||||||
@ -153,13 +152,9 @@ export default function ClassificationModelWizardDialog({
|
|||||||
>
|
>
|
||||||
<DialogContent
|
<DialogContent
|
||||||
className={cn(
|
className={cn(
|
||||||
"",
|
"scrollbar-container max-h-[90%] overflow-y-auto",
|
||||||
isDesktop &&
|
wizardState.currentStep == 0 && "xl:max-h-[80%]",
|
||||||
wizardState.currentStep == 0 &&
|
wizardState.currentStep > 0 && "md:max-w-[70%] xl:max-h-[80%]",
|
||||||
"max-h-[90%] overflow-y-auto xl:max-h-[80%]",
|
|
||||||
isDesktop &&
|
|
||||||
wizardState.currentStep > 0 &&
|
|
||||||
"max-h-[90%] max-w-[70%] overflow-y-auto xl:max-h-[80%]",
|
|
||||||
)}
|
)}
|
||||||
onInteractOutside={(e) => {
|
onInteractOutside={(e) => {
|
||||||
e.preventDefault();
|
e.preventDefault();
|
||||||
|
|||||||
@ -65,10 +65,14 @@ import {
|
|||||||
globalCameraDefaultSections,
|
globalCameraDefaultSections,
|
||||||
buildOverrides,
|
buildOverrides,
|
||||||
buildConfigDataForPath,
|
buildConfigDataForPath,
|
||||||
|
flattenOverrides,
|
||||||
getBaseCameraSectionValue,
|
getBaseCameraSectionValue,
|
||||||
sanitizeSectionData as sharedSanitizeSectionData,
|
sanitizeSectionData as sharedSanitizeSectionData,
|
||||||
requiresRestartForOverrides as sharedRequiresRestartForOverrides,
|
requiresRestartForOverrides as sharedRequiresRestartForOverrides,
|
||||||
} from "@/utils/configUtil";
|
} from "@/utils/configUtil";
|
||||||
|
import SaveAllPreviewPopover, {
|
||||||
|
type SaveAllPreviewItem,
|
||||||
|
} from "@/components/overlay/detail/SaveAllPreviewPopover";
|
||||||
import RestartDialog from "@/components/overlay/dialog/RestartDialog";
|
import RestartDialog from "@/components/overlay/dialog/RestartDialog";
|
||||||
import { useRestart } from "@/api/ws";
|
import { useRestart } from "@/api/ws";
|
||||||
import type {
|
import type {
|
||||||
@ -913,6 +917,34 @@ export function ConfigSection({
|
|||||||
);
|
);
|
||||||
}, [sectionConfig?.renderers, sectionPath, cameraName, setPendingData]);
|
}, [sectionConfig?.renderers, sectionPath, cameraName, setPendingData]);
|
||||||
|
|
||||||
|
// Build a flat list of pending field changes for this section only.
|
||||||
|
// Mirrors the global Save All preview but scoped to the current section so
|
||||||
|
// users can inspect what will be saved without leaving the section.
|
||||||
|
const sectionPreviewItems = useMemo<SaveAllPreviewItem[]>(() => {
|
||||||
|
if (!hasChanges) return [];
|
||||||
|
if (!effectiveOverrides || typeof effectiveOverrides !== "object") {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
const flattened = flattenOverrides(effectiveOverrides as JsonValue);
|
||||||
|
return flattened.map(({ path, value }) => ({
|
||||||
|
scope: effectiveLevel,
|
||||||
|
cameraName,
|
||||||
|
profileName: profileName
|
||||||
|
? (profileFriendlyName ?? profileName)
|
||||||
|
: undefined,
|
||||||
|
fieldPath: path ? `${sectionPath}.${path}` : sectionPath,
|
||||||
|
value,
|
||||||
|
}));
|
||||||
|
}, [
|
||||||
|
hasChanges,
|
||||||
|
effectiveOverrides,
|
||||||
|
effectiveLevel,
|
||||||
|
cameraName,
|
||||||
|
profileName,
|
||||||
|
profileFriendlyName,
|
||||||
|
sectionPath,
|
||||||
|
]);
|
||||||
|
|
||||||
if (!modifiedSchema) {
|
if (!modifiedSchema) {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
@ -1018,6 +1050,12 @@ export function ConfigSection({
|
|||||||
defaultValue: "You have unsaved changes",
|
defaultValue: "You have unsaved changes",
|
||||||
})}
|
})}
|
||||||
</span>
|
</span>
|
||||||
|
<SaveAllPreviewPopover
|
||||||
|
items={sectionPreviewItems}
|
||||||
|
className="h-7 w-7"
|
||||||
|
align="start"
|
||||||
|
side="top"
|
||||||
|
/>
|
||||||
</div>
|
</div>
|
||||||
)}
|
)}
|
||||||
<div className="flex w-full flex-col gap-2 sm:flex-row sm:items-center md:w-auto">
|
<div className="flex w-full flex-col gap-2 sm:flex-row sm:items-center md:w-auto">
|
||||||
|
|||||||
@ -6,6 +6,7 @@ import {
|
|||||||
LuLifeBuoy,
|
LuLifeBuoy,
|
||||||
LuList,
|
LuList,
|
||||||
LuLogOut,
|
LuLogOut,
|
||||||
|
LuMessageSquare,
|
||||||
LuMoon,
|
LuMoon,
|
||||||
LuSquarePen,
|
LuSquarePen,
|
||||||
LuScanFace,
|
LuScanFace,
|
||||||
@ -482,21 +483,25 @@ export default function GeneralSettings({ className }: GeneralSettingsProps) {
|
|||||||
</Link>
|
</Link>
|
||||||
</>
|
</>
|
||||||
)}
|
)}
|
||||||
{isAdmin && isMobile && config?.face_recognition.enabled && (
|
</DropdownMenuGroup>
|
||||||
<>
|
{isMobile && isAdmin && (
|
||||||
<Link to="/faces">
|
<>
|
||||||
<MenuItem
|
<DropdownMenuLabel className="mt-1">
|
||||||
className="flex w-full items-center p-2 text-sm"
|
{t("menu.features")}
|
||||||
aria-label={t("menu.faceLibrary")}
|
</DropdownMenuLabel>
|
||||||
>
|
<DropdownMenuSeparator />
|
||||||
<LuScanFace className="mr-2 size-4" />
|
<DropdownMenuGroup className="flex flex-col">
|
||||||
<span>{t("menu.faceLibrary")}</span>
|
{config?.face_recognition.enabled && (
|
||||||
</MenuItem>
|
<Link to="/faces">
|
||||||
</Link>
|
<MenuItem
|
||||||
</>
|
className="flex w-full items-center p-2 text-sm"
|
||||||
)}
|
aria-label={t("menu.faceLibrary")}
|
||||||
{isAdmin && isMobile && (
|
>
|
||||||
<>
|
<LuScanFace className="mr-2 size-4" />
|
||||||
|
<span>{t("menu.faceLibrary")}</span>
|
||||||
|
</MenuItem>
|
||||||
|
</Link>
|
||||||
|
)}
|
||||||
<Link to="/classification">
|
<Link to="/classification">
|
||||||
<MenuItem
|
<MenuItem
|
||||||
className="flex w-full items-center p-2 text-sm"
|
className="flex w-full items-center p-2 text-sm"
|
||||||
@ -506,9 +511,20 @@ export default function GeneralSettings({ className }: GeneralSettingsProps) {
|
|||||||
<span>{t("menu.classification")}</span>
|
<span>{t("menu.classification")}</span>
|
||||||
</MenuItem>
|
</MenuItem>
|
||||||
</Link>
|
</Link>
|
||||||
</>
|
{config?.genai?.model !== "none" && (
|
||||||
)}
|
<Link to="/chat">
|
||||||
</DropdownMenuGroup>
|
<MenuItem
|
||||||
|
className="flex w-full items-center p-2 text-sm"
|
||||||
|
aria-label={t("menu.chat")}
|
||||||
|
>
|
||||||
|
<LuMessageSquare className="mr-2 size-4" />
|
||||||
|
<span>{t("menu.chat")}</span>
|
||||||
|
</MenuItem>
|
||||||
|
</Link>
|
||||||
|
)}
|
||||||
|
</DropdownMenuGroup>
|
||||||
|
</>
|
||||||
|
)}
|
||||||
<DropdownMenuLabel className={isDesktop ? "mt-3" : "mt-1"}>
|
<DropdownMenuLabel className={isDesktop ? "mt-3" : "mt-1"}>
|
||||||
{t("menu.appearance")}
|
{t("menu.appearance")}
|
||||||
</DropdownMenuLabel>
|
</DropdownMenuLabel>
|
||||||
|
|||||||
@ -124,7 +124,7 @@ export default function ClassificationSelectionDialog({
|
|||||||
/>
|
/>
|
||||||
|
|
||||||
<Tooltip>
|
<Tooltip>
|
||||||
<Selector>
|
<Selector {...(isDesktop ? { modal: false } : {})}>
|
||||||
<SelectorTrigger asChild>
|
<SelectorTrigger asChild>
|
||||||
<TooltipTrigger asChild={isChildButton}>{children}</TooltipTrigger>
|
<TooltipTrigger asChild={isChildButton}>{children}</TooltipTrigger>
|
||||||
</SelectorTrigger>
|
</SelectorTrigger>
|
||||||
|
|||||||
@ -85,7 +85,7 @@ export default function FaceSelectionDialog({
|
|||||||
)}
|
)}
|
||||||
|
|
||||||
<Tooltip>
|
<Tooltip>
|
||||||
<Selector>
|
<Selector {...(isDesktop ? { modal: false } : {})}>
|
||||||
<SelectorTrigger asChild>
|
<SelectorTrigger asChild>
|
||||||
<TooltipTrigger asChild={isChildButton}>{children}</TooltipTrigger>
|
<TooltipTrigger asChild={isChildButton}>{children}</TooltipTrigger>
|
||||||
</SelectorTrigger>
|
</SelectorTrigger>
|
||||||
|
|||||||
@ -1,3 +1,4 @@
|
|||||||
|
import ActivityIndicator from "@/components/indicators/activity-indicator";
|
||||||
import TextEntry from "@/components/input/TextEntry";
|
import TextEntry from "@/components/input/TextEntry";
|
||||||
import { Button } from "@/components/ui/button";
|
import { Button } from "@/components/ui/button";
|
||||||
import {
|
import {
|
||||||
@ -19,7 +20,9 @@ type TextEntryDialogProps = {
|
|||||||
setOpen: (open: boolean) => void;
|
setOpen: (open: boolean) => void;
|
||||||
onSave: (text: string) => void;
|
onSave: (text: string) => void;
|
||||||
defaultValue?: string;
|
defaultValue?: string;
|
||||||
|
placeholder?: string;
|
||||||
allowEmpty?: boolean;
|
allowEmpty?: boolean;
|
||||||
|
isSaving?: boolean;
|
||||||
regexPattern?: RegExp;
|
regexPattern?: RegExp;
|
||||||
regexErrorMessage?: string;
|
regexErrorMessage?: string;
|
||||||
forbiddenPattern?: RegExp;
|
forbiddenPattern?: RegExp;
|
||||||
@ -33,7 +36,9 @@ export default function TextEntryDialog({
|
|||||||
setOpen,
|
setOpen,
|
||||||
onSave,
|
onSave,
|
||||||
defaultValue = "",
|
defaultValue = "",
|
||||||
|
placeholder,
|
||||||
allowEmpty = false,
|
allowEmpty = false,
|
||||||
|
isSaving = false,
|
||||||
regexPattern,
|
regexPattern,
|
||||||
regexErrorMessage,
|
regexErrorMessage,
|
||||||
forbiddenPattern,
|
forbiddenPattern,
|
||||||
@ -50,6 +55,7 @@ export default function TextEntryDialog({
|
|||||||
</DialogHeader>
|
</DialogHeader>
|
||||||
<TextEntry
|
<TextEntry
|
||||||
defaultValue={defaultValue}
|
defaultValue={defaultValue}
|
||||||
|
placeholder={placeholder}
|
||||||
allowEmpty={allowEmpty}
|
allowEmpty={allowEmpty}
|
||||||
onSave={onSave}
|
onSave={onSave}
|
||||||
regexPattern={regexPattern}
|
regexPattern={regexPattern}
|
||||||
@ -58,11 +64,22 @@ export default function TextEntryDialog({
|
|||||||
forbiddenErrorMessage={forbiddenErrorMessage}
|
forbiddenErrorMessage={forbiddenErrorMessage}
|
||||||
>
|
>
|
||||||
<DialogFooter className={cn("pt-4", isMobile && "gap-2")}>
|
<DialogFooter className={cn("pt-4", isMobile && "gap-2")}>
|
||||||
<Button type="button" onClick={() => setOpen(false)}>
|
<Button
|
||||||
|
type="button"
|
||||||
|
disabled={isSaving}
|
||||||
|
onClick={() => setOpen(false)}
|
||||||
|
>
|
||||||
{t("button.cancel")}
|
{t("button.cancel")}
|
||||||
</Button>
|
</Button>
|
||||||
<Button variant="select" type="submit">
|
<Button variant="select" type="submit" disabled={isSaving}>
|
||||||
{t("button.save")}
|
{isSaving ? (
|
||||||
|
<div className="flex flex-row items-center gap-2">
|
||||||
|
<ActivityIndicator className="size-4" />
|
||||||
|
<span>{t("button.saving")}</span>
|
||||||
|
</div>
|
||||||
|
) : (
|
||||||
|
t("button.save")
|
||||||
|
)}
|
||||||
</Button>
|
</Button>
|
||||||
</DialogFooter>
|
</DialogFooter>
|
||||||
</TextEntry>
|
</TextEntry>
|
||||||
|
|||||||
@ -28,11 +28,7 @@ import useOptimisticState from "@/hooks/use-optimistic-state";
|
|||||||
import { isMobile } from "react-device-detect";
|
import { isMobile } from "react-device-detect";
|
||||||
import { FaVideo } from "react-icons/fa";
|
import { FaVideo } from "react-icons/fa";
|
||||||
import { CameraConfig, FrigateConfig } from "@/types/frigateConfig";
|
import { CameraConfig, FrigateConfig } from "@/types/frigateConfig";
|
||||||
import type {
|
import type { ConfigSectionData, JsonObject } from "@/types/configForm";
|
||||||
ConfigSectionData,
|
|
||||||
JsonObject,
|
|
||||||
JsonValue,
|
|
||||||
} from "@/types/configForm";
|
|
||||||
import useSWR from "swr";
|
import useSWR from "swr";
|
||||||
import FilterSwitch from "@/components/filter/FilterSwitch";
|
import FilterSwitch from "@/components/filter/FilterSwitch";
|
||||||
import { ZoneMaskFilterButton } from "@/components/filter/ZoneMaskFilter";
|
import { ZoneMaskFilterButton } from "@/components/filter/ZoneMaskFilter";
|
||||||
@ -93,6 +89,7 @@ import { mutate } from "swr";
|
|||||||
import { RJSFSchema } from "@rjsf/utils";
|
import { RJSFSchema } from "@rjsf/utils";
|
||||||
import {
|
import {
|
||||||
buildConfigDataForPath,
|
buildConfigDataForPath,
|
||||||
|
flattenOverrides,
|
||||||
parseProfileFromSectionPath,
|
parseProfileFromSectionPath,
|
||||||
prepareSectionSavePayload,
|
prepareSectionSavePayload,
|
||||||
PROFILE_ELIGIBLE_SECTIONS,
|
PROFILE_ELIGIBLE_SECTIONS,
|
||||||
@ -190,25 +187,6 @@ const parsePendingDataKey = (pendingDataKey: string) => {
|
|||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
const flattenOverrides = (
|
|
||||||
value: JsonValue | undefined,
|
|
||||||
path: string[] = [],
|
|
||||||
): Array<{ path: string; value: JsonValue }> => {
|
|
||||||
if (value === undefined) return [];
|
|
||||||
if (value === null || typeof value !== "object" || Array.isArray(value)) {
|
|
||||||
return [{ path: path.join("."), value }];
|
|
||||||
}
|
|
||||||
|
|
||||||
const entries = Object.entries(value);
|
|
||||||
if (entries.length === 0) {
|
|
||||||
return [{ path: path.join("."), value: {} }];
|
|
||||||
}
|
|
||||||
|
|
||||||
return entries.flatMap(([key, entryValue]) =>
|
|
||||||
flattenOverrides(entryValue, [...path, key]),
|
|
||||||
);
|
|
||||||
};
|
|
||||||
|
|
||||||
const createSectionPage = (
|
const createSectionPage = (
|
||||||
sectionKey: string,
|
sectionKey: string,
|
||||||
level: "global" | "camera",
|
level: "global" | "camera",
|
||||||
|
|||||||
@ -219,6 +219,32 @@ export function buildOverrides(
|
|||||||
return current;
|
return current;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// flattenOverrides — turn an overrides object into a list of leaf paths
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
// Walks a nested overrides value and produces a flat list of `{ path, value }`
|
||||||
|
// entries, one per leaf. Used by save/preview UIs to enumerate the individual
|
||||||
|
// fields that will be changed.
|
||||||
|
export function flattenOverrides(
|
||||||
|
value: JsonValue | undefined,
|
||||||
|
path: string[] = [],
|
||||||
|
): Array<{ path: string; value: JsonValue }> {
|
||||||
|
if (value === undefined) return [];
|
||||||
|
if (value === null || typeof value !== "object" || Array.isArray(value)) {
|
||||||
|
return [{ path: path.join("."), value }];
|
||||||
|
}
|
||||||
|
|
||||||
|
const entries = Object.entries(value);
|
||||||
|
if (entries.length === 0) {
|
||||||
|
return [{ path: path.join("."), value: {} }];
|
||||||
|
}
|
||||||
|
|
||||||
|
return entries.flatMap(([key, entryValue]) =>
|
||||||
|
flattenOverrides(entryValue, [...path, key]),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
// sanitizeSectionData — normalize config values and strip hidden fields
|
// sanitizeSectionData — normalize config values and strip hidden fields
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
|
|||||||
@ -14,7 +14,7 @@ import { useTranslation } from "react-i18next";
|
|||||||
import CameraEditForm from "@/components/settings/CameraEditForm";
|
import CameraEditForm from "@/components/settings/CameraEditForm";
|
||||||
import CameraWizardDialog from "@/components/settings/CameraWizardDialog";
|
import CameraWizardDialog from "@/components/settings/CameraWizardDialog";
|
||||||
import DeleteCameraDialog from "@/components/overlay/dialog/DeleteCameraDialog";
|
import DeleteCameraDialog from "@/components/overlay/dialog/DeleteCameraDialog";
|
||||||
import { LuPlus, LuTrash2 } from "react-icons/lu";
|
import { LuPencil, LuPlus, LuTrash2 } from "react-icons/lu";
|
||||||
import { IoMdArrowRoundBack } from "react-icons/io";
|
import { IoMdArrowRoundBack } from "react-icons/io";
|
||||||
import { isDesktop } from "react-device-detect";
|
import { isDesktop } from "react-device-detect";
|
||||||
import { CameraNameLabel } from "@/components/camera/FriendlyNameLabel";
|
import { CameraNameLabel } from "@/components/camera/FriendlyNameLabel";
|
||||||
@ -26,6 +26,12 @@ import axios from "axios";
|
|||||||
import ActivityIndicator from "@/components/indicators/activity-indicator";
|
import ActivityIndicator from "@/components/indicators/activity-indicator";
|
||||||
import RestartDialog from "@/components/overlay/dialog/RestartDialog";
|
import RestartDialog from "@/components/overlay/dialog/RestartDialog";
|
||||||
import RestartRequiredIndicator from "@/components/indicators/RestartRequiredIndicator";
|
import RestartRequiredIndicator from "@/components/indicators/RestartRequiredIndicator";
|
||||||
|
import TextEntryDialog from "@/components/overlay/dialog/TextEntryDialog";
|
||||||
|
import {
|
||||||
|
Tooltip,
|
||||||
|
TooltipContent,
|
||||||
|
TooltipTrigger,
|
||||||
|
} from "@/components/ui/tooltip";
|
||||||
import type { ProfileState } from "@/types/profile";
|
import type { ProfileState } from "@/types/profile";
|
||||||
import { getProfileColor } from "@/utils/profileColors";
|
import { getProfileColor } from "@/utils/profileColors";
|
||||||
import { cn } from "@/lib/utils";
|
import { cn } from "@/lib/utils";
|
||||||
@ -161,7 +167,13 @@ export default function CameraManagementView({
|
|||||||
key={camera}
|
key={camera}
|
||||||
className="flex flex-row items-center justify-between"
|
className="flex flex-row items-center justify-between"
|
||||||
>
|
>
|
||||||
<CameraNameLabel camera={camera} />
|
<div className="flex items-center gap-1">
|
||||||
|
<CameraNameLabel camera={camera} />
|
||||||
|
<CameraFriendlyNameEditor
|
||||||
|
cameraName={camera}
|
||||||
|
onConfigChanged={updateConfig}
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
<CameraEnableSwitch cameraName={camera} />
|
<CameraEnableSwitch cameraName={camera} />
|
||||||
</div>
|
</div>
|
||||||
))}
|
))}
|
||||||
@ -297,6 +309,103 @@ function CameraEnableSwitch({ cameraName }: CameraEnableSwitchProps) {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type CameraFriendlyNameEditorProps = {
|
||||||
|
cameraName: string;
|
||||||
|
onConfigChanged: () => Promise<unknown>;
|
||||||
|
};
|
||||||
|
|
||||||
|
function CameraFriendlyNameEditor({
|
||||||
|
cameraName,
|
||||||
|
onConfigChanged,
|
||||||
|
}: CameraFriendlyNameEditorProps) {
|
||||||
|
const { t } = useTranslation(["views/settings", "common"]);
|
||||||
|
const { data: config } = useSWR<FrigateConfig>("config");
|
||||||
|
const [open, setOpen] = useState(false);
|
||||||
|
const [isSaving, setIsSaving] = useState(false);
|
||||||
|
|
||||||
|
const currentFriendlyName = config?.cameras?.[cameraName]?.friendly_name;
|
||||||
|
|
||||||
|
const onSave = useCallback(
|
||||||
|
async (text: string) => {
|
||||||
|
if (isSaving) return;
|
||||||
|
setIsSaving(true);
|
||||||
|
|
||||||
|
try {
|
||||||
|
await axios.put("config/set", {
|
||||||
|
requires_restart: 0,
|
||||||
|
config_data: {
|
||||||
|
cameras: {
|
||||||
|
[cameraName]: {
|
||||||
|
friendly_name: text.trim() || null,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
await onConfigChanged();
|
||||||
|
setOpen(false);
|
||||||
|
|
||||||
|
toast.success(t("toast.save.success", { ns: "common" }), {
|
||||||
|
position: "top-center",
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
const errorMessage =
|
||||||
|
axios.isAxiosError(error) &&
|
||||||
|
(error.response?.data?.message || error.response?.data?.detail)
|
||||||
|
? error.response?.data?.message || error.response?.data?.detail
|
||||||
|
: t("toast.save.error.noMessage", { ns: "common" });
|
||||||
|
|
||||||
|
toast.error(
|
||||||
|
t("toast.save.error.title", { errorMessage, ns: "common" }),
|
||||||
|
{ position: "top-center" },
|
||||||
|
);
|
||||||
|
} finally {
|
||||||
|
setIsSaving(false);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
[cameraName, isSaving, onConfigChanged, t],
|
||||||
|
);
|
||||||
|
|
||||||
|
const renameLabel = t("cameraManagement.streams.friendlyName.rename", {
|
||||||
|
ns: "views/settings",
|
||||||
|
});
|
||||||
|
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
<Tooltip>
|
||||||
|
<TooltipTrigger asChild>
|
||||||
|
<Button
|
||||||
|
variant="ghost"
|
||||||
|
size="icon"
|
||||||
|
className="size-7"
|
||||||
|
aria-label={renameLabel}
|
||||||
|
onClick={() => setOpen(true)}
|
||||||
|
disabled={isSaving}
|
||||||
|
>
|
||||||
|
<LuPencil className="size-3.5" />
|
||||||
|
</Button>
|
||||||
|
</TooltipTrigger>
|
||||||
|
<TooltipContent>{renameLabel}</TooltipContent>
|
||||||
|
</Tooltip>
|
||||||
|
<TextEntryDialog
|
||||||
|
open={open}
|
||||||
|
setOpen={setOpen}
|
||||||
|
title={t("cameraManagement.streams.friendlyName.title", {
|
||||||
|
ns: "views/settings",
|
||||||
|
})}
|
||||||
|
description={t("cameraManagement.streams.friendlyName.description", {
|
||||||
|
ns: "views/settings",
|
||||||
|
})}
|
||||||
|
defaultValue={currentFriendlyName ?? ""}
|
||||||
|
placeholder={currentFriendlyName ? undefined : cameraName}
|
||||||
|
allowEmpty
|
||||||
|
isSaving={isSaving}
|
||||||
|
onSave={onSave}
|
||||||
|
/>
|
||||||
|
</>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
type CameraConfigEnableSwitchProps = {
|
type CameraConfigEnableSwitchProps = {
|
||||||
cameraName: string;
|
cameraName: string;
|
||||||
setRestartDialogOpen: React.Dispatch<React.SetStateAction<boolean>>;
|
setRestartDialogOpen: React.Dispatch<React.SetStateAction<boolean>>;
|
||||||
|
|||||||
Loading…
Reference in New Issue
Block a user