Compare commits


No commits in common. "1fb21a4dacf7e1e8c51ea777d8bc94fbab94fffe" and "32875fb4cc3ffee3eeb1ec324e71761855242b97" have entirely different histories.

14 changed files with 427 additions and 632 deletions

View File

@@ -1 +1,2 @@
 scikit-build == 0.18.*
+nvidia-pyindex

View File

@@ -696,11 +696,7 @@ def timeline(camera: str = "all", limit: int = 100, source_id: Optional[str] = N
         clauses.append((Timeline.camera == camera))

     if source_id:
-        source_ids = [sid.strip() for sid in source_id.split(",")]
-        if len(source_ids) == 1:
-            clauses.append((Timeline.source_id == source_ids[0]))
-        else:
-            clauses.append((Timeline.source_id.in_(source_ids)))
+        clauses.append((Timeline.source_id == source_id))

     if len(clauses) == 0:
         clauses.append((True))

View File

@@ -53,7 +53,6 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
         self.tensor_output_details: dict[str, Any] | None = None
         self.labelmap: dict[int, str] = {}
         self.classifications_per_second = EventsPerSecond()
-        self.state_history: dict[str, dict[str, Any]] = {}

         if (
             self.metrics
@@ -95,42 +94,6 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
         if self.inference_speed:
             self.inference_speed.update(duration)

-    def verify_state_change(self, camera: str, detected_state: str) -> str | None:
-        """
-        Verify state change requires 3 consecutive identical states before publishing.
-        Returns state to publish or None if verification not complete.
-        """
-        if camera not in self.state_history:
-            self.state_history[camera] = {
-                "current_state": None,
-                "pending_state": None,
-                "consecutive_count": 0,
-            }
-
-        verification = self.state_history[camera]
-
-        if detected_state == verification["current_state"]:
-            verification["pending_state"] = None
-            verification["consecutive_count"] = 0
-            return None
-
-        if detected_state == verification["pending_state"]:
-            verification["consecutive_count"] += 1
-
-            if verification["consecutive_count"] >= 3:
-                verification["current_state"] = detected_state
-                verification["pending_state"] = None
-                verification["consecutive_count"] = 0
-                return detected_state
-        else:
-            verification["pending_state"] = detected_state
-            verification["consecutive_count"] = 1
-            logger.debug(
-                f"New state '{detected_state}' detected for {camera}, need {3 - verification['consecutive_count']} more consecutive detections"
-            )
-
-        return None
-
     def process_frame(self, frame_data: dict[str, Any], frame: np.ndarray):
         if self.metrics and self.model_config.name in self.metrics.classification_cps:
             self.metrics.classification_cps[
@@ -168,19 +131,6 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
             self.last_run = now
             should_run = True

-        # Shortcut: always run if we have a pending state verification to complete
-        if (
-            not should_run
-            and camera in self.state_history
-            and self.state_history[camera]["pending_state"] is not None
-            and now > self.last_run + 0.5
-        ):
-            self.last_run = now
-            should_run = True
-            logger.debug(
-                f"Running verification check for pending state: {self.state_history[camera]['pending_state']} ({self.state_history[camera]['consecutive_count']}/3)"
-            )
-
         if not should_run:
             return
@@ -238,19 +188,10 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
             score,
         )

-        if score < self.model_config.threshold:
-            logger.debug(
-                f"Score {score} below threshold {self.model_config.threshold}, skipping verification"
-            )
-            return
-
-        detected_state = self.labelmap[best_id]
-        verified_state = self.verify_state_change(camera, detected_state)
-
-        if verified_state is not None:
+        if score >= self.model_config.threshold:
             self.requestor.send_data(
                 f"{camera}/classification/{self.model_config.name}",
-                verified_state,
+                self.labelmap[best_id],
             )

     def handle_request(self, topic, request_data):
@@ -289,7 +230,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
         self.sub_label_publisher = sub_label_publisher
         self.tensor_input_details: dict[str, Any] | None = None
         self.tensor_output_details: dict[str, Any] | None = None
-        self.classification_history: dict[str, list[tuple[str, float, float]]] = {}
+        self.detected_objects: dict[str, float] = {}
         self.labelmap: dict[int, str] = {}
         self.classifications_per_second = EventsPerSecond()
@@ -331,56 +272,6 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
         if self.inference_speed:
             self.inference_speed.update(duration)

-    def get_weighted_score(
-        self,
-        object_id: str,
-        current_label: str,
-        current_score: float,
-        current_time: float,
-    ) -> tuple[str | None, float]:
-        """
-        Determine weighted score based on history to prevent false positives/negatives.
-        Requires 60% of attempts to agree on a label before publishing.
-        Returns (weighted_label, weighted_score) or (None, 0.0) if no weighted score.
-        """
-        if object_id not in self.classification_history:
-            self.classification_history[object_id] = []
-
-        self.classification_history[object_id].append(
-            (current_label, current_score, current_time)
-        )
-
-        history = self.classification_history[object_id]
-
-        if len(history) < 3:
-            return None, 0.0
-
-        label_counts = {}
-        label_scores = {}
-        total_attempts = len(history)
-
-        for label, score, timestamp in history:
-            if label not in label_counts:
-                label_counts[label] = 0
-                label_scores[label] = []
-            label_counts[label] += 1
-            label_scores[label].append(score)
-
-        best_label = max(label_counts, key=label_counts.get)
-        best_count = label_counts[best_label]
-
-        consensus_threshold = total_attempts * 0.6
-
-        if best_count < consensus_threshold:
-            return None, 0.0
-
-        avg_score = sum(label_scores[best_label]) / len(label_scores[best_label])
-
-        if best_label == "none":
-            return None, 0.0
-
-        return best_label, avg_score
-
     def process_frame(self, obj_data, frame):
         if self.metrics and self.model_config.name in self.metrics.classification_cps:
             self.metrics.classification_cps[
@@ -393,9 +284,6 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
         if obj_data["label"] not in self.model_config.object_config.objects:
             return

-        if obj_data.get("end_time") is not None:
-            return
-
         now = datetime.datetime.now().timestamp()
         x, y, x2, y2 = calculate_region(
             frame.shape,
@@ -443,6 +331,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
         probs = res / res.sum(axis=0)
         best_id = np.argmax(probs)
         score = round(probs[best_id], 2)
+        previous_score = self.detected_objects.get(obj_data["id"], 0.0)

         self.__update_metrics(datetime.datetime.now().timestamp() - now)

         write_classification_attempt(
@@ -458,34 +347,30 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
             logger.debug(f"Score {score} is less than threshold.")
             return

+        if score <= previous_score:
+            logger.debug(f"Score {score} is worse than previous score {previous_score}")
+            return
+
         sub_label = self.labelmap[best_id]
+        self.detected_objects[obj_data["id"]] = score

-        consensus_label, consensus_score = self.get_weighted_score(
-            obj_data["id"], sub_label, score, now
-        )
-
-        if consensus_label is not None:
-            if (
-                self.model_config.object_config.classification_type
-                == ObjectClassificationType.sub_label
-            ):
-                self.sub_label_publisher.publish(
-                    (obj_data["id"], consensus_label, consensus_score),
-                    EventMetadataTypeEnum.sub_label,
-                )
-            elif (
-                self.model_config.object_config.classification_type
-                == ObjectClassificationType.attribute
-            ):
-                self.sub_label_publisher.publish(
-                    (
-                        obj_data["id"],
-                        self.model_config.name,
-                        consensus_label,
-                        consensus_score,
-                    ),
-                    EventMetadataTypeEnum.attribute.value,
-                )
+        if (
+            self.model_config.object_config.classification_type
+            == ObjectClassificationType.sub_label
+        ):
+            if sub_label != "none":
+                self.sub_label_publisher.publish(
+                    (obj_data["id"], sub_label, score),
+                    EventMetadataTypeEnum.sub_label,
+                )
+        elif (
+            self.model_config.object_config.classification_type
+            == ObjectClassificationType.attribute
+        ):
+            self.sub_label_publisher.publish(
+                (obj_data["id"], self.model_config.name, sub_label, score),
+                EventMetadataTypeEnum.attribute.value,
+            )

     def handle_request(self, topic, request_data):
@@ -503,8 +388,8 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
         return None

     def expire_object(self, object_id, camera):
-        if object_id in self.classification_history:
-            self.classification_history.pop(object_id)
+        if object_id in self.detected_objects:
+            self.detected_objects.pop(object_id)

     @staticmethod

View File

@@ -63,24 +63,18 @@ class GenAIClient:
             else:
                 return ""

-        def get_verified_object_prompt() -> str:
+        def get_verified_objects() -> str:
             if review_data["recognized_objects"]:
-                object_list = " - " + "\n - ".join(review_data["recognized_objects"])
-                return f"""## Verified Objects (USE THESE NAMES)
-When any of the following verified objects are present in the scene, you MUST use these exact names in your title and scene description:
-{object_list}
-"""
+                return " - " + "\n - ".join(review_data["recognized_objects"])
             else:
-                return ""
+                return " None"

         context_prompt = f"""
-Your task is to analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"].replace("_", " ")} security camera.
+Please analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"].replace("_", " ")} security camera.

-## Normal Activity Patterns for This Property
+**Normal activity patterns for this property:**
 {activity_context_prompt}

-## Task Instructions
 Your task is to provide a clear, accurate description of the scene that:
 1. States exactly what is happening based on observable actions and movements.
 2. Evaluates whether the observable evidence suggests normal activity for this property or genuine security concerns.
@@ -88,8 +82,6 @@ Your task is to provide a clear, accurate description of the scene that:
 **IMPORTANT: Start by checking if the activity matches the normal patterns above. If it does, assign Level 0. Only consider higher threat levels if the activity clearly deviates from normal patterns or shows genuine security concerns.**

-## Analysis Guidelines
-
 When forming your description:
 - **CRITICAL: Only describe objects explicitly listed in "Detected objects" below.** Do not infer or mention additional people, vehicles, or objects not present in the detected objects list, even if visual patterns suggest them. If only a car is detected, do not describe a person interacting with it unless "person" is also in the detected objects list.
 - **Only describe actions actually visible in the frames.** Do not assume or infer actions that you don't observe happening. If someone walks toward furniture but you never see them sit, do not say they sat. Stick to what you can see across the sequence.
@@ -100,8 +92,6 @@ When forming your description:
 - Identify patterns that suggest genuine security concerns: testing doors/windows on vehicles or buildings, accessing unauthorized areas, attempting to conceal actions, extended loitering without apparent purpose, taking items, behavior that clearly doesn't align with the zone context and detected objects.
 - **Weigh all evidence holistically**: Start by checking if the activity matches the normal patterns above. If it does, assign Level 0. Only consider Level 1 if the activity clearly deviates from normal patterns or shows genuine security concerns that warrant attention.

-## Response Format
-
 Your response MUST be a flat JSON object with:
 - `title` (string): A concise, one-sentence title that captures the main activity. Include any verified recognized objects (from the "Verified recognized objects" list below) and key detected objects. Examples: "Joe walking dog in backyard", "Unknown person testing car doors at night".
 - `scene` (string): A narrative description of what happens across the sequence from start to finish. **Only describe actions you can actually observe happening in the frames provided.** Do not infer or assume actions that aren't visible (e.g., if you see someone walking but never see them sit, don't say they sat down). Include setting, detected objects, and their observable actions. Avoid speculation or filling in assumed behaviors. Your description should align with and support the threat level you assign.
@@ -109,22 +99,20 @@ Your response MUST be a flat JSON object with:
 - `potential_threat_level` (integer): 0, 1, or 2 as defined below. Your threat level must be consistent with your scene description and the guidance above.
 {get_concern_prompt()}

-## Threat Level Definitions
+Threat-level definitions:
 - 0 — **Normal activity (DEFAULT)**: What you observe matches the normal activity patterns above or is consistent with expected activity for this property type. The observable evidence—considering zone context, detected objects, and timing together—supports a benign explanation. **Use this level for routine activities even if minor ambiguous elements exist.**
 - 1 — **Potentially suspicious**: Observable behavior raises genuine security concerns that warrant human review. The evidence doesn't support a routine explanation and clearly deviates from the normal patterns above. Examples: testing doors/windows on vehicles or structures, accessing areas that don't align with the activity, taking items that likely don't belong to them, behavior clearly inconsistent with the zone and context, or activity that lacks any visible legitimate indicators. **Only use this level when the activity clearly doesn't match normal patterns.**
 - 2 — **Immediate threat**: Clear evidence of forced entry, break-in, vandalism, aggression, weapons, theft in progress, or active property damage.

-## Sequence Details
+Sequence details:
 - Frame 1 = earliest, Frame {len(thumbnails)} = latest
 - Activity started at {review_data["start"]} and lasted {review_data["duration"]} seconds
 - Detected objects: {", ".join(review_data["objects"])}
+- Verified recognized objects (use these names when describing these objects):
+{get_verified_objects()}
 - Zones involved: {", ".join(z.replace("_", " ").title() for z in review_data["zones"]) or "None"}
-{get_verified_object_prompt()}

-## Important Notes
+**IMPORTANT:**
 - Values must be plain strings, floats, or integers — no nested objects, no extra commentary.
 - Only describe objects from the "Detected objects" list above. Do not hallucinate additional objects.
 {get_language_prompt()}

View File

@@ -11,80 +11,38 @@ import {
 import { TooltipPortal } from "@radix-ui/react-tooltip";
 import { cn } from "@/lib/utils";
 import { useTranslation } from "react-i18next";
-import { Event } from "@/types/event";

 type ObjectTrackOverlayProps = {
   camera: string;
+  selectedObjectId: string;
   showBoundingBoxes?: boolean;
   currentTime: number;
   videoWidth: number;
   videoHeight: number;
   className?: string;
   onSeekToTime?: (timestamp: number, play?: boolean) => void;
+  objectTimeline?: ObjectLifecycleSequence[];
 };

-type PathPoint = {
-  x: number;
-  y: number;
-  timestamp: number;
-  lifecycle_item?: ObjectLifecycleSequence;
-  objectId: string;
-};
-
-type ObjectData = {
-  objectId: string;
-  label: string;
-  color: string;
-  pathPoints: PathPoint[];
-  currentZones: string[];
-  currentBox?: number[];
-};
-
 export default function ObjectTrackOverlay({
   camera,
+  selectedObjectId,
   showBoundingBoxes = false,
   currentTime,
   videoWidth,
   videoHeight,
   className,
   onSeekToTime,
+  objectTimeline,
 }: ObjectTrackOverlayProps) {
   const { t } = useTranslation("views/events");
   const { data: config } = useSWR<FrigateConfig>("config");
-  const { annotationOffset, selectedObjectIds } = useDetailStream();
+  const { annotationOffset } = useDetailStream();

   const effectiveCurrentTime = currentTime - annotationOffset / 1000;

-  // Fetch all event data in a single request (CSV ids)
-  const { data: eventsData } = useSWR<Event[]>(
-    selectedObjectIds.length > 0
-      ? ["event_ids", { ids: selectedObjectIds.join(",") }]
-      : null,
-  );
-
-  // Fetch timeline data for each object ID using fixed number of hooks
-  const { data: timelineData } = useSWR<ObjectLifecycleSequence[]>(
-    selectedObjectIds.length > 0
-      ? `timeline?source_id=${selectedObjectIds.join(",")}&limit=1000`
-      : null,
-    { revalidateOnFocus: false },
-  );
-
-  const timelineResults = useMemo(() => {
-    // Group timeline entries by source_id
-    if (!timelineData) return selectedObjectIds.map(() => []);
-    const grouped: Record<string, ObjectLifecycleSequence[]> = {};
-    for (const entry of timelineData) {
-      if (!grouped[entry.source_id]) {
-        grouped[entry.source_id] = [];
-      }
-      grouped[entry.source_id].push(entry);
-    }
-    // Return timeline arrays in the same order as selectedObjectIds
-    return selectedObjectIds.map((id) => grouped[id] || []);
-  }, [selectedObjectIds, timelineData]);
+  // Fetch the full event data to get saved path points
+  const { data: eventData } = useSWR(["event_ids", { ids: selectedObjectId }]);

   const typeColorMap = useMemo(
     () => ({
@@ -100,18 +58,16 @@ export default function ObjectTrackOverlay({
     [],
   );

-  const getObjectColor = useCallback(
-    (label: string, objectId: string) => {
+  const getObjectColor = useMemo(() => {
+    return (label: string) => {
       const objectColor = config?.model?.colormap[label];
       if (objectColor) {
         const reversed = [...objectColor].reverse();
         return `rgb(${reversed.join(",")})`;
       }
-      // Fallback to deterministic color based on object ID
-      return generateColorFromId(objectId);
-    },
-    [config],
-  );
+      return "rgb(255, 0, 0)"; // fallback red
+    };
+  }, [config]);

   const getZoneColor = useCallback(
     (zoneName: string) => {
@@ -125,121 +81,125 @@ export default function ObjectTrackOverlay({
     [config, camera],
   );

-  // Build per-object data structures
-  const objectsData = useMemo<ObjectData[]>(() => {
-    if (!eventsData || !Array.isArray(eventsData)) return [];
-    if (config?.cameras[camera]?.onvif.autotracking.enabled_in_config)
-      return [];
-
-    return selectedObjectIds
-      .map((objectId, index) => {
-        const eventData = eventsData.find((e) => e.id === objectId);
-        const timelineData = timelineResults[index];
-
-        // get saved path points from event
-        const savedPathPoints: PathPoint[] =
-          eventData?.data?.path_data?.map(
-            ([coords, timestamp]: [number[], number]) => ({
-              x: coords[0],
-              y: coords[1],
-              timestamp,
-              lifecycle_item: undefined,
-              objectId,
-            }),
-          ) || [];
-
-        // timeline points for this object
-        const eventSequencePoints: PathPoint[] =
-          timelineData
-            ?.filter(
-              (event: ObjectLifecycleSequence) => event.data.box !== undefined,
-            )
-            .map((event: ObjectLifecycleSequence) => {
-              const [left, top, width, height] = event.data.box!;
-              return {
-                x: left + width / 2, // Center x
-                y: top + height, // Bottom y
-                timestamp: event.timestamp,
-                lifecycle_item: event,
-                objectId,
-              };
-            }) || [];
-
-        // show full path once current time has reached the object's start time
-        const combinedPoints = [...savedPathPoints, ...eventSequencePoints]
-          .sort((a, b) => a.timestamp - b.timestamp)
-          .filter(
-            (point) =>
-              currentTime >= (eventData?.start_time ?? 0) &&
-              point.timestamp >= (eventData?.start_time ?? 0) &&
-              point.timestamp <= (eventData?.end_time ?? Infinity),
-          );
-
-        // Get color for this object
-        const label = eventData?.label || "unknown";
-        const color = getObjectColor(label, objectId);
-
-        // Get current zones
-        const currentZones =
-          timelineData
-            ?.filter(
-              (event: ObjectLifecycleSequence) =>
-                event.timestamp <= effectiveCurrentTime,
-            )
-            .sort(
-              (a: ObjectLifecycleSequence, b: ObjectLifecycleSequence) =>
-                b.timestamp - a.timestamp,
-            )[0]?.data?.zones || [];
-
-        // Get current bounding box
-        const currentBox = timelineData
-          ?.filter(
-            (event: ObjectLifecycleSequence) =>
-              event.timestamp <= effectiveCurrentTime && event.data.box,
-          )
-          .sort(
-            (a: ObjectLifecycleSequence, b: ObjectLifecycleSequence) =>
-              b.timestamp - a.timestamp,
-          )[0]?.data?.box;
-
-        return {
-          objectId,
-          label,
-          color,
-          pathPoints: combinedPoints,
-          currentZones,
-          currentBox,
-        };
-      })
-      .filter((obj: ObjectData) => obj.pathPoints.length > 0); // Only include objects with path data
-  }, [
-    eventsData,
-    selectedObjectIds,
-    timelineResults,
-    currentTime,
-    effectiveCurrentTime,
-    getObjectColor,
-    config,
-    camera,
-  ]);
-
-  // Collect all zones across all objects
-  const allZones = useMemo(() => {
-    if (!config?.cameras?.[camera]?.zones) return [];
-    const zoneNames = new Set<string>();
-    objectsData.forEach((obj) => {
-      obj.currentZones.forEach((zone) => zoneNames.add(zone));
-    });
+  const currentObjectZones = useMemo(() => {
+    if (!objectTimeline) return [];
+
+    // Find the most recent timeline event at or before effective current time
+    const relevantEvents = objectTimeline
+      .filter((event) => event.timestamp <= effectiveCurrentTime)
+      .sort((a, b) => b.timestamp - a.timestamp); // Most recent first
+
+    // Get zones from the most recent event
+    return relevantEvents[0]?.data?.zones || [];
+  }, [objectTimeline, effectiveCurrentTime]);
+
+  const zones = useMemo(() => {
+    if (!config?.cameras?.[camera]?.zones || !currentObjectZones.length)
+      return [];
+
     return Object.entries(config.cameras[camera].zones)
-      .filter(([name]) => zoneNames.has(name))
+      .filter(([name]) => currentObjectZones.includes(name))
       .map(([name, zone]) => ({
         name,
         coordinates: zone.coordinates,
         color: getZoneColor(name),
       }));
-  }, [config, camera, objectsData, getZoneColor]);
+  }, [config, camera, getZoneColor, currentObjectZones]);
+
+  // get saved path points from event
+  const savedPathPoints = useMemo(() => {
+    return (
+      eventData?.[0].data?.path_data?.map(
+        ([coords, timestamp]: [number[], number]) => ({
+          x: coords[0],
+          y: coords[1],
+          timestamp,
+          lifecycle_item: undefined,
+        }),
+      ) || []
+    );
+  }, [eventData]);
+
+  // timeline points for selected event
+  const eventSequencePoints = useMemo(() => {
+    return (
+      objectTimeline
+        ?.filter((event) => event.data.box !== undefined)
+        .map((event) => {
+          const [left, top, width, height] = event.data.box!;
+          return {
+            x: left + width / 2, // Center x
+            y: top + height, // Bottom y
+            timestamp: event.timestamp,
+            lifecycle_item: event,
+          };
+        }) || []
+    );
+  }, [objectTimeline]);
+
+  // final object path with timeline points included
+  const pathPoints = useMemo(() => {
+    // don't display a path for autotracking cameras
+    if (config?.cameras[camera]?.onvif.autotracking.enabled_in_config)
+      return [];
+
+    const combinedPoints = [...savedPathPoints, ...eventSequencePoints].sort(
+      (a, b) => a.timestamp - b.timestamp,
+    );
+
+    // Filter points around current time (within a reasonable window)
+    const timeWindow = 30; // 30 seconds window
+    return combinedPoints.filter(
+      (point) =>
+        point.timestamp >= currentTime - timeWindow &&
+        point.timestamp <= currentTime + timeWindow,
+    );
+  }, [savedPathPoints, eventSequencePoints, config, camera, currentTime]);
+
+  // get absolute positions on the svg canvas for each point
+  const absolutePositions = useMemo(() => {
+    if (!pathPoints) return [];
+    return pathPoints.map((point) => {
+      // Find the corresponding timeline entry for this point
+      const timelineEntry = objectTimeline?.find(
+        (entry) => entry.timestamp == point.timestamp,
+      );
+      return {
+        x: point.x * videoWidth,
+        y: point.y * videoHeight,
+        timestamp: point.timestamp,
+        lifecycle_item:
+          timelineEntry ||
+          (point.box // normal path point
+            ? {
+                timestamp: point.timestamp,
+                camera: camera,
+                source: "tracked_object",
+                source_id: selectedObjectId,
+                class_type: "visible" as LifecycleClassType,
+                data: {
+                  camera: camera,
+                  label: point.label,
+                  sub_label: "",
+                  box: point.box,
+                  region: [0, 0, 0, 0], // placeholder
+                  attribute: "",
+                  zones: [],
+                },
+              }
+            : undefined),
+      };
+    });
+  }, [
+    pathPoints,
+    videoWidth,
+    videoHeight,
+    objectTimeline,
+    camera,
+    selectedObjectId,
+  ]);

   const generateStraightPath = useCallback(
     (points: { x: number; y: number }[]) => {
@@ -254,20 +214,15 @@ export default function ObjectTrackOverlay({
   );

   const getPointColor = useCallback(
-    (baseColorString: string, type?: string) => {
+    (baseColor: number[], type?: string) => {
       if (type && typeColorMap[type as keyof typeof typeColorMap]) {
         const typeColor = typeColorMap[type as keyof typeof typeColorMap];
         if (typeColor) {
           return `rgb(${typeColor.join(",")})`;
         }
       }
-      // Parse and darken base color slightly for path points
-      const match = baseColorString.match(/\d+/g);
-      if (match) {
-        const [r, g, b] = match.map(Number);
-        return `rgb(${Math.max(0, r - 10)}, ${Math.max(0, g - 10)}, ${Math.max(0, b - 10)})`;
-      }
-      return baseColorString;
+      // normal path point
+      return `rgb(${baseColor.map((c) => Math.max(0, c - 10)).join(",")})`;
     },
     [typeColorMap],
   );
@@ -279,8 +234,49 @@ export default function ObjectTrackOverlay({
     [onSeekToTime],
   );

+  // render bounding box for object at current time if we have a timeline entry
+  const currentBoundingBox = useMemo(() => {
+    if (!objectTimeline) return null;
+
+    // Find the most recent timeline event at or before effective current time with a bounding box
+    const relevantEvents = objectTimeline
+      .filter(
+        (event) => event.timestamp <= effectiveCurrentTime && event.data.box,
+      )
+      .sort((a, b) => b.timestamp - a.timestamp); // Most recent first
+
+    const currentEvent = relevantEvents[0];
+    if (!currentEvent?.data.box) return null;
+
+    const [left, top, width, height] = currentEvent.data.box;
+
+    return {
+      left,
+      top,
+      width,
+      height,
+      centerX: left + width / 2,
+      centerY: top + height,
+    };
+  }, [objectTimeline, effectiveCurrentTime]);
+
+  const objectColor = useMemo(() => {
+    return pathPoints[0]?.label
+      ? getObjectColor(pathPoints[0].label)
+      : "rgb(255, 0, 0)";
+  }, [pathPoints, getObjectColor]);
+
+  const objectColorArray = useMemo(() => {
+    return pathPoints[0]?.label
+      ? getObjectColor(pathPoints[0].label).match(/\d+/g)?.map(Number) || [
+          255, 0, 0,
+        ]
+      : [255, 0, 0];
+  }, [pathPoints, getObjectColor]);
+
+  // render any zones for object at current time
   const zonePolygons = useMemo(() => {
-    return allZones.map((zone) => {
+    return zones.map((zone) => {
       // Convert zone coordinates from normalized (0-1) to pixel coordinates
       const points = zone.coordinates
         .split(",")
@@ -302,9 +298,9 @@ export default function ObjectTrackOverlay({
         stroke: zone.color,
       };
     });
-  }, [allZones, videoWidth, videoHeight]);
+  }, [zones, videoWidth, videoHeight]);

-  if (objectsData.length === 0 || !config) {
+  if (!pathPoints.length || !config) {
     return null;
   }
@@ -329,102 +325,73 @@ export default function ObjectTrackOverlay({
           />
         ))}

-        {objectsData.map((objData) => {
-          const absolutePositions = objData.pathPoints.map((point) => ({
-            x: point.x * videoWidth,
-            y: point.y * videoHeight,
-            timestamp: point.timestamp,
-            lifecycle_item: point.lifecycle_item,
-          }));
-
-          return (
-            <g key={objData.objectId}>
-              {absolutePositions.length > 1 && (
-                <path
-                  d={generateStraightPath(absolutePositions)}
-                  fill="none"
-                  stroke={objData.color}
-                  strokeWidth="5"
-                  strokeLinecap="round"
-                  strokeLinejoin="round"
-                />
-              )}
-
-              {absolutePositions.map((pos, index) => (
-                <Tooltip key={`${objData.objectId}-point-${index}`}>
-                  <TooltipTrigger asChild>
-                    <circle
-                      cx={pos.x}
-                      cy={pos.y}
-                      r="7"
-                      fill={getPointColor(
-                        objData.color,
-                        pos.lifecycle_item?.class_type,
-                      )}
-                      stroke="white"
-                      strokeWidth="3"
-                      style={{ cursor: onSeekToTime ? "pointer" : "default" }}
-                      onClick={() => handlePointClick(pos.timestamp)}
-                    />
-                  </TooltipTrigger>
-                  <TooltipPortal>
-                    <TooltipContent side="top" className="smart-capitalize">
-                      {pos.lifecycle_item
-                        ? `${pos.lifecycle_item.class_type.replace("_", " ")} at ${new Date(pos.timestamp * 1000).toLocaleTimeString()}`
-                        : t("objectTrack.trackedPoint")}
-                      {onSeekToTime && (
-                        <div className="mt-1 text-xs normal-case text-muted-foreground">
-                          {t("objectTrack.clickToSeek")}
-                        </div>
-                      )}
-                    </TooltipContent>
-                  </TooltipPortal>
-                </Tooltip>
-              ))}
-
-              {objData.currentBox && showBoundingBoxes && (
-                <g>
-                  <rect
-                    x={objData.currentBox[0] * videoWidth}
-                    y={objData.currentBox[1] * videoHeight}
-                    width={objData.currentBox[2] * videoWidth}
-                    height={objData.currentBox[3] * videoHeight}
-                    fill="none"
-                    stroke={objData.color}
-                    strokeWidth="5"
-                    opacity="0.9"
-                  />
-                  <circle
-                    cx={
-                      (objData.currentBox[0] + objData.currentBox[2] / 2) *
-                      videoWidth
-                    }
-                    cy={
-                      (objData.currentBox[1] + objData.currentBox[3]) *
-                      videoHeight
-                    }
-                    r="5"
-                    fill="rgb(255, 255, 0)" // yellow highlight
-                    stroke={objData.color}
-                    strokeWidth="5"
-                    opacity="1"
-                  />
-                </g>
-              )}
-            </g>
-          );
-        })}
+        {absolutePositions.length > 1 && (
+          <path
+            d={generateStraightPath(absolutePositions)}
+            fill="none"
+            stroke={objectColor}
+            strokeWidth="5"
+            strokeLinecap="round"
+            strokeLinejoin="round"
+          />
+        )}
+
+        {absolutePositions.map((pos, index) => (
+          <Tooltip key={`point-${index}`}>
+            <TooltipTrigger asChild>
+              <circle
+                cx={pos.x}
+                cy={pos.y}
+                r="7"
+                fill={getPointColor(
+                  objectColorArray,
+                  pos.lifecycle_item?.class_type,
+                )}
+                stroke="white"
+                strokeWidth="3"
+                style={{ cursor: onSeekToTime ? "pointer" : "default" }}
+                onClick={() => handlePointClick(pos.timestamp)}
+              />
+            </TooltipTrigger>
+            <TooltipPortal>
+              <TooltipContent side="top" className="smart-capitalize">
+                {pos.lifecycle_item
+                  ? `${pos.lifecycle_item.class_type.replace("_", " ")} at ${new Date(pos.timestamp * 1000).toLocaleTimeString()}`
+                  : t("objectTrack.trackedPoint")}
+                {onSeekToTime && (
+                  <div className="mt-1 text-xs text-muted-foreground">
+                    {t("objectTrack.clickToSeek")}
+                  </div>
+                )}
+              </TooltipContent>
+            </TooltipPortal>
+          </Tooltip>
+        ))}
+
+        {currentBoundingBox && showBoundingBoxes && (
+          <g>
+            <rect
+              x={currentBoundingBox.left * videoWidth}
+              y={currentBoundingBox.top * videoHeight}
+              width={currentBoundingBox.width * videoWidth}
+              height={currentBoundingBox.height * videoHeight}
+              fill="none"
+              stroke={objectColor}
+              strokeWidth="5"
+              opacity="0.9"
+            />
+
+            <circle
+              cx={currentBoundingBox.centerX * videoWidth}
+              cy={currentBoundingBox.centerY * videoHeight}
+              r="5"
+              fill="rgb(255, 255, 0)" // yellow highlight
+              stroke={objectColor}
+              strokeWidth="5"
+              opacity="1"
+            />
+          </g>
+        )}
       </svg>
   );
 }
-
-// Generate a deterministic HSL color from a string (object ID)
-function generateColorFromId(id: string): string {
-  let hash = 0;
-  for (let i = 0; i < id.length; i++) {
-    hash = id.charCodeAt(i) + ((hash << 5) - hash);
-  }
-  // Use golden ratio to distribute hues evenly
-  const hue = (hash * 137.508) % 360;
-  return `hsl(${hue}, 70%, 50%)`;
-}

View File

@@ -94,10 +94,6 @@ export default function ObjectLifecycle({
     );
   }, [config, event]);

-  const label = event.sub_label
-    ? event.sub_label
-    : getTranslatedLabel(event.label);
-
   const getZoneColor = useCallback(
     (zoneName: string) => {
       const zoneColor =
@@ -632,29 +628,17 @@ export default function ObjectLifecycle({
                 }}
                 role="button"
               >
-                <div
-                  className={cn(
-                    "relative ml-2 rounded-full bg-muted-foreground p-2",
-                  )}
-                >
+                <div className={cn("ml-1 rounded-full bg-muted-foreground p-2")}>
                   {getIconForLabel(
-                    event.sub_label ? event.label + "-verified" : event.label,
-                    "size-4 text-white",
+                    event.label,
+                    "size-6 text-primary dark:text-white",
                   )}
                 </div>
-                <div className="flex items-center gap-2">
-                  <span className="capitalize">{label}</span>
+                <div className="flex items-end gap-2">
+                  <span>{getTranslatedLabel(event.label)}</span>
                   <span className="text-secondary-foreground">
                     {formattedStart ?? ""} - {formattedEnd ?? ""}
                   </span>
-                  {event.data?.recognized_license_plate && (
-                    <>
-                      ·{" "}
-                      <span className="text-sm text-secondary-foreground">
-                        {event.data.recognized_license_plate}
-                      </span>
-                    </>
-                  )}
                 </div>
               </div>
             </div>

View File

@@ -20,6 +20,7 @@ import { cn } from "@/lib/utils";
 import { ASPECT_VERTICAL_LAYOUT, RecordingPlayerError } from "@/types/record";
 import { useTranslation } from "react-i18next";
 import ObjectTrackOverlay from "@/components/overlay/ObjectTrackOverlay";
+import { DetailStreamContextType } from "@/context/detail-stream-context";

 // Android native hls does not seek correctly
 const USE_NATIVE_HLS = !isAndroid;
@@ -53,11 +54,8 @@ type HlsVideoPlayerProps = {
   onUploadFrame?: (playTime: number) => Promise<AxiosResponse> | undefined;
   toggleFullscreen?: () => void;
   onError?: (error: RecordingPlayerError) => void;
-  isDetailMode?: boolean;
-  camera?: string;
-  currentTimeOverride?: number;
+  detail?: Partial<DetailStreamContextType>;
 };

 export default function HlsVideoPlayer({
   videoRef,
   containerRef,
@@ -77,15 +75,17 @@ export default function HlsVideoPlayer({
   onUploadFrame,
   toggleFullscreen,
   onError,
-  isDetailMode = false,
-  camera,
-  currentTimeOverride,
+  detail,
 }: HlsVideoPlayerProps) {
   const { t } = useTranslation("components/player");
   const { data: config } = useSWR<FrigateConfig>("config");

   // for detail stream context in History
-  const currentTime = currentTimeOverride;
+  const selectedObjectId = detail?.selectedObjectId;
+  const selectedObjectTimeline = detail?.selectedObjectTimeline;
+  const currentTime = detail?.currentTime;
+  const camera = detail?.camera;
+  const isDetailMode = detail?.isDetailMode ?? false;

   // playback
@@ -316,14 +316,16 @@ export default function HlsVideoPlayer({
         }}
       >
         {isDetailMode &&
+          selectedObjectId &&
           camera &&
           currentTime &&
           videoDimensions.width > 0 &&
           videoDimensions.height > 0 && (
             <div className="absolute z-50 size-full">
               <ObjectTrackOverlay
-                key={`overlay-${currentTime}`}
+                key={`${selectedObjectId}-${currentTime}`}
                 camera={camera}
+                selectedObjectId={selectedObjectId}
                 showBoundingBoxes={!isPlaying}
                 currentTime={currentTime}
                 videoWidth={videoDimensions.width}
@@ -334,6 +336,7 @@ export default function HlsVideoPlayer({
                     onSeekToTime(timestamp, play);
                   }
                 }}
+                objectTimeline={selectedObjectTimeline}
               />
             </div>
           )}

View File

@@ -61,11 +61,7 @@ export default function DynamicVideoPlayer({
   const { data: config } = useSWR<FrigateConfig>("config");

   // for detail stream context in History
-  const {
-    isDetailMode,
-    camera: contextCamera,
-    currentTime,
-  } = useDetailStream();
+  const detail = useDetailStream();

   // controlling playback
@@ -299,9 +295,7 @@ export default function DynamicVideoPlayer({
             setIsBuffering(true);
           }
         }}
-        isDetailMode={isDetailMode}
-        camera={contextCamera || camera}
-        currentTimeOverride={currentTime}
+        detail={detail}
       />
       <PreviewPlayer
         className={cn(

View File

@@ -171,11 +171,7 @@ export default function DetailStream({
       <FrigatePlusDialog
         upload={upload}
         onClose={() => setUpload(undefined)}
-        onEventUploaded={() => {
-          if (upload) {
-            upload.plus_id = "new_upload";
-          }
-        }}
+        onEventUploaded={() => setUpload(undefined)}
       />

       <div
@@ -258,9 +254,7 @@ function ReviewGroup({
   const rawIconLabels: string[] = [
     ...(fetchedEvents
-      ? fetchedEvents.map((e) =>
-          e.sub_label ? e.label + "-verified" : e.label,
-        )
+      ? fetchedEvents.map((e) => e.label)
       : (review.data?.objects ?? [])),
     ...(review.data?.audio ?? []),
   ];
@@ -323,7 +317,7 @@ function ReviewGroup({
       <div className="ml-1 flex flex-col items-start gap-1.5">
         <div className="flex flex-row gap-3">
           <div className="text-sm font-medium">{displayTime}</div>
-          <div className="relative flex items-center gap-2 text-white">
+          <div className="flex items-center gap-2">
             {iconLabels.slice(0, 5).map((lbl, idx) => (
               <div
                 key={`${lbl}-${idx}`}
@@ -429,34 +423,30 @@ function EventList({
 }: EventListProps) {
   const { data: config } = useSWR<FrigateConfig>("config");
-  const { selectedObjectIds, toggleObjectSelection } = useDetailStream();
-  const isSelected = selectedObjectIds.includes(event.id);
-  const label = event.sub_label || getTranslatedLabel(event.label);
+  const { selectedObjectId, setSelectedObjectId } = useDetailStream();

   const handleObjectSelect = (event: Event | undefined) => {
     if (event) {
-      // onSeek(event.start_time ?? 0);
-      toggleObjectSelection(event.id);
+      onSeek(event.start_time ?? 0);
+      setSelectedObjectId(event.id);
     } else {
-      toggleObjectSelection(undefined);
+      setSelectedObjectId(undefined);
     }
   };

-  // Clear selection when effectiveTime has passed this event's end_time
+  // Clear selectedObjectId when effectiveTime has passed this event's end_time
   useEffect(() => {
-    if (isSelected && effectiveTime && event.end_time) {
+    if (selectedObjectId === event.id && effectiveTime && event.end_time) {
       if (effectiveTime >= event.end_time) {
-        toggleObjectSelection(event.id);
+        setSelectedObjectId(undefined);
       }
     }
   }, [
-    isSelected,
+    selectedObjectId,
     event.id,
     event.end_time,
     effectiveTime,
-    toggleObjectSelection,
+    setSelectedObjectId,
   ]);

   return (
@@ -464,59 +454,48 @@ function EventList({
     <div
       className={cn(
         "rounded-md bg-secondary p-2",
-        isSelected
+        event.id == selectedObjectId
           ? "bg-secondary-highlight"
           : "outline-transparent duration-500",
-        !isSelected &&
+        event.id != selectedObjectId &&
           (effectiveTime ?? 0) >= (event.start_time ?? 0) - 0.5 &&
           (effectiveTime ?? 0) <=
             (event.end_time ?? event.start_time ?? 0) + 0.5 &&
           "bg-secondary-highlight",
       )}
     >
-      <div className="ml-1.5 flex w-full items-end justify-between">
-        <div className="flex flex-1 items-center gap-2 text-sm font-medium">
+      <div className="ml-1.5 flex w-full items-center justify-between">
+        <div
+          className="flex items-center gap-2 text-sm font-medium"
+          onClick={(e) => {
+            e.stopPropagation();
+            handleObjectSelect(
+              event.id == selectedObjectId ? undefined : event,
+            );
+          }}
+          role="button"
+        >
           <div
             className={cn(
-              "relative rounded-full p-1 text-white",
-              isSelected ? "bg-selected" : "bg-muted-foreground",
+              "rounded-full p-1",
+              event.id == selectedObjectId
+                ? "bg-selected"
+                : "bg-muted-foreground",
             )}
-            onClick={(e) => {
-              e.stopPropagation();
-              handleObjectSelect(isSelected ? undefined : event);
-            }}
           >
-            {getIconForLabel(
-              event.sub_label ? event.label + "-verified" : event.label,
-              "size-3 text-white",
-            )}
+            {getIconForLabel(event.label, "size-3 text-white")}
           </div>
-          <div
-            className="flex flex-1 items-center gap-2"
-            onClick={(e) => {
-              e.stopPropagation();
-              onSeek(event.start_time ?? 0);
-            }}
-            role="button"
-          >
-            <span className="capitalize">{label}</span>
-            {event.data?.recognized_license_plate && (
-              <>
-                ·{" "}
-                <span className="text-sm text-secondary-foreground">
-                  {event.data.recognized_license_plate}
-                </span>
-              </>
-            )}
+          <div className="flex items-end gap-2">
+            <span>{getTranslatedLabel(event.label)}</span>
           </div>
         </div>
-        <div className="mr-2 flex flex-row justify-end">
+        <div className="mr-2 flex flex-1 flex-row justify-end">
          <EventMenu
            event={event}
            config={config}
            onOpenUpload={(e) => onOpenUpload?.(e)}
-            isSelected={isSelected}
-            onToggleSelection={handleObjectSelect}
+            selectedObjectId={selectedObjectId}
+            setSelectedObjectId={handleObjectSelect}
          />
        </div>
      </div>

View File

@@ -12,15 +12,14 @@ import { useNavigate } from "react-router-dom";
 import { useTranslation } from "react-i18next";
 import { Event } from "@/types/event";
 import { FrigateConfig } from "@/types/frigateConfig";
-import { useState } from "react";

 type EventMenuProps = {
   event: Event;
   config?: FrigateConfig;
   onOpenUpload?: (e: Event) => void;
   onOpenSimilarity?: (e: Event) => void;
-  isSelected?: boolean;
-  onToggleSelection?: (event: Event | undefined) => void;
+  selectedObjectId?: string;
+  setSelectedObjectId?: (event: Event | undefined) => void;
 };

 export default function EventMenu({
@@ -28,26 +27,25 @@ export default function EventMenu({
   config,
   onOpenUpload,
   onOpenSimilarity,
-  isSelected = false,
-  onToggleSelection,
+  selectedObjectId,
+  setSelectedObjectId,
 }: EventMenuProps) {
   const apiHost = useApiHost();
   const navigate = useNavigate();
   const { t } = useTranslation("views/explore");
-  const [isOpen, setIsOpen] = useState(false);

   const handleObjectSelect = () => {
-    if (isSelected) {
-      onToggleSelection?.(undefined);
+    if (event.id === selectedObjectId) {
+      setSelectedObjectId?.(undefined);
     } else {
-      onToggleSelection?.(event);
+      setSelectedObjectId?.(event);
     }
   };

   return (
     <>
       <span tabIndex={0} className="sr-only" />
-      <DropdownMenu open={isOpen} onOpenChange={setIsOpen}>
+      <DropdownMenu>
         <DropdownMenuTrigger>
           <div className="rounded p-1 pr-2" role="button">
             <HiDotsHorizontal className="size-4 text-muted-foreground" />
@@ -56,7 +54,7 @@ export default function EventMenu({
         <DropdownMenuPortal>
           <DropdownMenuContent>
             <DropdownMenuItem onSelect={handleObjectSelect}>
-              {isSelected
+              {event.id === selectedObjectId
                 ? t("itemMenu.hideObjectDetails.label")
                 : t("itemMenu.showObjectDetails.label")}
             </DropdownMenuItem>
@@ -87,7 +85,6 @@ export default function EventMenu({
               config?.plus?.enabled && (
                 <DropdownMenuItem
                   onSelect={() => {
-                    setIsOpen(false);
                     onOpenUpload?.(event);
                   }}
                 >

View File

@@ -1,14 +1,16 @@
 import React, { createContext, useContext, useState, useEffect } from "react";
 import { FrigateConfig } from "@/types/frigateConfig";
 import useSWR from "swr";
+import { ObjectLifecycleSequence } from "@/types/timeline";

 export interface DetailStreamContextType {
-  selectedObjectIds: string[];
+  selectedObjectId: string | undefined;
+  selectedObjectTimeline?: ObjectLifecycleSequence[];
   currentTime: number;
   camera: string;
   annotationOffset: number; // milliseconds
   setAnnotationOffset: (ms: number) => void;
-  toggleObjectSelection: (id: string | undefined) => void;
+  setSelectedObjectId: (id: string | undefined) => void;
   isDetailMode: boolean;
 }
@@ -29,21 +31,13 @@ export function DetailStreamProvider({
   currentTime,
   camera,
 }: DetailStreamProviderProps) {
-  const [selectedObjectIds, setSelectedObjectIds] = useState<string[]>([]);
-
-  const toggleObjectSelection = (id: string | undefined) => {
-    if (id === undefined) {
-      setSelectedObjectIds([]);
-    } else {
-      setSelectedObjectIds((prev) => {
-        if (prev.includes(id)) {
-          return prev.filter((existingId) => existingId !== id);
-        } else {
-          return [...prev, id];
-        }
-      });
-    }
-  };
+  const [selectedObjectId, setSelectedObjectId] = useState<
+    string | undefined
+  >();
+
+  const { data: selectedObjectTimeline } = useSWR<ObjectLifecycleSequence[]>(
+    selectedObjectId ? ["timeline", { source_id: selectedObjectId }] : null,
+  );

   const { data: config } = useSWR<FrigateConfig>("config");
@@ -59,12 +53,13 @@ export function DetailStreamProvider({
   }, [config, camera]);

   const value: DetailStreamContextType = {
-    selectedObjectIds,
+    selectedObjectId,
+    selectedObjectTimeline,
     currentTime,
     camera,
     annotationOffset,
     setAnnotationOffset,
-    toggleObjectSelection,
+    setSelectedObjectId,
     isDetailMode,
   };

View File

@@ -22,7 +22,6 @@ export interface Event {
     area: number;
     ratio: number;
     type: "object" | "audio" | "manual";
-    recognized_license_plate?: string;
     path_data: [number[], number][];
   };
 }

View File

@@ -1,7 +1,6 @@
 import { ObjectLifecycleSequence } from "@/types/timeline";
 import { t } from "i18next";
 import { getTranslatedLabel } from "./i18n";
-import { capitalizeFirstLetter } from "./stringUtil";

 export function getLifecycleItemDescription(
   lifecycleItem: ObjectLifecycleSequence,
@@ -11,7 +10,7 @@ export function getLifecycleItemDescription(
     : lifecycleItem.data.sub_label || lifecycleItem.data.label;

   const label = lifecycleItem.data.sub_label
-    ? capitalizeFirstLetter(rawLabel)
+    ? rawLabel
     : getTranslatedLabel(rawLabel);

   switch (lifecycleItem.class_type) {

View File

@@ -11,7 +11,6 @@ import DetailStream from "@/components/timeline/DetailStream";
 import { Button } from "@/components/ui/button";
 import { ToggleGroup, ToggleGroupItem } from "@/components/ui/toggle-group";
 import { useOverlayState } from "@/hooks/use-overlay-state";
-import { useResizeObserver } from "@/hooks/resize-observer";
 import { ExportMode } from "@/types/filter";
 import { FrigateConfig } from "@/types/frigateConfig";
 import { Preview } from "@/types/preview";
@@ -32,7 +31,12 @@ import {
   useRef,
   useState,
 } from "react";
-import { isDesktop, isMobile } from "react-device-detect";
+import {
+  isDesktop,
+  isMobile,
+  isMobileOnly,
+  isTablet,
+} from "react-device-detect";
 import { IoMdArrowRoundBack } from "react-icons/io";
 import { useNavigate } from "react-router-dom";
 import { Toaster } from "@/components/ui/sonner";
@@ -51,6 +55,7 @@ import {
   RecordingSegment,
   RecordingStartingPoint,
 } from "@/types/record";
+import { useResizeObserver } from "@/hooks/resize-observer";
 import { cn } from "@/lib/utils";
 import { useFullscreen } from "@/hooks/use-fullscreen";
 import { useTimezone } from "@/hooks/use-date-utils";
@@ -394,47 +399,49 @@ export function RecordingView({
     }
   }, [mainCameraAspect]);

-  // use a resize observer to determine whether to use w-full or h-full based on container aspect ratio
-  const [{ width: containerWidth, height: containerHeight }] =
+  const [{ width: mainWidth, height: mainHeight }] =
     useResizeObserver(cameraLayoutRef);
-  const [{ width: previewRowWidth, height: previewRowHeight }] =
-    useResizeObserver(previewRowRef);

-  const useHeightBased = useMemo(() => {
-    if (!containerWidth || !containerHeight) {
-      return false;
+  const mainCameraStyle = useMemo(() => {
+    if (isMobile || mainCameraAspect != "normal" || !config) {
+      return undefined;
     }
-    const cameraAspectRatio = getCameraAspect(mainCamera);
-    if (!cameraAspectRatio) {
-      return false;
-    }

-    // Calculate available space for camera after accounting for preview row
-    // For tall cameras: preview row is side-by-side (takes width)
-    // For wide/normal cameras: preview row is stacked (takes height)
-    const availableWidth =
-      mainCameraAspect == "tall" && previewRowWidth
-        ? containerWidth - previewRowWidth
-        : containerWidth;
-    const availableHeight =
-      mainCameraAspect != "tall" && previewRowHeight
-        ? containerHeight - previewRowHeight
-        : containerHeight;
+    const camera = config.cameras[mainCamera];

-    const availableAspectRatio = availableWidth / availableHeight;
+    if (!camera) {
+      return undefined;
+    }

-    // If available space is wider than camera aspect, constrain by height (h-full)
-    // If available space is taller than camera aspect, constrain by width (w-full)
-    return availableAspectRatio >= cameraAspectRatio;
+    const aspect = getCameraAspect(mainCamera);
+
+    if (!aspect) {
+      return undefined;
+    }
+
+    const availableHeight = mainHeight - 112;
+
+    let percent;
+    if (mainWidth / availableHeight < aspect) {
+      percent = 100;
+    } else {
+      const availableWidth = aspect * availableHeight;
+      percent =
+        (mainWidth < availableWidth
+          ? mainWidth / availableWidth
+          : availableWidth / mainWidth) * 100;
+    }
+
+    return {
+      width: `${Math.round(percent)}%`,
+    };
   }, [
-    containerWidth,
-    containerHeight,
-    previewRowWidth,
-    previewRowHeight,
-    getCameraAspect,
-    mainCamera,
+    config,
     mainCameraAspect,
+    mainWidth,
+    mainHeight,
+    mainCamera,
+    getCameraAspect,
   ]);

   const previewRowOverflows = useMemo(() => {
@@ -678,17 +685,19 @@ export function RecordingView({
           <div
             ref={mainLayoutRef}
             className={cn(
-              "flex flex-1 overflow-hidden",
-              isDesktop ? "flex-row" : "flex-col gap-2 landscape:flex-row",
+              "flex h-full justify-center overflow-hidden",
+              isDesktop ? "" : "flex-col gap-2 landscape:flex-row",
             )}
           >
             <div
               ref={cameraLayoutRef}
               className={cn(
-                "flex flex-1 flex-wrap overflow-hidden",
+                "flex flex-1 flex-wrap",
                 isDesktop
-                  ? "min-w-0 px-4"
-                  : "portrait:max-h-[50dvh] portrait:flex-shrink-0 portrait:flex-grow-0 portrait:basis-auto",
+                  ? timelineType === "detail"
+                    ? "md:w-[40%] lg:w-[70%] xl:w-full"
+                    : "w-[80%]"
+                  : "",
               )}
             >
               <div
@@ -702,25 +711,37 @@ export function RecordingView({
               <div
                 key={mainCamera}
                 className={cn(
-                  "relative flex max-h-full min-h-0 min-w-0 max-w-full items-center justify-center",
+                  "relative",
                   isDesktop
-                    ? // Desktop: dynamically switch between w-full and h-full based on
-                      // container vs camera aspect ratio to ensure proper fitting
-                      useHeightBased
-                      ? "h-full"
-                      : "w-full"
+                    ? cn(
+                        "flex justify-center px-4",
+                        mainCameraAspect == "tall"
+                          ? "h-[50%] md:h-[60%] lg:h-[75%] xl:h-[90%]"
+                          : mainCameraAspect == "wide"
+                            ? "w-full"
+                            : "",
+                      )
                     : cn(
-                        "flex-shrink-0 pt-2",
-                        mainCameraAspect == "wide"
-                          ? "aspect-wide"
-                          : mainCameraAspect == "tall"
-                            ? "aspect-tall"
-                            : "aspect-video",
-                        "portrait:w-full landscape:h-full",
+                        "pt-2 portrait:w-full",
+                        isMobileOnly &&
+                          (mainCameraAspect == "wide"
+                            ? "aspect-wide landscape:w-full"
+                            : "aspect-video landscape:h-[94%] landscape:xl:h-[65%]"),
+                        isTablet &&
+                          (mainCameraAspect == "wide"
+                            ? "aspect-wide landscape:w-full"
+                            : mainCameraAspect == "normal"
+                              ? "landscape:w-full"
+                              : "aspect-video landscape:h-[100%]"),
                       ),
                 )}
                 style={{
-                  aspectRatio: getCameraAspect(mainCamera),
+                  width: mainCameraStyle ? mainCameraStyle.width : undefined,
+                  aspectRatio: isDesktop
+                    ? mainCameraAspect == "tall"
+                      ? getCameraAspect(mainCamera)
+                      : undefined
+                    : Math.max(1, getCameraAspect(mainCamera) ?? 0),
                 }}
               >
                 {isDesktop && (
@@ -761,10 +782,10 @@ export function RecordingView({
             <div
               ref={previewRowRef}
               className={cn(
-                "scrollbar-container flex flex-shrink-0 gap-2 overflow-auto",
+                "scrollbar-container flex gap-2 overflow-auto",
                 mainCameraAspect == "tall"
-                  ? "ml-2 h-full w-72 min-w-72 flex-col"
-                  : "h-28 min-h-28 w-full",
+                  ? "h-full w-72 flex-col"
+                  : `h-28 w-full`,
                 previewRowOverflows ? "" : "items-center justify-center",
                 timelineType == "detail" && isDesktop && "mt-4",
               )}
@@ -950,23 +971,10 @@ function Timeline({
   return (
     <div
       className={cn(
-        "relative overflow-hidden",
+        "relative",
         isDesktop
-          ? cn(
-              "no-scrollbar overflow-y-auto",
-              timelineType == "timeline"
-                ? "w-[100px] flex-shrink-0"
-                : timelineType == "detail"
-                  ? "min-w-[20rem] max-w-[30%] flex-shrink-0 flex-grow-0 basis-[30rem] md:min-w-[20rem] md:max-w-[25%] lg:min-w-[30rem] lg:max-w-[33%]"
-                  : "w-60 flex-shrink-0",
-            )
-          : cn(
-              timelineType == "timeline"
-                ? "portrait:flex-grow landscape:w-[100px] landscape:flex-shrink-0"
-                : timelineType == "detail"
-                  ? "portrait:flex-grow landscape:w-[19rem] landscape:flex-shrink-0"
-                  : "portrait:flex-grow landscape:w-[19rem] landscape:flex-shrink-0",
-            ),
+          ? `${timelineType == "timeline" ? "w-[100px]" : timelineType == "detail" ? "w-[30%] min-w-[350px]" : "w-60"} no-scrollbar overflow-y-auto`
+          : `overflow-hidden portrait:flex-grow ${timelineType == "timeline" ? "landscape:w-[100px]" : timelineType == "detail" && isDesktop ? "flex-1" : "landscape:w-[300px]"} `,
       )}
     >
       {isMobile && (