Mirror of https://github.com/blakeblackshear/frigate.git (synced 2025-12-17 02:26:43 +03:00)

Compare commits: 32875fb4cc ... 1fb21a4dac (3 commits)

- 1fb21a4dac
- 63042b9c08
- 0a6b9f98ed
@@ -1,2 +1 @@
 scikit-build == 0.18.*
-nvidia-pyindex
@@ -696,7 +696,11 @@ def timeline(camera: str = "all", limit: int = 100, source_id: Optional[str] = N
         clauses.append((Timeline.camera == camera))

     if source_id:
-        clauses.append((Timeline.source_id == source_id))
+        source_ids = [sid.strip() for sid in source_id.split(",")]
+        if len(source_ids) == 1:
+            clauses.append((Timeline.source_id == source_ids[0]))
+        else:
+            clauses.append((Timeline.source_id.in_(source_ids)))

     if len(clauses) == 0:
         clauses.append((True))
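Note: the endpoint now accepts a comma-separated list of source IDs. A minimal standalone sketch of that parsing, using raw SQL strings purely for illustration — the hypothetical `build_source_filter` below stands in for the peewee clauses the endpoint actually builds:

```python
# Hypothetical helper mirroring the endpoint change: one id becomes an
# equality test, several comma-separated ids become an IN clause.
def build_source_filter(source_id: str | None) -> str | None:
    if not source_id:
        return None

    source_ids = [sid.strip() for sid in source_id.split(",")]
    if len(source_ids) == 1:
        return f"source_id = '{source_ids[0]}'"

    # For illustration only; the real code builds ORM clauses, not raw SQL.
    placeholders = ", ".join(f"'{sid}'" for sid in source_ids)
    return f"source_id IN ({placeholders})"

print(build_source_filter("abc"))            # source_id = 'abc'
print(build_source_filter("abc, def, ghi"))  # source_id IN ('abc', 'def', 'ghi')
```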
@@ -53,6 +53,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
         self.tensor_output_details: dict[str, Any] | None = None
         self.labelmap: dict[int, str] = {}
         self.classifications_per_second = EventsPerSecond()
+        self.state_history: dict[str, dict[str, Any]] = {}

         if (
             self.metrics
@@ -94,6 +95,42 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
         if self.inference_speed:
             self.inference_speed.update(duration)

+    def verify_state_change(self, camera: str, detected_state: str) -> str | None:
+        """
+        Verify state change requires 3 consecutive identical states before publishing.
+        Returns state to publish or None if verification not complete.
+        """
+        if camera not in self.state_history:
+            self.state_history[camera] = {
+                "current_state": None,
+                "pending_state": None,
+                "consecutive_count": 0,
+            }
+
+        verification = self.state_history[camera]
+
+        if detected_state == verification["current_state"]:
+            verification["pending_state"] = None
+            verification["consecutive_count"] = 0
+            return None
+
+        if detected_state == verification["pending_state"]:
+            verification["consecutive_count"] += 1
+
+            if verification["consecutive_count"] >= 3:
+                verification["current_state"] = detected_state
+                verification["pending_state"] = None
+                verification["consecutive_count"] = 0
+                return detected_state
+        else:
+            verification["pending_state"] = detected_state
+            verification["consecutive_count"] = 1
+            logger.debug(
+                f"New state '{detected_state}' detected for {camera}, need {3 - verification['consecutive_count']} more consecutive detections"
+            )
+
+        return None
+
     def process_frame(self, frame_data: dict[str, Any], frame: np.ndarray):
         if self.metrics and self.model_config.name in self.metrics.classification_cps:
             self.metrics.classification_cps[
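Note: a self-contained sketch of the debounce that `verify_state_change` implements; `StateVerifier` is a hypothetical name, and the real code keeps the same three fields per camera in `self.state_history`:

```python
# Standalone sketch of the 3-consecutive-detections debounce above.
class StateVerifier:
    REQUIRED = 3  # consecutive identical detections before a state is published

    def __init__(self) -> None:
        self.current: str | None = None
        self.pending: str | None = None
        self.count = 0

    def observe(self, detected: str) -> str | None:
        if detected == self.current:
            # Already-published state: reset any pending change.
            self.pending = None
            self.count = 0
            return None
        if detected == self.pending:
            self.count += 1
            if self.count >= self.REQUIRED:
                self.current, self.pending, self.count = detected, None, 0
                return detected
        else:
            # A new candidate state starts a fresh streak.
            self.pending, self.count = detected, 1
        return None

v = StateVerifier()
print([v.observe(s) for s in ["open", "open", "open", "open"]])
# [None, None, 'open', None] -- published on the third consecutive sighting
```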
@@ -131,6 +168,19 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
             self.last_run = now
             should_run = True

+        # Shortcut: always run if we have a pending state verification to complete
+        if (
+            not should_run
+            and camera in self.state_history
+            and self.state_history[camera]["pending_state"] is not None
+            and now > self.last_run + 0.5
+        ):
+            self.last_run = now
+            should_run = True
+            logger.debug(
+                f"Running verification check for pending state: {self.state_history[camera]['pending_state']} ({self.state_history[camera]['consecutive_count']}/3)"
+            )
+
         if not should_run:
             return
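Note: the gate this hunk adds, distilled into a hypothetical pure function, assuming the same 0.5-second recheck interval while a verification is pending:

```python
# Hypothetical distillation of the shortcut above: even when the normal run
# cadence says "skip", re-run while a state change is awaiting confirmation.
def should_run_classifier(
    should_run: bool, pending_state: str | None, last_run: float, now: float
) -> bool:
    if should_run:
        return True
    # Re-run every 0.5 s while a verification is pending.
    return pending_state is not None and now > last_run + 0.5

print(should_run_classifier(False, "open", last_run=10.0, now=10.6))  # True
print(should_run_classifier(False, None, last_run=10.0, now=10.6))    # False
```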
@@ -188,10 +238,19 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
             score,
         )

-        if score >= self.model_config.threshold:
+        if score < self.model_config.threshold:
+            logger.debug(
+                f"Score {score} below threshold {self.model_config.threshold}, skipping verification"
+            )
+            return
+
+        detected_state = self.labelmap[best_id]
+        verified_state = self.verify_state_change(camera, detected_state)
+
+        if verified_state is not None:
             self.requestor.send_data(
                 f"{camera}/classification/{self.model_config.name}",
-                self.labelmap[best_id],
+                verified_state,
             )

     def handle_request(self, topic, request_data):
@@ -230,7 +289,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
         self.sub_label_publisher = sub_label_publisher
         self.tensor_input_details: dict[str, Any] | None = None
         self.tensor_output_details: dict[str, Any] | None = None
-        self.detected_objects: dict[str, float] = {}
+        self.classification_history: dict[str, list[tuple[str, float, float]]] = {}
         self.labelmap: dict[int, str] = {}
         self.classifications_per_second = EventsPerSecond()
@@ -272,6 +331,56 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
         if self.inference_speed:
             self.inference_speed.update(duration)

+    def get_weighted_score(
+        self,
+        object_id: str,
+        current_label: str,
+        current_score: float,
+        current_time: float,
+    ) -> tuple[str | None, float]:
+        """
+        Determine weighted score based on history to prevent false positives/negatives.
+        Requires 60% of attempts to agree on a label before publishing.
+        Returns (weighted_label, weighted_score) or (None, 0.0) if no weighted score.
+        """
+        if object_id not in self.classification_history:
+            self.classification_history[object_id] = []
+
+        self.classification_history[object_id].append(
+            (current_label, current_score, current_time)
+        )
+
+        history = self.classification_history[object_id]
+
+        if len(history) < 3:
+            return None, 0.0
+
+        label_counts = {}
+        label_scores = {}
+        total_attempts = len(history)
+
+        for label, score, timestamp in history:
+            if label not in label_counts:
+                label_counts[label] = 0
+                label_scores[label] = []
+
+            label_counts[label] += 1
+            label_scores[label].append(score)
+
+        best_label = max(label_counts, key=label_counts.get)
+        best_count = label_counts[best_label]
+
+        consensus_threshold = total_attempts * 0.6
+        if best_count < consensus_threshold:
+            return None, 0.0
+
+        avg_score = sum(label_scores[best_label]) / len(label_scores[best_label])
+
+        if best_label == "none":
+            return None, 0.0
+
+        return best_label, avg_score
+
     def process_frame(self, obj_data, frame):
         if self.metrics and self.model_config.name in self.metrics.classification_cps:
             self.metrics.classification_cps[
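Note: the consensus rule inside `get_weighted_score`, reduced to a hypothetical pure function over (label, score) pairs; the real method also records timestamps per attempt:

```python
# Pure-function sketch of the 60% label-agreement rule above.
def consensus(history: list[tuple[str, float]]) -> tuple[str | None, float]:
    if len(history) < 3:
        return None, 0.0

    scores: dict[str, list[float]] = {}
    for label, score in history:
        scores.setdefault(label, []).append(score)

    best = max(scores, key=lambda lbl: len(scores[lbl]))
    # At least 60% of all attempts must agree, and "none" never publishes.
    if len(scores[best]) < len(history) * 0.6 or best == "none":
        return None, 0.0
    return best, sum(scores[best]) / len(scores[best])

print(consensus([("cat", 0.9), ("dog", 0.8), ("cat", 0.7)]))
# ('cat', 0.8) -- 2/3 agreement meets the 60% bar, score is the average
print(consensus([("cat", 0.9), ("dog", 0.8)]))
# (None, 0.0) -- fewer than 3 attempts, no decision yet
```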
@@ -284,6 +393,9 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
         if obj_data["label"] not in self.model_config.object_config.objects:
             return

+        if obj_data.get("end_time") is not None:
+            return
+
         now = datetime.datetime.now().timestamp()
         x, y, x2, y2 = calculate_region(
             frame.shape,
@@ -331,7 +443,6 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
         probs = res / res.sum(axis=0)
         best_id = np.argmax(probs)
         score = round(probs[best_id], 2)
-        previous_score = self.detected_objects.get(obj_data["id"], 0.0)
         self.__update_metrics(datetime.datetime.now().timestamp() - now)

         write_classification_attempt(
@@ -347,30 +458,34 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
             logger.debug(f"Score {score} is less than threshold.")
             return

-        if score <= previous_score:
-            logger.debug(f"Score {score} is worse than previous score {previous_score}")
-            return
-
         sub_label = self.labelmap[best_id]
-        self.detected_objects[obj_data["id"]] = score

-        if (
-            self.model_config.object_config.classification_type
-            == ObjectClassificationType.sub_label
-        ):
-            if sub_label != "none":
+        consensus_label, consensus_score = self.get_weighted_score(
+            obj_data["id"], sub_label, score, now
+        )
+
+        if consensus_label is not None:
+            if (
+                self.model_config.object_config.classification_type
+                == ObjectClassificationType.sub_label
+            ):
                 self.sub_label_publisher.publish(
-                    (obj_data["id"], sub_label, score),
+                    (obj_data["id"], consensus_label, consensus_score),
                     EventMetadataTypeEnum.sub_label,
                 )
             elif (
                 self.model_config.object_config.classification_type
                 == ObjectClassificationType.attribute
             ):
                 self.sub_label_publisher.publish(
-                    (obj_data["id"], self.model_config.name, sub_label, score),
-                    EventMetadataTypeEnum.attribute.value,
-                )
+                    (
+                        obj_data["id"],
+                        self.model_config.name,
+                        consensus_label,
+                        consensus_score,
+                    ),
+                    EventMetadataTypeEnum.attribute.value,
+                )

     def handle_request(self, topic, request_data):
         if topic == EmbeddingsRequestEnum.reload_classification_model.value:
@@ -388,8 +503,8 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
         return None

     def expire_object(self, object_id, camera):
-        if object_id in self.detected_objects:
-            self.detected_objects.pop(object_id)
+        if object_id in self.classification_history:
+            self.classification_history.pop(object_id)

     @staticmethod
@@ -63,18 +63,24 @@ class GenAIClient:
         else:
             return ""

-        def get_verified_objects() -> str:
+        def get_verified_object_prompt() -> str:
             if review_data["recognized_objects"]:
-                return " - " + "\n - ".join(review_data["recognized_objects"])
+                object_list = " - " + "\n - ".join(review_data["recognized_objects"])
+                return f"""## Verified Objects (USE THESE NAMES)
+When any of the following verified objects are present in the scene, you MUST use these exact names in your title and scene description:
+{object_list}
+"""
             else:
-                return " None"
+                return ""

         context_prompt = f"""
-Please analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"].replace("_", " ")} security camera.
+Your task is to analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"].replace("_", " ")} security camera.

-**Normal activity patterns for this property:**
+## Normal Activity Patterns for This Property
 {activity_context_prompt}

+## Task Instructions
+
 Your task is to provide a clear, accurate description of the scene that:
 1. States exactly what is happening based on observable actions and movements.
 2. Evaluates whether the observable evidence suggests normal activity for this property or genuine security concerns.
@@ -82,6 +88,8 @@ Your task is to provide a clear, accurate description of the scene that:

 **IMPORTANT: Start by checking if the activity matches the normal patterns above. If it does, assign Level 0. Only consider higher threat levels if the activity clearly deviates from normal patterns or shows genuine security concerns.**

+## Analysis Guidelines
+
 When forming your description:
 - **CRITICAL: Only describe objects explicitly listed in "Detected objects" below.** Do not infer or mention additional people, vehicles, or objects not present in the detected objects list, even if visual patterns suggest them. If only a car is detected, do not describe a person interacting with it unless "person" is also in the detected objects list.
 - **Only describe actions actually visible in the frames.** Do not assume or infer actions that you don't observe happening. If someone walks toward furniture but you never see them sit, do not say they sat. Stick to what you can see across the sequence.
@@ -92,6 +100,8 @@ When forming your description:
 - Identify patterns that suggest genuine security concerns: testing doors/windows on vehicles or buildings, accessing unauthorized areas, attempting to conceal actions, extended loitering without apparent purpose, taking items, behavior that clearly doesn't align with the zone context and detected objects.
 - **Weigh all evidence holistically**: Start by checking if the activity matches the normal patterns above. If it does, assign Level 0. Only consider Level 1 if the activity clearly deviates from normal patterns or shows genuine security concerns that warrant attention.

+## Response Format
+
 Your response MUST be a flat JSON object with:
 - `title` (string): A concise, one-sentence title that captures the main activity. Include any verified recognized objects (from the "Verified recognized objects" list below) and key detected objects. Examples: "Joe walking dog in backyard", "Unknown person testing car doors at night".
 - `scene` (string): A narrative description of what happens across the sequence from start to finish. **Only describe actions you can actually observe happening in the frames provided.** Do not infer or assume actions that aren't visible (e.g., if you see someone walking but never see them sit, don't say they sat down). Include setting, detected objects, and their observable actions. Avoid speculation or filling in assumed behaviors. Your description should align with and support the threat level you assign.
@@ -99,20 +109,22 @@ Your response MUST be a flat JSON object with:
 - `potential_threat_level` (integer): 0, 1, or 2 as defined below. Your threat level must be consistent with your scene description and the guidance above.
 {get_concern_prompt()}

-Threat-level definitions:
+## Threat Level Definitions
+
 - 0 — **Normal activity (DEFAULT)**: What you observe matches the normal activity patterns above or is consistent with expected activity for this property type. The observable evidence—considering zone context, detected objects, and timing together—supports a benign explanation. **Use this level for routine activities even if minor ambiguous elements exist.**
 - 1 — **Potentially suspicious**: Observable behavior raises genuine security concerns that warrant human review. The evidence doesn't support a routine explanation and clearly deviates from the normal patterns above. Examples: testing doors/windows on vehicles or structures, accessing areas that don't align with the activity, taking items that likely don't belong to them, behavior clearly inconsistent with the zone and context, or activity that lacks any visible legitimate indicators. **Only use this level when the activity clearly doesn't match normal patterns.**
 - 2 — **Immediate threat**: Clear evidence of forced entry, break-in, vandalism, aggression, weapons, theft in progress, or active property damage.

-Sequence details:
+## Sequence Details
+
 - Frame 1 = earliest, Frame {len(thumbnails)} = latest
 - Activity started at {review_data["start"]} and lasted {review_data["duration"]} seconds
 - Detected objects: {", ".join(review_data["objects"])}
-- Verified recognized objects (use these names when describing these objects):
-{get_verified_objects()}
 - Zones involved: {", ".join(z.replace("_", " ").title() for z in review_data["zones"]) or "None"}

-**IMPORTANT:**
+{get_verified_object_prompt()}
+
+## Important Notes
 - Values must be plain strings, floats, or integers — no nested objects, no extra commentary.
 - Only describe objects from the "Detected objects" list above. Do not hallucinate additional objects.
 {get_language_prompt()}
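Note: the verified-objects prompt section, extracted as a hypothetical standalone function for illustration (in the diff it is a nested closure over `review_data`):

```python
# Sketch of what get_verified_object_prompt() produces.
def verified_object_prompt(recognized_objects: list[str]) -> str:
    if recognized_objects:
        object_list = " - " + "\n - ".join(recognized_objects)
        return f"""## Verified Objects (USE THESE NAMES)
When any of the following verified objects are present in the scene, you MUST use these exact names in your title and scene description:
{object_list}
"""
    # With no recognized objects the section is omitted entirely
    # (previously the prompt printed a literal " None").
    return ""

print(verified_object_prompt(["Joe", "Amazon van"]))
```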
@@ -11,38 +11,80 @@ import {
 import { TooltipPortal } from "@radix-ui/react-tooltip";
 import { cn } from "@/lib/utils";
 import { useTranslation } from "react-i18next";
+import { Event } from "@/types/event";

 type ObjectTrackOverlayProps = {
   camera: string;
-  selectedObjectId: string;
   showBoundingBoxes?: boolean;
   currentTime: number;
   videoWidth: number;
   videoHeight: number;
   className?: string;
   onSeekToTime?: (timestamp: number, play?: boolean) => void;
-  objectTimeline?: ObjectLifecycleSequence[];
 };

+type PathPoint = {
+  x: number;
+  y: number;
+  timestamp: number;
+  lifecycle_item?: ObjectLifecycleSequence;
+  objectId: string;
+};
+
+type ObjectData = {
+  objectId: string;
+  label: string;
+  color: string;
+  pathPoints: PathPoint[];
+  currentZones: string[];
+  currentBox?: number[];
+};
+
 export default function ObjectTrackOverlay({
   camera,
-  selectedObjectId,
   showBoundingBoxes = false,
   currentTime,
   videoWidth,
   videoHeight,
   className,
   onSeekToTime,
-  objectTimeline,
 }: ObjectTrackOverlayProps) {
   const { t } = useTranslation("views/events");
   const { data: config } = useSWR<FrigateConfig>("config");
-  const { annotationOffset } = useDetailStream();
+  const { annotationOffset, selectedObjectIds } = useDetailStream();

   const effectiveCurrentTime = currentTime - annotationOffset / 1000;

-  // Fetch the full event data to get saved path points
-  const { data: eventData } = useSWR(["event_ids", { ids: selectedObjectId }]);
+  // Fetch all event data in a single request (CSV ids)
+  const { data: eventsData } = useSWR<Event[]>(
+    selectedObjectIds.length > 0
+      ? ["event_ids", { ids: selectedObjectIds.join(",") }]
+      : null,
+  );
+
+  // Fetch timeline data for each object ID using fixed number of hooks
+  const { data: timelineData } = useSWR<ObjectLifecycleSequence[]>(
+    selectedObjectIds.length > 0
+      ? `timeline?source_id=${selectedObjectIds.join(",")}&limit=1000`
+      : null,
+    { revalidateOnFocus: false },
+  );
+
+  const timelineResults = useMemo(() => {
+    // Group timeline entries by source_id
+    if (!timelineData) return selectedObjectIds.map(() => []);
+
+    const grouped: Record<string, ObjectLifecycleSequence[]> = {};
+    for (const entry of timelineData) {
+      if (!grouped[entry.source_id]) {
+        grouped[entry.source_id] = [];
+      }
+      grouped[entry.source_id].push(entry);
+    }
+
+    // Return timeline arrays in the same order as selectedObjectIds
+    return selectedObjectIds.map((id) => grouped[id] || []);
+  }, [selectedObjectIds, timelineData]);
+
   const typeColorMap = useMemo(
     () => ({
@@ -58,16 +100,18 @@ export default function ObjectTrackOverlay({
     [],
   );

-  const getObjectColor = useMemo(() => {
-    return (label: string) => {
+  const getObjectColor = useCallback(
+    (label: string, objectId: string) => {
       const objectColor = config?.model?.colormap[label];
       if (objectColor) {
         const reversed = [...objectColor].reverse();
         return `rgb(${reversed.join(",")})`;
       }
-      return "rgb(255, 0, 0)"; // fallback red
-    };
-  }, [config]);
+      // Fallback to deterministic color based on object ID
+      return generateColorFromId(objectId);
+    },
+    [config],
+  );

   const getZoneColor = useCallback(
     (zoneName: string) => {
@@ -81,125 +125,121 @@ export default function ObjectTrackOverlay({
     [config, camera],
   );

-  const currentObjectZones = useMemo(() => {
-    if (!objectTimeline) return [];
-
-    // Find the most recent timeline event at or before effective current time
-    const relevantEvents = objectTimeline
-      .filter((event) => event.timestamp <= effectiveCurrentTime)
-      .sort((a, b) => b.timestamp - a.timestamp); // Most recent first
-
-    // Get zones from the most recent event
-    return relevantEvents[0]?.data?.zones || [];
-  }, [objectTimeline, effectiveCurrentTime]);
-
-  const zones = useMemo(() => {
-    if (!config?.cameras?.[camera]?.zones || !currentObjectZones.length)
-      return [];
+  // Build per-object data structures
+  const objectsData = useMemo<ObjectData[]>(() => {
+    if (!eventsData || !Array.isArray(eventsData)) return [];
+
+    if (config?.cameras[camera]?.onvif.autotracking.enabled_in_config)
+      return [];
+
+    return selectedObjectIds
+      .map((objectId, index) => {
+        const eventData = eventsData.find((e) => e.id === objectId);
+        const timelineData = timelineResults[index];
+
+        // get saved path points from event
+        const savedPathPoints: PathPoint[] =
+          eventData?.data?.path_data?.map(
+            ([coords, timestamp]: [number[], number]) => ({
+              x: coords[0],
+              y: coords[1],
+              timestamp,
+              lifecycle_item: undefined,
+              objectId,
+            }),
+          ) || [];
+
+        // timeline points for this object
+        const eventSequencePoints: PathPoint[] =
+          timelineData
+            ?.filter(
+              (event: ObjectLifecycleSequence) => event.data.box !== undefined,
+            )
+            .map((event: ObjectLifecycleSequence) => {
+              const [left, top, width, height] = event.data.box!;
+              return {
+                x: left + width / 2, // Center x
+                y: top + height, // Bottom y
+                timestamp: event.timestamp,
+                lifecycle_item: event,
+                objectId,
+              };
+            }) || [];
+
+        // show full path once current time has reached the object's start time
+        const combinedPoints = [...savedPathPoints, ...eventSequencePoints]
+          .sort((a, b) => a.timestamp - b.timestamp)
+          .filter(
+            (point) =>
+              currentTime >= (eventData?.start_time ?? 0) &&
+              point.timestamp >= (eventData?.start_time ?? 0) &&
+              point.timestamp <= (eventData?.end_time ?? Infinity),
+          );
+
+        // Get color for this object
+        const label = eventData?.label || "unknown";
+        const color = getObjectColor(label, objectId);
+
+        // Get current zones
+        const currentZones =
+          timelineData
+            ?.filter(
+              (event: ObjectLifecycleSequence) =>
+                event.timestamp <= effectiveCurrentTime,
+            )
+            .sort(
+              (a: ObjectLifecycleSequence, b: ObjectLifecycleSequence) =>
+                b.timestamp - a.timestamp,
+            )[0]?.data?.zones || [];
+
+        // Get current bounding box
+        const currentBox = timelineData
+          ?.filter(
+            (event: ObjectLifecycleSequence) =>
+              event.timestamp <= effectiveCurrentTime && event.data.box,
+          )
+          .sort(
+            (a: ObjectLifecycleSequence, b: ObjectLifecycleSequence) =>
+              b.timestamp - a.timestamp,
+          )[0]?.data?.box;
+
+        return {
+          objectId,
+          label,
+          color,
+          pathPoints: combinedPoints,
+          currentZones,
+          currentBox,
+        };
+      })
+      .filter((obj: ObjectData) => obj.pathPoints.length > 0); // Only include objects with path data
+  }, [
+    eventsData,
+    selectedObjectIds,
+    timelineResults,
+    currentTime,
+    effectiveCurrentTime,
+    getObjectColor,
+    config,
+    camera,
+  ]);
+
+  // Collect all zones across all objects
+  const allZones = useMemo(() => {
+    if (!config?.cameras?.[camera]?.zones) return [];
+
+    const zoneNames = new Set<string>();
+    objectsData.forEach((obj) => {
+      obj.currentZones.forEach((zone) => zoneNames.add(zone));
+    });

     return Object.entries(config.cameras[camera].zones)
-      .filter(([name]) => currentObjectZones.includes(name))
+      .filter(([name]) => zoneNames.has(name))
       .map(([name, zone]) => ({
         name,
         coordinates: zone.coordinates,
         color: getZoneColor(name),
       }));
-  }, [config, camera, getZoneColor, currentObjectZones]);
-
-  // get saved path points from event
-  const savedPathPoints = useMemo(() => {
-    return (
-      eventData?.[0].data?.path_data?.map(
-        ([coords, timestamp]: [number[], number]) => ({
-          x: coords[0],
-          y: coords[1],
-          timestamp,
-          lifecycle_item: undefined,
-        }),
-      ) || []
-    );
-  }, [eventData]);
-
-  // timeline points for selected event
-  const eventSequencePoints = useMemo(() => {
-    return (
-      objectTimeline
-        ?.filter((event) => event.data.box !== undefined)
-        .map((event) => {
-          const [left, top, width, height] = event.data.box!;
-
-          return {
-            x: left + width / 2, // Center x
-            y: top + height, // Bottom y
-            timestamp: event.timestamp,
-            lifecycle_item: event,
-          };
-        }) || []
-    );
-  }, [objectTimeline]);
-
-  // final object path with timeline points included
-  const pathPoints = useMemo(() => {
-    // don't display a path for autotracking cameras
-    if (config?.cameras[camera]?.onvif.autotracking.enabled_in_config)
-      return [];
-
-    const combinedPoints = [...savedPathPoints, ...eventSequencePoints].sort(
-      (a, b) => a.timestamp - b.timestamp,
-    );
-
-    // Filter points around current time (within a reasonable window)
-    const timeWindow = 30; // 30 seconds window
-    return combinedPoints.filter(
-      (point) =>
-        point.timestamp >= currentTime - timeWindow &&
-        point.timestamp <= currentTime + timeWindow,
-    );
-  }, [savedPathPoints, eventSequencePoints, config, camera, currentTime]);
-
-  // get absolute positions on the svg canvas for each point
-  const absolutePositions = useMemo(() => {
-    if (!pathPoints) return [];
-
-    return pathPoints.map((point) => {
-      // Find the corresponding timeline entry for this point
-      const timelineEntry = objectTimeline?.find(
-        (entry) => entry.timestamp == point.timestamp,
-      );
-      return {
-        x: point.x * videoWidth,
-        y: point.y * videoHeight,
-        timestamp: point.timestamp,
-        lifecycle_item:
-          timelineEntry ||
-          (point.box // normal path point
-            ? {
-                timestamp: point.timestamp,
-                camera: camera,
-                source: "tracked_object",
-                source_id: selectedObjectId,
-                class_type: "visible" as LifecycleClassType,
-                data: {
-                  camera: camera,
-                  label: point.label,
-                  sub_label: "",
-                  box: point.box,
-                  region: [0, 0, 0, 0], // placeholder
-                  attribute: "",
-                  zones: [],
-                },
-              }
-            : undefined),
-      };
-    });
-  }, [
-    pathPoints,
-    videoWidth,
-    videoHeight,
-    objectTimeline,
-    camera,
-    selectedObjectId,
-  ]);
+  }, [config, camera, objectsData, getZoneColor]);

   const generateStraightPath = useCallback(
     (points: { x: number; y: number }[]) => {
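Note: the core of the new `objectsData` memo is a merge of saved path points and timeline points, time-sorted and clipped to the event's window. A hypothetical Python sketch with bare (x, y, timestamp) tuples (the component keeps richer `PathPoint` objects):

```python
# Sketch of the path-merging done in objectsData above.
def combine_points(saved, timeline, start, end=float("inf")):
    """saved/timeline: lists of (x, y, timestamp); returns clipped, sorted path."""
    merged = sorted(saved + timeline, key=lambda p: p[2])
    return [p for p in merged if start <= p[2] <= end]

saved = [(0.1, 0.5, 100.0), (0.2, 0.55, 104.0)]
timeline = [(0.15, 0.52, 102.0), (0.3, 0.6, 110.0)]
print(combine_points(saved, timeline, start=101.0, end=105.0))
# [(0.15, 0.52, 102.0), (0.2, 0.55, 104.0)] -- sorted and clipped to the window
```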
@@ -214,15 +254,20 @@ export default function ObjectTrackOverlay({
   );

   const getPointColor = useCallback(
-    (baseColor: number[], type?: string) => {
+    (baseColorString: string, type?: string) => {
       if (type && typeColorMap[type as keyof typeof typeColorMap]) {
         const typeColor = typeColorMap[type as keyof typeof typeColorMap];
         if (typeColor) {
           return `rgb(${typeColor.join(",")})`;
         }
       }
-      // normal path point
-      return `rgb(${baseColor.map((c) => Math.max(0, c - 10)).join(",")})`;
+      // Parse and darken base color slightly for path points
+      const match = baseColorString.match(/\d+/g);
+      if (match) {
+        const [r, g, b] = match.map(Number);
+        return `rgb(${Math.max(0, r - 10)}, ${Math.max(0, g - 10)}, ${Math.max(0, b - 10)})`;
+      }
+      return baseColorString;
     },
     [typeColorMap],
   );
@@ -234,49 +279,8 @@ export default function ObjectTrackOverlay({
     [onSeekToTime],
   );

-  // render bounding box for object at current time if we have a timeline entry
-  const currentBoundingBox = useMemo(() => {
-    if (!objectTimeline) return null;
-
-    // Find the most recent timeline event at or before effective current time with a bounding box
-    const relevantEvents = objectTimeline
-      .filter(
-        (event) => event.timestamp <= effectiveCurrentTime && event.data.box,
-      )
-      .sort((a, b) => b.timestamp - a.timestamp); // Most recent first
-
-    const currentEvent = relevantEvents[0];
-
-    if (!currentEvent?.data.box) return null;
-
-    const [left, top, width, height] = currentEvent.data.box;
-    return {
-      left,
-      top,
-      width,
-      height,
-      centerX: left + width / 2,
-      centerY: top + height,
-    };
-  }, [objectTimeline, effectiveCurrentTime]);
-
-  const objectColor = useMemo(() => {
-    return pathPoints[0]?.label
-      ? getObjectColor(pathPoints[0].label)
-      : "rgb(255, 0, 0)";
-  }, [pathPoints, getObjectColor]);
-
-  const objectColorArray = useMemo(() => {
-    return pathPoints[0]?.label
-      ? getObjectColor(pathPoints[0].label).match(/\d+/g)?.map(Number) || [
-          255, 0, 0,
-        ]
-      : [255, 0, 0];
-  }, [pathPoints, getObjectColor]);
-
-  // render any zones for object at current time
   const zonePolygons = useMemo(() => {
-    return zones.map((zone) => {
+    return allZones.map((zone) => {
       // Convert zone coordinates from normalized (0-1) to pixel coordinates
       const points = zone.coordinates
         .split(",")
@@ -298,9 +302,9 @@ export default function ObjectTrackOverlay({
         stroke: zone.color,
       };
     });
-  }, [zones, videoWidth, videoHeight]);
+  }, [allZones, videoWidth, videoHeight]);

-  if (!pathPoints.length || !config) {
+  if (objectsData.length === 0 || !config) {
     return null;
   }
@@ -325,73 +329,102 @@ export default function ObjectTrackOverlay({
         />
       ))}

-      {absolutePositions.length > 1 && (
-        <path
-          d={generateStraightPath(absolutePositions)}
-          fill="none"
-          stroke={objectColor}
-          strokeWidth="5"
-          strokeLinecap="round"
-          strokeLinejoin="round"
-        />
-      )}
-
-      {absolutePositions.map((pos, index) => (
-        <Tooltip key={`point-${index}`}>
-          <TooltipTrigger asChild>
-            <circle
-              cx={pos.x}
-              cy={pos.y}
-              r="7"
-              fill={getPointColor(
-                objectColorArray,
-                pos.lifecycle_item?.class_type,
-              )}
-              stroke="white"
-              strokeWidth="3"
-              style={{ cursor: onSeekToTime ? "pointer" : "default" }}
-              onClick={() => handlePointClick(pos.timestamp)}
-            />
-          </TooltipTrigger>
-          <TooltipPortal>
-            <TooltipContent side="top" className="smart-capitalize">
-              {pos.lifecycle_item
-                ? `${pos.lifecycle_item.class_type.replace("_", " ")} at ${new Date(pos.timestamp * 1000).toLocaleTimeString()}`
-                : t("objectTrack.trackedPoint")}
-              {onSeekToTime && (
-                <div className="mt-1 text-xs text-muted-foreground">
-                  {t("objectTrack.clickToSeek")}
-                </div>
-              )}
-            </TooltipContent>
-          </TooltipPortal>
-        </Tooltip>
-      ))}
-
-      {currentBoundingBox && showBoundingBoxes && (
-        <g>
-          <rect
-            x={currentBoundingBox.left * videoWidth}
-            y={currentBoundingBox.top * videoHeight}
-            width={currentBoundingBox.width * videoWidth}
-            height={currentBoundingBox.height * videoHeight}
-            fill="none"
-            stroke={objectColor}
-            strokeWidth="5"
-            opacity="0.9"
-          />
-          <circle
-            cx={currentBoundingBox.centerX * videoWidth}
-            cy={currentBoundingBox.centerY * videoHeight}
-            r="5"
-            fill="rgb(255, 255, 0)" // yellow highlight
-            stroke={objectColor}
-            strokeWidth="5"
-            opacity="1"
-          />
-        </g>
-      )}
+      {objectsData.map((objData) => {
+        const absolutePositions = objData.pathPoints.map((point) => ({
+          x: point.x * videoWidth,
+          y: point.y * videoHeight,
+          timestamp: point.timestamp,
+          lifecycle_item: point.lifecycle_item,
+        }));
+
+        return (
+          <g key={objData.objectId}>
+            {absolutePositions.length > 1 && (
+              <path
+                d={generateStraightPath(absolutePositions)}
+                fill="none"
+                stroke={objData.color}
+                strokeWidth="5"
+                strokeLinecap="round"
+                strokeLinejoin="round"
+              />
+            )}
+
+            {absolutePositions.map((pos, index) => (
+              <Tooltip key={`${objData.objectId}-point-${index}`}>
+                <TooltipTrigger asChild>
+                  <circle
+                    cx={pos.x}
+                    cy={pos.y}
+                    r="7"
+                    fill={getPointColor(
+                      objData.color,
+                      pos.lifecycle_item?.class_type,
+                    )}
+                    stroke="white"
+                    strokeWidth="3"
+                    style={{ cursor: onSeekToTime ? "pointer" : "default" }}
+                    onClick={() => handlePointClick(pos.timestamp)}
+                  />
+                </TooltipTrigger>
+                <TooltipPortal>
+                  <TooltipContent side="top" className="smart-capitalize">
+                    {pos.lifecycle_item
+                      ? `${pos.lifecycle_item.class_type.replace("_", " ")} at ${new Date(pos.timestamp * 1000).toLocaleTimeString()}`
+                      : t("objectTrack.trackedPoint")}
+                    {onSeekToTime && (
+                      <div className="mt-1 text-xs normal-case text-muted-foreground">
+                        {t("objectTrack.clickToSeek")}
+                      </div>
+                    )}
+                  </TooltipContent>
+                </TooltipPortal>
+              </Tooltip>
+            ))}
+
+            {objData.currentBox && showBoundingBoxes && (
+              <g>
+                <rect
+                  x={objData.currentBox[0] * videoWidth}
+                  y={objData.currentBox[1] * videoHeight}
+                  width={objData.currentBox[2] * videoWidth}
+                  height={objData.currentBox[3] * videoHeight}
+                  fill="none"
+                  stroke={objData.color}
+                  strokeWidth="5"
+                  opacity="0.9"
+                />
+                <circle
+                  cx={
+                    (objData.currentBox[0] + objData.currentBox[2] / 2) *
+                    videoWidth
+                  }
+                  cy={
+                    (objData.currentBox[1] + objData.currentBox[3]) *
+                    videoHeight
+                  }
+                  r="5"
+                  fill="rgb(255, 255, 0)" // yellow highlight
+                  stroke={objData.color}
+                  strokeWidth="5"
+                  opacity="1"
+                />
+              </g>
+            )}
+          </g>
+        );
+      })}
     </svg>
   );
 }
+
+// Generate a deterministic HSL color from a string (object ID)
+function generateColorFromId(id: string): string {
+  let hash = 0;
+  for (let i = 0; i < id.length; i++) {
+    hash = id.charCodeAt(i) + ((hash << 5) - hash);
+  }
+  // Use golden ratio to distribute hues evenly
+  const hue = (hash * 137.508) % 360;
+  return `hsl(${hue}, 70%, 50%)`;
+}
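Note: a Python approximation of `generateColorFromId`; it mimics, rather than exactly reproduces, JavaScript's 32-bit shift arithmetic, and keeps the golden-angle hue spreading:

```python
# Hedged sketch: hash the id, then use the golden angle (~137.5 degrees)
# to spread hues evenly around the color wheel.
def color_from_id(obj_id: str) -> str:
    h = 0
    for ch in obj_id:
        h = (ord(ch) + ((h << 5) - h)) % (2**32)  # keep the hash bounded
    hue = (h * 137.508) % 360
    return f"hsl({hue:.1f}, 70%, 50%)"

print(color_from_id("abc123"))  # deterministic: same id -> same color
print(color_from_id("abc124"))  # nearby ids land on well-separated hues
```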
@@ -94,6 +94,10 @@ export default function ObjectLifecycle({
     );
   }, [config, event]);

+  const label = event.sub_label
+    ? event.sub_label
+    : getTranslatedLabel(event.label);
+
   const getZoneColor = useCallback(
     (zoneName: string) => {
       const zoneColor =
@@ -628,17 +632,29 @@ export default function ObjectLifecycle({
               }}
               role="button"
             >
-              <div className={cn("ml-1 rounded-full bg-muted-foreground p-2")}>
+              <div
+                className={cn(
+                  "relative ml-2 rounded-full bg-muted-foreground p-2",
+                )}
+              >
                 {getIconForLabel(
-                  event.label,
-                  "size-6 text-primary dark:text-white",
+                  event.sub_label ? event.label + "-verified" : event.label,
+                  "size-4 text-white",
                 )}
               </div>
-              <div className="flex items-end gap-2">
-                <span>{getTranslatedLabel(event.label)}</span>
+              <div className="flex items-center gap-2">
+                <span className="capitalize">{label}</span>
                 <span className="text-secondary-foreground">
                   {formattedStart ?? ""} - {formattedEnd ?? ""}
                 </span>
+                {event.data?.recognized_license_plate && (
+                  <>
+                    ·{" "}
+                    <span className="text-sm text-secondary-foreground">
+                      {event.data.recognized_license_plate}
+                    </span>
+                  </>
+                )}
               </div>
             </div>
           </div>
@@ -20,7 +20,6 @@ import { cn } from "@/lib/utils";
 import { ASPECT_VERTICAL_LAYOUT, RecordingPlayerError } from "@/types/record";
 import { useTranslation } from "react-i18next";
 import ObjectTrackOverlay from "@/components/overlay/ObjectTrackOverlay";
-import { DetailStreamContextType } from "@/context/detail-stream-context";

 // Android native hls does not seek correctly
 const USE_NATIVE_HLS = !isAndroid;
@@ -54,8 +53,11 @@ type HlsVideoPlayerProps = {
   onUploadFrame?: (playTime: number) => Promise<AxiosResponse> | undefined;
   toggleFullscreen?: () => void;
   onError?: (error: RecordingPlayerError) => void;
-  detail?: Partial<DetailStreamContextType>;
+  isDetailMode?: boolean;
+  camera?: string;
+  currentTimeOverride?: number;
 };

 export default function HlsVideoPlayer({
   videoRef,
   containerRef,
@@ -75,17 +77,15 @@ export default function HlsVideoPlayer({
   onUploadFrame,
   toggleFullscreen,
   onError,
-  detail,
+  isDetailMode = false,
+  camera,
+  currentTimeOverride,
 }: HlsVideoPlayerProps) {
   const { t } = useTranslation("components/player");
   const { data: config } = useSWR<FrigateConfig>("config");

   // for detail stream context in History
-  const selectedObjectId = detail?.selectedObjectId;
-  const selectedObjectTimeline = detail?.selectedObjectTimeline;
-  const currentTime = detail?.currentTime;
-  const camera = detail?.camera;
-  const isDetailMode = detail?.isDetailMode ?? false;
+  const currentTime = currentTimeOverride;

   // playback
@@ -316,16 +316,14 @@ export default function HlsVideoPlayer({
        }}
      >
        {isDetailMode &&
-        selectedObjectId &&
         camera &&
         currentTime &&
         videoDimensions.width > 0 &&
         videoDimensions.height > 0 && (
           <div className="absolute z-50 size-full">
             <ObjectTrackOverlay
-              key={`${selectedObjectId}-${currentTime}`}
+              key={`overlay-${currentTime}`}
               camera={camera}
-              selectedObjectId={selectedObjectId}
               showBoundingBoxes={!isPlaying}
               currentTime={currentTime}
               videoWidth={videoDimensions.width}
@@ -336,7 +334,6 @@ export default function HlsVideoPlayer({
                   onSeekToTime(timestamp, play);
                 }
               }}
-              objectTimeline={selectedObjectTimeline}
             />
           </div>
         )}
@@ -61,7 +61,11 @@ export default function DynamicVideoPlayer({
   const { data: config } = useSWR<FrigateConfig>("config");

   // for detail stream context in History
-  const detail = useDetailStream();
+  const {
+    isDetailMode,
+    camera: contextCamera,
+    currentTime,
+  } = useDetailStream();

   // controlling playback
@@ -295,7 +299,9 @@ export default function DynamicVideoPlayer({
               setIsBuffering(true);
             }
           }}
-          detail={detail}
+          isDetailMode={isDetailMode}
+          camera={contextCamera || camera}
+          currentTimeOverride={currentTime}
         />
         <PreviewPlayer
           className={cn(
@@ -171,7 +171,11 @@ export default function DetailStream({
             <FrigatePlusDialog
               upload={upload}
               onClose={() => setUpload(undefined)}
-              onEventUploaded={() => setUpload(undefined)}
+              onEventUploaded={() => {
+                if (upload) {
+                  upload.plus_id = "new_upload";
+                }
+              }}
             />

             <div
@@ -254,7 +258,9 @@ function ReviewGroup({

   const rawIconLabels: string[] = [
     ...(fetchedEvents
-      ? fetchedEvents.map((e) => e.label)
+      ? fetchedEvents.map((e) =>
+          e.sub_label ? e.label + "-verified" : e.label,
+        )
       : (review.data?.objects ?? [])),
     ...(review.data?.audio ?? []),
   ];
@@ -317,7 +323,7 @@ function ReviewGroup({
           <div className="ml-1 flex flex-col items-start gap-1.5">
             <div className="flex flex-row gap-3">
               <div className="text-sm font-medium">{displayTime}</div>
-              <div className="flex items-center gap-2">
+              <div className="relative flex items-center gap-2 text-white">
                 {iconLabels.slice(0, 5).map((lbl, idx) => (
                   <div
                     key={`${lbl}-${idx}`}
@@ -423,30 +429,34 @@ function EventList({
 }: EventListProps) {
   const { data: config } = useSWR<FrigateConfig>("config");

-  const { selectedObjectId, setSelectedObjectId } = useDetailStream();
+  const { selectedObjectIds, toggleObjectSelection } = useDetailStream();
+
+  const isSelected = selectedObjectIds.includes(event.id);
+
+  const label = event.sub_label || getTranslatedLabel(event.label);

   const handleObjectSelect = (event: Event | undefined) => {
     if (event) {
-      onSeek(event.start_time ?? 0);
-      setSelectedObjectId(event.id);
+      // onSeek(event.start_time ?? 0);
+      toggleObjectSelection(event.id);
     } else {
-      setSelectedObjectId(undefined);
+      toggleObjectSelection(undefined);
     }
   };

-  // Clear selectedObjectId when effectiveTime has passed this event's end_time
+  // Clear selection when effectiveTime has passed this event's end_time
   useEffect(() => {
-    if (selectedObjectId === event.id && effectiveTime && event.end_time) {
+    if (isSelected && effectiveTime && event.end_time) {
       if (effectiveTime >= event.end_time) {
-        setSelectedObjectId(undefined);
+        toggleObjectSelection(event.id);
       }
     }
   }, [
-    selectedObjectId,
+    isSelected,
     event.id,
     event.end_time,
     effectiveTime,
-    setSelectedObjectId,
+    toggleObjectSelection,
   ]);

   return (
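Note: a sketch of the multi-select behavior the component switches to, assuming `toggleObjectSelection` adds an id when absent and removes it when present (an assumption inferred from its usage above):

```python
# Hypothetical model of the selection toggle, replacing the old single-id state.
def toggle(selected: list[str], object_id: str) -> list[str]:
    if object_id in selected:
        return [i for i in selected if i != object_id]  # deselect
    return [*selected, object_id]  # select

sel: list[str] = []
sel = toggle(sel, "a")  # ['a']
sel = toggle(sel, "b")  # ['a', 'b']
sel = toggle(sel, "a")  # ['b']
print(sel)
```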
@ -454,48 +464,59 @@ function EventList({
|
|||||||
<div
|
<div
|
||||||
className={cn(
|
className={cn(
|
||||||
"rounded-md bg-secondary p-2",
|
"rounded-md bg-secondary p-2",
|
||||||
event.id == selectedObjectId
|
isSelected
|
||||||
? "bg-secondary-highlight"
|
? "bg-secondary-highlight"
|
||||||
: "outline-transparent duration-500",
|
: "outline-transparent duration-500",
|
||||||
event.id != selectedObjectId &&
|
!isSelected &&
|
||||||
(effectiveTime ?? 0) >= (event.start_time ?? 0) - 0.5 &&
|
(effectiveTime ?? 0) >= (event.start_time ?? 0) - 0.5 &&
|
||||||
(effectiveTime ?? 0) <=
|
(effectiveTime ?? 0) <=
|
||||||
(event.end_time ?? event.start_time ?? 0) + 0.5 &&
|
(event.end_time ?? event.start_time ?? 0) + 0.5 &&
|
||||||
"bg-secondary-highlight",
|
"bg-secondary-highlight",
|
||||||
)}
|
)}
|
||||||
>
|
>
|
||||||
<div className="ml-1.5 flex w-full items-center justify-between">
|
<div className="ml-1.5 flex w-full items-end justify-between">
|
||||||
<div
|
<div className="flex flex-1 items-center gap-2 text-sm font-medium">
|
||||||
className="flex items-center gap-2 text-sm font-medium"
|
|
||||||
onClick={(e) => {
|
|
||||||
e.stopPropagation();
|
|
||||||
handleObjectSelect(
|
|
||||||
event.id == selectedObjectId ? undefined : event,
|
|
||||||
);
|
|
||||||
}}
|
|
||||||
role="button"
|
|
||||||
>
|
|
||||||
<div
|
<div
|
||||||
className={cn(
|
className={cn(
|
||||||
"rounded-full p-1",
|
"relative rounded-full p-1 text-white",
|
||||||
event.id == selectedObjectId
|
isSelected ? "bg-selected" : "bg-muted-foreground",
|
||||||
? "bg-selected"
|
|
||||||
: "bg-muted-foreground",
|
|
||||||
)}
|
)}
|
||||||
|
onClick={(e) => {
|
||||||
|
e.stopPropagation();
|
||||||
|
handleObjectSelect(isSelected ? undefined : event);
|
||||||
|
}}
|
||||||
>
|
>
|
||||||
{getIconForLabel(event.label, "size-3 text-white")}
|
{getIconForLabel(
|
||||||
|
event.sub_label ? event.label + "-verified" : event.label,
|
||||||
|
"size-3 text-white",
|
||||||
|
)}
|
||||||
</div>
|
</div>
|
||||||
<div className="flex items-end gap-2">
|
<div
|
||||||
<span>{getTranslatedLabel(event.label)}</span>
|
className="flex flex-1 items-center gap-2"
|
||||||
|
onClick={(e) => {
|
||||||
|
e.stopPropagation();
|
||||||
|
onSeek(event.start_time ?? 0);
|
||||||
|
}}
|
||||||
|
role="button"
|
||||||
|
>
|
||||||
|
<span className="capitalize">{label}</span>
|
||||||
|
{event.data?.recognized_license_plate && (
|
||||||
|
<>
|
||||||
|
·{" "}
|
||||||
|
<span className="text-sm text-secondary-foreground">
|
||||||
|
{event.data.recognized_license_plate}
|
||||||
|
</span>
|
||||||
|
</>
|
||||||
|
)}
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
<div className="mr-2 flex flex-1 flex-row justify-end">
|
<div className="mr-2 flex flex-row justify-end">
|
||||||
<EventMenu
|
<EventMenu
|
||||||
event={event}
|
event={event}
|
||||||
config={config}
|
config={config}
|
||||||
onOpenUpload={(e) => onOpenUpload?.(e)}
|
onOpenUpload={(e) => onOpenUpload?.(e)}
|
||||||
selectedObjectId={selectedObjectId}
|
isSelected={isSelected}
|
||||||
setSelectedObjectId={handleObjectSelect}
|
onToggleSelection={handleObjectSelect}
|
||||||
/>
|
/>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
@@ -12,14 +12,15 @@ import { useNavigate } from "react-router-dom";
 import { useTranslation } from "react-i18next";
 import { Event } from "@/types/event";
 import { FrigateConfig } from "@/types/frigateConfig";
+import { useState } from "react";
 
 type EventMenuProps = {
   event: Event;
   config?: FrigateConfig;
   onOpenUpload?: (e: Event) => void;
   onOpenSimilarity?: (e: Event) => void;
-  selectedObjectId?: string;
-  setSelectedObjectId?: (event: Event | undefined) => void;
+  isSelected?: boolean;
+  onToggleSelection?: (event: Event | undefined) => void;
 };
 
 export default function EventMenu({
@@ -27,25 +28,26 @@ export default function EventMenu({
   config,
   onOpenUpload,
   onOpenSimilarity,
-  selectedObjectId,
-  setSelectedObjectId,
+  isSelected = false,
+  onToggleSelection,
 }: EventMenuProps) {
   const apiHost = useApiHost();
   const navigate = useNavigate();
   const { t } = useTranslation("views/explore");
+  const [isOpen, setIsOpen] = useState(false);
 
   const handleObjectSelect = () => {
-    if (event.id === selectedObjectId) {
-      setSelectedObjectId?.(undefined);
+    if (isSelected) {
+      onToggleSelection?.(undefined);
     } else {
-      setSelectedObjectId?.(event);
+      onToggleSelection?.(event);
    }
   };
 
   return (
     <>
       <span tabIndex={0} className="sr-only" />
-      <DropdownMenu>
+      <DropdownMenu open={isOpen} onOpenChange={setIsOpen}>
         <DropdownMenuTrigger>
           <div className="rounded p-1 pr-2" role="button">
             <HiDotsHorizontal className="size-4 text-muted-foreground" />
@@ -54,7 +56,7 @@ export default function EventMenu({
       <DropdownMenuPortal>
         <DropdownMenuContent>
           <DropdownMenuItem onSelect={handleObjectSelect}>
-            {event.id === selectedObjectId
+            {isSelected
               ? t("itemMenu.hideObjectDetails.label")
               : t("itemMenu.showObjectDetails.label")}
           </DropdownMenuItem>
@@ -85,6 +87,7 @@ export default function EventMenu({
           config?.plus?.enabled && (
             <DropdownMenuItem
               onSelect={() => {
+                setIsOpen(false);
                 onOpenUpload?.(event);
               }}
             >
@@ -1,16 +1,14 @@
 import React, { createContext, useContext, useState, useEffect } from "react";
 import { FrigateConfig } from "@/types/frigateConfig";
 import useSWR from "swr";
-import { ObjectLifecycleSequence } from "@/types/timeline";
 
 export interface DetailStreamContextType {
-  selectedObjectId: string | undefined;
-  selectedObjectTimeline?: ObjectLifecycleSequence[];
+  selectedObjectIds: string[];
   currentTime: number;
   camera: string;
   annotationOffset: number; // milliseconds
   setAnnotationOffset: (ms: number) => void;
-  setSelectedObjectId: (id: string | undefined) => void;
+  toggleObjectSelection: (id: string | undefined) => void;
   isDetailMode: boolean;
 }
 
@@ -31,13 +29,21 @@ export function DetailStreamProvider({
   currentTime,
   camera,
 }: DetailStreamProviderProps) {
-  const [selectedObjectId, setSelectedObjectId] = useState<
-    string | undefined
-  >();
+  const [selectedObjectIds, setSelectedObjectIds] = useState<string[]>([]);
 
-  const { data: selectedObjectTimeline } = useSWR<ObjectLifecycleSequence[]>(
-    selectedObjectId ? ["timeline", { source_id: selectedObjectId }] : null,
-  );
+  const toggleObjectSelection = (id: string | undefined) => {
+    if (id === undefined) {
+      setSelectedObjectIds([]);
+    } else {
+      setSelectedObjectIds((prev) => {
+        if (prev.includes(id)) {
+          return prev.filter((existingId) => existingId !== id);
+        } else {
+          return [...prev, id];
+        }
+      });
+    }
+  };
 
   const { data: config } = useSWR<FrigateConfig>("config");
 
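toggleObjectSelection above gives the provider set-like multi-select semantics: undefined clears the selection, an id already present is removed, and a new id is appended. The same logic as a pure function, for illustration only (toggleSelection is a hypothetical name):

// Sketch: pure form of the toggle used by the provider above.
function toggleSelection(prev: string[], id: string | undefined): string[] {
  if (id === undefined) {
    return []; // clear the whole selection
  }
  return prev.includes(id)
    ? prev.filter((existingId) => existingId !== id) // deselect
    : [...prev, id]; // select
}

// toggleSelection(["a"], "a") -> []
// toggleSelection(["a"], "b") -> ["a", "b"]
// toggleSelection(["a", "b"], undefined) -> []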
@@ -53,13 +59,12 @@ export function DetailStreamProvider({
   }, [config, camera]);
 
   const value: DetailStreamContextType = {
-    selectedObjectId,
-    selectedObjectTimeline,
+    selectedObjectIds,
     currentTime,
     camera,
     annotationOffset,
     setAnnotationOffset,
-    setSelectedObjectId,
+    toggleObjectSelection,
     isDetailMode,
   };
 
@@ -22,6 +22,7 @@ export interface Event {
     area: number;
     ratio: number;
     type: "object" | "audio" | "manual";
+    recognized_license_plate?: string;
     path_data: [number[], number][];
   };
 }
@@ -1,6 +1,7 @@
 import { ObjectLifecycleSequence } from "@/types/timeline";
 import { t } from "i18next";
 import { getTranslatedLabel } from "./i18n";
+import { capitalizeFirstLetter } from "./stringUtil";
 
 export function getLifecycleItemDescription(
   lifecycleItem: ObjectLifecycleSequence,
@@ -10,7 +11,7 @@ export function getLifecycleItemDescription(
     : lifecycleItem.data.sub_label || lifecycleItem.data.label;
 
   const label = lifecycleItem.data.sub_label
-    ? rawLabel
+    ? capitalizeFirstLetter(rawLabel)
     : getTranslatedLabel(rawLabel);
 
   switch (lifecycleItem.class_type) {
@@ -11,6 +11,7 @@ import DetailStream from "@/components/timeline/DetailStream";
 import { Button } from "@/components/ui/button";
 import { ToggleGroup, ToggleGroupItem } from "@/components/ui/toggle-group";
 import { useOverlayState } from "@/hooks/use-overlay-state";
+import { useResizeObserver } from "@/hooks/resize-observer";
 import { ExportMode } from "@/types/filter";
 import { FrigateConfig } from "@/types/frigateConfig";
 import { Preview } from "@/types/preview";
@@ -31,12 +32,7 @@
   useRef,
   useState,
 } from "react";
-import {
-  isDesktop,
-  isMobile,
-  isMobileOnly,
-  isTablet,
-} from "react-device-detect";
+import { isDesktop, isMobile } from "react-device-detect";
 import { IoMdArrowRoundBack } from "react-icons/io";
 import { useNavigate } from "react-router-dom";
 import { Toaster } from "@/components/ui/sonner";
@@ -55,7 +51,6 @@ import {
   RecordingSegment,
   RecordingStartingPoint,
 } from "@/types/record";
-import { useResizeObserver } from "@/hooks/resize-observer";
 import { cn } from "@/lib/utils";
 import { useFullscreen } from "@/hooks/use-fullscreen";
 import { useTimezone } from "@/hooks/use-date-utils";
@@ -399,49 +394,47 @@ export function RecordingView({
     }
   }, [mainCameraAspect]);
 
-  const [{ width: mainWidth, height: mainHeight }] =
+  // use a resize observer to determine whether to use w-full or h-full based on container aspect ratio
+  const [{ width: containerWidth, height: containerHeight }] =
     useResizeObserver(cameraLayoutRef);
+  const [{ width: previewRowWidth, height: previewRowHeight }] =
+    useResizeObserver(previewRowRef);
 
-  const mainCameraStyle = useMemo(() => {
-    if (isMobile || mainCameraAspect != "normal" || !config) {
-      return undefined;
+  const useHeightBased = useMemo(() => {
+    if (!containerWidth || !containerHeight) {
+      return false;
     }
 
-    const camera = config.cameras[mainCamera];
-
-    if (!camera) {
-      return undefined;
+    const cameraAspectRatio = getCameraAspect(mainCamera);
+    if (!cameraAspectRatio) {
+      return false;
     }
 
-    const aspect = getCameraAspect(mainCamera);
-
-    if (!aspect) {
-      return undefined;
-    }
-
-    const availableHeight = mainHeight - 112;
-
-    let percent;
-    if (mainWidth / availableHeight < aspect) {
-      percent = 100;
-    } else {
-      const availableWidth = aspect * availableHeight;
-      percent =
-        (mainWidth < availableWidth
-          ? mainWidth / availableWidth
-          : availableWidth / mainWidth) * 100;
-    }
-
-    return {
-      width: `${Math.round(percent)}%`,
-    };
+    // Calculate available space for camera after accounting for preview row
+    // For tall cameras: preview row is side-by-side (takes width)
+    // For wide/normal cameras: preview row is stacked (takes height)
+    const availableWidth =
+      mainCameraAspect == "tall" && previewRowWidth
+        ? containerWidth - previewRowWidth
+        : containerWidth;
+    const availableHeight =
+      mainCameraAspect != "tall" && previewRowHeight
+        ? containerHeight - previewRowHeight
+        : containerHeight;
+
+    const availableAspectRatio = availableWidth / availableHeight;
+
+    // If available space is wider than camera aspect, constrain by height (h-full)
+    // If available space is taller than camera aspect, constrain by width (w-full)
+    return availableAspectRatio >= cameraAspectRatio;
   }, [
-    config,
-    mainCameraAspect,
-    mainWidth,
-    mainHeight,
-    mainCamera,
+    containerWidth,
+    containerHeight,
+    previewRowWidth,
+    previewRowHeight,
     getCameraAspect,
+    mainCamera,
+    mainCameraAspect,
   ]);
 
   const previewRowOverflows = useMemo(() => {
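useHeightBased above reduces the fitting decision to a single aspect-ratio comparison: subtract the preview row from the axis it occupies, then constrain the camera by height (h-full) when the remaining container is wider than the camera, otherwise by width (w-full). The core comparison as a standalone sketch (fitsByHeight is an illustrative name, not part of the change):

// Sketch: the fit decision from the hunk above.
// true  -> available area is wider than the camera, so use h-full
// false -> available area is taller than the camera, so use w-full
function fitsByHeight(
  availableWidth: number,
  availableHeight: number,
  cameraAspectRatio: number,
): boolean {
  return availableWidth / availableHeight >= cameraAspectRatio;
}

// Example: a 1600x900 area with a 4:3 (~1.33) camera gives 1.78 >= 1.33 -> h-full;
// the same camera in a 900x1600 area gives 0.56 < 1.33 -> w-full.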
@@ -685,19 +678,17 @@ export function RecordingView({
         <div
           ref={mainLayoutRef}
           className={cn(
-            "flex h-full justify-center overflow-hidden",
-            isDesktop ? "" : "flex-col gap-2 landscape:flex-row",
+            "flex flex-1 overflow-hidden",
+            isDesktop ? "flex-row" : "flex-col gap-2 landscape:flex-row",
           )}
         >
           <div
             ref={cameraLayoutRef}
             className={cn(
-              "flex flex-1 flex-wrap",
+              "flex flex-1 flex-wrap overflow-hidden",
               isDesktop
-                ? timelineType === "detail"
-                  ? "md:w-[40%] lg:w-[70%] xl:w-full"
-                  : "w-[80%]"
-                : "",
+                ? "min-w-0 px-4"
+                : "portrait:max-h-[50dvh] portrait:flex-shrink-0 portrait:flex-grow-0 portrait:basis-auto",
             )}
           >
             <div
@@ -711,37 +702,25 @@ export function RecordingView({
             <div
               key={mainCamera}
               className={cn(
-                "relative",
+                "relative flex max-h-full min-h-0 min-w-0 max-w-full items-center justify-center",
                 isDesktop
-                  ? cn(
-                      "flex justify-center px-4",
-                      mainCameraAspect == "tall"
-                        ? "h-[50%] md:h-[60%] lg:h-[75%] xl:h-[90%]"
-                        : mainCameraAspect == "wide"
-                          ? "w-full"
-                          : "",
-                    )
+                  ? // Desktop: dynamically switch between w-full and h-full based on
+                    // container vs camera aspect ratio to ensure proper fitting
+                    useHeightBased
+                    ? "h-full"
+                    : "w-full"
                   : cn(
-                      "pt-2 portrait:w-full",
-                      isMobileOnly &&
-                        (mainCameraAspect == "wide"
-                          ? "aspect-wide landscape:w-full"
-                          : "aspect-video landscape:h-[94%] landscape:xl:h-[65%]"),
-                      isTablet &&
-                        (mainCameraAspect == "wide"
-                          ? "aspect-wide landscape:w-full"
-                          : mainCameraAspect == "normal"
-                            ? "landscape:w-full"
-                            : "aspect-video landscape:h-[100%]"),
+                      "flex-shrink-0 pt-2",
+                      mainCameraAspect == "wide"
+                        ? "aspect-wide"
+                        : mainCameraAspect == "tall"
+                          ? "aspect-tall"
+                          : "aspect-video",
+                      "portrait:w-full landscape:h-full",
                     ),
               )}
               style={{
-                width: mainCameraStyle ? mainCameraStyle.width : undefined,
-                aspectRatio: isDesktop
-                  ? mainCameraAspect == "tall"
-                    ? getCameraAspect(mainCamera)
-                    : undefined
-                  : Math.max(1, getCameraAspect(mainCamera) ?? 0),
+                aspectRatio: getCameraAspect(mainCamera),
               }}
             >
               {isDesktop && (
@@ -782,10 +761,10 @@ export function RecordingView({
               <div
                 ref={previewRowRef}
                 className={cn(
-                  "scrollbar-container flex gap-2 overflow-auto",
+                  "scrollbar-container flex flex-shrink-0 gap-2 overflow-auto",
                   mainCameraAspect == "tall"
-                    ? "h-full w-72 flex-col"
-                    : `h-28 w-full`,
+                    ? "ml-2 h-full w-72 min-w-72 flex-col"
+                    : "h-28 min-h-28 w-full",
                   previewRowOverflows ? "" : "items-center justify-center",
                   timelineType == "detail" && isDesktop && "mt-4",
                 )}
@@ -971,10 +950,23 @@ function Timeline({
   return (
     <div
       className={cn(
-        "relative",
+        "relative overflow-hidden",
         isDesktop
-          ? `${timelineType == "timeline" ? "w-[100px]" : timelineType == "detail" ? "w-[30%] min-w-[350px]" : "w-60"} no-scrollbar overflow-y-auto`
-          : `overflow-hidden portrait:flex-grow ${timelineType == "timeline" ? "landscape:w-[100px]" : timelineType == "detail" && isDesktop ? "flex-1" : "landscape:w-[300px]"} `,
+          ? cn(
+              "no-scrollbar overflow-y-auto",
+              timelineType == "timeline"
+                ? "w-[100px] flex-shrink-0"
+                : timelineType == "detail"
+                  ? "min-w-[20rem] max-w-[30%] flex-shrink-0 flex-grow-0 basis-[30rem] md:min-w-[20rem] md:max-w-[25%] lg:min-w-[30rem] lg:max-w-[33%]"
+                  : "w-60 flex-shrink-0",
+            )
+          : cn(
+              timelineType == "timeline"
+                ? "portrait:flex-grow landscape:w-[100px] landscape:flex-shrink-0"
+                : timelineType == "detail"
+                  ? "portrait:flex-grow landscape:w-[19rem] landscape:flex-shrink-0"
+                  : "portrait:flex-grow landscape:w-[19rem] landscape:flex-shrink-0",
+            ),
       )}
     >
       {isMobile && (