Repository: https://github.com/blakeblackshear/frigate.git
Merge 4ff61a77e7 into 9d4aac2b8e
Commit: e2e88d6a67
@@ -191,6 +191,7 @@ ONVIF
 openai
 opencv
 openvino
+overfitting
 OWASP
 paddleocr
 paho
@@ -168,6 +168,8 @@ Recorded `speech` events will always use a `whisper` model, regardless of the `m
 
 If you hear speech that’s actually important and worth saving/indexing for the future, **just press the transcribe button in Explore** on that specific `speech` event - that keeps things explicit, reliable, and under your control.
 
+Other options are being considered for future versions of Frigate to add transcription options that support external `whisper` Docker containers. A single transcription service could then be shared by Frigate and other applications (for example, Home Assistant Voice), and run on more powerful machines when available.
+
 2. Why don't you save live transcription text and use that for `speech` events?
 
 There’s no guarantee that a `speech` event is even created from the exact audio that went through the transcription model. Live transcription and `speech` event creation are **separate, asynchronous processes**. Even when both are correctly configured, trying to align the **precise start and end time of a speech event** with whatever audio the model happened to be processing at that moment is unreliable.
@@ -69,4 +69,6 @@ Once all images are assigned, training will begin automatically.
 ### Improving the Model
 
 - **Problem framing**: Keep classes visually distinct and state-focused (e.g., `open`, `closed`, `unknown`). Avoid combining object identity with state in a single model unless necessary.
 - **Data collection**: Use the model's Recent Classifications tab to gather balanced examples across times of day and weather.
+- **When to train**: Focus on cases where the model is entirely incorrect or flips between states when it should not. There's no need to train additional images when the model is already working consistently.
+- **Selecting training images**: Images scoring below 100% due to new conditions (e.g., first snow of the year, seasonal changes) or variations (e.g., objects temporarily in view, insects at night) are good candidates for training, as they represent scenarios different from the default state. Training these lower-scoring images that differ from existing training data helps prevent overfitting. Avoid training large quantities of images that look very similar, especially if they already score 100%, as this can lead to overfitting.
@@ -710,6 +710,44 @@ audio_transcription:
   # List of language codes: https://github.com/openai/whisper/blob/main/whisper/tokenizer.py#L10
   language: en
 
+# Optional: Configuration for classification models
+classification:
+  # Optional: Configuration for bird classification
+  bird:
+    # Optional: Enable bird classification (default: shown below)
+    enabled: False
+    # Optional: Minimum classification score required to be considered a match (default: shown below)
+    threshold: 0.9
+  custom:
+    # Required: name of the classification model
+    model_name:
+      # Optional: Enable running the model (default: shown below)
+      enabled: True
+      # Optional: Name of classification model (default: shown below)
+      name: None
+      # Optional: Classification score threshold to change the state (default: shown below)
+      threshold: 0.8
+      # Optional: Number of classification attempts to save in the recent classifications tab (default: shown below)
+      # NOTE: Defaults to 200 for object classification and 100 for state classification if not specified
+      save_attempts: None
+      # Optional: Object classification configuration
+      object_config:
+        # Required: Object types to classify
+        objects: [dog]
+        # Optional: Type of classification that is applied (default: shown below)
+        classification_type: sub_label
+      # Optional: State classification configuration
+      state_config:
+        # Required: Cameras to run classification on
+        cameras:
+          camera_name:
+            # Required: Crop of image frame on this camera to run classification on
+            crop: [0, 180, 220, 400]
+            # Optional: If classification should be run when motion is detected in the crop (default: shown below)
+            motion: False
+            # Optional: Interval to run classification on in seconds (default: shown below)
+            interval: None
+
 # Optional: Restream configuration
 # Uses https://github.com/AlexxIT/go2rtc (v1.9.10)
 # NOTE: The default go2rtc API port (1984) must be used,
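As an aside on the `state_config` crop added above: a minimal sketch of how a crop such as `[0, 180, 220, 400]` could be applied to a frame, assuming purely for illustration that the four values are `[x1, y1, x2, y2]` pixel coordinates (that ordering is not confirmed by this diff):

import numpy as np

def apply_crop(frame: np.ndarray, crop: list[int]) -> np.ndarray:
    # Assumed ordering: [x1, y1, x2, y2]; numpy frames are indexed [y, x].
    x1, y1, x2, y2 = crop
    return frame[y1:y2, x1:x2]

# Example: crop a 720p frame down to the configured region.
frame = np.zeros((720, 1280, 3), dtype=np.uint8)
region = apply_crop(frame, [0, 180, 220, 400])  # shape (220, 220, 3)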
@@ -1731,37 +1731,40 @@ def create_trigger_embedding(
     if event.data.get("type") != "object":
         return
 
-    if thumbnail := get_event_thumbnail_bytes(event):
-        cursor = context.db.execute_sql(
-            """
-            SELECT thumbnail_embedding FROM vec_thumbnails WHERE id = ?
-            """,
-            [body.data],
-        )
-
-        row = cursor.fetchone() if cursor else None
-
-        if row:
-            query_embedding = row[0]
-            embedding = np.frombuffer(query_embedding, dtype=np.float32)
-        else:
-            # Extract valid thumbnail
-            thumbnail = get_event_thumbnail_bytes(event)
-
-            if thumbnail is None:
-                return JSONResponse(
-                    content={
-                        "success": False,
-                        "message": f"Failed to get thumbnail for {body.data} for {body.type} trigger",
-                    },
-                    status_code=400,
-                )
-
-            embedding = context.generate_image_embedding(
-                body.data, (base64.b64encode(thumbnail).decode("ASCII"))
-            )
-
-    if not embedding:
+    # Get the thumbnail
+    thumbnail = get_event_thumbnail_bytes(event)
+
+    if thumbnail is None:
+        return JSONResponse(
+            content={
+                "success": False,
+                "message": f"Failed to get thumbnail for {body.data} for {body.type} trigger",
+            },
+            status_code=400,
+        )
+
+    # Try to reuse existing embedding from database
+    cursor = context.db.execute_sql(
+        """
+        SELECT thumbnail_embedding FROM vec_thumbnails WHERE id = ?
+        """,
+        [body.data],
+    )
+
+    row = cursor.fetchone() if cursor else None
+
+    if row:
+        query_embedding = row[0]
+        embedding = np.frombuffer(query_embedding, dtype=np.float32)
+    else:
+        # Generate new embedding
+        embedding = context.generate_image_embedding(
+            body.data, (base64.b64encode(thumbnail).decode("ASCII"))
+        )
+
+    if embedding is None or (
+        isinstance(embedding, (list, np.ndarray)) and len(embedding) == 0
+    ):
         return JSONResponse(
             content={
                 "success": False,
@@ -1896,7 +1899,9 @@ def update_trigger_embedding(
             body.data, (base64.b64encode(thumbnail).decode("ASCII"))
         )
 
-    if not embedding:
+    if embedding is None or (
+        isinstance(embedding, (list, np.ndarray)) and len(embedding) == 0
+    ):
         return JSONResponse(
             content={
                 "success": False,
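Both hunks above replace a bare `if not embedding:` with an explicit None/empty check. A short illustrative snippet (not from the source) of why the bare truthiness test is fragile once the value can be a NumPy array, which is presumably the motivation for the change:

import numpy as np

embedding = np.frombuffer(b"\x00\x00\x80?" * 4, dtype=np.float32)  # 4 floats

# Truthiness of a multi-element array is ambiguous and raises
# "ValueError: The truth value of an array with more than one element is ambiguous."
try:
    if not embedding:
        print("empty")
except ValueError as err:
    print(err)

# The explicit check used in the diff stays well-defined for None, lists, and arrays.
if embedding is None or (
    isinstance(embedding, (list, np.ndarray)) and len(embedding) == 0
):
    print("missing or empty embedding")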
@@ -105,6 +105,11 @@ class CustomClassificationConfig(FrigateBaseModel):
     threshold: float = Field(
         default=0.8, title="Classification score threshold to change the state."
     )
+    save_attempts: int | None = Field(
+        default=None,
+        title="Number of classification attempts to save in the recent classifications tab. If not specified, defaults to 200 for object classification and 100 for state classification.",
+        ge=0,
+    )
     object_config: CustomClassificationObjectConfig | None = Field(default=None)
     state_config: CustomClassificationStateConfig | None = Field(default=None)
 
@@ -250,6 +250,11 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
         if self.interpreter is None:
             # When interpreter is None, always save (score is 0.0, which is < 1.0)
             if self._should_save_image(camera, "unknown", 0.0):
+                save_attempts = (
+                    self.model_config.save_attempts
+                    if self.model_config.save_attempts is not None
+                    else 100
+                )
                 write_classification_attempt(
                     self.train_dir,
                     cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
@@ -257,6 +262,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
                     now,
                     "unknown",
                     0.0,
+                    max_files=save_attempts,
                 )
                 return
 
@@ -277,6 +283,11 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
         detected_state = self.labelmap[best_id]
 
         if self._should_save_image(camera, detected_state, score):
+            save_attempts = (
+                self.model_config.save_attempts
+                if self.model_config.save_attempts is not None
+                else 100
+            )
             write_classification_attempt(
                 self.train_dir,
                 cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
@@ -284,6 +295,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
                 now,
                 detected_state,
                 score,
+                max_files=save_attempts,
             )
 
         if score < self.model_config.threshold:
@@ -482,6 +494,11 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
             return
 
         if self.interpreter is None:
+            save_attempts = (
+                self.model_config.save_attempts
+                if self.model_config.save_attempts is not None
+                else 200
+            )
             write_classification_attempt(
                 self.train_dir,
                 cv2.cvtColor(crop, cv2.COLOR_RGB2BGR),
@@ -489,6 +506,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
                 now,
                 "unknown",
                 0.0,
+                max_files=save_attempts,
             )
             return
 
@@ -506,6 +524,11 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
         score = round(probs[best_id], 2)
         self.__update_metrics(datetime.datetime.now().timestamp() - now)
 
+        save_attempts = (
+            self.model_config.save_attempts
+            if self.model_config.save_attempts is not None
+            else 200
+        )
         write_classification_attempt(
             self.train_dir,
             cv2.cvtColor(crop, cv2.COLOR_RGB2BGR),
@@ -513,7 +536,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
             now,
             self.labelmap[best_id],
             score,
-            max_files=200,
+            max_files=save_attempts,
         )
 
         if score < self.model_config.threshold:
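The same None-to-default fallback for `save_attempts` appears in all four processor hunks above. A small sketch (the helper name is hypothetical, not part of the source) of the behavior described by the config field: an explicit value wins, otherwise 200 for object classification and 100 for state classification:

def resolve_save_attempts(configured: int | None, is_object_model: bool) -> int:
    # Mirrors the fallback used above: explicit value wins, otherwise
    # 200 for object classification and 100 for state classification.
    if configured is not None:
        return configured
    return 200 if is_object_model else 100

assert resolve_save_attempts(None, is_object_model=True) == 200
assert resolve_save_attempts(None, is_object_model=False) == 100
assert resolve_save_attempts(50, is_object_model=True) == 50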
@@ -5,7 +5,7 @@ import shutil
 import threading
 from pathlib import Path
 
-from peewee import fn
+from peewee import SQL, fn
 
 from frigate.config import FrigateConfig
 from frigate.const import RECORD_DIR
@@ -44,13 +44,19 @@ class StorageMaintainer(threading.Thread):
                 )
             }
 
-            # calculate MB/hr
+            # calculate MB/hr from last 100 segments
            try:
-                bandwidth = round(
-                    Recordings.select(fn.AVG(bandwidth_equation))
+                # Subquery to get last 100 segments, then average their bandwidth
+                last_100 = (
+                    Recordings.select(bandwidth_equation.alias("bw"))
                     .where(Recordings.camera == camera, Recordings.segment_size > 0)
+                    .order_by(Recordings.start_time.desc())
                     .limit(100)
-                    .scalar()
+                    .alias("recent")
+                )
+
+                bandwidth = round(
+                    Recordings.select(fn.AVG(SQL("bw"))).from_(last_100).scalar()
                     * 3600,
                     2,
                 )
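For readers less familiar with peewee, the reworked query above averages a bandwidth expression over a LIMITed, ordered subquery rather than over the whole table. A self-contained illustration of that pattern using sqlite3 (the table layout and the `segment_size / duration` expression here are simplified stand-ins, not Frigate's actual schema or `bandwidth_equation`):

import sqlite3

con = sqlite3.connect(":memory:")
con.execute(
    "CREATE TABLE recordings (camera TEXT, start_time REAL, segment_size REAL, duration REAL)"
)
con.executemany(
    "INSERT INTO recordings VALUES (?, ?, ?, ?)",
    [("front", t, 10.0 + t, 10.0) for t in range(200)],
)

# Average bandwidth over only the 100 most recent segments for one camera.
row = con.execute(
    """
    SELECT AVG(bw) FROM (
        SELECT segment_size / duration AS bw
        FROM recordings
        WHERE camera = ? AND segment_size > 0
        ORDER BY start_time DESC
        LIMIT 100
    ) AS recent
    """,
    ("front",),
).fetchone()
print(round(row[0] * 3600, 2))  # scale to an hourly figure, as the maintainer does

Averaging inside the subquery is what restricts the estimate to recent segments; applying LIMIT to a plain aggregate query would not have that effect.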
@@ -330,7 +330,7 @@ def collect_state_classification_examples(
     1. Queries review items from specified cameras
     2. Selects 100 balanced timestamps across the data
     3. Extracts keyframes from recordings (cropped to specified regions)
-    4. Selects 20 most visually distinct images
+    4. Selects 24 most visually distinct images
     5. Saves them to the dataset directory
 
     Args:
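Step 4 in the docstring above ("Selects 24 most visually distinct images") is the only step whose mechanics are not obvious from the summary. One generic way to do that kind of selection, shown here purely as an illustration and not as Frigate's actual implementation, is greedy farthest-point sampling over per-image feature vectors:

import numpy as np

def select_most_distinct(features: np.ndarray, count: int) -> list[int]:
    """Greedy farthest-point selection over per-image feature vectors.

    features: (N, D) array, e.g. downscaled pixels or embeddings per keyframe.
    Returns indices of `count` images that are mutually far apart.
    """
    selected = [0]  # seed with the first image
    while len(selected) < min(count, len(features)):
        dists = np.linalg.norm(
            features[:, None, :] - features[selected][None, :, :], axis=-1
        )
        # Each candidate's distance to its nearest already-selected image.
        nearest = dists.min(axis=1)
        nearest[selected] = -1  # never re-pick an already-selected image
        selected.append(int(nearest.argmax()))
    return selected

# Toy usage: 200 fake keyframe feature vectors, keep the 24 most distinct.
rng = np.random.default_rng(0)
feats = rng.normal(size=(200, 64))
print(select_most_distinct(feats, 24))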
@@ -660,7 +660,6 @@ def collect_object_classification_examples(
     Args:
         model_name: Name of the classification model
        label: Object label to collect (e.g., "person", "car")
-        cameras: List of camera names to collect examples from
     """
     dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset")
     temp_dir = os.path.join(dataset_dir, "temp")
@@ -170,6 +170,10 @@
       "label": "Download snapshot",
       "aria": "Download snapshot"
     },
+    "downloadCleanSnapshot": {
+      "label": "Download clean snapshot",
+      "aria": "Download clean snapshot"
+    },
     "viewTrackingDetails": {
       "label": "View tracking details",
       "aria": "Show the tracking details"
@@ -108,6 +108,18 @@ export default function SearchResultActions({
           </a>
         </MenuItem>
       )}
+      {searchResult.has_snapshot &&
+        config?.cameras[searchResult.camera].snapshots.clean_copy && (
+          <MenuItem aria-label={t("itemMenu.downloadCleanSnapshot.aria")}>
+            <a
+              className="flex items-center"
+              href={`${baseUrl}api/events/${searchResult.id}/snapshot-clean.webp`}
+              download={`${searchResult.camera}_${searchResult.label}-clean.webp`}
+            >
+              <span>{t("itemMenu.downloadCleanSnapshot.label")}</span>
+            </a>
+          </MenuItem>
+        )}
       {searchResult.data.type == "object" && (
         <MenuItem
           aria-label={t("itemMenu.viewTrackingDetails.aria")}
@@ -69,6 +69,20 @@ export default function DetailActionsMenu({
           </a>
         </DropdownMenuItem>
       )}
+      {search.has_snapshot &&
+        config?.cameras[search.camera].snapshots.clean_copy && (
+          <DropdownMenuItem>
+            <a
+              className="w-full"
+              href={`${baseUrl}api/events/${search.id}/snapshot-clean.webp`}
+              download={`${search.camera}_${search.label}-clean.webp`}
+            >
+              <div className="flex cursor-pointer items-center gap-2">
+                <span>{t("itemMenu.downloadCleanSnapshot.label")}</span>
+              </div>
+            </a>
+          </DropdownMenuItem>
+        )}
       {search.has_clip && (
         <DropdownMenuItem>
           <a
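Both menu hunks above gate the new entry on `has_snapshot` plus the camera's `snapshots.clean_copy` setting and link to the `snapshot-clean.webp` endpoint. A minimal sketch of fetching that endpoint outside the UI (the base URL, event id, and output filename are placeholders; authentication is omitted):

import requests

BASE_URL = "http://frigate.local:5000/"  # placeholder Frigate instance
event_id = "1700000000.123456-abcdef"    # placeholder event id

resp = requests.get(f"{BASE_URL}api/events/{event_id}/snapshot-clean.webp", timeout=10)
resp.raise_for_status()

with open("front_person-clean.webp", "wb") as f:
    f.write(resp.content)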
@@ -498,7 +498,7 @@ export default function SearchDetailDialog({
 
     const views = [...SEARCH_TABS];
 
-    if (search.data.type != "object" || !search.has_clip) {
+    if (!search.has_clip) {
       const index = views.indexOf("tracking_details");
       views.splice(index, 1);
     }
@@ -548,7 +548,7 @@ export default function SearchDetailDialog({
           "relative flex items-center justify-between",
           "w-full",
           // match dialog's max-width classes
-          "sm:max-w-xl md:max-w-4xl lg:max-w-[70%]",
+          "max-h-[95dvh] max-w-[85%] xl:max-w-[70%]",
         )}
       >
         <Tooltip>
@@ -594,8 +594,7 @@ export default function SearchDetailDialog({
         ref={isDesktop ? dialogContentRef : undefined}
         className={cn(
           "scrollbar-container overflow-y-auto",
-          isDesktop &&
-            "max-h-[95dvh] sm:max-w-xl md:max-w-4xl lg:max-w-[70%]",
+          isDesktop && "max-h-[95dvh] max-w-[85%] xl:max-w-[70%]",
           isMobile && "flex h-full flex-col px-4",
         )}
         onEscapeKeyDown={(event) => {
@@ -622,7 +622,7 @@ export function TrackingDetails({
 
       <div
         className={cn(
-          isDesktop && "justify-between overflow-hidden md:basis-2/5",
+          isDesktop && "justify-between overflow-hidden lg:basis-2/5",
         )}
      >
         {isDesktop && tabs && (
@@ -900,96 +900,99 @@ function LifecycleIconRow({
         <div className="text-md flex items-start break-words text-left">
           {getLifecycleItemDescription(item)}
         </div>
-        <div className="my-2 ml-2 flex flex-col flex-wrap items-start gap-1.5 text-xs text-secondary-foreground">
-          <div className="flex items-center gap-1.5">
-            <span className="text-primary-variant">
-              {t("trackingDetails.lifecycleItemDesc.header.score")}
-            </span>
-            <span className="font-medium text-primary">{score}</span>
-          </div>
-          <div className="flex items-center gap-1.5">
-            <span className="text-primary-variant">
-              {t("trackingDetails.lifecycleItemDesc.header.ratio")}
-            </span>
-            <span className="font-medium text-primary">{ratio}</span>
-          </div>
-          <div className="flex items-center gap-1.5">
-            <span className="text-primary-variant">
-              {t("trackingDetails.lifecycleItemDesc.header.area")}{" "}
-              {attributeAreaPx !== undefined &&
-                attributeAreaPct !== undefined && (
-                  <span className="text-primary-variant">
-                    ({getTranslatedLabel(item.data.label)})
-                  </span>
-                )}
-            </span>
-            {areaPx !== undefined && areaPct !== undefined ? (
-              <span className="font-medium text-primary">
-                {t("information.pixels", { ns: "common", area: areaPx })} ·{" "}
-                {areaPct}%
-              </span>
-            ) : (
-              <span>N/A</span>
-            )}
-          </div>
-          {attributeAreaPx !== undefined &&
-            attributeAreaPct !== undefined && (
-              <div className="flex items-center gap-1.5">
-                <span className="text-primary-variant">
-                  {t("trackingDetails.lifecycleItemDesc.header.area")} (
-                  {getTranslatedLabel(item.data.attribute)})
-                </span>
-                <span className="font-medium text-primary">
-                  {t("information.pixels", {
-                    ns: "common",
-                    area: attributeAreaPx,
-                  })}{" "}
-                  · {attributeAreaPct}%
-                </span>
-              </div>
-            )}
-
-          {item.data?.zones && item.data.zones.length > 0 && (
-            <div className="mt-1 flex flex-wrap items-center gap-2">
-              {item.data.zones.map((zone, zidx) => {
-                const color = getZoneColor(zone)?.join(",") ?? "0,0,0";
-                return (
-                  <Badge
-                    key={`${zone}-${zidx}`}
-                    variant="outline"
-                    className="inline-flex cursor-pointer items-center gap-2"
-                    onClick={(e: React.MouseEvent) => {
-                      e.stopPropagation();
-                      setSelectedZone(zone);
-                    }}
-                    style={{
-                      borderColor: `rgba(${color}, 0.6)`,
-                      background: `rgba(${color}, 0.08)`,
-                    }}
-                  >
-                    <span
-                      className="size-1 rounded-full"
-                      style={{
-                        display: "inline-block",
-                        width: 10,
-                        height: 10,
-                        backgroundColor: `rgb(${color})`,
-                      }}
-                    />
-                    <span
-                      className={cn(
-                        item.data?.zones_friendly_names?.[zidx] === zone &&
-                          "smart-capitalize",
-                      )}
-                    >
-                      {item.data?.zones_friendly_names?.[zidx]}
-                    </span>
-                  </Badge>
-                );
-              })}
-            </div>
-          )}
-        </div>
+        {/* Only show Score/Ratio/Area for object events, not for audio (heard) or manual API (external) events */}
+        {item.class_type !== "heard" && item.class_type !== "external" && (
+          <div className="my-2 ml-2 flex flex-col flex-wrap items-start gap-1.5 text-xs text-secondary-foreground">
+            <div className="flex items-center gap-1.5">
+              <span className="text-primary-variant">
+                {t("trackingDetails.lifecycleItemDesc.header.score")}
+              </span>
+              <span className="font-medium text-primary">{score}</span>
+            </div>
+            <div className="flex items-center gap-1.5">
+              <span className="text-primary-variant">
+                {t("trackingDetails.lifecycleItemDesc.header.ratio")}
+              </span>
+              <span className="font-medium text-primary">{ratio}</span>
+            </div>
+            <div className="flex items-center gap-1.5">
+              <span className="text-primary-variant">
+                {t("trackingDetails.lifecycleItemDesc.header.area")}{" "}
+                {attributeAreaPx !== undefined &&
+                  attributeAreaPct !== undefined && (
+                    <span className="text-primary-variant">
+                      ({getTranslatedLabel(item.data.label)})
+                    </span>
+                  )}
+              </span>
+              {areaPx !== undefined && areaPct !== undefined ? (
+                <span className="font-medium text-primary">
+                  {t("information.pixels", { ns: "common", area: areaPx })}{" "}
+                  · {areaPct}%
+                </span>
+              ) : (
+                <span>N/A</span>
+              )}
+            </div>
+            {attributeAreaPx !== undefined &&
+              attributeAreaPct !== undefined && (
+                <div className="flex items-center gap-1.5">
+                  <span className="text-primary-variant">
+                    {t("trackingDetails.lifecycleItemDesc.header.area")} (
+                    {getTranslatedLabel(item.data.attribute)})
+                  </span>
+                  <span className="font-medium text-primary">
+                    {t("information.pixels", {
+                      ns: "common",
+                      area: attributeAreaPx,
+                    })}{" "}
+                    · {attributeAreaPct}%
+                  </span>
+                </div>
+              )}
+          </div>
+        )}
+
+        {item.data?.zones && item.data.zones.length > 0 && (
+          <div className="mt-1 flex flex-wrap items-center gap-2">
+            {item.data.zones.map((zone, zidx) => {
+              const color = getZoneColor(zone)?.join(",") ?? "0,0,0";
+              return (
+                <Badge
+                  key={`${zone}-${zidx}`}
+                  variant="outline"
+                  className="inline-flex cursor-pointer items-center gap-2"
+                  onClick={(e: React.MouseEvent) => {
+                    e.stopPropagation();
+                    setSelectedZone(zone);
+                  }}
+                  style={{
+                    borderColor: `rgba(${color}, 0.6)`,
+                    background: `rgba(${color}, 0.08)`,
+                  }}
+                >
+                  <span
+                    className="size-1 rounded-full"
+                    style={{
+                      display: "inline-block",
+                      width: 10,
+                      height: 10,
+                      backgroundColor: `rgb(${color})`,
+                    }}
+                  />
+                  <span
+                    className={cn(
+                      item.data?.zones_friendly_names?.[zidx] === zone &&
+                        "smart-capitalize",
+                    )}
+                  >
+                    {item.data?.zones_friendly_names?.[zidx]}
+                  </span>
+                </Badge>
+              );
+            })}
+          </div>
+        )}
       </div>
     </div>
     <div className="ml-3 flex-shrink-0 px-1 text-right text-xs text-primary-variant">
@@ -305,6 +305,7 @@ export type CustomClassificationModelConfig = {
   enabled: boolean;
   name: string;
   threshold: number;
+  save_attempts?: number;
   object_config?: {
     objects: string[];
     classification_type: string;