Mirror of https://github.com/blakeblackshear/frigate.git
Synced 2025-12-06 13:34:13 +03:00

Compare commits: eacd4c2359 ... 7672ce39ed (5 commits)

Commits:
- 7672ce39ed
- 9ab78f496c
- 8a360eecf8
- 1f9669bbe5
- 793906bb68
@@ -191,6 +191,7 @@ ONVIF
openai
opencv
openvino
overfitting
OWASP
paddleocr
paho
.github/workflows/ci.yml (vendored)
@@ -136,7 +136,6 @@ jobs:
            *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-tensorrt,mode=max
      - name: AMD/ROCm general build
        env:
          AMDGPU: gfx
          HSA_OVERRIDE: 0
        uses: docker/bake-action@v6
        with:
@@ -3,7 +3,6 @@
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
ARG ROCM=1
ARG AMDGPU=gfx900
ARG HSA_OVERRIDE_GFX_VERSION
ARG HSA_OVERRIDE
@@ -11,7 +10,6 @@ ARG HSA_OVERRIDE

FROM wget AS rocm

ARG ROCM
ARG AMDGPU

RUN apt update -qq && \
    apt install -y wget gpg && \
@@ -36,7 +34,10 @@ FROM deps AS deps-prelim
COPY docker/rocm/debian-backports.sources /etc/apt/sources.list.d/debian-backports.sources
RUN apt-get update && \
    apt-get install -y libnuma1 && \
    apt-get install -qq -y -t bookworm-backports mesa-va-drivers mesa-vulkan-drivers
    apt-get install -qq -y -t bookworm-backports mesa-va-drivers mesa-vulkan-drivers && \
    # Install C++ standard library headers for HIPRTC kernel compilation fallback
    apt-get install -qq -y libstdc++-12-dev && \
    rm -rf /var/lib/apt/lists/*

WORKDIR /opt/frigate
COPY --from=rootfs / /
@@ -54,12 +55,14 @@ RUN pip3 uninstall -y onnxruntime \

FROM scratch AS rocm-dist

ARG ROCM
ARG AMDGPU

COPY --from=rocm /opt/rocm-$ROCM/bin/rocminfo /opt/rocm-$ROCM/bin/migraphx-driver /opt/rocm-$ROCM/bin/
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*$AMDGPU* /opt/rocm-$ROCM/share/miopen/db/
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx908* /opt/rocm-$ROCM/share/miopen/db/
COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*$AMDGPU* /opt/rocm-$ROCM/lib/rocblas/library/
# Copy MIOpen database files for gfx10xx and gfx11xx only (RDNA2/RDNA3)
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx10* /opt/rocm-$ROCM/share/miopen/db/
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx11* /opt/rocm-$ROCM/share/miopen/db/
# Copy rocBLAS library files for gfx10xx and gfx11xx only
COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*gfx10* /opt/rocm-$ROCM/lib/rocblas/library/
COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*gfx11* /opt/rocm-$ROCM/lib/rocblas/library/
COPY --from=rocm /opt/rocm-dist/ /

#######################################################################
@@ -1,6 +1,3 @@
variable "AMDGPU" {
  default = "gfx900"
}
variable "ROCM" {
  default = "7.1.1"
}
@@ -38,7 +35,6 @@ target rocm {
  }
  platforms = ["linux/amd64"]
  args = {
    AMDGPU = AMDGPU,
    ROCM = ROCM,
    HSA_OVERRIDE_GFX_VERSION = HSA_OVERRIDE_GFX_VERSION,
    HSA_OVERRIDE = HSA_OVERRIDE
@@ -1,53 +1,15 @@
BOARDS += rocm

# AMD/ROCm is chunky so we build couple of smaller images for specific chipsets
ROCM_CHIPSETS:=gfx900:9.0.0 gfx1030:10.3.0 gfx1100:11.0.0

local-rocm: version
	$(foreach chipset,$(ROCM_CHIPSETS), \
		AMDGPU=$(word 1,$(subst :, ,$(chipset))) \
		HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) \
		HSA_OVERRIDE=1 \
		docker buildx bake --file=docker/rocm/rocm.hcl rocm \
		--set rocm.tags=frigate:latest-rocm-$(word 1,$(subst :, ,$(chipset))) \
		--load \
		&&) true

	unset HSA_OVERRIDE_GFX_VERSION && \
	HSA_OVERRIDE=0 \
	AMDGPU=gfx \
	docker buildx bake --file=docker/rocm/rocm.hcl rocm \
	--set rocm.tags=frigate:latest-rocm \
	--load

build-rocm: version
	$(foreach chipset,$(ROCM_CHIPSETS), \
		AMDGPU=$(word 1,$(subst :, ,$(chipset))) \
		HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) \
		HSA_OVERRIDE=1 \
		docker buildx bake --file=docker/rocm/rocm.hcl rocm \
		--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) \
		&&) true

	unset HSA_OVERRIDE_GFX_VERSION && \
	HSA_OVERRIDE=0 \
	AMDGPU=gfx \
	docker buildx bake --file=docker/rocm/rocm.hcl rocm \
	--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm

push-rocm: build-rocm
	$(foreach chipset,$(ROCM_CHIPSETS), \
		AMDGPU=$(word 1,$(subst :, ,$(chipset))) \
		HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) \
		HSA_OVERRIDE=1 \
		docker buildx bake --file=docker/rocm/rocm.hcl rocm \
		--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) \
		--push \
		&&) true

	unset HSA_OVERRIDE_GFX_VERSION && \
	HSA_OVERRIDE=0 \
	AMDGPU=gfx \
	docker buildx bake --file=docker/rocm/rocm.hcl rocm \
	--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm \
	--push
@@ -168,6 +168,8 @@ Recorded `speech` events will always use a `whisper` model, regardless of the `m

If you hear speech that’s actually important and worth saving/indexing for the future, **just press the transcribe button in Explore** on that specific `speech` event - that keeps things explicit, reliable, and under your control.

Support for external `whisper` Docker containers is being considered for future versions of Frigate. A single transcription service could then be shared by Frigate and other applications (for example, Home Assistant Voice), and run on more powerful machines when available.

2. Why don't you save live transcription text and use that for `speech` events?

There’s no guarantee that a `speech` event is even created from the exact audio that went through the transcription model. Live transcription and `speech` event creation are **separate, asynchronous processes**. Even when both are correctly configured, trying to align the **precise start and end time of a speech event** with whatever audio the model happened to be processing at that moment is unreliable.
@@ -69,4 +69,6 @@ Once all images are assigned, training will begin automatically.

### Improving the Model

- **Problem framing**: Keep classes visually distinct and state-focused (e.g., `open`, `closed`, `unknown`). Avoid combining object identity with state in a single model unless necessary.
- **Data collection**: Use the model’s Recent Classifications tab to gather balanced examples across times of day and weather.
- **Data collection**: Use the model's Recent Classifications tab to gather balanced examples across times of day and weather.
- **When to train**: Focus on cases where the model is entirely incorrect or flips between states when it should not. There's no need to train additional images when the model is already working consistently.
- **Selecting training images**: Images scoring below 100% due to new conditions (e.g., first snow of the year, seasonal changes) or variations (e.g., objects temporarily in view, insects at night) are good candidates for training, as they represent scenarios different from the default state. Training these lower-scoring images that differ from existing training data helps prevent overfitting. Avoid training large quantities of images that look very similar, especially if they already score 100%, as this can lead to overfitting.
@@ -13,7 +13,7 @@ Frigate supports multiple different detectors that work on different types of ha

**Most Hardware**

- [Coral EdgeTPU](#edge-tpu-detector): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices.
- [Coral EdgeTPU](#edge-tpu-detector): The Google Coral EdgeTPU is available in USB, Mini PCIe, and m.2 formats allowing for a wide range of compatibility with devices.
- [Hailo](#hailo-8): The Hailo8 and Hailo8L AI Acceleration module is available in m.2 format with a HAT for RPi devices, offering a wide range of compatibility with devices.
- <CommunityBadge /> [MemryX](#memryx-mx3): The MX3 Acceleration module is available in m.2 format, offering broad compatibility across various platforms.
- <CommunityBadge /> [DeGirum](#degirum): Service for using hardware devices in the cloud or locally. Hardware and models provided on the cloud on [their website](https://hub.degirum.com).
@@ -69,12 +69,10 @@ Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `hailo8

## Edge TPU Detector

The Edge TPU detector type runs a TensorFlow Lite model utilizing the Google Coral delegate for hardware acceleration. To configure an Edge TPU detector, set the `"type"` attribute to `"edgetpu"`.
The Edge TPU detector type runs TensorFlow Lite models utilizing the Google Coral delegate for hardware acceleration. To configure an Edge TPU detector, set the `"type"` attribute to `"edgetpu"`.

The Edge TPU device can be specified using the `"device"` attribute according to the [Documentation for the TensorFlow Lite Python API](https://coral.ai/docs/edgetpu/multiple-edgetpu/#using-the-tensorflow-lite-python-api). If not set, the delegate will use the first device it finds.

A TensorFlow Lite model is provided in the container at `/edgetpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`.

:::tip

See [common Edge TPU troubleshooting steps](/troubleshooting/edgetpu) if the Edge TPU is not detected.
@@ -146,6 +144,52 @@ detectors:
    device: pci
```

### EdgeTPU Supported Models

| Model                                 | Notes                                       |
| ------------------------------------- | ------------------------------------------- |
| [MobileNet v2](#ssdlite-mobilenet-v2) | Default model                               |
| [YOLOv9](#yolo-v9)                    | More accurate but slower than default model |

#### SSDLite MobileNet v2

A TensorFlow Lite model is provided in the container at `/edgetpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`.

A TensorFlow Lite model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an INT8 precision model.

#### YOLO v9

[YOLOv9](https://github.com/dbro/frigate-detector-edgetpu-yolo9/releases/download/v1.0/yolov9-s-relu6-best_320_int8_edgetpu.tflite) models that are compiled for TensorFlow Lite and properly quantized are supported, but not included by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`. Note that the model may require a custom label file (e.g., [use this 17 label file](https://raw.githubusercontent.com/dbro/frigate-detector-edgetpu-yolo9/refs/heads/main/labels-coco17.txt) for the model linked above).

<details>
<summary>YOLOv9 Setup & Config</summary>

:::warning

If you are using a Frigate+ YOLOv9 model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.

:::

After placing the downloaded files for the tflite model and labels in your config folder, you can use the following configuration:

```yaml
detectors:
  coral:
    type: edgetpu
    device: usb

model:
  model_type: yolo-generic
  width: 320 # <--- should match the imgsize of the model, typically 320
  height: 320 # <--- should match the imgsize of the model, typically 320
  path: /config/model_cache/yolov9-s-relu6-best_320_int8_edgetpu.tflite
  labelmap_path: /labelmap/labels-coco-17.txt
```

Note that the labelmap uses a subset of the complete COCO label set that has only 17 objects.

</details>

---

## Hailo-8
@@ -710,6 +710,44 @@ audio_transcription:
  # List of language codes: https://github.com/openai/whisper/blob/main/whisper/tokenizer.py#L10
  language: en

# Optional: Configuration for classification models
classification:
  # Optional: Configuration for bird classification
  bird:
    # Optional: Enable bird classification (default: shown below)
    enabled: False
    # Optional: Minimum classification score required to be considered a match (default: shown below)
    threshold: 0.9
  custom:
    # Required: name of the classification model
    model_name:
      # Optional: Enable running the model (default: shown below)
      enabled: True
      # Optional: Name of classification model (default: shown below)
      name: None
      # Optional: Classification score threshold to change the state (default: shown below)
      threshold: 0.8
      # Optional: Number of classification attempts to save in the recent classifications tab (default: shown below)
      # NOTE: Defaults to 200 for object classification and 100 for state classification if not specified
      save_attempts: None
      # Optional: Object classification configuration
      object_config:
        # Required: Object types to classify
        objects: [dog]
        # Optional: Type of classification that is applied (default: shown below)
        classification_type: sub_label
      # Optional: State classification configuration
      state_config:
        # Required: Cameras to run classification on
        cameras:
          camera_name:
            # Required: Crop of image frame on this camera to run classification on
            crop: [0, 180, 220, 400]
            # Optional: If classification should be run when motion is detected in the crop (default: shown below)
            motion: False
            # Optional: Interval to run classification on in seconds (default: shown below)
            interval: None

# Optional: Restream configuration
# Uses https://github.com/AlexxIT/go2rtc (v1.9.10)
# NOTE: The default go2rtc API port (1984) must be used,
@@ -1731,37 +1731,40 @@ def create_trigger_embedding(
    if event.data.get("type") != "object":
        return

    if thumbnail := get_event_thumbnail_bytes(event):
        cursor = context.db.execute_sql(
            """
            SELECT thumbnail_embedding FROM vec_thumbnails WHERE id = ?
            """,
            [body.data],
    # Get the thumbnail
    thumbnail = get_event_thumbnail_bytes(event)

    if thumbnail is None:
        return JSONResponse(
            content={
                "success": False,
                "message": f"Failed to get thumbnail for {body.data} for {body.type} trigger",
            },
            status_code=400,
        )

        row = cursor.fetchone() if cursor else None
    # Try to reuse existing embedding from database
    cursor = context.db.execute_sql(
        """
        SELECT thumbnail_embedding FROM vec_thumbnails WHERE id = ?
        """,
        [body.data],
    )

        if row:
            query_embedding = row[0]
            embedding = np.frombuffer(query_embedding, dtype=np.float32)
    row = cursor.fetchone() if cursor else None

    if row:
        query_embedding = row[0]
        embedding = np.frombuffer(query_embedding, dtype=np.float32)
    else:
        # Extract valid thumbnail
        thumbnail = get_event_thumbnail_bytes(event)

        if thumbnail is None:
            return JSONResponse(
                content={
                    "success": False,
                    "message": f"Failed to get thumbnail for {body.data} for {body.type} trigger",
                },
                status_code=400,
            )

        # Generate new embedding
        embedding = context.generate_image_embedding(
            body.data, (base64.b64encode(thumbnail).decode("ASCII"))
        )

    if not embedding:
    if embedding is None or (
        isinstance(embedding, (list, np.ndarray)) and len(embedding) == 0
    ):
        return JSONResponse(
            content={
                "success": False,

@@ -1896,7 +1899,9 @@ def update_trigger_embedding(
        body.data, (base64.b64encode(thumbnail).decode("ASCII"))
    )

    if not embedding:
    if embedding is None or (
        isinstance(embedding, (list, np.ndarray)) and len(embedding) == 0
    ):
        return JSONResponse(
            content={
                "success": False,
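The hunk above interleaves the removed and added versions of the trigger-embedding logic, which makes the new control flow hard to read. The sketch below is an editorial condensation of that flow, not the project's code as committed: it assumes a `db` handle exposing `execute_sql` and a `context.generate_image_embedding` callable as used in the diff, and it raises an exception where the endpoint returns a JSON error response.

```python
import base64

import numpy as np


def get_or_create_thumbnail_embedding(db, context, event_id: str, thumbnail: bytes):
    """Illustrative only: reuse a cached embedding if present, otherwise generate one."""
    # Look for a previously stored embedding for this event id.
    row = db.execute_sql(
        "SELECT thumbnail_embedding FROM vec_thumbnails WHERE id = ?", [event_id]
    ).fetchone()

    if row:
        # Stored as a raw float32 blob; rehydrate it without recomputing.
        return np.frombuffer(row[0], dtype=np.float32)

    # No cached vector: generate a new embedding from the thumbnail bytes.
    embedding = context.generate_image_embedding(
        event_id, base64.b64encode(thumbnail).decode("ASCII")
    )

    # Treat None or an empty list/array as a failure, mirroring the explicit
    # length check in the diff (a bare `not embedding` is ambiguous for arrays).
    if embedding is None or (
        isinstance(embedding, (list, np.ndarray)) and len(embedding) == 0
    ):
        raise ValueError(f"Failed to generate embedding for {event_id}")

    return np.asarray(embedding, dtype=np.float32)
```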
@@ -105,6 +105,11 @@ class CustomClassificationConfig(FrigateBaseModel):
    threshold: float = Field(
        default=0.8, title="Classification score threshold to change the state."
    )
    save_attempts: int | None = Field(
        default=None,
        title="Number of classification attempts to save in the recent classifications tab. If not specified, defaults to 200 for object classification and 100 for state classification.",
        ge=0,
    )
    object_config: CustomClassificationObjectConfig | None = Field(default=None)
    state_config: CustomClassificationStateConfig | None = Field(default=None)
@@ -250,6 +250,11 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
        if self.interpreter is None:
            # When interpreter is None, always save (score is 0.0, which is < 1.0)
            if self._should_save_image(camera, "unknown", 0.0):
                save_attempts = (
                    self.model_config.save_attempts
                    if self.model_config.save_attempts is not None
                    else 100
                )
                write_classification_attempt(
                    self.train_dir,
                    cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
@@ -257,6 +262,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
                    now,
                    "unknown",
                    0.0,
                    max_files=save_attempts,
                )
            return

@@ -277,6 +283,11 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
        detected_state = self.labelmap[best_id]

        if self._should_save_image(camera, detected_state, score):
            save_attempts = (
                self.model_config.save_attempts
                if self.model_config.save_attempts is not None
                else 100
            )
            write_classification_attempt(
                self.train_dir,
                cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
@@ -284,6 +295,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
                now,
                detected_state,
                score,
                max_files=save_attempts,
            )

        if score < self.model_config.threshold:

@@ -482,6 +494,11 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
            return

        if self.interpreter is None:
            save_attempts = (
                self.model_config.save_attempts
                if self.model_config.save_attempts is not None
                else 200
            )
            write_classification_attempt(
                self.train_dir,
                cv2.cvtColor(crop, cv2.COLOR_RGB2BGR),
@@ -489,6 +506,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
                now,
                "unknown",
                0.0,
                max_files=save_attempts,
            )
            return

@@ -506,6 +524,11 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
        score = round(probs[best_id], 2)
        self.__update_metrics(datetime.datetime.now().timestamp() - now)

        save_attempts = (
            self.model_config.save_attempts
            if self.model_config.save_attempts is not None
            else 200
        )
        write_classification_attempt(
            self.train_dir,
            cv2.cvtColor(crop, cv2.COLOR_RGB2BGR),
@@ -513,7 +536,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
            now,
            self.labelmap[best_id],
            score,
            max_files=200,
            max_files=save_attempts,
        )

        if score < self.model_config.threshold:
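For reference, the `save_attempts` handling repeated in the hunks above boils down to a small default-resolution step (200 for object classification, 100 for state classification) whose result is passed to `write_classification_attempt` as `max_files`. The helpers below are illustrative only; `prune_to_max_files` is a hypothetical stand-in showing what such a cap typically means on disk, not Frigate's actual implementation.

```python
import os


def resolve_save_attempts(configured: int | None, kind: str) -> int:
    """Illustrative: mirror the defaults used in the diff above."""
    if configured is not None:
        return configured
    return 200 if kind == "object" else 100


def prune_to_max_files(train_dir: str, max_files: int) -> None:
    """Hypothetical helper: keep only the newest `max_files` saved attempts."""
    files = sorted(
        (os.path.join(train_dir, f) for f in os.listdir(train_dir)),
        key=os.path.getmtime,
        reverse=True,
    )
    for stale in files[max_files:]:
        os.remove(stale)
```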
@@ -1,19 +1,20 @@
import logging
import math
import os

import cv2
import numpy as np
from pydantic import Field
from typing_extensions import Literal

from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum

try:
    from tflite_runtime.interpreter import Interpreter, load_delegate
except ModuleNotFoundError:
    from tensorflow.lite.python.interpreter import Interpreter, load_delegate


logger = logging.getLogger(__name__)

DETECTOR_KEY = "edgetpu"

@@ -26,6 +27,10 @@ class EdgeTpuDetectorConfig(BaseDetectorConfig):

class EdgeTpuTfl(DetectionApi):
    type_key = DETECTOR_KEY
    supported_models = [
        ModelTypeEnum.ssd,
        ModelTypeEnum.yologeneric,
    ]

    def __init__(self, detector_config: EdgeTpuDetectorConfig):
        device_config = {}

@@ -63,31 +68,294 @@ class EdgeTpuTfl(DetectionApi):

        self.tensor_input_details = self.interpreter.get_input_details()
        self.tensor_output_details = self.interpreter.get_output_details()
        self.model_width = detector_config.model.width
        self.model_height = detector_config.model.height

        self.min_score = 0.4
        self.max_detections = 20

        self.model_type = detector_config.model.model_type
        self.model_requires_int8 = self.tensor_input_details[0]["dtype"] == np.int8

        if self.model_type == ModelTypeEnum.yologeneric:
            logger.debug("Using YOLO preprocessing/postprocessing")

            if len(self.tensor_output_details) not in [2, 3]:
                logger.error(
                    f"Invalid count of output tensors in YOLO model. Found {len(self.tensor_output_details)}, expecting 2 or 3."
                )
                raise

            self.reg_max = 16  # = 64 dfl_channels // 4 # YOLO standard
            self.min_logit_value = np.log(
                self.min_score / (1 - self.min_score)
            )  # for filtering
            self._generate_anchors_and_strides()  # decode bounding box DFL
            self.project = np.arange(
                self.reg_max, dtype=np.float32
            )  # for decoding bounding box DFL information

            # Determine YOLO tensor indices and quantization scales for
            # boxes and class_scores the tensor ordering and names are
            # not reliable, so use tensor shape to detect which tensor
            # holds boxes or class scores.
            # The tensors have shapes (B, N, C)
            # where N is the number of candidates (=2100 for 320x320)
            # this may guess wrong if the number of classes is exactly 64
            output_boxes_index = None
            output_classes_index = None
            for i, x in enumerate(self.tensor_output_details):
                # the nominal index seems to start at 1 instead of 0
                if len(x["shape"]) == 3 and x["shape"][2] == 64:
                    output_boxes_index = i
                elif len(x["shape"]) == 3 and x["shape"][2] > 1:
                    # require the number of classes to be more than 1
                    # to differentiate from (not used) max score tensor
                    output_classes_index = i
            if output_boxes_index is None or output_classes_index is None:
                logger.warning("Unrecognized model output, unexpected tensor shapes.")
                output_classes_index = (
                    0
                    if (output_boxes_index is None or output_classes_index == 1)
                    else 1
                )  # 0 is default guess
                output_boxes_index = 1 if (output_boxes_index == 0) else 0

            scores_details = self.tensor_output_details[output_classes_index]
            self.scores_tensor_index = scores_details["index"]
            self.scores_scale, self.scores_zero_point = scores_details["quantization"]
            # calculate the quantized version of the min_score
            self.min_score_quantized = int(
                (self.min_logit_value / self.scores_scale) + self.scores_zero_point
            )
            self.logit_shift_to_positive_values = (
                max(0, math.ceil((128 + self.scores_zero_point) * self.scores_scale))
                + 1
            )  # round up

            boxes_details = self.tensor_output_details[output_boxes_index]
            self.boxes_tensor_index = boxes_details["index"]
            self.boxes_scale, self.boxes_zero_point = boxes_details["quantization"]

        elif self.model_type == ModelTypeEnum.ssd:
            logger.debug("Using SSD preprocessing/postprocessing")

            # SSD model indices (4 outputs: boxes, class_ids, scores, count)
            for x in self.tensor_output_details:
                if len(x["shape"]) == 3:
                    self.output_boxes_index = x["index"]
                elif len(x["shape"]) == 1:
                    self.output_count_index = x["index"]

            self.output_class_ids_index = None
            self.output_class_scores_index = None

        else:
            raise Exception(
                f"{self.model_type} is currently not supported for edgetpu. See the docs for more info on supported models."
            )

    def _generate_anchors_and_strides(self):
        # for decoding the bounding box DFL information into xy coordinates
        all_anchors = []
        all_strides = []
        strides = (8, 16, 32)  # YOLO's small, medium, large detection heads

        for stride in strides:
            feat_h, feat_w = self.model_height // stride, self.model_width // stride

            grid_y, grid_x = np.meshgrid(
                np.arange(feat_h, dtype=np.float32),
                np.arange(feat_w, dtype=np.float32),
                indexing="ij",
            )

            grid_coords = np.stack((grid_x.flatten(), grid_y.flatten()), axis=1)
            anchor_points = grid_coords + 0.5

            all_anchors.append(anchor_points)
            all_strides.append(np.full((feat_h * feat_w, 1), stride, dtype=np.float32))

        self.anchors = np.concatenate(all_anchors, axis=0)
        self.anchor_strides = np.concatenate(all_strides, axis=0)

    def determine_indexes_for_non_yolo_models(self):
        """Legacy method for SSD models."""
        if (
            self.output_class_ids_index is None
            or self.output_class_scores_index is None
        ):
            for i in range(4):
                index = self.tensor_output_details[i]["index"]
                if (
                    index != self.output_boxes_index
                    and index != self.output_count_index
                ):
                    if (
                        np.mod(np.float32(self.interpreter.tensor(index)()[0][0]), 1)
                        == 0.0
                    ):
                        self.output_class_ids_index = index
                    else:
                        self.output_scores_index = index

    def pre_process(self, tensor_input):
        if self.model_requires_int8:
            tensor_input = np.bitwise_xor(tensor_input, 128).view(
                np.int8
            )  # shift by -128
        return tensor_input

    def detect_raw(self, tensor_input):
        tensor_input = self.pre_process(tensor_input)

        self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
        self.interpreter.invoke()

        boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
        class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
        scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]
        count = int(
            self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]
        )
        if self.model_type == ModelTypeEnum.yologeneric:
            # Multi-tensor YOLO model with (non-standard B(H*W)C output format).
            # (the comments indicate the shape of tensors,
            # using "2100" as the anchor count (for image size of 320x320),
            # "NC" as number of classes,
            # "N" as the count that survive after min-score filtering)
            # TENSOR A) class scores (1, 2100, NC) with logit values
            # TENSOR B) box coordinates (1, 2100, 64) encoded as dfl scores
            # Recommend that the model clamp the logit values in tensor (A)
            # to the range [-4,+4] to preserve precision from [2%,98%]
            # and because NMS requires the min_score parameter to be >= 0

        detections = np.zeros((20, 6), np.float32)
            # don't dequantize scores data yet, wait until the low-confidence
            # candidates are filtered out from the overall result set.
            # This reduces the work and makes post-processing faster.
            # this method works with raw quantized numbers when possible,
            # which relies on the value of the scale factor to be >0.
            # This speeds up max and argmax operations.
            # Get max confidence for each detection and create the mask
            detections = np.zeros(
                (self.max_detections, 6), np.float32
            )  # initialize zero results
            scores_output_quantized = self.interpreter.get_tensor(
                self.scores_tensor_index
            )[0]  # (2100, NC)
            max_scores_quantized = np.max(scores_output_quantized, axis=1)  # (2100,)
            mask = max_scores_quantized >= self.min_score_quantized  # (2100,)

        for i in range(count):
            if scores[i] < 0.4 or i == 20:
                break
            detections[i] = [
                class_ids[i],
                float(scores[i]),
                boxes[i][0],
                boxes[i][1],
                boxes[i][2],
                boxes[i][3],
            if not np.any(mask):
                return detections  # empty results

            max_scores_filtered_shiftedpositive = (
                (max_scores_quantized[mask] - self.scores_zero_point)
                * self.scores_scale
            ) + self.logit_shift_to_positive_values  # (N,1) shifted logit values
            scores_output_quantized_filtered = scores_output_quantized[mask]

            # dequantize boxes. NMS needs them to be in float format
            # remove candidates with probabilities < threshold
            boxes_output_quantized_filtered = (
                self.interpreter.get_tensor(self.boxes_tensor_index)[0]
            )[mask]  # (N, 64)
            boxes_output_filtered = (
                boxes_output_quantized_filtered.astype(np.float32)
                - self.boxes_zero_point
            ) * self.boxes_scale

            # 2. Decode DFL to distances (ltrb)
            dfl_distributions = boxes_output_filtered.reshape(
                -1, 4, self.reg_max
            )  # (N, 4, 16)

            # Softmax over the 16 bins
            dfl_max = np.max(dfl_distributions, axis=2, keepdims=True)
            dfl_exp = np.exp(dfl_distributions - dfl_max)
            dfl_probs = dfl_exp / np.sum(dfl_exp, axis=2, keepdims=True)  # (N, 4, 16)

            # Weighted sum: (N, 4, 16) * (16,) -> (N, 4)
            distances = np.einsum("pcr,r->pc", dfl_probs, self.project)

            # Calculate box corners in pixel coordinates
            anchors_filtered = self.anchors[mask]
            anchor_strides_filtered = self.anchor_strides[mask]
            x1y1 = (
                anchors_filtered - distances[:, [0, 1]]
            ) * anchor_strides_filtered  # (N, 2)
            x2y2 = (
                anchors_filtered + distances[:, [2, 3]]
            ) * anchor_strides_filtered  # (N, 2)
            boxes_filtered_decoded = np.concatenate((x1y1, x2y2), axis=-1)  # (N, 4)

            # 9. Apply NMS. Use logit scores here to defer sigmoid()
            # until after filtering out redundant boxes
            # Shift the logit scores to be non-negative (required by cv2)
            indices = cv2.dnn.NMSBoxes(
                bboxes=boxes_filtered_decoded,
                scores=max_scores_filtered_shiftedpositive,
                score_threshold=(
                    self.min_logit_value + self.logit_shift_to_positive_values
                ),
                nms_threshold=0.4,  # should this be a model config setting?
            )
            num_detections = len(indices)
            if num_detections == 0:
                return detections  # empty results

            nms_indices = np.array(indices, dtype=np.int32).ravel()  # or .flatten()
            if num_detections > self.max_detections:
                nms_indices = nms_indices[: self.max_detections]
                num_detections = self.max_detections
            kept_logits_quantized = scores_output_quantized_filtered[nms_indices]
            class_ids_post_nms = np.argmax(kept_logits_quantized, axis=1)

            # Extract the final boxes and scores using fancy indexing
            final_boxes = boxes_filtered_decoded[nms_indices]
            final_scores_logits = (
                max_scores_filtered_shiftedpositive[nms_indices]
                - self.logit_shift_to_positive_values
            )  # Unshifted logits

            # Detections array format: [class_id, score, ymin, xmin, ymax, xmax]
            detections[:num_detections, 0] = class_ids_post_nms
            detections[:num_detections, 1] = 1.0 / (
                1.0 + np.exp(-final_scores_logits)
            )  # sigmoid
            detections[:num_detections, 2] = final_boxes[:, 1] / self.model_height
            detections[:num_detections, 3] = final_boxes[:, 0] / self.model_width
            detections[:num_detections, 4] = final_boxes[:, 3] / self.model_height
            detections[:num_detections, 5] = final_boxes[:, 2] / self.model_width
            return detections

        elif self.model_type == ModelTypeEnum.ssd:
            self.determine_indexes_for_non_yolo_models()
            boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
            class_ids = self.interpreter.tensor(
                self.tensor_output_details[1]["index"]
            )()[0]
            scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[
                0
            ]
            count = int(
                self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]
            )

        return detections
            detections = np.zeros((self.max_detections, 6), np.float32)

            for i in range(count):
                if scores[i] < self.min_score:
                    break
                if i == self.max_detections:
                    logger.debug(f"Too many detections ({count})!")
                    break
                detections[i] = [
                    class_ids[i],
                    float(scores[i]),
                    boxes[i][0],
                    boxes[i][1],
                    boxes[i][2],
                    boxes[i][3],
                ]

            return detections

        else:
            raise Exception(
                f"{self.model_type} is currently not supported for edgetpu. See the docs for more info on supported models."
            )
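The YOLO post-processing above packs several steps together (dequantization, DFL decoding, NMS, sigmoid). The following standalone NumPy sketch, using made-up anchors and random DFL logits, walks through just the box-decoding math described in the comments: softmax over the 16 DFL bins per side, expected distance via a weighted sum with `arange(16)`, and conversion to corner coordinates using anchor centers and strides. It is an illustration, not part of the detector.

```python
import numpy as np

reg_max = 16
project = np.arange(reg_max, dtype=np.float32)

# Pretend we kept N=3 candidates after score filtering; each has 4*16 DFL logits.
rng = np.random.default_rng(0)
dfl = rng.normal(size=(3, 4, reg_max)).astype(np.float32)

# Softmax over the 16 bins of each side (left, top, right, bottom).
dfl_exp = np.exp(dfl - dfl.max(axis=2, keepdims=True))
dfl_probs = dfl_exp / dfl_exp.sum(axis=2, keepdims=True)

# Expected bin index per side = distance from the anchor point, in stride units.
distances = np.einsum("pcr,r->pc", dfl_probs, project)  # (3, 4) as l, t, r, b

# Anchor centers (in feature-grid units) and their strides, as built by
# _generate_anchors_and_strides(); values here are made up for the demo.
anchors = np.array([[10.5, 12.5], [4.5, 3.5], [1.5, 2.5]], dtype=np.float32)
strides = np.array([[8.0], [16.0], [32.0]], dtype=np.float32)

x1y1 = (anchors - distances[:, [0, 1]]) * strides
x2y2 = (anchors + distances[:, [2, 3]]) * strides
boxes_xyxy = np.concatenate((x1y1, x2y2), axis=-1)

# Scores stay as logits until after NMS; sigmoid turns them into probabilities.
logits = np.array([2.0, -1.0, 0.5], dtype=np.float32)
probs = 1.0 / (1.0 + np.exp(-logits))
print(boxes_xyxy.shape, probs)
```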
@@ -5,7 +5,7 @@ import shutil
import threading
from pathlib import Path

from peewee import fn
from peewee import SQL, fn

from frigate.config import FrigateConfig
from frigate.const import RECORD_DIR

@@ -44,13 +44,19 @@ class StorageMaintainer(threading.Thread):
                )
            }

            # calculate MB/hr
            # calculate MB/hr from last 100 segments
            try:
                bandwidth = round(
                    Recordings.select(fn.AVG(bandwidth_equation))
                # Subquery to get last 100 segments, then average their bandwidth
                last_100 = (
                    Recordings.select(bandwidth_equation.alias("bw"))
                    .where(Recordings.camera == camera, Recordings.segment_size > 0)
                    .order_by(Recordings.start_time.desc())
                    .limit(100)
                    .scalar()
                    .alias("recent")
                )

                bandwidth = round(
                    Recordings.select(fn.AVG(SQL("bw"))).from_(last_100).scalar()
                    * 3600,
                    2,
                )
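The subquery-then-average shape above exists so that the `LIMIT 100` is applied before `AVG`; averaging directly over the table with a limit would not restrict which rows are aggregated. A plain-Python sketch of the same arithmetic, assuming each per-segment `bw` value is MB per second of footage (the exact `bandwidth_equation` is defined outside this hunk):

```python
def mb_per_hour(recent_segments: list[dict], limit: int = 100) -> float:
    """Illustrative arithmetic only: average the per-segment bandwidth of the
    newest `limit` segments, then scale to MB/hr, matching the ORM query above."""
    newest = sorted(recent_segments, key=lambda s: s["start_time"], reverse=True)[:limit]
    # Assumed segment shape for the demo: segment_size in MB, duration in seconds.
    rates = [s["segment_size"] / s["duration"] for s in newest if s["segment_size"] > 0]
    if not rates:
        return 0.0
    return round(sum(rates) / len(rates) * 3600, 2)
```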
@@ -330,7 +330,7 @@ def collect_state_classification_examples(
    1. Queries review items from specified cameras
    2. Selects 100 balanced timestamps across the data
    3. Extracts keyframes from recordings (cropped to specified regions)
    4. Selects 20 most visually distinct images
    4. Selects 24 most visually distinct images
    5. Saves them to the dataset directory

    Args:

@@ -660,7 +660,6 @@ def collect_object_classification_examples(
    Args:
        model_name: Name of the classification model
        label: Object label to collect (e.g., "person", "car")
        cameras: List of camera names to collect examples from
    """
    dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset")
    temp_dir = os.path.join(dataset_dir, "temp")
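The docstring's "most visually distinct images" step can be pictured as a greedy farthest-point selection over per-image feature vectors. The sketch below is purely illustrative of that idea and is not Frigate's implementation; `features` is assumed to be an `(n, d)` array of embeddings or histograms.

```python
import numpy as np


def pick_most_distinct(features: np.ndarray, k: int = 24) -> list[int]:
    """Greedy farthest-point selection: repeatedly add the image whose feature
    vector is farthest from everything already chosen."""
    n = len(features)
    if n <= k:
        return list(range(n))
    chosen = [0]
    # Distance from every image to the nearest already-chosen image.
    dists = np.linalg.norm(features - features[0], axis=1)
    for _ in range(k - 1):
        nxt = int(np.argmax(dists))
        chosen.append(nxt)
        dists = np.minimum(dists, np.linalg.norm(features - features[nxt], axis=1))
    return chosen
```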
@@ -170,6 +170,10 @@
    "label": "Download snapshot",
    "aria": "Download snapshot"
  },
  "downloadCleanSnapshot": {
    "label": "Download clean snapshot",
    "aria": "Download clean snapshot"
  },
  "viewTrackingDetails": {
    "label": "View tracking details",
    "aria": "Show the tracking details"
@@ -108,6 +108,18 @@ export default function SearchResultActions({
          </a>
        </MenuItem>
      )}
      {searchResult.has_snapshot &&
        config?.cameras[searchResult.camera].snapshots.clean_copy && (
          <MenuItem aria-label={t("itemMenu.downloadCleanSnapshot.aria")}>
            <a
              className="flex items-center"
              href={`${baseUrl}api/events/${searchResult.id}/snapshot-clean.webp`}
              download={`${searchResult.camera}_${searchResult.label}-clean.webp`}
            >
              <span>{t("itemMenu.downloadCleanSnapshot.label")}</span>
            </a>
          </MenuItem>
        )}
      {searchResult.data.type == "object" && (
        <MenuItem
          aria-label={t("itemMenu.viewTrackingDetails.aria")}
@@ -69,6 +69,20 @@ export default function DetailActionsMenu({
          </a>
        </DropdownMenuItem>
      )}
      {search.has_snapshot &&
        config?.cameras[search.camera].snapshots.clean_copy && (
          <DropdownMenuItem>
            <a
              className="w-full"
              href={`${baseUrl}api/events/${search.id}/snapshot-clean.webp`}
              download={`${search.camera}_${search.label}-clean.webp`}
            >
              <div className="flex cursor-pointer items-center gap-2">
                <span>{t("itemMenu.downloadCleanSnapshot.label")}</span>
              </div>
            </a>
          </DropdownMenuItem>
        )}
      {search.has_clip && (
        <DropdownMenuItem>
          <a
@@ -498,7 +498,7 @@ export default function SearchDetailDialog({

  const views = [...SEARCH_TABS];

  if (search.data.type != "object" || !search.has_clip) {
  if (!search.has_clip) {
    const index = views.indexOf("tracking_details");
    views.splice(index, 1);
  }

@@ -548,7 +548,7 @@ export default function SearchDetailDialog({
            "relative flex items-center justify-between",
            "w-full",
            // match dialog's max-width classes
            "sm:max-w-xl md:max-w-4xl lg:max-w-[70%]",
            "max-h-[95dvh] max-w-[85%] xl:max-w-[70%]",
          )}
        >
          <Tooltip>

@@ -594,8 +594,7 @@ export default function SearchDetailDialog({
          ref={isDesktop ? dialogContentRef : undefined}
          className={cn(
            "scrollbar-container overflow-y-auto",
            isDesktop &&
              "max-h-[95dvh] sm:max-w-xl md:max-w-4xl lg:max-w-[70%]",
            isDesktop && "max-h-[95dvh] max-w-[85%] xl:max-w-[70%]",
            isMobile && "flex h-full flex-col px-4",
          )}
          onEscapeKeyDown={(event) => {

@@ -622,7 +622,7 @@ export function TrackingDetails({

      <div
        className={cn(
          isDesktop && "justify-between overflow-hidden md:basis-2/5",
          isDesktop && "justify-between overflow-hidden lg:basis-2/5",
        )}
      >
        {isDesktop && tabs && (
@@ -900,96 +900,99 @@ function LifecycleIconRow({
      <div className="text-md flex items-start break-words text-left">
        {getLifecycleItemDescription(item)}
      </div>
      <div className="my-2 ml-2 flex flex-col flex-wrap items-start gap-1.5 text-xs text-secondary-foreground">
        <div className="flex items-center gap-1.5">
          <span className="text-primary-variant">
            {t("trackingDetails.lifecycleItemDesc.header.score")}
          </span>
          <span className="font-medium text-primary">{score}</span>
        </div>
        <div className="flex items-center gap-1.5">
          <span className="text-primary-variant">
            {t("trackingDetails.lifecycleItemDesc.header.ratio")}
          </span>
          <span className="font-medium text-primary">{ratio}</span>
        </div>
        <div className="flex items-center gap-1.5">
          <span className="text-primary-variant">
            {t("trackingDetails.lifecycleItemDesc.header.area")}{" "}
            {attributeAreaPx !== undefined &&
              attributeAreaPct !== undefined && (
                <span className="text-primary-variant">
                  ({getTranslatedLabel(item.data.label)})
                </span>
              )}
          </span>
          {areaPx !== undefined && areaPct !== undefined ? (
            <span className="font-medium text-primary">
              {t("information.pixels", { ns: "common", area: areaPx })} ·{" "}
              {areaPct}%
      {/* Only show Score/Ratio/Area for object events, not for audio (heard) or manual API (external) events */}
      {item.class_type !== "heard" && item.class_type !== "external" && (
        <div className="my-2 ml-2 flex flex-col flex-wrap items-start gap-1.5 text-xs text-secondary-foreground">
          <div className="flex items-center gap-1.5">
            <span className="text-primary-variant">
              {t("trackingDetails.lifecycleItemDesc.header.score")}
            </span>
          ) : (
            <span>N/A</span>
          )}
        </div>
        {attributeAreaPx !== undefined &&
          attributeAreaPct !== undefined && (
            <div className="flex items-center gap-1.5">
              <span className="text-primary-variant">
                {t("trackingDetails.lifecycleItemDesc.header.area")} (
                {getTranslatedLabel(item.data.attribute)})
              </span>
              <span className="font-medium text-primary">
                {t("information.pixels", {
                  ns: "common",
                  area: attributeAreaPx,
                })}{" "}
                · {attributeAreaPct}%
              </span>
            </div>
          )}

        {item.data?.zones && item.data.zones.length > 0 && (
          <div className="mt-1 flex flex-wrap items-center gap-2">
            {item.data.zones.map((zone, zidx) => {
              const color = getZoneColor(zone)?.join(",") ?? "0,0,0";
              return (
                <Badge
                  key={`${zone}-${zidx}`}
                  variant="outline"
                  className="inline-flex cursor-pointer items-center gap-2"
                  onClick={(e: React.MouseEvent) => {
                    e.stopPropagation();
                    setSelectedZone(zone);
                  }}
                  style={{
                    borderColor: `rgba(${color}, 0.6)`,
                    background: `rgba(${color}, 0.08)`,
                  }}
                >
                  <span
                    className="size-1 rounded-full"
                    style={{
                      display: "inline-block",
                      width: 10,
                      height: 10,
                      backgroundColor: `rgb(${color})`,
                    }}
                  />
                  <span
                    className={cn(
                      item.data?.zones_friendly_names?.[zidx] === zone &&
                        "smart-capitalize",
                    )}
                  >
                    {item.data?.zones_friendly_names?.[zidx]}
                  </span>
                </Badge>
              );
            })}
            <span className="font-medium text-primary">{score}</span>
          </div>
        )}
          </div>
          <div className="flex items-center gap-1.5">
            <span className="text-primary-variant">
              {t("trackingDetails.lifecycleItemDesc.header.ratio")}
            </span>
            <span className="font-medium text-primary">{ratio}</span>
          </div>
          <div className="flex items-center gap-1.5">
            <span className="text-primary-variant">
              {t("trackingDetails.lifecycleItemDesc.header.area")}{" "}
              {attributeAreaPx !== undefined &&
                attributeAreaPct !== undefined && (
                  <span className="text-primary-variant">
                    ({getTranslatedLabel(item.data.label)})
                  </span>
                )}
            </span>
            {areaPx !== undefined && areaPct !== undefined ? (
              <span className="font-medium text-primary">
                {t("information.pixels", { ns: "common", area: areaPx })}{" "}
                · {areaPct}%
              </span>
            ) : (
              <span>N/A</span>
            )}
          </div>
          {attributeAreaPx !== undefined &&
            attributeAreaPct !== undefined && (
              <div className="flex items-center gap-1.5">
                <span className="text-primary-variant">
                  {t("trackingDetails.lifecycleItemDesc.header.area")} (
                  {getTranslatedLabel(item.data.attribute)})
                </span>
                <span className="font-medium text-primary">
                  {t("information.pixels", {
                    ns: "common",
                    area: attributeAreaPx,
                  })}{" "}
                  · {attributeAreaPct}%
                </span>
              </div>
            )}
          </div>
        )}

        {item.data?.zones && item.data.zones.length > 0 && (
          <div className="mt-1 flex flex-wrap items-center gap-2">
            {item.data.zones.map((zone, zidx) => {
              const color = getZoneColor(zone)?.join(",") ?? "0,0,0";
              return (
                <Badge
                  key={`${zone}-${zidx}`}
                  variant="outline"
                  className="inline-flex cursor-pointer items-center gap-2"
                  onClick={(e: React.MouseEvent) => {
                    e.stopPropagation();
                    setSelectedZone(zone);
                  }}
                  style={{
                    borderColor: `rgba(${color}, 0.6)`,
                    background: `rgba(${color}, 0.08)`,
                  }}
                >
                  <span
                    className="size-1 rounded-full"
                    style={{
                      display: "inline-block",
                      width: 10,
                      height: 10,
                      backgroundColor: `rgb(${color})`,
                    }}
                  />
                  <span
                    className={cn(
                      item.data?.zones_friendly_names?.[zidx] === zone &&
                        "smart-capitalize",
                    )}
                  >
                    {item.data?.zones_friendly_names?.[zidx]}
                  </span>
                </Badge>
              );
            })}
          </div>
        )}
      </div>
    </div>
    <div className="ml-3 flex-shrink-0 px-1 text-right text-xs text-primary-variant">
@@ -37,6 +37,7 @@ import EnrichmentsSettingsView from "@/views/settings/EnrichmentsSettingsView";
import UiSettingsView from "@/views/settings/UiSettingsView";
import FrigatePlusSettingsView from "@/views/settings/FrigatePlusSettingsView";
import { useSearchEffect } from "@/hooks/use-overlay-state";
import { usePersistence } from "@/hooks/use-persistence";
import { useNavigate, useSearchParams } from "react-router-dom";
import { useInitialCameraState } from "@/api/ws";
import { useIsAdmin } from "@/hooks/use-is-admin";

@@ -207,7 +208,21 @@ export default function Settings() {
      .sort((aConf, bConf) => aConf.ui.order - bConf.ui.order);
  }, [config]);

  const [selectedCamera, setSelectedCamera] = useState<string>("");
  const [persistedCamera, setPersistedCamera] = usePersistence(
    "selectedCamera",
    "",
  );
  const [selectedCamera, setSelectedCamera] = useState(persistedCamera);
  useEffect(() => {
    if (persistedCamera) {
      setSelectedCamera(persistedCamera);
    }
  }, [persistedCamera]);
  useEffect(() => {
    if (selectedCamera) {
      setPersistedCamera(selectedCamera);
    }
  }, [selectedCamera, setPersistedCamera]);

  const { payload: allCameraStates } = useInitialCameraState(
    cameras.length > 0 ? cameras[0].name : "",
@@ -305,6 +305,7 @@ export type CustomClassificationModelConfig = {
  enabled: boolean;
  name: string;
  threshold: number;
  save_attempts?: number;
  object_config?: {
    objects: string[];
    classification_type: string;