Mirror of https://github.com/blakeblackshear/frigate.git, synced 2025-12-16 10:06:42 +03:00
Compare commits
5 Commits
b1dc29f309 ... 79eee8d2cd

- 79eee8d2cd
- c136e5e8bd
- 9ab78f496c
- 8a360eecf8
- f01dd335c2
1 .github/workflows/ci.yml (vendored)
@ -136,7 +136,6 @@ jobs:
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-tensorrt,mode=max
- name: AMD/ROCm general build
env:
AMDGPU: gfx
HSA_OVERRIDE: 0
uses: docker/bake-action@v6
with:

@ -3,7 +3,6 @@
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
ARG ROCM=1
ARG AMDGPU=gfx900
ARG HSA_OVERRIDE_GFX_VERSION
ARG HSA_OVERRIDE

@ -11,7 +10,6 @@ ARG HSA_OVERRIDE
FROM wget AS rocm

ARG ROCM
ARG AMDGPU

RUN apt update -qq && \
apt install -y wget gpg && \
@ -36,7 +34,10 @@ FROM deps AS deps-prelim
COPY docker/rocm/debian-backports.sources /etc/apt/sources.list.d/debian-backports.sources
RUN apt-get update && \
apt-get install -y libnuma1 && \
apt-get install -qq -y -t bookworm-backports mesa-va-drivers mesa-vulkan-drivers
apt-get install -qq -y -t bookworm-backports mesa-va-drivers mesa-vulkan-drivers && \
# Install C++ standard library headers for HIPRTC kernel compilation fallback
apt-get install -qq -y libstdc++-12-dev && \
rm -rf /var/lib/apt/lists/*

WORKDIR /opt/frigate
COPY --from=rootfs / /
@ -54,12 +55,14 @@ RUN pip3 uninstall -y onnxruntime \
FROM scratch AS rocm-dist

ARG ROCM
ARG AMDGPU

COPY --from=rocm /opt/rocm-$ROCM/bin/rocminfo /opt/rocm-$ROCM/bin/migraphx-driver /opt/rocm-$ROCM/bin/
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*$AMDGPU* /opt/rocm-$ROCM/share/miopen/db/
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx908* /opt/rocm-$ROCM/share/miopen/db/
COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*$AMDGPU* /opt/rocm-$ROCM/lib/rocblas/library/
# Copy MIOpen database files for gfx10xx and gfx11xx only (RDNA2/RDNA3)
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx10* /opt/rocm-$ROCM/share/miopen/db/
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx11* /opt/rocm-$ROCM/share/miopen/db/
# Copy rocBLAS library files for gfx10xx and gfx11xx only
COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*gfx10* /opt/rocm-$ROCM/lib/rocblas/library/
COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*gfx11* /opt/rocm-$ROCM/lib/rocblas/library/
COPY --from=rocm /opt/rocm-dist/ /

#######################################################################

@ -1,6 +1,3 @@
variable "AMDGPU" {
default = "gfx900"
}
variable "ROCM" {
default = "7.1.1"
}
@ -38,7 +35,6 @@ target rocm {
}
platforms = ["linux/amd64"]
args = {
AMDGPU = AMDGPU,
ROCM = ROCM,
HSA_OVERRIDE_GFX_VERSION = HSA_OVERRIDE_GFX_VERSION,
HSA_OVERRIDE = HSA_OVERRIDE

@ -1,53 +1,15 @@
BOARDS += rocm

# AMD/ROCm is chunky so we build couple of smaller images for specific chipsets
ROCM_CHIPSETS:=gfx900:9.0.0 gfx1030:10.3.0 gfx1100:11.0.0

local-rocm: version
$(foreach chipset,$(ROCM_CHIPSETS), \
AMDGPU=$(word 1,$(subst :, ,$(chipset))) \
HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) \
HSA_OVERRIDE=1 \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=frigate:latest-rocm-$(word 1,$(subst :, ,$(chipset))) \
--load \
&&) true

unset HSA_OVERRIDE_GFX_VERSION && \
HSA_OVERRIDE=0 \
AMDGPU=gfx \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=frigate:latest-rocm \
--load

build-rocm: version
$(foreach chipset,$(ROCM_CHIPSETS), \
AMDGPU=$(word 1,$(subst :, ,$(chipset))) \
HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) \
HSA_OVERRIDE=1 \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) \
&&) true

unset HSA_OVERRIDE_GFX_VERSION && \
HSA_OVERRIDE=0 \
AMDGPU=gfx \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm

push-rocm: build-rocm
$(foreach chipset,$(ROCM_CHIPSETS), \
AMDGPU=$(word 1,$(subst :, ,$(chipset))) \
HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) \
HSA_OVERRIDE=1 \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) \
--push \
&&) true

unset HSA_OVERRIDE_GFX_VERSION && \
HSA_OVERRIDE=0 \
AMDGPU=gfx \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm \
--push

@ -111,3 +111,9 @@ review:
## Review Reports

Along with individual review item summaries, Generative AI provides the ability to request a report for a given time period. For example, while on vacation you can get a daily report of any suspicious activity or other concerns that may require review.

### Requesting Reports Programmatically

Review reports can be requested via the [API](/integrations/api#review-summarization) by sending a POST request to `/api/review/summarize/start/{start_ts}/end/{end_ts}` with Unix timestamps.

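As a rough sketch (the base URL below is an assumption about your deployment, not something defined in these docs), a report covering the last 24 hours could be requested like this:

```python
import time
import requests

FRIGATE_URL = "http://frigate.local:5000"  # assumed address of your Frigate instance

end_ts = int(time.time())            # now, as a Unix timestamp
start_ts = end_ts - 24 * 60 * 60     # 24 hours earlier

# POST /api/review/summarize/start/{start_ts}/end/{end_ts}
resp = requests.post(f"{FRIGATE_URL}/api/review/summarize/start/{start_ts}/end/{end_ts}")
resp.raise_for_status()
print(resp.text)
```
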
For Home Assistant users, there is a built-in service (`frigate.generate_review_summary`) that makes it easy to request review reports as part of automations or scripts. This allows you to automatically generate daily summaries, vacation reports, or custom time period reports based on your specific needs.

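For a script-driven setup, the same service can also be invoked through Home Assistant's generic REST service endpoint. This is only an illustrative sketch: the Home Assistant URL and token are placeholders, and the service data fields are omitted because they are not specified in this document.

```python
import requests

HA_URL = "http://homeassistant.local:8123"   # assumed Home Assistant address
TOKEN = "YOUR_LONG_LIVED_ACCESS_TOKEN"       # placeholder credential

# Call the frigate.generate_review_summary service via Home Assistant's service API.
resp = requests.post(
    f"{HA_URL}/api/services/frigate/generate_review_summary",
    headers={"Authorization": f"Bearer {TOKEN}"},
    json={},  # any service-specific fields (e.g. a time range) would go here
)
resp.raise_for_status()
```
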
@ -107,7 +107,7 @@ Fine-tune the LPR feature using these optional parameters at the global level of

### Normalization Rules

- **`replace_rules`**: List of regex replacement rules to normalize detected plates. These rules are applied sequentially. Each rule must have a `pattern` (which can be a string or a regex, prepended by `r`) and `replacement` (a string, which also supports [backrefs](https://docs.python.org/3/library/re.html#re.sub) like `\1`). These rules are useful for dealing with common OCR issues like noise characters, separators, or confusions (e.g., 'O'→'0').
- **`replace_rules`**: List of regex replacement rules to normalize detected plates. These rules are applied sequentially and are applied _before_ the `format` regex, if specified. Each rule must have a `pattern` (which can be a string or a regex, prepended by `r`) and `replacement` (a string, which also supports [backrefs](https://docs.python.org/3/library/re.html#re.sub) like `\1`). These rules are useful for dealing with common OCR issues like noise characters, separators, or confusions (e.g., 'O'→'0').

These rules must be defined at the global level of your `lpr` config.

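Since the replacements ultimately run through Python's `re` module, a quick way to build intuition for a rule set is to replay it with `re.sub`. The rules below are made up for illustration; they are not shipped defaults.

```python
import re

# Each rule is (pattern, replacement), applied in order, like replace_rules.
rules = [
    (r"[\s\-.]", ""),                      # strip separators and noise characters
    (r"O", "0"),                           # common OCR confusion: letter O -> digit 0
    (r"^(\w{3})(\w{3})$", r"\1\2"),        # backref example: keep both captured groups
]

plate = "AB-C O12"
for pattern, replacement in rules:
    plate = re.sub(pattern, replacement, plate)  # rules apply sequentially

print(plate)  # -> ABC012
```
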
@ -13,7 +13,7 @@ Frigate supports multiple different detectors that work on different types of ha

**Most Hardware**

- [Coral EdgeTPU](#edge-tpu-detector): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices.
- [Coral EdgeTPU](#edge-tpu-detector): The Google Coral EdgeTPU is available in USB, Mini PCIe, and m.2 formats allowing for a wide range of compatibility with devices.
- [Hailo](#hailo-8): The Hailo8 and Hailo8L AI Acceleration modules are available in m.2 format with a HAT for RPi devices, offering a wide range of compatibility with devices.
- <CommunityBadge /> [MemryX](#memryx-mx3): The MX3 Acceleration module is available in m.2 format, offering broad compatibility across various platforms.
- <CommunityBadge /> [DeGirum](#degirum): Service for using hardware devices in the cloud or locally. Hardware and models provided on the cloud on [their website](https://hub.degirum.com).
@ -69,12 +69,10 @@ Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `hailo8

## Edge TPU Detector

The Edge TPU detector type runs a TensorFlow Lite model utilizing the Google Coral delegate for hardware acceleration. To configure an Edge TPU detector, set the `"type"` attribute to `"edgetpu"`.
The Edge TPU detector type runs TensorFlow Lite models utilizing the Google Coral delegate for hardware acceleration. To configure an Edge TPU detector, set the `"type"` attribute to `"edgetpu"`.

The Edge TPU device can be specified using the `"device"` attribute according to the [Documentation for the TensorFlow Lite Python API](https://coral.ai/docs/edgetpu/multiple-edgetpu/#using-the-tensorflow-lite-python-api). If not set, the delegate will use the first device it finds.

A TensorFlow Lite model is provided in the container at `/edgetpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`.

:::tip

See [common Edge TPU troubleshooting steps](/troubleshooting/edgetpu) if the Edge TPU is not detected.
@ -146,6 +144,46 @@ detectors:
    device: pci
```

### EdgeTPU Supported Models

| Model | Notes |
| ------------------------------------- | ------------------------------------------- |
| [MobileNet v2](#ssdlite-mobilenet-v2) | Default model |
| [YOLOv9](#yolo-v9) | More accurate but slower than default model |

#### SSDLite MobileNet v2

A TensorFlow Lite model is provided in the container at `/edgetpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`.

A TensorFlow Lite model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an INT8 precision model.

#### YOLO v9

[YOLOv9](https://github.com/dbro/frigate-detector-edgetpu-yolo9/releases/download/v1.0/yolov9-s-relu6-best_320_int8_edgetpu.tflite) models that are compiled for Tensorflow Lite and properly quantized are supported, but not included by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`. Note that the model may require a custom label file (e.g., [use this 17 label file](https://raw.githubusercontent.com/dbro/frigate-detector-edgetpu-yolo9/refs/heads/main/labels-coco17.txt) for the model linked above.)

<details>
<summary>YOLOv9 Setup & Config</summary>

After placing the downloaded files for the tflite model and labels in your config folder, you can use the following configuration:

```yaml
detectors:
  coral:
    type: edgetpu
    device: usb

model:
  model_type: yolo-generic
  width: 320 # <--- should match the imgsize of the model, typically 320
  height: 320 # <--- should match the imgsize of the model, typically 320
  path: /config/model_cache/yolov9-s-relu6-best_320_int8_edgetpu.tflite
  labelmap_path: /labelmap/labels-coco-17.txt
```

Note that the labelmap uses a subset of the complete COCO label set that has only 17 objects.

</details>

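As a quick sanity check before wiring a downloaded model into `model.path`, you can inspect the file's expected input size with the TensorFlow Lite interpreter. This is an editor's sketch, not part of the Frigate docs; the file path is the one used in the example above, and it assumes the runtime can read the model's input metadata without the EdgeTPU delegate.

```python
from tflite_runtime.interpreter import Interpreter  # or the tensorflow.lite fallback

interpreter = Interpreter(
    model_path="/config/model_cache/yolov9-s-relu6-best_320_int8_edgetpu.tflite"
)
# Input details are available without allocating tensors or loading the delegate.
print(interpreter.get_input_details()[0]["shape"])  # expect [1, 320, 320, 3] for a 320x320 model
```
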
---

## Hailo-8
@ -364,7 +402,7 @@ The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv

:::warning

If you are using a Frigate+ YOLOv9 model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.
If you are using a Frigate+ model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.

:::

@ -704,7 +742,7 @@ The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv

:::warning

If you are using a Frigate+ YOLOv9 model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.
If you are using a Frigate+ model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.

:::

1135 docs/static/frigate-api.yaml (vendored)
File diff suppressed because it is too large
@ -29,7 +29,6 @@ class EventsDescriptionBody(BaseModel):
|
||||
|
||||
|
||||
class EventsCreateBody(BaseModel):
|
||||
source_type: Optional[str] = "api"
|
||||
sub_label: Optional[str] = None
|
||||
score: Optional[float] = 0
|
||||
duration: Optional[int] = 30
|
||||
|
||||
@ -346,7 +346,7 @@ def events(
|
||||
"/events/explore",
|
||||
response_model=list[EventResponse],
|
||||
dependencies=[Depends(allow_any_authenticated())],
|
||||
summary="Get summary of objects.",
|
||||
summary="Get summary of objects",
|
||||
description="""Gets a summary of objects from the database.
|
||||
Returns a list of objects with a max of `limit` objects for each label.
|
||||
""",
|
||||
@ -439,7 +439,7 @@ def events_explore(
|
||||
"/event_ids",
|
||||
response_model=list[EventResponse],
|
||||
dependencies=[Depends(allow_any_authenticated())],
|
||||
summary="Get events by ids.",
|
||||
summary="Get events by ids",
|
||||
description="""Gets events by a list of ids.
|
||||
Returns a list of events.
|
||||
""",
|
||||
@ -473,7 +473,7 @@ async def event_ids(ids: str, request: Request):
|
||||
@router.get(
|
||||
"/events/search",
|
||||
dependencies=[Depends(allow_any_authenticated())],
|
||||
summary="Search events.",
|
||||
summary="Search events",
|
||||
description="""Searches for events in the database.
|
||||
Returns a list of events.
|
||||
""",
|
||||
@ -924,7 +924,7 @@ def events_summary(
|
||||
"/events/{event_id}",
|
||||
response_model=EventResponse,
|
||||
dependencies=[Depends(allow_any_authenticated())],
|
||||
summary="Get event by id.",
|
||||
summary="Get event by id",
|
||||
description="Gets an event by its id.",
|
||||
)
|
||||
async def event(event_id: str, request: Request):
|
||||
@ -968,7 +968,7 @@ def set_retain(event_id: str):
|
||||
"/events/{event_id}/plus",
|
||||
response_model=EventUploadPlusResponse,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Send event to Frigate+.",
|
||||
summary="Send event to Frigate+",
|
||||
description="""Sends an event to Frigate+.
|
||||
Returns a success message or an error if the event is not found.
|
||||
""",
|
||||
@ -1207,7 +1207,7 @@ async def false_positive(request: Request, event_id: str):
|
||||
"/events/{event_id}/retain",
|
||||
response_model=GenericResponse,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Stop event from being retained indefinitely.",
|
||||
summary="Stop event from being retained indefinitely",
|
||||
description="""Stops an event from being retained indefinitely.
|
||||
Returns a success message or an error if the event is not found.
|
||||
NOTE: This is a legacy endpoint and is not supported in the frontend.
|
||||
@ -1236,7 +1236,7 @@ async def delete_retain(event_id: str, request: Request):
|
||||
"/events/{event_id}/sub_label",
|
||||
response_model=GenericResponse,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Set event sub label.",
|
||||
summary="Set event sub label",
|
||||
description="""Sets an event's sub label.
|
||||
Returns a success message or an error if the event is not found.
|
||||
""",
|
||||
@ -1295,7 +1295,7 @@ async def set_sub_label(
|
||||
"/events/{event_id}/recognized_license_plate",
|
||||
response_model=GenericResponse,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Set event license plate.",
|
||||
summary="Set event license plate",
|
||||
description="""Sets an event's license plate.
|
||||
Returns a success message or an error if the event is not found.
|
||||
""",
|
||||
@ -1355,7 +1355,7 @@ async def set_plate(
|
||||
"/events/{event_id}/description",
|
||||
response_model=GenericResponse,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Set event description.",
|
||||
summary="Set event description",
|
||||
description="""Sets an event's description.
|
||||
Returns a success message or an error if the event is not found.
|
||||
""",
|
||||
@ -1411,7 +1411,7 @@ async def set_description(
|
||||
"/events/{event_id}/description/regenerate",
|
||||
response_model=GenericResponse,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Regenerate event description.",
|
||||
summary="Regenerate event description",
|
||||
description="""Regenerates an event's description.
|
||||
Returns a success message or an error if the event is not found.
|
||||
""",
|
||||
@ -1463,8 +1463,8 @@ async def regenerate_description(
|
||||
@router.post(
|
||||
"/description/generate",
|
||||
response_model=GenericResponse,
|
||||
# dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Generate description embedding.",
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Generate description embedding",
|
||||
description="""Generates an embedding for an event's description.
|
||||
Returns a success message or an error if the event is not found.
|
||||
""",
|
||||
@ -1529,7 +1529,7 @@ async def delete_single_event(event_id: str, request: Request) -> dict:
|
||||
"/events/{event_id}",
|
||||
response_model=GenericResponse,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Delete event.",
|
||||
summary="Delete event",
|
||||
description="""Deletes an event from the database.
|
||||
Returns a success message or an error if the event is not found.
|
||||
""",
|
||||
@ -1544,7 +1544,7 @@ async def delete_event(request: Request, event_id: str):
|
||||
"/events/",
|
||||
response_model=EventMultiDeleteResponse,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Delete events.",
|
||||
summary="Delete events",
|
||||
description="""Deletes a list of events from the database.
|
||||
Returns a success message or an error if the events are not found.
|
||||
""",
|
||||
@ -1578,7 +1578,7 @@ async def delete_events(request: Request, body: EventsDeleteBody):
|
||||
"/events/{camera_name}/{label}/create",
|
||||
response_model=EventCreateResponse,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Create manual event.",
|
||||
summary="Create manual event",
|
||||
description="""Creates a manual event in the database.
|
||||
Returns a success message or an error if the event is not found.
|
||||
NOTES:
|
||||
@ -1620,7 +1620,7 @@ def create_event(
|
||||
body.score,
|
||||
body.sub_label,
|
||||
body.duration,
|
||||
body.source_type,
|
||||
"api",
|
||||
body.draw,
|
||||
),
|
||||
EventMetadataTypeEnum.manual_event_create.value,
|
||||
@ -1642,7 +1642,7 @@ def create_event(
|
||||
"/events/{event_id}/end",
|
||||
response_model=GenericResponse,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="End manual event.",
|
||||
summary="End manual event",
|
||||
description="""Ends a manual event.
|
||||
Returns a success message or an error if the event is not found.
|
||||
NOTE: This should only be used for manual events.
|
||||
@ -1652,10 +1652,27 @@ async def end_event(request: Request, event_id: str, body: EventsEndBody):
|
||||
try:
|
||||
event: Event = Event.get(Event.id == event_id)
|
||||
await require_camera_access(event.camera, request=request)
|
||||
|
||||
if body.end_time is not None and body.end_time < event.start_time:
|
||||
return JSONResponse(
|
||||
content=(
|
||||
{
|
||||
"success": False,
|
||||
"message": f"end_time ({body.end_time}) cannot be before start_time ({event.start_time}).",
|
||||
}
|
||||
),
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
end_time = body.end_time or datetime.datetime.now().timestamp()
|
||||
request.app.event_metadata_updater.publish(
|
||||
(event_id, end_time), EventMetadataTypeEnum.manual_event_end.value
|
||||
)
|
||||
except DoesNotExist:
|
||||
return JSONResponse(
|
||||
content=({"success": False, "message": f"Event {event_id} not found."}),
|
||||
status_code=404,
|
||||
)
|
||||
except Exception:
|
||||
return JSONResponse(
|
||||
content=(
|
||||
@ -1674,7 +1691,7 @@ async def end_event(request: Request, event_id: str, body: EventsEndBody):
|
||||
"/trigger/embedding",
|
||||
response_model=dict,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Create trigger embedding.",
|
||||
summary="Create trigger embedding",
|
||||
description="""Creates a trigger embedding for a specific trigger.
|
||||
Returns a success message or an error if the trigger is not found.
|
||||
""",
|
||||
@ -1832,7 +1849,7 @@ def create_trigger_embedding(
|
||||
"/trigger/embedding/{camera_name}/{name}",
|
||||
response_model=dict,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Update trigger embedding.",
|
||||
summary="Update trigger embedding",
|
||||
description="""Updates a trigger embedding for a specific trigger.
|
||||
Returns a success message or an error if the trigger is not found.
|
||||
""",
|
||||
@ -1997,7 +2014,7 @@ def update_trigger_embedding(
|
||||
"/trigger/embedding/{camera_name}/{name}",
|
||||
response_model=dict,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Delete trigger embedding.",
|
||||
summary="Delete trigger embedding",
|
||||
description="""Deletes a trigger embedding for a specific trigger.
|
||||
Returns a success message or an error if the trigger is not found.
|
||||
""",
|
||||
@ -2071,7 +2088,7 @@ def delete_trigger_embedding(
|
||||
"/triggers/status/{camera_name}",
|
||||
response_model=dict,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Get triggers status.",
|
||||
summary="Get triggers status",
|
||||
description="""Gets the status of all triggers for a specific camera.
|
||||
Returns a success message or an error if the camera is not found.
|
||||
""",
|
||||
|
||||
@ -209,10 +209,22 @@ class ReviewDescriptionProcessor(PostProcessorApi):
|
||||
logger.debug(
|
||||
f"Found GenAI Review Summary request for {start_ts} to {end_ts}"
|
||||
)
|
||||
items: list[dict[str, Any]] = [
|
||||
r["data"]["metadata"]
|
||||
|
||||
# Query all review segments with camera and time information
|
||||
segments: list[dict[str, Any]] = [
|
||||
{
|
||||
"camera": r["camera"].replace("_", " ").title(),
|
||||
"start_time": r["start_time"],
|
||||
"end_time": r["end_time"],
|
||||
"metadata": r["data"]["metadata"],
|
||||
}
|
||||
for r in (
|
||||
ReviewSegment.select(ReviewSegment.data)
|
||||
ReviewSegment.select(
|
||||
ReviewSegment.camera,
|
||||
ReviewSegment.start_time,
|
||||
ReviewSegment.end_time,
|
||||
ReviewSegment.data,
|
||||
)
|
||||
.where(
|
||||
(ReviewSegment.data["metadata"].is_null(False))
|
||||
& (ReviewSegment.start_time < end_ts)
|
||||
@ -224,21 +236,66 @@ class ReviewDescriptionProcessor(PostProcessorApi):
|
||||
)
|
||||
]
|
||||
|
||||
if len(items) == 0:
|
||||
if len(segments) == 0:
|
||||
logger.debug("No review items with metadata found during time period")
|
||||
return "No activity was found during this time."
|
||||
return "No activity was found during this time period."
|
||||
|
||||
important_items = list(
|
||||
filter(
|
||||
lambda item: item.get("potential_threat_level", 0) > 0
|
||||
or item.get("other_concerns"),
|
||||
items,
|
||||
)
|
||||
)
|
||||
# Identify primary items (important items that need review)
|
||||
primary_segments = [
|
||||
seg
|
||||
for seg in segments
|
||||
if seg["metadata"].get("potential_threat_level", 0) > 0
|
||||
or seg["metadata"].get("other_concerns")
|
||||
]
|
||||
|
||||
if not important_items:
|
||||
if not primary_segments:
|
||||
return "No concerns were found during this time period."
|
||||
|
||||
# For each primary segment, find overlapping contextual items from other cameras
|
||||
all_items_for_summary = []
|
||||
|
||||
for primary_seg in primary_segments:
|
||||
# Add the primary item with marker
|
||||
primary_item = copy.deepcopy(primary_seg["metadata"])
|
||||
primary_item["_is_primary"] = True
|
||||
primary_item["_camera"] = primary_seg["camera"]
|
||||
all_items_for_summary.append(primary_item)
|
||||
|
||||
# Find overlapping contextual items from other cameras
|
||||
primary_start = primary_seg["start_time"]
|
||||
primary_end = primary_seg["end_time"]
|
||||
primary_camera = primary_seg["camera"]
|
||||
|
||||
for seg in segments:
|
||||
seg_camera = seg["camera"]
|
||||
|
||||
if seg_camera == primary_camera:
|
||||
continue
|
||||
|
||||
if seg in primary_segments:
|
||||
continue
|
||||
|
||||
seg_start = seg["start_time"]
|
||||
seg_end = seg["end_time"]
|
||||
|
||||
if seg_start < primary_end and primary_start < seg_end:
|
||||
contextual_item = copy.deepcopy(seg["metadata"])
|
||||
contextual_item["_is_primary"] = False
|
||||
contextual_item["_camera"] = seg_camera
|
||||
contextual_item["_related_to_camera"] = primary_camera
|
||||
|
||||
if not any(
|
||||
item.get("_camera") == seg_camera
|
||||
and item.get("time") == contextual_item.get("time")
|
||||
for item in all_items_for_summary
|
||||
):
|
||||
all_items_for_summary.append(contextual_item)
|
||||
|
||||
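(Illustrative aside, not taken from the diff.) The overlap test above is the standard open-interval check: two time ranges overlap exactly when each one starts before the other ends.

```python
# Sketch of the overlap condition used above for primary vs contextual segments.
def overlaps(seg_start: float, seg_end: float, primary_start: float, primary_end: float) -> bool:
    return seg_start < primary_end and primary_start < seg_end

print(overlaps(100, 160, 150, 200))  # True: the ranges share 150-160
print(overlaps(100, 160, 160, 200))  # False: they only touch at 160
```
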
logger.debug(
|
||||
f"Summary includes {len(primary_segments)} primary items and "
|
||||
f"{len(all_items_for_summary) - len(primary_segments)} contextual items"
|
||||
)
|
||||
|
||||
if self.config.review.genai.debug_save_thumbnails:
|
||||
Path(
|
||||
os.path.join(CLIPS_DIR, "genai-requests", f"{start_ts}-{end_ts}")
|
||||
@ -247,7 +304,7 @@ class ReviewDescriptionProcessor(PostProcessorApi):
|
||||
return self.genai_client.generate_review_summary(
|
||||
start_ts,
|
||||
end_ts,
|
||||
important_items,
|
||||
all_items_for_summary,
|
||||
self.config.review.genai.debug_save_thumbnails,
|
||||
)
|
||||
else:
|
||||
|
||||
@ -1,19 +1,20 @@
|
||||
import logging
|
||||
import math
|
||||
import os
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
from pydantic import Field
|
||||
from typing_extensions import Literal
|
||||
|
||||
from frigate.detectors.detection_api import DetectionApi
|
||||
from frigate.detectors.detector_config import BaseDetectorConfig
|
||||
from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
|
||||
|
||||
try:
|
||||
from tflite_runtime.interpreter import Interpreter, load_delegate
|
||||
except ModuleNotFoundError:
|
||||
from tensorflow.lite.python.interpreter import Interpreter, load_delegate
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
DETECTOR_KEY = "edgetpu"
|
||||
@ -26,6 +27,10 @@ class EdgeTpuDetectorConfig(BaseDetectorConfig):
|
||||
|
||||
class EdgeTpuTfl(DetectionApi):
|
||||
type_key = DETECTOR_KEY
|
||||
supported_models = [
|
||||
ModelTypeEnum.ssd,
|
||||
ModelTypeEnum.yologeneric,
|
||||
]
|
||||
|
||||
def __init__(self, detector_config: EdgeTpuDetectorConfig):
|
||||
device_config = {}
|
||||
@ -63,31 +68,294 @@ class EdgeTpuTfl(DetectionApi):
|
||||
|
||||
self.tensor_input_details = self.interpreter.get_input_details()
|
||||
self.tensor_output_details = self.interpreter.get_output_details()
|
||||
self.model_width = detector_config.model.width
|
||||
self.model_height = detector_config.model.height
|
||||
|
||||
self.min_score = 0.4
|
||||
self.max_detections = 20
|
||||
|
||||
self.model_type = detector_config.model.model_type
|
||||
self.model_requires_int8 = self.tensor_input_details[0]["dtype"] == np.int8
|
||||
|
||||
if self.model_type == ModelTypeEnum.yologeneric:
|
||||
logger.debug("Using YOLO preprocessing/postprocessing")
|
||||
|
||||
if len(self.tensor_output_details) not in [2, 3]:
|
||||
logger.error(
|
||||
f"Invalid count of output tensors in YOLO model. Found {len(self.tensor_output_details)}, expecting 2 or 3."
|
||||
)
|
||||
raise
|
||||
|
||||
self.reg_max = 16 # = 64 dfl_channels // 4 # YOLO standard
|
||||
self.min_logit_value = np.log(
|
||||
self.min_score / (1 - self.min_score)
|
||||
) # for filtering
|
||||
self._generate_anchors_and_strides() # decode bounding box DFL
|
||||
self.project = np.arange(
|
||||
self.reg_max, dtype=np.float32
|
||||
) # for decoding bounding box DFL information
|
||||
|
||||
# Determine YOLO tensor indices and quantization scales for
|
||||
# boxes and class_scores the tensor ordering and names are
|
||||
# not reliable, so use tensor shape to detect which tensor
|
||||
# holds boxes or class scores.
|
||||
# The tensors have shapes (B, N, C)
|
||||
# where N is the number of candidates (=2100 for 320x320)
|
||||
# this may guess wrong if the number of classes is exactly 64
|
||||
output_boxes_index = None
|
||||
output_classes_index = None
|
||||
for i, x in enumerate(self.tensor_output_details):
|
||||
# the nominal index seems to start at 1 instead of 0
|
||||
if len(x["shape"]) == 3 and x["shape"][2] == 64:
|
||||
output_boxes_index = i
|
||||
elif len(x["shape"]) == 3 and x["shape"][2] > 1:
|
||||
# require the number of classes to be more than 1
|
||||
# to differentiate from (not used) max score tensor
|
||||
output_classes_index = i
|
||||
if output_boxes_index is None or output_classes_index is None:
|
||||
logger.warning("Unrecognized model output, unexpected tensor shapes.")
|
||||
output_classes_index = (
|
||||
0
|
||||
if (output_boxes_index is None or output_classes_index == 1)
|
||||
else 1
|
||||
) # 0 is default guess
|
||||
output_boxes_index = 1 if (output_boxes_index == 0) else 0
|
||||
|
||||
scores_details = self.tensor_output_details[output_classes_index]
|
||||
self.scores_tensor_index = scores_details["index"]
|
||||
self.scores_scale, self.scores_zero_point = scores_details["quantization"]
|
||||
# calculate the quantized version of the min_score
|
||||
self.min_score_quantized = int(
|
||||
(self.min_logit_value / self.scores_scale) + self.scores_zero_point
|
||||
)
|
||||
self.logit_shift_to_positive_values = (
|
||||
max(0, math.ceil((128 + self.scores_zero_point) * self.scores_scale))
|
||||
+ 1
|
||||
) # round up
|
||||
|
||||
boxes_details = self.tensor_output_details[output_boxes_index]
|
||||
self.boxes_tensor_index = boxes_details["index"]
|
||||
self.boxes_scale, self.boxes_zero_point = boxes_details["quantization"]
|
||||
|
||||
elif self.model_type == ModelTypeEnum.ssd:
|
||||
logger.debug("Using SSD preprocessing/postprocessing")
|
||||
|
||||
# SSD model indices (4 outputs: boxes, class_ids, scores, count)
|
||||
for x in self.tensor_output_details:
|
||||
if len(x["shape"]) == 3:
|
||||
self.output_boxes_index = x["index"]
|
||||
elif len(x["shape"]) == 1:
|
||||
self.output_count_index = x["index"]
|
||||
|
||||
self.output_class_ids_index = None
|
||||
self.output_class_scores_index = None
|
||||
|
||||
else:
|
||||
raise Exception(
|
||||
f"{self.model_type} is currently not supported for edgetpu. See the docs for more info on supported models."
|
||||
)
|
||||
|
||||
def _generate_anchors_and_strides(self):
|
||||
# for decoding the bounding box DFL information into xy coordinates
|
||||
all_anchors = []
|
||||
all_strides = []
|
||||
strides = (8, 16, 32) # YOLO's small, medium, large detection heads
|
||||
|
||||
for stride in strides:
|
||||
feat_h, feat_w = self.model_height // stride, self.model_width // stride
|
||||
|
||||
grid_y, grid_x = np.meshgrid(
|
||||
np.arange(feat_h, dtype=np.float32),
|
||||
np.arange(feat_w, dtype=np.float32),
|
||||
indexing="ij",
|
||||
)
|
||||
|
||||
grid_coords = np.stack((grid_x.flatten(), grid_y.flatten()), axis=1)
|
||||
anchor_points = grid_coords + 0.5
|
||||
|
||||
all_anchors.append(anchor_points)
|
||||
all_strides.append(np.full((feat_h * feat_w, 1), stride, dtype=np.float32))
|
||||
|
||||
self.anchors = np.concatenate(all_anchors, axis=0)
|
||||
self.anchor_strides = np.concatenate(all_strides, axis=0)
|
||||
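(Illustrative aside, not taken from the diff.) The anchor grid built here is where the "2100" mentioned in this detector's comments comes from: for a 320x320 input and strides 8, 16, and 32, the three detection heads contribute 40x40, 20x20, and 10x10 cells.

```python
# Anchor count for a 320x320 model with YOLO's standard strides.
width = height = 320
strides = (8, 16, 32)
total = sum((height // s) * (width // s) for s in strides)
print(total)  # 1600 + 400 + 100 = 2100 anchor points
```
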
|
||||
def determine_indexes_for_non_yolo_models(self):
|
||||
"""Legacy method for SSD models."""
|
||||
if (
|
||||
self.output_class_ids_index is None
|
||||
or self.output_class_scores_index is None
|
||||
):
|
||||
for i in range(4):
|
||||
index = self.tensor_output_details[i]["index"]
|
||||
if (
|
||||
index != self.output_boxes_index
|
||||
and index != self.output_count_index
|
||||
):
|
||||
if (
|
||||
np.mod(np.float32(self.interpreter.tensor(index)()[0][0]), 1)
|
||||
== 0.0
|
||||
):
|
||||
self.output_class_ids_index = index
|
||||
else:
|
||||
self.output_scores_index = index
|
||||
|
||||
def pre_process(self, tensor_input):
|
||||
if self.model_requires_int8:
|
||||
tensor_input = np.bitwise_xor(tensor_input, 128).view(
|
||||
np.int8
|
||||
) # shift by -128
|
||||
return tensor_input
|
||||
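(Illustrative aside, not taken from the diff.) The `bitwise_xor(..., 128).view(np.int8)` trick above maps uint8 inputs to int8 by subtracting 128 without a widening conversion: flipping the top bit and reinterpreting the byte gives the same result.

```python
import numpy as np

u = np.array([0, 5, 127, 128, 200, 255], dtype=np.uint8)
shifted = np.bitwise_xor(u, 128).view(np.int8)   # reinterpret bytes as int8
print(shifted)                                   # [-128 -123   -1    0   72  127]
print(u.astype(np.int16) - 128)                  # same values, computed the slow way
```
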
|
||||
def detect_raw(self, tensor_input):
|
||||
tensor_input = self.pre_process(tensor_input)
|
||||
|
||||
self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
|
||||
self.interpreter.invoke()
|
||||
|
||||
boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
|
||||
class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
|
||||
scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]
|
||||
count = int(
|
||||
self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]
|
||||
)
|
||||
if self.model_type == ModelTypeEnum.yologeneric:
|
||||
# Multi-tensor YOLO model with (non-standard B(H*W)C output format).
|
||||
# (the comments indicate the shape of tensors,
|
||||
# using "2100" as the anchor count (for image size of 320x320),
|
||||
# "NC" as number of classes,
|
||||
# "N" as the count that survive after min-score filtering)
|
||||
# TENSOR A) class scores (1, 2100, NC) with logit values
|
||||
# TENSOR B) box coordinates (1, 2100, 64) encoded as dfl scores
|
||||
# Recommend that the model clamp the logit values in tensor (A)
|
||||
# to the range [-4,+4] to preserve precision from [2%,98%]
|
||||
# and because NMS requires the min_score parameter to be >= 0
|
||||
|
||||
detections = np.zeros((20, 6), np.float32)
|
||||
# don't dequantize scores data yet, wait until the low-confidence
|
||||
# candidates are filtered out from the overall result set.
|
||||
# This reduces the work and makes post-processing faster.
|
||||
# this method works with raw quantized numbers when possible,
|
||||
# which relies on the value of the scale factor to be >0.
|
||||
# This speeds up max and argmax operations.
|
||||
# Get max confidence for each detection and create the mask
|
||||
detections = np.zeros(
|
||||
(self.max_detections, 6), np.float32
|
||||
) # initialize zero results
|
||||
scores_output_quantized = self.interpreter.get_tensor(
|
||||
self.scores_tensor_index
|
||||
)[0] # (2100, NC)
|
||||
max_scores_quantized = np.max(scores_output_quantized, axis=1) # (2100,)
|
||||
mask = max_scores_quantized >= self.min_score_quantized # (2100,)
|
||||
|
||||
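(Illustrative aside, not taken from the diff; the scale and zero point below are made-up example values.) Filtering on raw quantized scores works because dequantization is monotonic for a positive scale, so the probability threshold can be converted once into a quantized logit threshold:

```python
import numpy as np

scale, zero_point, min_score = 0.05, -10, 0.4       # assumed quantization parameters
min_logit = np.log(min_score / (1 - min_score))     # inverse sigmoid, about -0.405
min_q = int(min_logit / scale + zero_point)         # quantized threshold, about -18

q = np.array([-30, -19, -18, 40])                   # raw quantized class scores
logits = (q - zero_point) * scale                   # dequantized logits
print(q >= min_q)                                   # [False False  True  True]
print(logits >= min_logit)                          # matches, up to int() rounding of min_q
```
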
for i in range(count):
|
||||
if scores[i] < 0.4 or i == 20:
|
||||
break
|
||||
detections[i] = [
|
||||
class_ids[i],
|
||||
float(scores[i]),
|
||||
boxes[i][0],
|
||||
boxes[i][1],
|
||||
boxes[i][2],
|
||||
boxes[i][3],
|
||||
if not np.any(mask):
|
||||
return detections # empty results
|
||||
|
||||
max_scores_filtered_shiftedpositive = (
|
||||
(max_scores_quantized[mask] - self.scores_zero_point)
|
||||
* self.scores_scale
|
||||
) + self.logit_shift_to_positive_values # (N,1) shifted logit values
|
||||
scores_output_quantized_filtered = scores_output_quantized[mask]
|
||||
|
||||
# dequantize boxes. NMS needs them to be in float format
|
||||
# remove candidates with probabilities < threshold
|
||||
boxes_output_quantized_filtered = (
|
||||
self.interpreter.get_tensor(self.boxes_tensor_index)[0]
|
||||
)[mask] # (N, 64)
|
||||
boxes_output_filtered = (
|
||||
boxes_output_quantized_filtered.astype(np.float32)
|
||||
- self.boxes_zero_point
|
||||
) * self.boxes_scale
|
||||
|
||||
# 2. Decode DFL to distances (ltrb)
|
||||
dfl_distributions = boxes_output_filtered.reshape(
|
||||
-1, 4, self.reg_max
|
||||
) # (N, 4, 16)
|
||||
|
||||
# Softmax over the 16 bins
|
||||
dfl_max = np.max(dfl_distributions, axis=2, keepdims=True)
|
||||
dfl_exp = np.exp(dfl_distributions - dfl_max)
|
||||
dfl_probs = dfl_exp / np.sum(dfl_exp, axis=2, keepdims=True) # (N, 4, 16)
|
||||
|
||||
# Weighted sum: (N, 4, 16) * (16,) -> (N, 4)
|
||||
distances = np.einsum("pcr,r->pc", dfl_probs, self.project)
|
||||
|
||||
# Calculate box corners in pixel coordinates
|
||||
anchors_filtered = self.anchors[mask]
|
||||
anchor_strides_filtered = self.anchor_strides[mask]
|
||||
x1y1 = (
|
||||
anchors_filtered - distances[:, [0, 1]]
|
||||
) * anchor_strides_filtered # (N, 2)
|
||||
x2y2 = (
|
||||
anchors_filtered + distances[:, [2, 3]]
|
||||
) * anchor_strides_filtered # (N, 2)
|
||||
boxes_filtered_decoded = np.concatenate((x1y1, x2y2), axis=-1) # (N, 4)
|
||||
|
||||
# 9. Apply NMS. Use logit scores here to defer sigmoid()
|
||||
# until after filtering out redundant boxes
|
||||
# Shift the logit scores to be non-negative (required by cv2)
|
||||
indices = cv2.dnn.NMSBoxes(
|
||||
bboxes=boxes_filtered_decoded,
|
||||
scores=max_scores_filtered_shiftedpositive,
|
||||
score_threshold=(
|
||||
self.min_logit_value + self.logit_shift_to_positive_values
|
||||
),
|
||||
nms_threshold=0.4, # should this be a model config setting?
|
||||
)
|
||||
num_detections = len(indices)
|
||||
if num_detections == 0:
|
||||
return detections # empty results
|
||||
|
||||
nms_indices = np.array(indices, dtype=np.int32).ravel() # or .flatten()
|
||||
if num_detections > self.max_detections:
|
||||
nms_indices = nms_indices[: self.max_detections]
|
||||
num_detections = self.max_detections
|
||||
kept_logits_quantized = scores_output_quantized_filtered[nms_indices]
|
||||
class_ids_post_nms = np.argmax(kept_logits_quantized, axis=1)
|
||||
|
||||
# Extract the final boxes and scores using fancy indexing
|
||||
final_boxes = boxes_filtered_decoded[nms_indices]
|
||||
final_scores_logits = (
|
||||
max_scores_filtered_shiftedpositive[nms_indices]
|
||||
- self.logit_shift_to_positive_values
|
||||
) # Unshifted logits
|
||||
|
||||
# Detections array format: [class_id, score, ymin, xmin, ymax, xmax]
|
||||
detections[:num_detections, 0] = class_ids_post_nms
|
||||
detections[:num_detections, 1] = 1.0 / (
|
||||
1.0 + np.exp(-final_scores_logits)
|
||||
) # sigmoid
|
||||
detections[:num_detections, 2] = final_boxes[:, 1] / self.model_height
|
||||
detections[:num_detections, 3] = final_boxes[:, 0] / self.model_width
|
||||
detections[:num_detections, 4] = final_boxes[:, 3] / self.model_height
|
||||
detections[:num_detections, 5] = final_boxes[:, 2] / self.model_width
|
||||
return detections
|
||||
|
||||
elif self.model_type == ModelTypeEnum.ssd:
|
||||
self.determine_indexes_for_non_yolo_models()
|
||||
boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
|
||||
class_ids = self.interpreter.tensor(
|
||||
self.tensor_output_details[1]["index"]
|
||||
)()[0]
|
||||
scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[
|
||||
0
|
||||
]
|
||||
count = int(
|
||||
self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]
|
||||
)
|
||||
|
||||
return detections
|
||||
detections = np.zeros((self.max_detections, 6), np.float32)
|
||||
|
||||
for i in range(count):
|
||||
if scores[i] < self.min_score:
|
||||
break
|
||||
if i == self.max_detections:
|
||||
logger.debug(f"Too many detections ({count})!")
|
||||
break
|
||||
detections[i] = [
|
||||
class_ids[i],
|
||||
float(scores[i]),
|
||||
boxes[i][0],
|
||||
boxes[i][1],
|
||||
boxes[i][2],
|
||||
boxes[i][3],
|
||||
]
|
||||
|
||||
return detections
|
||||
|
||||
else:
|
||||
raise Exception(
|
||||
f"{self.model_type} is currently not supported for edgetpu. See the docs for more info on supported models."
|
||||
)
|
||||
|
||||
@ -185,44 +185,66 @@ Each line represents a detection state, not necessarily unique individuals. Pare
|
||||
timeline_summary_prompt = f"""
|
||||
You are a security officer.
|
||||
Time range: {time_range}.
|
||||
Input: JSON list with "title", "scene", "confidence", "potential_threat_level" (1-2), "other_concerns".
|
||||
Input: JSON list with "title", "scene", "confidence", "potential_threat_level" (0-2), "other_concerns", "_is_primary", "_camera".
|
||||
|
||||
Task: Write a concise, human-presentable security report in markdown format.
|
||||
|
||||
CRITICAL - Understanding Primary vs Contextual Items:
|
||||
- Items with "_is_primary": true are events that REQUIRE REVIEW and MUST be included in the report
|
||||
- Items with "_is_primary": false are additional context from other camera perspectives that overlap in time
|
||||
- **DO NOT create separate bullet points or sections for contextual items**
|
||||
- **ONLY use contextual items to enrich and inform the description of primary items**
|
||||
- The "_camera" field indicates which camera captured each event
|
||||
- **When a contextual item provides relevant background, you MUST incorporate it directly into the primary event's bullet point**
|
||||
- Contextual information often explains or de-escalates seemingly suspicious primary events
|
||||
|
||||
Rules for the report:
|
||||
|
||||
- Title & overview
|
||||
- Start with:
|
||||
# Security Summary - {time_range}
|
||||
- Write a 1-2 sentence situational overview capturing the general pattern of the period.
|
||||
- Keep the overview high-level; specific details will be in the event bullets below.
|
||||
|
||||
- Event details
|
||||
- Present events in chronological order as a bullet list.
|
||||
- **If multiple events occur within the same minute or overlapping time range, COMBINE them into a single bullet.**
|
||||
- Summarize the distinct activities as sub-points under the shared timestamp.
|
||||
- If no timestamp is given, preserve order but label as “Time not specified.”
|
||||
- **ONLY create bullet points for PRIMARY items (_is_primary: true)**
|
||||
- **Do NOT create sections or bullets for events that don't exist**
|
||||
- Do NOT create separate bullets for contextual items
|
||||
- Present primary events in chronological order as a bullet list.
|
||||
- **CRITICAL: When contextual items overlap with a primary event, you MUST weave that information directly into the same bullet point**
|
||||
- Format: **[Timestamp]** - [Description incorporating any contextual information]. [Camera info]. (threat level: X)
|
||||
- If contextual information provides an explanation (e.g., delivery truck → person is likely delivery driver), reflect this understanding in your description and potentially adjust the perceived threat level
|
||||
- If multiple PRIMARY events occur within the same minute, combine them into a single bullet with sub-points.
|
||||
- Use bold timestamps for clarity.
|
||||
- Group bullets under subheadings when multiple events fall into the same category (e.g., Vehicle Activity, Porch Activity, Unusual Behavior).
|
||||
- Camera format: "Camera: [camera name]" or mention contextual cameras inline when relevant
|
||||
- Group bullets under subheadings ONLY when you have actual PRIMARY events to list (e.g., Porch Activity, Unusual Behavior).
|
||||
|
||||
- Threat levels
|
||||
- Always show the threat level for each event using these labels:
|
||||
- Show the threat level for PRIMARY events using these labels:
|
||||
- Threat level 0: "Normal"
|
||||
- Threat level 1: "Needs review"
|
||||
- Threat level 2: "Security concern"
|
||||
- Format as (threat level: Normal), (threat level: Needs review), or (threat level: Security concern).
|
||||
- If multiple events at the same time share the same threat level, only state it once.
|
||||
- **When contextual items clearly explain a primary event (e.g., delivery truck explains person at door), you should describe it as normal activity and note the explanation**
|
||||
- **Your description and tone should reflect the fuller understanding provided by contextual information**
|
||||
- Example: Primary event says "unidentified person with face covering" but context shows delivery truck → describe as "delivery person (truck visible on Front Driveway Cam)" rather than emphasizing suspicious elements
|
||||
- The stored threat level remains as originally classified, but your narrative should reflect the contextual understanding
|
||||
- If multiple PRIMARY events at the same time share the same threat level, only state it once.
|
||||
|
||||
- Final assessment
|
||||
- End with a Final Assessment section.
|
||||
- If all events are threat level 0:
|
||||
- If all primary events are threat level 0 or explained by contextual items:
|
||||
Final assessment: Only normal residential activity observed during this period.
|
||||
- If threat level 1 events are present:
|
||||
Final assessment: Some activity requires review but no security concerns identified.
|
||||
- If threat level 2 events are present, clearly summarize them as Security concerns requiring immediate attention.
|
||||
- Keep this section brief - do not repeat details from the event descriptions above.
|
||||
|
||||
- Conciseness
|
||||
- Do not repeat benign clothing/appearance details unless they distinguish individuals.
|
||||
- Summarize similar routine events instead of restating full scene descriptions.
|
||||
- When incorporating contextual information, do so briefly and naturally within the primary event description.
|
||||
- Avoid lengthy explanatory notes - integrate context seamlessly into the narrative.
|
||||
"""
|
||||
|
||||
for item in segments:
|
||||
|
||||
6 web/package-lock.json (generated)
@ -6901,9 +6901,9 @@
"integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="
},
"node_modules/js-yaml": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
"integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz",
"integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==",
"dev": true,
"license": "MIT",
"dependencies": {

@ -177,7 +177,7 @@
"generateSuccess": "Successfully generated sample images",
"missingStatesWarning": {
"title": "Missing State Examples",
"description": "You haven't selected examples for all states. The model will not be trained until all states have images. After continuing, use the Recent Classifications view to classify images for the missing states, then train the model."
"description": "It's recommended to select examples for all states for best results. You can continue without selecting all states, but the model will not be trained until all states have images. After continuing, use the Recent Classifications view to classify images for the missing states, then train the model."
}
}
}

@ -500,7 +500,7 @@
"name": {
"title": "Name",
"inputPlaceHolder": "Enter a name…",
"tips": "Name must be at least 2 characters, must have at least one letter, and must not be the name of a camera or another zone."
"tips": "Name must be at least 2 characters, must have at least one letter, and must not be the name of a camera or another zone on this camera."
},
"inertia": {
"title": "Inertia",

12 web/src/api/auth-redirect.ts (Normal file)
@ -0,0 +1,12 @@
// Module-level flag to prevent multiple simultaneous redirects
// (eg, when multiple SWR queries fail with 401 at once, or when
// both ApiProvider and ProtectedRoute try to redirect)
let _isRedirectingToLogin = false;

export function isRedirectingToLogin(): boolean {
return _isRedirectingToLogin;
}

export function setRedirectingToLogin(value: boolean): void {
_isRedirectingToLogin = value;
}

@ -3,6 +3,7 @@ import { SWRConfig } from "swr";
|
||||
import { WsProvider } from "./ws";
|
||||
import axios from "axios";
|
||||
import { ReactNode } from "react";
|
||||
import { isRedirectingToLogin, setRedirectingToLogin } from "./auth-redirect";
|
||||
|
||||
axios.defaults.baseURL = `${baseUrl}api/`;
|
||||
|
||||
@ -31,7 +32,8 @@ export function ApiProvider({ children, options }: ApiProviderType) {
|
||||
) {
|
||||
// redirect to the login page if not already there
|
||||
const loginPage = error.response.headers.get("location") ?? "login";
|
||||
if (window.location.href !== loginPage) {
|
||||
if (window.location.href !== loginPage && !isRedirectingToLogin()) {
|
||||
setRedirectingToLogin(true);
|
||||
window.location.href = loginPage;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,7 +1,11 @@
|
||||
import { useContext } from "react";
|
||||
import { useContext, useEffect } from "react";
|
||||
import { Navigate, Outlet } from "react-router-dom";
|
||||
import { AuthContext } from "@/context/auth-context";
|
||||
import ActivityIndicator from "../indicators/activity-indicator";
|
||||
import {
|
||||
isRedirectingToLogin,
|
||||
setRedirectingToLogin,
|
||||
} from "@/api/auth-redirect";
|
||||
|
||||
export default function ProtectedRoute({
|
||||
requiredRoles,
|
||||
@ -10,6 +14,20 @@ export default function ProtectedRoute({
|
||||
}) {
|
||||
const { auth } = useContext(AuthContext);
|
||||
|
||||
// Redirect to login page when not authenticated
|
||||
// don't use <Navigate> because we need a full page load to reset state
|
||||
useEffect(() => {
|
||||
if (
|
||||
!auth.isLoading &&
|
||||
auth.isAuthenticated &&
|
||||
!auth.user &&
|
||||
!isRedirectingToLogin()
|
||||
) {
|
||||
setRedirectingToLogin(true);
|
||||
window.location.href = "/login";
|
||||
}
|
||||
}, [auth.isLoading, auth.isAuthenticated, auth.user]);
|
||||
|
||||
if (auth.isLoading) {
|
||||
return (
|
||||
<ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />
|
||||
@ -23,7 +41,9 @@ export default function ProtectedRoute({
|
||||
|
||||
// Authenticated mode (8971): require login
|
||||
if (!auth.user) {
|
||||
return <Navigate to="/login" replace />;
|
||||
return (
|
||||
<ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />
|
||||
);
|
||||
}
|
||||
|
||||
// If role is null (shouldn’t happen if isAuthenticated, but type safety), fallback
|
||||
|
||||
@ -23,6 +23,7 @@ import { useTranslation } from "react-i18next";
|
||||
import { ImageShadowOverlay } from "../overlay/ImageShadowOverlay";
|
||||
import BlurredIconButton from "../button/BlurredIconButton";
|
||||
import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip";
|
||||
import { useIsAdmin } from "@/hooks/use-is-admin";
|
||||
|
||||
type ExportProps = {
|
||||
className: string;
|
||||
@ -40,6 +41,7 @@ export default function ExportCard({
|
||||
onDelete,
|
||||
}: ExportProps) {
|
||||
const { t } = useTranslation(["views/exports"]);
|
||||
const isAdmin = useIsAdmin();
|
||||
const [hovered, setHovered] = useState(false);
|
||||
const [loading, setLoading] = useState(
|
||||
exportedRecording.thumb_path.length > 0,
|
||||
@ -195,7 +197,7 @@ export default function ExportCard({
|
||||
</Tooltip>
|
||||
</a>
|
||||
)}
|
||||
{!exportedRecording.in_progress && (
|
||||
{isAdmin && !exportedRecording.in_progress && (
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<BlurredIconButton
|
||||
@ -212,21 +214,23 @@ export default function ExportCard({
|
||||
<TooltipContent>{t("tooltip.editName")}</TooltipContent>
|
||||
</Tooltip>
|
||||
)}
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<BlurredIconButton
|
||||
onClick={() =>
|
||||
onDelete({
|
||||
file: exportedRecording.id,
|
||||
exportName: exportedRecording.name,
|
||||
})
|
||||
}
|
||||
>
|
||||
<LuTrash className="size-4 fill-destructive text-destructive hover:text-white" />
|
||||
</BlurredIconButton>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>{t("tooltip.deleteExport")}</TooltipContent>
|
||||
</Tooltip>
|
||||
{isAdmin && (
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<BlurredIconButton
|
||||
onClick={() =>
|
||||
onDelete({
|
||||
file: exportedRecording.id,
|
||||
exportName: exportedRecording.name,
|
||||
})
|
||||
}
|
||||
>
|
||||
<LuTrash className="size-4 fill-destructive text-destructive hover:text-white" />
|
||||
</BlurredIconButton>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>{t("tooltip.deleteExport")}</TooltipContent>
|
||||
</Tooltip>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
@ -407,30 +407,6 @@ export default function Step3ChooseExamples({
|
||||
return allClasses.every((className) => statesWithExamples.has(className));
|
||||
}, [step1Data.modelType, allClasses, statesWithExamples]);
|
||||
|
||||
// For state models on the last class, require all images to be classified
|
||||
// But allow proceeding even if not all states have examples (with warning)
|
||||
const canProceed = useMemo(() => {
|
||||
if (step1Data.modelType === "state" && isLastClass) {
|
||||
// Check if all 24 images will be classified after current selections are applied
|
||||
const totalImages = unknownImages.slice(0, 24).length;
|
||||
|
||||
// Count images that will be classified (either already classified or currently selected)
|
||||
const allImages = unknownImages.slice(0, 24);
|
||||
const willBeClassified = allImages.filter((img) => {
|
||||
return imageClassifications[img] || selectedImages.has(img);
|
||||
}).length;
|
||||
|
||||
return willBeClassified >= totalImages;
|
||||
}
|
||||
return true;
|
||||
}, [
|
||||
step1Data.modelType,
|
||||
isLastClass,
|
||||
unknownImages,
|
||||
imageClassifications,
|
||||
selectedImages,
|
||||
]);
|
||||
|
||||
const hasUnclassifiedImages = useMemo(() => {
|
||||
if (!unknownImages) return false;
|
||||
const allImages = unknownImages.slice(0, 24);
|
||||
@ -594,9 +570,7 @@ export default function Step3ChooseExamples({
|
||||
}
|
||||
variant="select"
|
||||
className="flex items-center justify-center gap-2 sm:flex-1"
|
||||
disabled={
|
||||
!hasGenerated || isGenerating || isProcessing || !canProceed
|
||||
}
|
||||
disabled={!hasGenerated || isGenerating || isProcessing}
|
||||
>
|
||||
{isProcessing && <ActivityIndicator className="size-4" />}
|
||||
{t("button.continue", { ns: "common" })}
|
||||
|
||||
@ -559,6 +559,7 @@ export function TrackingDetails({
|
||||
isDetailMode={true}
|
||||
camera={event.camera}
|
||||
currentTimeOverride={currentTime}
|
||||
enableGapControllerRecovery={true}
|
||||
/>
|
||||
{isVideoLoading && (
|
||||
<ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />
|
||||
|
||||
@ -5,7 +5,7 @@ import {
|
||||
useRef,
|
||||
useState,
|
||||
} from "react";
|
||||
import Hls from "hls.js";
|
||||
import Hls, { HlsConfig } from "hls.js";
|
||||
import { isDesktop, isMobile } from "react-device-detect";
|
||||
import { TransformComponent, TransformWrapper } from "react-zoom-pan-pinch";
|
||||
import VideoControls from "./VideoControls";
|
||||
@ -57,6 +57,7 @@ type HlsVideoPlayerProps = {
|
||||
isDetailMode?: boolean;
|
||||
camera?: string;
|
||||
currentTimeOverride?: number;
|
||||
enableGapControllerRecovery?: boolean;
|
||||
};
|
||||
|
||||
export default function HlsVideoPlayer({
|
||||
@ -81,6 +82,7 @@ export default function HlsVideoPlayer({
|
||||
isDetailMode = false,
|
||||
camera,
|
||||
currentTimeOverride,
|
||||
enableGapControllerRecovery = false,
|
||||
}: HlsVideoPlayerProps) {
|
||||
const { t } = useTranslation("components/player");
|
||||
const { data: config } = useSWR<FrigateConfig>("config");
|
||||
@ -170,11 +172,23 @@ export default function HlsVideoPlayer({
|
||||
return;
|
||||
}
|
||||
|
||||
hlsRef.current = new Hls({
|
||||
// Base HLS configuration
|
||||
const baseConfig: Partial<HlsConfig> = {
|
||||
maxBufferLength: 10,
|
||||
maxBufferSize: 20 * 1000 * 1000,
|
||||
startPosition: currentSource.startPosition,
|
||||
});
|
||||
};
|
||||
|
||||
const hlsConfig = { ...baseConfig };
|
||||
|
||||
if (enableGapControllerRecovery) {
|
||||
hlsConfig.highBufferWatchdogPeriod = 1; // Check for stalls every 1 second (default: 3)
|
||||
hlsConfig.nudgeOffset = 0.2; // Nudge playhead forward 0.2s when stalled (default: 0.1)
|
||||
hlsConfig.nudgeMaxRetry = 5; // Try up to 5 nudges before giving up (default: 3)
|
||||
hlsConfig.maxBufferHole = 0.5; // Tolerate up to 0.5s gaps between fragments (default: 0.1)
|
||||
}
|
||||
|
||||
hlsRef.current = new Hls(hlsConfig);
|
||||
hlsRef.current.attachMedia(videoRef.current);
|
||||
hlsRef.current.loadSource(currentSource.playlist);
|
||||
videoRef.current.playbackRate = currentPlaybackRate;
|
||||
@ -187,7 +201,13 @@ export default function HlsVideoPlayer({
|
||||
hlsRef.current.destroy();
|
||||
}
|
||||
};
|
||||
}, [videoRef, hlsRef, useHlsCompat, currentSource]);
|
||||
}, [
|
||||
videoRef,
|
||||
hlsRef,
|
||||
useHlsCompat,
|
||||
currentSource,
|
||||
enableGapControllerRecovery,
|
||||
]);
|
||||
|
||||
// state handling
|
||||
|
||||
|
||||