mirror of https://github.com/blakeblackshear/frigate.git
synced 2026-05-05 21:17:43 +03:00

Merge branch 'blakeblackshear:dev' into dev

commit 3cc1382439
27 .github/workflows/ci.yml (vendored)
@@ -202,33 +202,6 @@ jobs:
          set: |
            rk.tags=${{ steps.setup.outputs.image-name }}-rk
            *.cache-from=type=gha
  combined_extra_builds:
    runs-on: ubuntu-22.04
    name: Combined Extra Builds
    needs:
      - amd64_build
      - arm64_build
    steps:
      - name: Check out code
        uses: actions/checkout@v4
        with:
          persist-credentials: false
      - name: Set up QEMU and Buildx
        id: setup
        uses: ./.github/actions/setup
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push Hailo-8l build
        uses: docker/bake-action@v6
        with:
          source: .
          push: true
          targets: h8l
          files: docker/hailo8l/h8l.hcl
          set: |
            h8l.tags=${{ steps.setup.outputs.image-name }}-h8l
            *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-h8l
            *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-h8l,mode=max
  # The majority of users running arm64 are rpi users, so the rpi
  # build should be the primary arm64 image
  assemble_default_build:
@@ -1,42 +0,0 @@
# syntax=docker/dockerfile:1.6

ARG DEBIAN_FRONTEND=noninteractive

# Build Python wheels
FROM wheels AS h8l-wheels

RUN python3 -m pip config set global.break-system-packages true
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
COPY docker/hailo8l/requirements-wheels-h8l.txt /requirements-wheels-h8l.txt

RUN sed -i "/https:\/\//d" /requirements-wheels.txt

# Create a directory to store the built wheels
RUN mkdir /h8l-wheels

# Build the wheels
RUN pip3 wheel --wheel-dir=/h8l-wheels -c /requirements-wheels.txt -r /requirements-wheels-h8l.txt

FROM wget AS hailort
ARG TARGETARCH
RUN --mount=type=bind,source=docker/hailo8l/install_hailort.sh,target=/deps/install_hailort.sh \
    /deps/install_hailort.sh

# Use deps as the base image
FROM deps AS h8l-frigate

# Copy the wheels from the wheels stage
COPY --from=h8l-wheels /h8l-wheels /deps/h8l-wheels
COPY --from=hailort /hailo-wheels /deps/hailo-wheels
COPY --from=hailort /rootfs/ /

# Install the wheels
RUN python3 -m pip config set global.break-system-packages true
RUN pip3 install -U /deps/h8l-wheels/*.whl
RUN pip3 install -U /deps/hailo-wheels/*.whl

# Copy base files from the rootfs stage
COPY --from=rootfs / /

# Set workdir
WORKDIR /opt/frigate/
@@ -1,34 +0,0 @@
target wget {
  dockerfile = "docker/main/Dockerfile"
  platforms = ["linux/arm64","linux/amd64"]
  target = "wget"
}

target wheels {
  dockerfile = "docker/main/Dockerfile"
  platforms = ["linux/arm64","linux/amd64"]
  target = "wheels"
}

target deps {
  dockerfile = "docker/main/Dockerfile"
  platforms = ["linux/arm64","linux/amd64"]
  target = "deps"
}

target rootfs {
  dockerfile = "docker/main/Dockerfile"
  platforms = ["linux/arm64","linux/amd64"]
  target = "rootfs"
}

target h8l {
  dockerfile = "docker/hailo8l/Dockerfile"
  contexts = {
    wget = "target:wget"
    wheels = "target:wheels"
    deps = "target:deps"
    rootfs = "target:rootfs"
  }
  platforms = ["linux/arm64","linux/amd64"]
}
@@ -1,15 +0,0 @@
BOARDS += h8l

local-h8l: version
	docker buildx bake --file=docker/hailo8l/h8l.hcl h8l \
		--set h8l.tags=frigate:latest-h8l \
		--load

build-h8l: version
	docker buildx bake --file=docker/hailo8l/h8l.hcl h8l \
		--set h8l.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-h8l

push-h8l: build-h8l
	docker buildx bake --file=docker/hailo8l/h8l.hcl h8l \
		--set h8l.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-h8l \
		--push
@@ -1,19 +0,0 @@
#!/bin/bash

set -euxo pipefail

hailo_version="4.20.0"

if [[ "${TARGETARCH}" == "amd64" ]]; then
    arch="x86_64"
elif [[ "${TARGETARCH}" == "arm64" ]]; then
    arch="aarch64"
fi

wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${TARGETARCH}.tar.gz" |
    tar -C / -xzf -

mkdir -p /hailo-wheels

wget -P /hailo-wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp311-cp311-linux_${arch}.whl"
@@ -1,12 +0,0 @@
appdirs==1.4.*
argcomplete==2.0.*
contextlib2==0.6.*
distlib==0.3.*
filelock==3.8.*
future==0.18.*
importlib-metadata==5.1.*
importlib-resources==5.1.*
netaddr==0.8.*
netifaces==0.10.*
verboselogs==1.7.*
virtualenv==20.17.*
@@ -170,6 +170,9 @@ RUN /build_pysqlite3.sh
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt

# Install HailoRT & Wheels
RUN --mount=type=bind,source=docker/main/install_hailort.sh,target=/deps/install_hailort.sh \
    /deps/install_hailort.sh

# Collect deps in a single layer
FROM scratch AS deps-rootfs
@@ -180,6 +183,7 @@ COPY --from=libusb-build /usr/local/lib /usr/local/lib
COPY --from=tempio /rootfs/ /
COPY --from=s6-overlay /rootfs/ /
COPY --from=models /rootfs/ /
COPY --from=wheels /rootfs/ /
COPY docker/main/rootfs/ /
14 docker/main/install_hailort.sh (executable file)
@@ -0,0 +1,14 @@
#!/bin/bash

set -euxo pipefail

hailo_version="4.20.0"

if [[ "${TARGETARCH}" == "amd64" ]]; then
    arch="x86_64"
elif [[ "${TARGETARCH}" == "arm64" ]]; then
    arch="aarch64"
fi

wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${TARGETARCH}.tar.gz" | tar -C / -xzf -
wget -P /wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp311-cp311-linux_${arch}.whl"
@@ -53,4 +53,18 @@ pywebpush == 2.0.*
# alpr
pyclipper == 1.3.*
shapely == 2.0.*
Levenshtein==0.26.*
prometheus-client == 0.21.*

# HailoRT Wheels
appdirs==1.4.*
argcomplete==2.0.*
contextlib2==0.6.*
distlib==0.3.*
filelock==3.8.*
future==0.18.*
importlib-metadata==5.1.*
importlib-resources==5.1.*
netaddr==0.8.*
netifaces==0.10.*
verboselogs==1.7.*
virtualenv==20.17.*
@@ -66,29 +66,32 @@ elif go2rtc_config["log"].get("format") is None:
    go2rtc_config["log"]["format"] = "text"

# ensure there is a default webrtc config
if not go2rtc_config.get("webrtc"):
if go2rtc_config.get("webrtc") is None:
    go2rtc_config["webrtc"] = {}

# go2rtc should listen on 8555 tcp & udp by default
if not go2rtc_config["webrtc"].get("listen"):
if go2rtc_config["webrtc"].get("listen") is None:
    go2rtc_config["webrtc"]["listen"] = ":8555"

if not go2rtc_config["webrtc"].get("candidates", []):
if go2rtc_config["webrtc"].get("candidates") is None:
    default_candidates = []
    # use internal candidate if it was discovered when running through the add-on
    internal_candidate = os.environ.get(
        "FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL", None
    )
    internal_candidate = os.environ.get("FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL")
    if internal_candidate is not None:
        default_candidates.append(internal_candidate)
    # should set default stun server so webrtc can work
    default_candidates.append("stun:8555")

    go2rtc_config["webrtc"] = {"candidates": default_candidates}
else:
    print(
        "[INFO] Not injecting WebRTC candidates into go2rtc config as it has been set manually",
    )
    go2rtc_config["webrtc"]["candidates"] = default_candidates

# This prevents WebRTC from attempting to establish a connection to the internal
# docker IPs which are not accessible from outside the container itself and just
# wastes time during negotiation. Note that this is only necessary because
# Frigate container doesn't run in host network mode.
if go2rtc_config["webrtc"].get("filter") is None:
    go2rtc_config["webrtc"]["filter"] = {"candidates": []}
elif go2rtc_config["webrtc"]["filter"].get("candidates") is None:
    go2rtc_config["webrtc"]["filter"]["candidates"] = []

# sets default RTSP response to be equivalent to ?video=h264,h265&audio=aac
# this means user does not need to specify audio codec when using restream
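The switch from `if not go2rtc_config.get(...)` to `... is None` above is deliberate: a value that is present but falsy (such as an empty list the user set on purpose) should no longer be clobbered with defaults. A minimal sketch of the behavioral difference:

```python
go2rtc_config = {"webrtc": {"candidates": []}}  # user explicitly configured no candidates

# Old check: an empty list is falsy, so defaults would have been injected anyway.
if not go2rtc_config["webrtc"].get("candidates", []):
    print("old check: inject default candidates")

# New check: only a truly absent key (None) triggers default injection.
if go2rtc_config["webrtc"].get("candidates") is None:
    print("new check: inject default candidates")
else:
    print("new check: user's explicit empty list is preserved")
```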
@@ -109,6 +109,14 @@ http {
            expires off;

            keepalive_disable safari;

            # vod module returns 502 for non-existent media
            # https://github.com/kaltura/nginx-vod-module/issues/468
            error_page 502 =404 /vod-not-found;
        }

        location = /vod-not-found {
            return 404;
        }

        location /stream/ {
@@ -32,7 +32,7 @@ Examples of available modules are:

#### Go2RTC Logging

See [the go2rtc docs](for logging configuration)
See [the go2rtc docs](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#module-log) for logging configuration

```yaml
go2rtc:
@@ -22,7 +22,7 @@ Note that mjpeg cameras require encoding the video into h264 for recording, and

```yaml
go2rtc:
  streams:
    mjpeg_cam: "ffmpeg:{your_mjpeg_stream_url}#video=h264#hardware" # <- use hardware acceleration to create an h264 stream usable for other components.
    mjpeg_cam: "ffmpeg:http://your_mjpeg_stream_url#video=h264#hardware" # <- use hardware acceleration to create an h264 stream usable for other components.

cameras:
  ...
@@ -85,7 +85,7 @@ This camera is H.265 only. To be able to play clips on some devices (like MacOS)

cameras:
  annkec800: # <------ Name the camera
    ffmpeg:
      apple_compatibility: true # <- Adds compatibility with MacOS and iPhone
      output_args:
        record: preset-record-generic-audio-aac
@@ -5,9 +5,11 @@ title: License Plate Recognition (LPR)

Frigate can recognize license plates on vehicles and automatically add the detected characters as a `sub_label` to objects that are of type `car`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street with a dedicated LPR camera.

Users running a Frigate+ model should ensure that `license_plate` is added to the [list of objects to track](https://docs.frigate.video/plus/#available-label-types) either globally or for a specific camera. This will improve the accuracy and performance of the LPR model.
Users running a Frigate+ model (or any custom model that natively detects license plates) should ensure that `license_plate` is added to the [list of objects to track](https://docs.frigate.video/plus/#available-label-types) either globally or for a specific camera. This will improve the accuracy and performance of the LPR model.

LPR is most effective when the vehicle’s license plate is fully visible to the camera. For moving vehicles, Frigate will attempt to read the plate continuously, refining its detection and keeping the most confident result. LPR will not run on stationary vehicles.
Users without a model that detects license plates can still run LPR. A small YOLOv9 license plate detection model that runs on the CPU will be used instead. You should _not_ define `license_plate` in your list of objects to track.

LPR is most effective when the vehicle’s license plate is fully visible to the camera. For moving vehicles, Frigate will attempt to read the plate continuously, refining recognition and keeping the most confident result. LPR will not run on stationary vehicles.

## Minimum System Requirements
@@ -19,27 +21,68 @@ License plate recognition is disabled by default. Enable it in your config file:

```yaml
lpr:
  enabled: true
  enabled: True
```

## Advanced Configuration

Several options are available to fine-tune the LPR feature. For example, you can adjust the `min_area` setting, which defines the minimum size in pixels a license plate must be before LPR runs. The default is 500 pixels.
Fine-tune the LPR feature using these optional parameters:

Additionally, you can define `known_plates` as strings or regular expressions, allowing Frigate to label tracked vehicles with custom sub_labels when a recognized plate is detected. This information is then accessible in the UI, filters, and notifications.
### Detection

- **`detection_threshold`**: License plate object detection confidence score required before recognition runs (see the sketch after this list).
  - Default: `0.7`
  - Note: If you are using a Frigate+ model and you set the `threshold` in your objects config for `license_plate` higher than this value, recognition will never run. It's best to ensure these values match, or this `detection_threshold` is lower than your object config `threshold`.
- **`min_area`**: Defines the minimum size (in pixels) a license plate must be before recognition runs.
  - Default: `1000` pixels.
  - Depending on the resolution of your cameras, you can increase this value to ignore small or distant plates.
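As a rough illustration of how these two settings gate the pipeline, here is a minimal sketch (the helper name `should_run_recognition` is hypothetical, not Frigate's actual code):

```python
def should_run_recognition(
    plate_score: float,
    plate_area: int,
    detection_threshold: float = 0.7,
    min_area: int = 1000,
) -> bool:
    # Hypothetical sketch: recognition only starts once the detected plate
    # is both confident enough and large enough in the frame.
    return plate_score >= detection_threshold and plate_area >= min_area


print(should_run_recognition(0.80, 1500))  # True
print(should_run_recognition(0.65, 1500))  # False: below detection_threshold
print(should_run_recognition(0.80, 400))   # False: plate smaller than min_area
```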
### Recognition

- **`recognition_threshold`**: Recognition confidence score required to add the plate to the object as a sub label.
  - Default: `0.9`.
- **`min_plate_length`**: Specifies the minimum number of characters a detected license plate must have to be added as a sub-label to an object.
  - Use this to filter out short, incomplete, or incorrect detections.
- **`format`**: A regular expression defining the expected format of detected plates. Plates that do not match this format will be discarded, as shown in the sketch below.
  - `"^[A-Z]{1,3} [A-Z]{1,2} [0-9]{1,4}$"` matches plates like "B AB 1234" or "M X 7"
  - `"^[A-Z]{2}[0-9]{2} [A-Z]{3}$"` matches plates like "AB12 XYZ" or "XY68 ABC"
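The diff to `license_plate_processor.py` later in this commit shows that plates are checked with `re.fullmatch`, so a `format` pattern must match the entire plate. A quick sketch with hypothetical plate reads:

```python
import re

plate_format = r"^[A-Z]{2}[0-9]{2} [A-Z]{3}$"

for plate in ["AB12 XYZ", "XY68 ABC", "AB12XYZ", "1234"]:  # hypothetical reads
    if re.fullmatch(plate_format, plate):
        print(f"kept: {plate}")
    else:
        print(f"discarded: {plate}")
```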
### Matching

- **`known_plates`**: List of strings or regular expressions that assign a custom `sub_label` to `car` objects when a recognized plate matches a known value.
  - These labels appear in the UI, filters, and notifications.
- **`match_distance`**: Allows for minor variations (missing/incorrect characters) when matching a detected plate to a known plate, as illustrated in the sketch below.
  - For example, setting `match_distance: 1` allows a plate `ABCDE` to match `ABCBE` or `ABCD`.
  - This parameter will not operate on known plates that are defined as regular expressions. You should define the full string of your plate in `known_plates` in order to use `match_distance`.
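Under the hood this is an edit-distance comparison; this commit adds `Levenshtein==0.26.*` to the wheel requirements and imports `distance` from it. A minimal sketch with hypothetical plates:

```python
from Levenshtein import distance

known_plate = "ABCDE"
match_distance = 1  # maximum number of missing/incorrect characters

for detected in ["ABCDE", "ABCBE", "ABCD", "XYCDE"]:  # hypothetical reads
    d = distance(known_plate, detected)
    print(f"{detected}: distance={d}, matched={d <= match_distance}")
```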
### Examples

```yaml
lpr:
  enabled: true
  min_area: 500
  enabled: True
  min_area: 1500 # Ignore plates smaller than 1500 pixels
  min_plate_length: 4 # Only recognize plates with 4 or more characters
  known_plates:
    Wife's Car:
      - "ABC-1234"
      - "ABC-I234"
      - "ABC-I234" # Accounts for potential confusion between the number one (1) and capital letter I
    Johnny:
      - "J*N-*234" # Using wildcards for H/M and 1/I
      - "J*N-*234" # Matches JHN-1234 and JMN-I234, but also note that "*" matches any number of characters
    Sally:
      - "[S5]LL-1234" # Matches SLL-1234 and 5LL-1234
      - "[S5]LL-1234" # Matches both SLL-1234 and 5LL-1234
```

In this example, "Wife's Car" will appear as the label for any vehicle matching the plate "ABC-1234." The model might occasionally interpret the digit 1 as a capital I (e.g., "ABC-I234"), so both variations are listed. Similarly, multiple possible variations are specified for Johnny and Sally.

```yaml
lpr:
  enabled: True
  min_area: 4000 # Run recognition on larger plates only
  recognition_threshold: 0.85
  format: "^[A-Z]{3}-[0-9]{4}$" # Only recognize plates that are three letters, followed by a dash, followed by 4 numbers
  match_distance: 1 # Allow one character variation in plate matching
  known_plates:
    Delivery Van:
      - "RJK-5678"
      - "UPS-1234"
    Employee Parking:
      - "EMP-[0-9]{3}[A-Z]" # Matches plates like EMP-123A, EMP-456Z
```
@@ -57,7 +57,7 @@ You can configure Frigate to allow manual selection of the stream you want to view

Additionally, when creating and editing camera groups in the UI, you can choose the stream you want to use for your camera group's Live dashboard.

::: note
:::note

Frigate's default dashboard ("All Cameras") will always use the first entry you've defined in `streams:` when playing live streams from your cameras.
@@ -201,15 +201,7 @@ This detector also supports YOLOX. Frigate does not come with any YOLOX models p

#### YOLO-NAS

[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [Open In Colab](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb).

:::warning

The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html

:::

The input image size in this notebook is set to 320x320. This results in lower CPU usage and faster inference times without impacting performance in most cases due to the way Frigate crops video frames to areas of interest before running detection. The notebook and config can be updated to 640x640 if desired.
[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. See [the models section](#downloading-yolo-nas-model) for more information on downloading the YOLO-NAS model for use in Frigate.

After placing the downloaded onnx model in your config folder, you can use the following configuration:
@@ -231,13 +223,43 @@ model:

Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.

#### YOLOv9

[YOLOv9](https://github.com/MultimediaTechLab/YOLO) models are supported, but not included by default.

:::tip

The YOLOv9 detector has been designed to support YOLOv9 models, but may support other YOLO model architectures as well.

:::

After placing the downloaded onnx model in your config folder, you can use the following configuration:

```yaml
detectors:
  ov:
    type: openvino
    device: GPU

model:
  model_type: yolov9
  width: 640 # <--- should match the imgsize set during model export
  height: 640 # <--- should match the imgsize set during model export
  input_tensor: nchw
  input_dtype: float
  path: /config/model_cache/yolov9-t.onnx
  labelmap_path: /labelmap/coco-80.txt
```

Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.

## NVidia TensorRT Detector

Nvidia GPUs may be used for object detection using the TensorRT libraries. Due to the size of the additional libraries, this detector is only provided in images with the `-tensorrt` tag suffix, e.g. `ghcr.io/blakeblackshear/frigate:stable-tensorrt`. This detector is designed to work with Yolo models for object detection.

### Minimum Hardware Support

The TensorRT detector uses the 12.x series of CUDA libraries which have minor version compatibility. The minimum driver version on the host system must be `>=530`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the NVIDIA GPU Compute Capability table linked below.
The TensorRT detector uses the 12.x series of CUDA libraries which have minor version compatibility. The minimum driver version on the host system must be `>=545`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the NVIDIA GPU Compute Capability table linked below.

To use the TensorRT detector, make sure your host system has the [nvidia-container-runtime](https://docs.docker.com/config/containers/resource_constraints/#access-an-nvidia-gpu) installed to pass through the GPU to the container and the host system has a compatible driver installed for your GPU.

@@ -265,6 +287,8 @@ If your GPU does not support FP16 operations, you can pass the environment variable

Specific models can be selected by passing an environment variable to the `docker run` command or in your `docker-compose.yml` file. Use the form `-e YOLO_MODELS=yolov4-416,yolov4-tiny-416` to select one or more model names. The models available are shown below.

<details>
<summary>Available Models</summary>
```
yolov3-288
yolov3-416
@@ -293,6 +317,7 @@ yolov7-320
yolov7x-640
yolov7x-320
```
</details>

An example `docker-compose.yml` fragment that converts the `yolov4-608` and `yolov7x-640` models for a Pascal card would look something like this:
@@ -420,15 +445,7 @@ There is no default model provided, the following formats are supported:

#### YOLO-NAS

[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [Open In Colab](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb).

:::warning

The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html

:::

The input image size in this notebook is set to 320x320. This results in lower CPU usage and faster inference times without impacting performance in most cases due to the way Frigate crops video frames to areas of interest before running detection. The notebook and config can be updated to 640x640 if desired.
[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. See [the models section](#downloading-yolo-nas-model) for more information on downloading the YOLO-NAS model for use in Frigate.

After placing the downloaded onnx model in your config folder, you can use the following configuration:
@@ -490,15 +507,7 @@ There is no default model provided, the following formats are supported:

#### YOLO-NAS

[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [Open In Colab](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb).

:::warning

The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html

:::

The input image size in this notebook is set to 320x320. This results in lower CPU usage and faster inference times without impacting performance in most cases due to the way Frigate crops video frames to areas of interest before running detection. The notebook and config can be updated to 640x640 if desired.
[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. See [the models section](#downloading-yolo-nas-model) for more information on downloading the YOLO-NAS model for use in Frigate.

After placing the downloaded onnx model in your config folder, you can use the following configuration:
@@ -716,4 +725,24 @@ Explanation of the parameters:
- `soc`: the SoC this model was built for (e.g. "rk3588")
- `tk_version`: Version of `rknn-toolkit2` (e.g. "2.3.0")
- **example**: Specifying `output_name = "frigate-{quant}-{input_basename}-{soc}-v{tk_version}"` could result in a model called `frigate-i8-my_model-rk3588-v2.3.0.rknn`.
- `config`: Configuration passed to `rknn-toolkit2` for model conversion. For an explanation of all available parameters have a look at section "2.2. Model configuration" of [this manual](https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/03_Rockchip_RKNPU_API_Reference_RKNN_Toolkit2_V2.3.0_EN.pdf).

# Models

Some model types are not included in Frigate by default.

## Downloading Models

Here are some tips for getting different model types:

### Downloading YOLO-NAS Model

You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [Open In Colab](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb).

:::warning

The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html

:::

The input image size in this notebook is set to 320x320. This results in lower CPU usage and faster inference times without impacting performance in most cases due to the way Frigate crops video frames to areas of interest before running detection. The notebook and config can be updated to 640x640 if desired.
@@ -340,6 +340,8 @@ objects:
review:
  # Optional: alerts configuration
  alerts:
    # Optional: enables alerts for the camera (default: shown below)
    enabled: True
    # Optional: labels that qualify as an alert (default: shown below)
    labels:
      - car
@@ -352,6 +354,8 @@ review:
      - driveway
  # Optional: detections configuration
  detections:
    # Optional: enables detections for the camera (default: shown below)
    enabled: True
    # Optional: labels that qualify as a detection (default: all labels that are tracked / listened to)
    labels:
      - car
|
||||
# NOTE: small model runs on CPU and large model runs on GPU
|
||||
model_size: "small"
|
||||
|
||||
# Optional: Configuration for license plate recognition capability
|
||||
lpr:
|
||||
# Optional: Enable license plate recognition (default: shown below)
|
||||
enabled: False
|
||||
# Optional: License plate object confidence score required to begin running recognition (default: shown below)
|
||||
detection_threshold: 0.7
|
||||
# Optional: Minimum area of license plate to begin running recognition (default: shown below)
|
||||
min_area: 1000
|
||||
# Optional: Recognition confidence score required to add the plate to the object as a sub label (default: shown below)
|
||||
recognition_threshold: 0.9
|
||||
# Optional: Minimum number of characters a license plate must have to be added to the object as a sub label (default: shown below)
|
||||
min_plate_length: 4
|
||||
# Optional: Regular expression for the expected format of a license plate (default: shown below)
|
||||
format: None
|
||||
# Optional: Allow this number of missing/incorrect characters to still cause a detected plate to match a known plate
|
||||
match_distance: 1
|
||||
# Optional: Known plates to track (strings or regular expressions) (default: shown below)
|
||||
known_plates: {}
|
||||
|
||||
# Optional: Configuration for AI generated tracked object descriptions
|
||||
# NOTE: Semantic Search must be enabled for this to do anything.
|
||||
# WARNING: Depending on the provider, this will send thumbnails over the internet
|
||||
|
||||
@@ -157,8 +157,8 @@ The average speed of your object as it moved through your zone is saved in Frigate

#### Best practices and caveats

- Speed estimation works best with a straight road or path when your object travels in a straight line across that path. If your object makes turns, speed estimation may not be accurate.
- Create a zone where the bottom center of your object's bounding box travels directly through it and does not become obscured at any time.
- Speed estimation works best with a straight road or path when your object travels in a straight line across that path. Avoid creating your zone near intersections or anywhere that objects would make a turn. If the bounding box changes shape (either because the object made a turn or became partially obscured, for example), speed estimation will not be accurate.
- Create a zone where the bottom center of your object's bounding box travels directly through it and does not become obscured at any time. See the photo example above.
- Depending on the size and location of your zone, you may want to decrease the zone's `inertia` value from the default of 3.
- The more accurate your real-world dimensions can be measured, the more accurate speed estimation will be. However, due to the way Frigate's tracking algorithm works, you may need to tweak the real-world distance values so that estimated speeds better match real-world speeds.
- Once an object leaves the zone, speed accuracy will likely decrease due to perspective distortion and misalignment with the calibrated area. Therefore, speed values will show as a zero through MQTT and will not be visible on the debug view when an object is outside of a speed tracking zone.
@@ -117,7 +117,7 @@ For other installations, follow these steps for installation:

#### Setup

To set up Frigate, follow the default installation instructions, but use a Docker image with the `-h8l` suffix, for example: `ghcr.io/blakeblackshear/frigate:stable-h8l`
To set up Frigate, follow the default installation instructions, for example: `ghcr.io/blakeblackshear/frigate:stable`

Next, grant Docker permissions to access your hardware by adding the following lines to your `docker-compose.yml` file:
@@ -316,6 +316,22 @@ Topic with current state of the PTZ autotracker for a camera. Published values are `ON` and `OFF`.

Topic to determine if PTZ autotracker is actively tracking an object. Published values are `ON` and `OFF`.

### `frigate/<camera_name>/review_alerts/set`

Topic to turn review alerts for a camera on or off. Expected values are `ON` and `OFF`.

### `frigate/<camera_name>/review_alerts/state`

Topic with current state of review alerts for a camera. Published values are `ON` and `OFF`.

### `frigate/<camera_name>/review_detections/set`

Topic to turn review detections for a camera on or off. Expected values are `ON` and `OFF`.

### `frigate/<camera_name>/review_detections/state`

Topic with current state of review detections for a camera. Published values are `ON` and `OFF`.
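For example, toggling review alerts and watching the corresponding state topic might look like this minimal sketch, assuming a paho-mqtt 1.x client and placeholder broker/camera names:

```python
import paho.mqtt.client as mqtt

BROKER = "mqtt.local"   # placeholder broker host
CAMERA = "front_door"   # placeholder camera name


def on_message(client, userdata, msg):
    # Frigate publishes the resulting state with the retain flag set.
    print(f"{msg.topic}: {msg.payload.decode()}")


client = mqtt.Client()
client.on_message = on_message
client.connect(BROKER, 1883)
client.subscribe(f"frigate/{CAMERA}/review_alerts/state")

# Ask Frigate to turn review alerts off for this camera.
client.publish(f"frigate/{CAMERA}/review_alerts/set", "OFF")
client.loop_forever()
```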
### `frigate/<camera_name>/birdseye/set`

Topic to turn Birdseye for a camera on and off. Expected values are `ON` and `OFF`. Birdseye mode
@@ -54,6 +54,10 @@ The most common reason for the PCIe Coral not being detected is that the driver

- In most cases [the Coral docs](https://coral.ai/docs/m2/get-started/#2-install-the-pcie-driver-and-edge-tpu-runtime) show how to install the driver for the PCIe based Coral.
- For Ubuntu 22.04+ https://github.com/jnicolson/gasket-builder can be used to build and install the latest version of the driver.

## Attempting to load TPU as pci & Fatal Python error: Illegal instruction

This issue is caused by an outdated gasket driver being used with newer Linux kernels. Installing an updated driver from https://github.com/jnicolson/gasket-builder has been reported to fix the issue.

### Not detected on Raspberry Pi5

A kernel update to the RPi5 means an update to config.txt is required; see [the Raspberry Pi forum for more info](https://forums.raspberrypi.com/viewtopic.php?t=363682&sid=cb59b026a412f0dc041595951273a9ca&start=25)
5 frigate/api/defs/request/export_rename_body.py (normal file)
@@ -0,0 +1,5 @@
from pydantic import BaseModel, Field


class ExportRenameBody(BaseModel):
    name: str = Field(title="Friendly name", max_length=256)
@@ -12,6 +12,7 @@ from peewee import DoesNotExist
from playhouse.shortcuts import model_to_dict

from frigate.api.defs.request.export_recordings_body import ExportRecordingsBody
from frigate.api.defs.request.export_rename_body import ExportRenameBody
from frigate.api.defs.tags import Tags
from frigate.const import EXPORT_DIR
from frigate.models import Export, Previews, Recordings
@@ -129,8 +130,8 @@ def export_recording(
    )


@router.patch("/export/{event_id}/{new_name}")
def export_rename(event_id: str, new_name: str):
@router.patch("/export/{event_id}/rename")
def export_rename(event_id: str, body: ExportRenameBody):
    try:
        export: Export = Export.get(Export.id == event_id)
    except DoesNotExist:
@@ -144,7 +145,7 @@ def export_rename(event_id: str, new_name: str):
            status_code=404,
        )

    export.name = new_name
    export.name = body.name
    export.save()
    return JSONResponse(
        content=(
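With this change the new name travels in a JSON body validated by `ExportRenameBody` instead of in the URL path. Calling the updated endpoint might look like this sketch (host, port, and export id are placeholders; the `/api` prefix assumes a default Frigate install):

```python
import requests

export_id = "1739912345.123456-abc123"  # placeholder export id
resp = requests.patch(
    f"http://frigate.local:5000/api/export/{export_id}/rename",
    json={"name": "Front yard delivery"},  # must be 256 characters or fewer
)
print(resp.status_code, resp.json())
```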
@@ -1088,30 +1088,8 @@ def event_clip(request: Request, event_id: str):
            content={"success": False, "message": "Clip not available"}, status_code=404
        )

    file_name = f"{event.camera}-{event.id}.mp4"
    clip_path = os.path.join(CLIPS_DIR, file_name)

    if not os.path.isfile(clip_path):
        end_ts = (
            datetime.now().timestamp() if event.end_time is None else event.end_time
        )
        return recording_clip(request, event.camera, event.start_time, end_ts)

    headers = {
        "Content-Description": "File Transfer",
        "Cache-Control": "no-cache",
        "Content-Type": "video/mp4",
        "Content-Length": str(os.path.getsize(clip_path)),
        # nginx: https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers
        "X-Accel-Redirect": f"/clips/{file_name}",
    }

    return FileResponse(
        clip_path,
        media_type="video/mp4",
        filename=file_name,
        headers=headers,
    )
    end_ts = datetime.now().timestamp() if event.end_time is None else event.end_time
    return recording_clip(request, event.camera, event.start_time, end_ts)


@router.get("/events/{event_id}/preview.gif")
@@ -65,6 +65,8 @@ class Dispatcher:
            "snapshots": self._on_snapshots_command,
            "birdseye": self._on_birdseye_command,
            "birdseye_mode": self._on_birdseye_mode_command,
            "review_alerts": self._on_alerts_command,
            "review_detections": self._on_detections_command,
        }
        self._global_settings_handlers: dict[str, Callable] = {
            "notifications": self._on_global_notification_command,
@@ -178,6 +180,8 @@ class Dispatcher:
                "autotracking": self.config.cameras[
                    camera
                ].onvif.autotracking.enabled,
                "alerts": self.config.cameras[camera].review.alerts.enabled,
                "detections": self.config.cameras[camera].review.detections.enabled,
            }

        self.publish("camera_activity", json.dumps(camera_status))
@@ -565,3 +569,47 @@ class Dispatcher:
            ),
            retain=True,
        )

    def _on_alerts_command(self, camera_name: str, payload: str) -> None:
        """Callback for alerts topic."""
        review_settings = self.config.cameras[camera_name].review

        if payload == "ON":
            if not self.config.cameras[camera_name].review.alerts.enabled_in_config:
                logger.error(
                    "Alerts must be enabled in the config to be turned on via MQTT."
                )
                return

            if not review_settings.alerts.enabled:
                logger.info(f"Turning on alerts for {camera_name}")
                review_settings.alerts.enabled = True
        elif payload == "OFF":
            if review_settings.alerts.enabled:
                logger.info(f"Turning off alerts for {camera_name}")
                review_settings.alerts.enabled = False

        self.config_updater.publish(f"config/review/{camera_name}", review_settings)
        self.publish(f"{camera_name}/review_alerts/state", payload, retain=True)

    def _on_detections_command(self, camera_name: str, payload: str) -> None:
        """Callback for detections topic."""
        review_settings = self.config.cameras[camera_name].review

        if payload == "ON":
            if not self.config.cameras[camera_name].review.detections.enabled_in_config:
                logger.error(
                    "Detections must be enabled in the config to be turned on via MQTT."
                )
                return

            if not review_settings.detections.enabled:
                logger.info(f"Turning on detections for {camera_name}")
                review_settings.detections.enabled = True
        elif payload == "OFF":
            if review_settings.detections.enabled:
                logger.info(f"Turning off detections for {camera_name}")
                review_settings.detections.enabled = False

        self.config_updater.publish(f"config/review/{camera_name}", review_settings)
        self.publish(f"{camera_name}/review_detections/state", payload, retain=True)

@@ -107,6 +107,16 @@ class MqttClient(Communicator):  # type: ignore[misc]
            ),
            retain=True,
        )
        self.publish(
            f"{camera_name}/review_alerts/state",
            "ON" if camera.review.alerts.enabled_in_config else "OFF",
            retain=True,
        )
        self.publish(
            f"{camera_name}/review_detections/state",
            "ON" if camera.review.detections.enabled_in_config else "OFF",
            retain=True,
        )

        if self.config.notifications.enabled_in_config:
            self.publish(
@@ -13,6 +13,8 @@ DEFAULT_ALERT_OBJECTS = ["person", "car"]
class AlertsConfig(FrigateBaseModel):
    """Configure alerts"""

    enabled: bool = Field(default=True, title="Enable alerts.")

    labels: list[str] = Field(
        default=DEFAULT_ALERT_OBJECTS, title="Labels to create alerts for."
    )
@@ -21,6 +23,10 @@ class AlertsConfig(FrigateBaseModel):
        title="List of required zones to be entered in order to save the event as an alert.",
    )

    enabled_in_config: Optional[bool] = Field(
        default=None, title="Keep track of original state of alerts."
    )

    @field_validator("required_zones", mode="before")
    @classmethod
    def validate_required_zones(cls, v):
@@ -33,6 +39,8 @@ class AlertsConfig(FrigateBaseModel):
class DetectionsConfig(FrigateBaseModel):
    """Configure detections"""

    enabled: bool = Field(default=True, title="Enable detections.")

    labels: Optional[list[str]] = Field(
        default=None, title="Labels to create detections for."
    )
@@ -41,6 +49,10 @@ class DetectionsConfig(FrigateBaseModel):
        title="List of required zones to be entered in order to save the event as a detection.",
    )

    enabled_in_config: Optional[bool] = Field(
        default=None, title="Keep track of original state of detections."
    )

    @field_validator("required_zones", mode="before")
    @classmethod
    def validate_required_zones(cls, v):
@@ -61,14 +61,35 @@ class FaceRecognitionConfig(FrigateBaseModel):

class LicensePlateRecognitionConfig(FrigateBaseModel):
    enabled: bool = Field(default=False, title="Enable license plate recognition.")
    threshold: float = Field(
        default=0.9,
        title="License plate confidence score required to be added to the object as a sub label.",
    detection_threshold: float = Field(
        default=0.7,
        title="License plate object confidence score required to begin running recognition.",
        gt=0.0,
        le=1.0,
    )
    min_area: int = Field(
        default=500,
        title="Min area of license plate to consider running license plate recognition.",
        default=1000,
        title="Minimum area of license plate to begin running recognition.",
    )
    recognition_threshold: float = Field(
        default=0.9,
        title="Recognition confidence score required to add the plate to the object as a sub label.",
        gt=0.0,
        le=1.0,
    )
    min_plate_length: int = Field(
        default=4,
        title="Minimum number of characters a license plate must have to be added to the object as a sub label.",
    )
    format: Optional[str] = Field(
        default=None,
        title="Regular expression for the expected format of license plate.",
    )
    match_distance: int = Field(
        default=1,
        title="Allow this number of missing/incorrect characters to still cause a detected plate to match a known plate.",
        ge=0,
    )
    known_plates: Optional[Dict[str, List[str]]] = Field(
        default={}, title="Known plates to track."
        default={}, title="Known plates to track (strings or regular expressions)."
    )
@@ -534,6 +534,12 @@ class FrigateConfig(FrigateBaseModel):
            camera_config.onvif.autotracking.enabled_in_config = (
                camera_config.onvif.autotracking.enabled
            )
            camera_config.review.alerts.enabled_in_config = (
                camera_config.review.alerts.enabled
            )
            camera_config.review.detections.enabled_in_config = (
                camera_config.review.detections.enabled
            )

            # Add default filters
            object_keys = camera_config.objects.track
@@ -1,34 +1,42 @@
"""Handle processing images for face detection and recognition."""

import datetime
import logging
import math
from typing import List, Tuple
import re
from typing import List, Optional, Tuple

import cv2
import numpy as np
import requests
from Levenshtein import distance
from pyclipper import ET_CLOSEDPOLYGON, JT_ROUND, PyclipperOffset
from shapely.geometry import Polygon

from frigate.comms.inter_process import InterProcessRequestor
from frigate.config.classification import LicensePlateRecognitionConfig
from frigate.embeddings.embeddings import Embeddings
from frigate.config import FrigateConfig
from frigate.const import FRIGATE_LOCALHOST
from frigate.embeddings.functions.onnx import GenericONNXEmbedding, ModelTypeEnum
from frigate.util.image import area

from ..types import DataProcessorMetrics
from .api import RealTimeProcessorApi

logger = logging.getLogger(__name__)

MIN_PLATE_LENGTH = 3
WRITE_DEBUG_IMAGES = False


class LicensePlateRecognition:
    def __init__(
        self,
        config: LicensePlateRecognitionConfig,
        requestor: InterProcessRequestor,
        embeddings: Embeddings,
    ):
        self.lpr_config = config
        self.requestor = requestor
        self.embeddings = embeddings
        self.detection_model = self.embeddings.lpr_detection_model
        self.classification_model = self.embeddings.lpr_classification_model
        self.recognition_model = self.embeddings.lpr_recognition_model
class LicensePlateProcessor(RealTimeProcessorApi):
    def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics):
        super().__init__(config, metrics)
        self.requestor = InterProcessRequestor()
        self.lpr_config = config.lpr
        self.requires_license_plate_detection = (
            "license_plate" not in self.config.objects.all_objects
        )
        self.detected_license_plates: dict[str, dict[str, any]] = {}

        self.ctc_decoder = CTCDecoder()

        self.batch_size = 6
@@ -39,13 +47,66 @@ class LicensePlateRecognition:
        self.box_thresh = 0.8
        self.mask_thresh = 0.8

        self.lpr_detection_model = None
        self.lpr_classification_model = None
        self.lpr_recognition_model = None

        if self.config.lpr.enabled:
            self.detection_model = GenericONNXEmbedding(
                model_name="paddleocr-onnx",
                model_file="detection.onnx",
                download_urls={
                    "detection.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/detection.onnx"
                },
                model_size="large",
                model_type=ModelTypeEnum.lpr_detect,
                requestor=self.requestor,
                device="CPU",
            )

            self.classification_model = GenericONNXEmbedding(
                model_name="paddleocr-onnx",
                model_file="classification.onnx",
                download_urls={
                    "classification.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/classification.onnx"
                },
                model_size="large",
                model_type=ModelTypeEnum.lpr_classify,
                requestor=self.requestor,
                device="CPU",
            )

            self.recognition_model = GenericONNXEmbedding(
                model_name="paddleocr-onnx",
                model_file="recognition.onnx",
                download_urls={
                    "recognition.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/recognition.onnx"
                },
                model_size="large",
                model_type=ModelTypeEnum.lpr_recognize,
                requestor=self.requestor,
                device="CPU",
            )
            self.yolov9_detection_model = GenericONNXEmbedding(
                model_name="yolov9_license_plate",
                model_file="yolov9-256-license-plates.onnx",
                download_urls={
                    "yolov9-256-license-plates.onnx": "https://github.com/hawkeye217/yolov9-license-plates/raw/refs/heads/master/models/yolov9-256-license-plates.onnx"
                },
                model_size="large",
                model_type=ModelTypeEnum.yolov9_lpr_detect,
                requestor=self.requestor,
                device="CPU",
            )

        if self.lpr_config.enabled:
            # all models need to be loaded to run LPR
            self.detection_model._load_model_and_utils()
            self.classification_model._load_model_and_utils()
            self.recognition_model._load_model_and_utils()
            self.yolov9_detection_model._load_model_and_utils()

    def detect(self, image: np.ndarray) -> List[np.ndarray]:
    def _detect(self, image: np.ndarray) -> List[np.ndarray]:
        """
        Detect possible license plates in the input image by first resizing and normalizing it,
        running a detection model, and filtering out low-probability regions.
@@ -59,18 +120,25 @@ class LicensePlateRecognition:
        h, w = image.shape[:2]

        if sum([h, w]) < 64:
            image = self.zero_pad(image)
            image = self._zero_pad(image)

        resized_image = self.resize_image(image)
        normalized_image = self.normalize_image(resized_image)
        resized_image = self._resize_image(image)
        normalized_image = self._normalize_image(resized_image)

        if WRITE_DEBUG_IMAGES:
            current_time = int(datetime.datetime.now().timestamp())
            cv2.imwrite(
                f"debug/frames/license_plate_resized_{current_time}.jpg",
                resized_image,
            )

        outputs = self.detection_model([normalized_image])[0]
        outputs = outputs[0, :, :]

        boxes, _ = self.boxes_from_bitmap(outputs, outputs > self.mask_thresh, w, h)
        return self.filter_polygon(boxes, (h, w))
        boxes, _ = self._boxes_from_bitmap(outputs, outputs > self.mask_thresh, w, h)
        return self._filter_polygon(boxes, (h, w))

    def classify(
    def _classify(
        self, images: List[np.ndarray]
    ) -> Tuple[List[np.ndarray], List[Tuple[str, float]]]:
        """
@@ -97,7 +165,7 @@ class LicensePlateRecognition:

        return self._process_classification_output(images, outputs)

    def recognize(
    def _recognize(
        self, images: List[np.ndarray]
    ) -> Tuple[List[str], List[List[float]]]:
        """
@@ -136,7 +204,7 @@ class LicensePlateRecognition:
        outputs = self.recognition_model(norm_images)
        return self.ctc_decoder(outputs)

    def process_license_plate(
    def _process_license_plate(
        self, image: np.ndarray
    ) -> Tuple[List[str], List[float], List[int]]:
        """
@@ -157,13 +225,28 @@ class LicensePlateRecognition:
            logger.debug("Model runners not loaded")
            return [], [], []

        plate_points = self.detect(image)
        plate_points = self._detect(image)
        if len(plate_points) == 0:
            logger.debug("No points found by OCR detector model")
            return [], [], []

        plate_points = self.sort_polygon(list(plate_points))
        plate_points = self._sort_polygon(list(plate_points))
        plate_images = [self._crop_license_plate(image, x) for x in plate_points]
        rotated_images, _ = self.classify(plate_images)
        rotated_images, _ = self._classify(plate_images)

        # debug rotated and classification result
        if WRITE_DEBUG_IMAGES:
            current_time = int(datetime.datetime.now().timestamp())
            for i, img in enumerate(plate_images):
                cv2.imwrite(
                    f"debug/frames/license_plate_rotated_{current_time}_{i + 1}.jpg",
                    img,
                )
            for i, img in enumerate(rotated_images):
                cv2.imwrite(
                    f"debug/frames/license_plate_classified_{current_time}_{i + 1}.jpg",
                    img,
                )

        # keep track of the index of each image for correct area calc later
        sorted_indices = np.argsort([x.shape[1] / x.shape[0] for x in rotated_images])
@@ -171,7 +254,7 @@ class LicensePlateRecognition:
            idx: original_idx for original_idx, idx in enumerate(sorted_indices)
        }

        results, confidences = self.recognize(rotated_images)
        results, confidences = self._recognize(rotated_images)

        if results:
            license_plates = [""] * len(rotated_images)
@@ -192,23 +275,34 @@ class LicensePlateRecognition:
                    save_image = cv2.cvtColor(
                        rotated_images[original_idx], cv2.COLOR_RGB2BGR
                    )
                    filename = f"/config/plate_{original_idx}_{plate}_{area}.jpg"
                    filename = f"debug/frames/plate_{original_idx}_{plate}_{area}.jpg"
                    cv2.imwrite(filename, save_image)

                license_plates[original_idx] = plate
                average_confidences[original_idx] = average_confidence
                areas[original_idx] = area

            # Filter out plates that have a length of less than 3 characters
            # Filter out plates that have a length of less than min_plate_length characters
            # or that don't match the expected format (if defined)
            # Sort by area, then by plate length, then by confidence all desc
            sorted_data = sorted(
                [
                    (plate, conf, area)
                    for plate, conf, area in zip(
                        license_plates, average_confidences, areas
            filtered_data = []
            for plate, conf, area in zip(license_plates, average_confidences, areas):
                if len(plate) < self.lpr_config.min_plate_length:
                    logger.debug(
                        f"Filtered out '{plate}' due to length ({len(plate)} < {self.lpr_config.min_plate_length})"
                    )
                    if len(plate) >= MIN_PLATE_LENGTH
                ],
                    continue

                if self.lpr_config.format and not re.fullmatch(
                    self.lpr_config.format, plate
                ):
                    logger.debug(f"Filtered out '{plate}' due to format mismatch")
                    continue

                filtered_data.append((plate, conf, area))

            sorted_data = sorted(
                filtered_data,
                key=lambda x: (x[2], len(x[0]), x[1]),
                reverse=True,
            )
@@ -218,7 +312,7 @@ class LicensePlateRecognition:

        return [], [], []

    def resize_image(self, image: np.ndarray) -> np.ndarray:
    def _resize_image(self, image: np.ndarray) -> np.ndarray:
        """
        Resize the input image while maintaining the aspect ratio, ensuring dimensions are multiples of 32.

@@ -234,7 +328,7 @@ class LicensePlateRecognition:
        resize_w = max(int(round(int(w * ratio) / 32) * 32), 32)
        return cv2.resize(image, (resize_w, resize_h))

    def normalize_image(self, image: np.ndarray) -> np.ndarray:
    def _normalize_image(self, image: np.ndarray) -> np.ndarray:
        """
        Normalize the input image by subtracting the mean and multiplying by the standard deviation.

@@ -252,7 +346,7 @@ class LicensePlateRecognition:
        cv2.multiply(image, std, image)
        return image.transpose((2, 0, 1))[np.newaxis, ...]

    def boxes_from_bitmap(
    def _boxes_from_bitmap(
        self, output: np.ndarray, mask: np.ndarray, dest_width: int, dest_height: int
    ) -> Tuple[np.ndarray, List[float]]:
        """
@@ -282,14 +376,16 @@ class LicensePlateRecognition:
            contour = contours[index]

            # get minimum bounding box (rotated rectangle) around the contour and the smallest side length.
            points, min_side = self.get_min_boxes(contour)
            points, min_side = self._get_min_boxes(contour)
            logger.debug(f"min side {index}, {min_side}")

            if min_side < self.min_size:
                continue

            points = np.array(points)

            score = self.box_score(output, contour)
            score = self._box_score(output, contour)
            logger.debug(f"box score {index}, {score}")
            if self.box_thresh > score:
                continue

@@ -302,7 +398,7 @@ class LicensePlateRecognition:
            points = np.array(offset.Execute(distance * 1.5)).reshape((-1, 1, 2))

            # get the minimum bounding box around the shrunken polygon.
            box, min_side = self.get_min_boxes(points)
            box, min_side = self._get_min_boxes(points)

            if min_side < self.min_size + 2:
                continue
@@ -321,7 +417,7 @@ class LicensePlateRecognition:
        return np.array(boxes, dtype="int32"), scores
@staticmethod
|
||||
def get_min_boxes(contour: np.ndarray) -> Tuple[List[Tuple[float, float]], float]:
|
||||
def _get_min_boxes(contour: np.ndarray) -> Tuple[List[Tuple[float, float]], float]:
|
||||
"""
|
||||
Calculate the minimum bounding box (rotated rectangle) for a given contour.
|
||||
|
||||
@ -340,7 +436,7 @@ class LicensePlateRecognition:
|
||||
return box, min(bounding_box[1])
|
||||
|
||||
@staticmethod
|
||||
def box_score(bitmap: np.ndarray, contour: np.ndarray) -> float:
|
||||
def _box_score(bitmap: np.ndarray, contour: np.ndarray) -> float:
|
||||
"""
|
||||
Calculate the average score within the bounding box of a contour.
|
||||
|
||||
@ -360,7 +456,7 @@ class LicensePlateRecognition:
|
||||
return cv2.mean(bitmap[y1 : y2 + 1, x1 : x2 + 1], mask)[0]
|
||||
|
||||
@staticmethod
|
||||
def expand_box(points: List[Tuple[float, float]]) -> np.ndarray:
|
||||
def _expand_box(points: List[Tuple[float, float]]) -> np.ndarray:
|
||||
"""
|
||||
Expand a polygonal shape slightly by a factor determined by the area-to-perimeter ratio.
|
||||
|
||||
@ -377,7 +473,7 @@ class LicensePlateRecognition:
|
||||
expanded = np.array(offset.Execute(distance * 1.5)).reshape((-1, 2))
|
||||
return expanded
|
||||
|
||||
def filter_polygon(
|
||||
def _filter_polygon(
|
||||
self, points: List[np.ndarray], shape: Tuple[int, int]
|
||||
) -> np.ndarray:
|
||||
"""
|
||||
@ -394,14 +490,14 @@ class LicensePlateRecognition:
|
||||
height, width = shape
|
||||
return np.array(
|
||||
[
|
||||
self.clockwise_order(point)
|
||||
self._clockwise_order(point)
|
||||
for point in points
|
||||
if self.is_valid_polygon(point, width, height)
|
||||
if self._is_valid_polygon(point, width, height)
|
||||
]
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def is_valid_polygon(point: np.ndarray, width: int, height: int) -> bool:
|
||||
def _is_valid_polygon(point: np.ndarray, width: int, height: int) -> bool:
|
||||
"""
|
||||
Check if a polygon is valid, meaning it fits within the image bounds
|
||||
and has sides of a minimum length.
|
||||
@ -424,7 +520,7 @@ class LicensePlateRecognition:
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def clockwise_order(point: np.ndarray) -> np.ndarray:
|
||||
def _clockwise_order(point: np.ndarray) -> np.ndarray:
|
||||
"""
|
||||
Arrange the points of a polygon in clockwise order based on their angular positions
|
||||
around the polygon's center.
|
||||
@ -441,10 +537,10 @@ class LicensePlateRecognition:
|
||||
]
|
||||
|
||||
@staticmethod
|
||||
def sort_polygon(points):
|
||||
def _sort_polygon(points):
|
||||
"""
|
||||
Sort polygons based on their position in the image. If polygons are close in vertical
|
||||
position (within 10 pixels), sort them by horizontal position.
|
||||
position (within 5 pixels), sort them by horizontal position.
|
||||
|
||||
Args:
|
||||
points: List of polygons to sort.
|
||||
@ -455,7 +551,7 @@ class LicensePlateRecognition:
|
||||
points.sort(key=lambda x: (x[0][1], x[0][0]))
|
||||
for i in range(len(points) - 1):
|
||||
for j in range(i, -1, -1):
|
||||
if abs(points[j + 1][0][1] - points[j][0][1]) < 10 and (
|
||||
if abs(points[j + 1][0][1] - points[j][0][1]) < 5 and (
|
||||
points[j + 1][0][0] < points[j][0][0]
|
||||
):
|
||||
temp = points[j]
|
||||
@ -466,7 +562,7 @@ class LicensePlateRecognition:
|
||||
return points
|
||||
|
||||
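# The reading-order sort above is an insertion-sort pass: boxes are first
# ordered top-to-bottom, then neighbors whose top edges sit within 5 pixels
# of each other are swapped into left-to-right order, so characters on one
# text line are read as a row. A sketch with polygons abbreviated to their
# first (x, y) point:

boxes = [[(105, 52)], [(10, 50)], [(12, 90)]]
boxes.sort(key=lambda b: (b[0][1], b[0][0]))  # top-to-bottom, then left-to-right
for i in range(len(boxes) - 1):
    for j in range(i, -1, -1):
        if abs(boxes[j + 1][0][1] - boxes[j][0][1]) < 5 and boxes[j + 1][0][0] < boxes[j][0][0]:
            boxes[j], boxes[j + 1] = boxes[j + 1], boxes[j]
# -> [[(10, 50)], [(105, 52)], [(12, 90)]]: same row ordered by x, next row after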
@staticmethod
def zero_pad(image: np.ndarray) -> np.ndarray:
def _zero_pad(image: np.ndarray) -> np.ndarray:
"""
Apply zero-padding to an image, ensuring its dimensions are at least 32x32.
The padding is added only if needed.
@ -554,7 +650,8 @@ class LicensePlateRecognition:
for j in range(len(outputs)):
label, score = outputs[j]
results[indices[i + j]] = [label, score]
if "180" in label and score >= self.lpr_config.threshold:
# make sure we have high confidence if we need to flip a box, this will be rare in lpr
if "180" in label and score >= 0.9:
images[indices[i + j]] = cv2.rotate(images[indices[i + j]], 1)

return images, results
@ -598,7 +695,11 @@ class LicensePlateRecognition:
resized_image = resized_image.transpose((2, 0, 1))
resized_image = (resized_image.astype("float32") / 255.0 - 0.5) / 0.5

padded_image = np.zeros((input_shape[0], input_h, input_w), dtype=np.float32)
# Compute mean pixel value of the resized image (per channel)
mean_pixel = np.mean(resized_image, axis=(1, 2), keepdims=True)
padded_image = np.full(
(input_shape[0], input_h, input_w), mean_pixel, dtype=np.float32
)
padded_image[:, :, :resized_w] = resized_image

return padded_image
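# A minimal, self-contained sketch of the padding change above: filling with
# the per-channel mean pixel instead of zeros avoids a hard black edge that
# the recognition model could mistake for a character boundary. The function
# name and shapes here are illustrative assumptions, not Frigate internals.
import numpy as np

def pad_with_mean(chw_image: np.ndarray, target_w: int) -> np.ndarray:
    """Pad a (C, H, W) float image on the right up to target_w using the mean pixel."""
    c, h, w = chw_image.shape
    mean_pixel = np.mean(chw_image, axis=(1, 2), keepdims=True)  # shape (C, 1, 1)
    padded = np.full((c, h, target_w), mean_pixel, dtype=np.float32)  # broadcast fill
    padded[:, :, :w] = chw_image
    return padded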
@ -649,6 +750,363 @@ class LicensePlateRecognition:
image = np.rot90(image, k=3)
return image

def __update_metrics(self, duration: float) -> None:
"""
Update inference metrics.
"""
self.metrics.alpr_pps.value = (self.metrics.alpr_pps.value * 9 + duration) / 10
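# The update above is an exponential moving average with smoothing factor 0.1:
# each new measurement contributes 10% and the running value keeps 90%. The
# same rule as a standalone function (the name is illustrative):

def update_ema(running: float, sample: float, alpha: float = 0.1) -> float:
    """Equivalent to (running * 9 + sample) / 10 when alpha is 0.1."""
    return (1 - alpha) * running + alpha * sample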
def _detect_license_plate(self, input: np.ndarray) -> tuple[int, int, int, int]:
"""
Use a lightweight YOLOv9 model to detect license plates for users without Frigate+

Return the dimensions of the detected plate as [x1, y1, x2, y2].
"""
predictions = self.yolov9_detection_model(input)

confidence_threshold = self.lpr_config.detection_threshold

top_score = -1
top_box = None

# Loop over predictions
for prediction in predictions:
score = prediction[6]
if score >= confidence_threshold:
bbox = prediction[1:5]
# Scale boxes back to original image size
scale_x = input.shape[1] / 256
scale_y = input.shape[0] / 256
bbox[0] *= scale_x
bbox[1] *= scale_y
bbox[2] *= scale_x
bbox[3] *= scale_y

if score > top_score:
top_score = score
top_box = bbox

# Return the top scoring bounding box if found
if top_box is not None:
# expand box by 10% on each side to help with OCR
expansion = (top_box[2:] - top_box[:2]) * 0.1

# Expand box
expanded_box = np.array(
[
top_box[0] - expansion[0],  # x1
top_box[1] - expansion[1],  # y1
top_box[2] + expansion[0],  # x2
top_box[3] + expansion[1],  # y2
]
).clip(0, [input.shape[1], input.shape[0]] * 2)

logger.debug(f"Found license plate: {expanded_box.astype(int)}")
return tuple(expanded_box.astype(int))
else:
return None  # No detection above the threshold
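# A runnable sketch of the expansion-and-clamp step above, with an invented
# box and frame size; the [x1, y1, x2, y2] layout matches the code:
import numpy as np

box = np.array([120.0, 80.0, 220.0, 130.0])
frame_w, frame_h = 640, 480
expansion = (box[2:] - box[:2]) * 0.1  # 10% of width and height
expanded = np.array(
    [box[0] - expansion[0], box[1] - expansion[1], box[2] + expansion[0], box[3] + expansion[1]]
).clip(0, [frame_w, frame_h] * 2)  # clamp x to [0, w] and y to [0, h]
# expanded -> [110., 75., 230., 135.]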

def _should_keep_previous_plate(
self, id, top_plate, top_char_confidences, top_area, avg_confidence
):
if id not in self.detected_license_plates:
return False

prev_data = self.detected_license_plates[id]
prev_plate = prev_data["plate"]
prev_char_confidences = prev_data["char_confidences"]
prev_area = prev_data["area"]
prev_avg_confidence = (
sum(prev_char_confidences) / len(prev_char_confidences)
if prev_char_confidences
else 0
)

# 1. Normalize metrics
# Length score - use relative comparison
# If lengths are equal, score is 0.5 for both
# If one is longer, it gets a higher score up to 1.0
max_length_diff = 4  # Maximum expected difference in plate lengths
length_diff = len(top_plate) - len(prev_plate)
curr_length_score = 0.5 + (
length_diff / (2 * max_length_diff)
)  # Normalize to 0-1
curr_length_score = max(0, min(1, curr_length_score))  # Clamp to 0-1
prev_length_score = 1 - curr_length_score  # Inverse relationship

# Area score (normalize based on max of current and previous)
max_area = max(top_area, prev_area)
curr_area_score = top_area / max_area
prev_area_score = prev_area / max_area

# Average confidence score (already normalized 0-1)
curr_conf_score = avg_confidence
prev_conf_score = prev_avg_confidence

# Character confidence comparison score
min_length = min(len(top_plate), len(prev_plate))
if min_length > 0:
curr_char_conf = sum(top_char_confidences[:min_length]) / min_length
prev_char_conf = sum(prev_char_confidences[:min_length]) / min_length
else:
curr_char_conf = 0
prev_char_conf = 0

# 2. Define weights
weights = {
"length": 0.4,
"area": 0.3,
"avg_confidence": 0.2,
"char_confidence": 0.1,
}

# 3. Calculate weighted scores
curr_score = (
curr_length_score * weights["length"]
+ curr_area_score * weights["area"]
+ curr_conf_score * weights["avg_confidence"]
+ curr_char_conf * weights["char_confidence"]
)

prev_score = (
prev_length_score * weights["length"]
+ prev_area_score * weights["area"]
+ prev_conf_score * weights["avg_confidence"]
+ prev_char_conf * weights["char_confidence"]
)

# 4. Log the comparison for debugging
logger.debug(
f"Plate comparison - Current plate: {top_plate} (score: {curr_score:.3f}) vs "
f"Previous plate: {prev_plate} (score: {prev_score:.3f})\n"
f"Metrics - Length: {len(top_plate)} vs {len(prev_plate)} (scores: {curr_length_score:.2f} vs {prev_length_score:.2f}), "
f"Area: {top_area} vs {prev_area}, "
f"Avg Conf: {avg_confidence:.2f} vs {prev_avg_confidence:.2f}"
)

# 5. Return True if we should keep the previous plate (i.e., if it scores higher)
return prev_score > curr_score
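# To make the weighted comparison above concrete, the same scoring applied to
# example numbers (invented detections; the per-character term is set equal to
# the average confidence for brevity):

weights = {"length": 0.4, "area": 0.3, "avg_confidence": 0.2, "char_confidence": 0.1}

# current: 7 chars, area 5000, avg conf 0.88; previous: 6 chars, area 4000, avg conf 0.92
length_diff = 7 - 6
curr_length = max(0, min(1, 0.5 + length_diff / (2 * 4)))  # 0.625
prev_length = 1 - curr_length                              # 0.375
curr_area, prev_area = 5000 / 5000, 4000 / 5000            # 1.0, 0.8
curr = curr_length * 0.4 + curr_area * 0.3 + 0.88 * 0.2 + 0.88 * 0.1  # 0.814
prev = prev_length * 0.4 + prev_area * 0.3 + 0.92 * 0.2 + 0.92 * 0.1  # 0.666
keep_previous = prev > curr  # False: the longer, larger plate replaces the old one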

def process_frame(self, obj_data: dict[str, any], frame: np.ndarray):
"""Look for license plates in image."""
start = datetime.datetime.now().timestamp()

id = obj_data["id"]

# don't run for non car objects
if obj_data.get("label") != "car":
logger.debug("Not processing license plate for non-car object.")
return

# don't run for stationary car objects
if obj_data.get("stationary") == True:
logger.debug("Not processing license plate for a stationary car object.")
return

# don't overwrite sub label for objects that have a sub label
# that is not a license plate
if obj_data.get("sub_label") and id not in self.detected_license_plates:
logger.debug(
f"Not processing license plate due to existing sub label: {obj_data.get('sub_label')}."
)
return

license_plate: Optional[dict[str, any]] = None

if self.requires_license_plate_detection:
logger.debug("Running manual license_plate detection.")
car_box = obj_data.get("box")

if not car_box:
return

rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
left, top, right, bottom = car_box
car = rgb[top:bottom, left:right]

# double the size of the car for better box detection
car = cv2.resize(car, (int(2 * car.shape[1]), int(2 * car.shape[0])))

if WRITE_DEBUG_IMAGES:
current_time = int(datetime.datetime.now().timestamp())
cv2.imwrite(
f"debug/frames/car_frame_{current_time}.jpg",
car,
)

yolov9_start = datetime.datetime.now().timestamp()
license_plate = self._detect_license_plate(car)
logger.debug(
f"YOLOv9 LPD inference time: {(datetime.datetime.now().timestamp() - yolov9_start) * 1000:.2f} ms"
)

if not license_plate:
logger.debug("Detected no license plates for car object.")
return

license_plate_area = max(
0,
(license_plate[2] - license_plate[0])
* (license_plate[3] - license_plate[1]),
)

# check that license plate is valid
# double the value because we've doubled the size of the car
if license_plate_area < self.config.lpr.min_area * 2:
logger.debug("License plate is less than min_area")
return

license_plate_frame = car[
license_plate[1] : license_plate[3], license_plate[0] : license_plate[2]
]
else:
# don't run for object without attributes
if not obj_data.get("current_attributes"):
logger.debug("No attributes to parse.")
return

attributes: list[dict[str, any]] = obj_data.get("current_attributes", [])
for attr in attributes:
if attr.get("label") != "license_plate":
continue

if license_plate is None or attr.get("score", 0.0) > license_plate.get(
"score", 0.0
):
license_plate = attr

# no license plates detected in this frame
if not license_plate:
return

if license_plate.get("score") < self.lpr_config.detection_threshold:
logger.debug(
f"Plate detection score is less than the threshold ({license_plate['score']:0.2f} < {self.lpr_config.detection_threshold})"
)
return

license_plate_box = license_plate.get("box")

# check that license plate is valid
if (
not license_plate_box
or area(license_plate_box) < self.config.lpr.min_area
):
logger.debug(f"Invalid license plate box {license_plate}")
return

license_plate_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
license_plate_frame = license_plate_frame[
license_plate_box[1] : license_plate_box[3],
license_plate_box[0] : license_plate_box[2],
]

# double the size of the license plate frame for better OCR
license_plate_frame = cv2.resize(
license_plate_frame,
(
int(2 * license_plate_frame.shape[1]),
int(2 * license_plate_frame.shape[0]),
),
)

if WRITE_DEBUG_IMAGES:
current_time = int(datetime.datetime.now().timestamp())
cv2.imwrite(
f"debug/frames/license_plate_frame_{current_time}.jpg",
license_plate_frame,
)

# run detection, returns results sorted by confidence, best first
license_plates, confidences, areas = self._process_license_plate(
license_plate_frame
)

logger.debug(f"Text boxes: {license_plates}")
logger.debug(f"Confidences: {confidences}")
logger.debug(f"Areas: {areas}")

if license_plates:
for plate, confidence, text_area in zip(license_plates, confidences, areas):
avg_confidence = (
(sum(confidence) / len(confidence)) if confidence else 0
)

logger.debug(
f"Detected text: {plate} (average confidence: {avg_confidence:.2f}, area: {text_area} pixels)"
)
else:
# no plates found
logger.debug("No text detected")
return

top_plate, top_char_confidences, top_area = (
license_plates[0],
confidences[0],
areas[0],
)
avg_confidence = (
(sum(top_char_confidences) / len(top_char_confidences))
if top_char_confidences
else 0
)

# Check if we have a previously detected plate for this ID
if id in self.detected_license_plates:
if self._should_keep_previous_plate(
id, top_plate, top_char_confidences, top_area, avg_confidence
):
logger.debug("Keeping previous plate")
return

# Check against minimum confidence threshold
if avg_confidence < self.lpr_config.recognition_threshold:
logger.debug(
f"Average confidence {avg_confidence} is less than threshold ({self.lpr_config.recognition_threshold})"
)
return

# Determine subLabel based on known plates, use regex matching
# Default to the detected plate, use label name if there's a match
sub_label = next(
(
label
for label, plates in self.lpr_config.known_plates.items()
if any(
re.match(f"^{plate}$", top_plate)
or distance(plate, top_plate) <= self.lpr_config.match_distance
for plate in plates
)
),
top_plate,
)
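# A sketch of the known-plates matching above: a detected plate maps to a
# configured label when it matches a regex exactly or is within a small edit
# distance. `distance` is assumed to be a Levenshtein-style edit distance, as
# the surrounding code suggests; the plates and config below are made up.
import re

known_plates = {"delivery": ["ABC123", "AB.123"]}  # label -> list of patterns
match_distance = 1
detected = "A8C123"  # one character misread by OCR

def edit_distance(a: str, b: str) -> int:
    # tiny Levenshtein implementation for the sketch
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1, cur[j - 1] + 1, prev[j - 1] + (ca != cb)))
        prev = cur
    return prev[-1]

sub_label = next(
    (
        label
        for label, plates in known_plates.items()
        if any(
            re.match(f"^{p}$", detected) or edit_distance(p, detected) <= match_distance
            for p in plates
        )
    ),
    detected,
)  # -> "delivery", since "A8C123" is one edit away from "ABC123"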
# Send the result to the API
resp = requests.post(
f"{FRIGATE_LOCALHOST}/api/events/{id}/sub_label",
json={
"camera": obj_data.get("camera"),
"subLabel": sub_label,
"subLabelScore": avg_confidence,
},
)

if resp.status_code == 200:
self.detected_license_plates[id] = {
"plate": top_plate,
"char_confidences": top_char_confidences,
"area": top_area,
}

self.__update_metrics(datetime.datetime.now().timestamp() - start)

def handle_request(self, topic, request_data) -> dict[str, any] | None:
return

def expire_object(self, object_id: str):
if object_id in self.detected_license_plates:
self.detected_license_plates.pop(object_id)


class CTCDecoder:
"""
@ -9,6 +9,7 @@ from typing_extensions import Literal

from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
from frigate.util.model import post_process_yolov9

logger = logging.getLogger(__name__)

@ -22,7 +23,12 @@ class OvDetectorConfig(BaseDetectorConfig):

class OvDetector(DetectionApi):
type_key = DETECTOR_KEY
supported_models = [ModelTypeEnum.ssd, ModelTypeEnum.yolonas, ModelTypeEnum.yolox]
supported_models = [
ModelTypeEnum.ssd,
ModelTypeEnum.yolonas,
ModelTypeEnum.yolov9,
ModelTypeEnum.yolox,
]

def __init__(self, detector_config: OvDetectorConfig):
self.ov_core = ov.Core()
@ -160,8 +166,7 @@ class OvDetector(DetectionApi):

if self.model_invalid:
return detections

if self.ov_model_type == ModelTypeEnum.ssd:
elif self.ov_model_type == ModelTypeEnum.ssd:
results = infer_request.get_output_tensor(0).data[0][0]

for i, (_, class_id, score, xmin, ymin, xmax, ymax) in enumerate(results):
@ -176,8 +181,7 @@ class OvDetector(DetectionApi):
xmax,
]
return detections

if self.ov_model_type == ModelTypeEnum.yolonas:
elif self.ov_model_type == ModelTypeEnum.yolonas:
predictions = infer_request.get_output_tensor(0).data

for i, prediction in enumerate(predictions):
@ -196,8 +200,10 @@ class OvDetector(DetectionApi):
x_max / self.w,
]
return detections

if self.ov_model_type == ModelTypeEnum.yolox:
elif self.ov_model_type == ModelTypeEnum.yolov9:
out_tensor = infer_request.get_output_tensor(0).data
return post_process_yolov9(out_tensor, self.w, self.h)
elif self.ov_model_type == ModelTypeEnum.yolox:
out_tensor = infer_request.get_output_tensor()
# [x, y, h, w, box_score, class_no_1, ..., class_no_80],
results = out_tensor.data

@ -131,47 +131,6 @@ class Embeddings:
device="GPU" if config.semantic_search.model_size == "large" else "CPU",
)

self.lpr_detection_model = None
self.lpr_classification_model = None
self.lpr_recognition_model = None

if self.config.lpr.enabled:
self.lpr_detection_model = GenericONNXEmbedding(
model_name="paddleocr-onnx",
model_file="detection.onnx",
download_urls={
"detection.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/detection.onnx"
},
model_size="large",
model_type=ModelTypeEnum.lpr_detect,
requestor=self.requestor,
device="CPU",
)

self.lpr_classification_model = GenericONNXEmbedding(
model_name="paddleocr-onnx",
model_file="classification.onnx",
download_urls={
"classification.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/classification.onnx"
},
model_size="large",
model_type=ModelTypeEnum.lpr_classify,
requestor=self.requestor,
device="CPU",
)

self.lpr_recognition_model = GenericONNXEmbedding(
model_name="paddleocr-onnx",
model_file="recognition.onnx",
download_urls={
"recognition.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/recognition.onnx"
},
model_size="large",
model_type=ModelTypeEnum.lpr_recognize,
requestor=self.requestor,
device="CPU",
)

def embed_thumbnail(
self, event_id: str, thumbnail: bytes, upsert: bool = True
) -> ndarray:

@ -5,6 +5,7 @@ from enum import Enum
from io import BytesIO
from typing import Dict, List, Optional, Union

import cv2
import numpy as np
import requests
from PIL import Image
@ -32,6 +33,7 @@ disable_progress_bar()
logger = logging.getLogger(__name__)

FACE_EMBEDDING_SIZE = 160
LPR_EMBEDDING_SIZE = 256


class ModelTypeEnum(str, Enum):
@ -41,6 +43,7 @@ class ModelTypeEnum(str, Enum):
lpr_detect = "lpr_detect"
lpr_classify = "lpr_classify"
lpr_recognize = "lpr_recognize"
yolov9_lpr_detect = "yolov9_lpr_detect"


class GenericONNXEmbedding:
@ -148,6 +151,8 @@ class GenericONNXEmbedding:
self.feature_extractor = []
elif self.model_type == ModelTypeEnum.lpr_recognize:
self.feature_extractor = []
elif self.model_type == ModelTypeEnum.yolov9_lpr_detect:
self.feature_extractor = []

self.runner = ONNXModelRunner(
os.path.join(self.download_path, self.model_file),
@ -237,6 +242,45 @@ class GenericONNXEmbedding:
for img in raw_inputs:
processed.append({"x": img})
return processed
elif self.model_type == ModelTypeEnum.yolov9_lpr_detect:
if isinstance(raw_inputs, list):
raise ValueError(
"License plate embedding does not support batch inputs."
)
# Get image as numpy array
img = self._process_image(raw_inputs)
height, width, channels = img.shape

# Resize maintaining aspect ratio
if width > height:
new_height = int(((height / width) * LPR_EMBEDDING_SIZE) // 4 * 4)
img = cv2.resize(img, (LPR_EMBEDDING_SIZE, new_height))
else:
new_width = int(((width / height) * LPR_EMBEDDING_SIZE) // 4 * 4)
img = cv2.resize(img, (new_width, LPR_EMBEDDING_SIZE))

# Get new dimensions after resize
og_h, og_w, channels = img.shape

# Create black square frame
frame = np.full(
(LPR_EMBEDDING_SIZE, LPR_EMBEDDING_SIZE, channels),
(0, 0, 0),
dtype=np.float32,
)

# Center the resized image in the square frame
x_center = (LPR_EMBEDDING_SIZE - og_w) // 2
y_center = (LPR_EMBEDDING_SIZE - og_h) // 2
frame[y_center : y_center + og_h, x_center : x_center + og_w] = img

# Normalize to 0-1
frame = frame / 255.0

# Convert from HWC to CHW format and add batch dimension
frame = np.transpose(frame, (2, 0, 1))
frame = np.expand_dims(frame, axis=0)
return [{"images": frame}]
else:
raise ValueError(f"Unable to preprocess inputs for {self.model_type}")
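# The yolov9_lpr_detect branch above implements a letterbox preprocess:
# resize the longer side to 256 while keeping the aspect ratio, center the
# result on a black square, normalize to 0-1, and reorder to NCHW. A
# standalone sketch of the same idea (the function name is illustrative; the
# 256 target matches LPR_EMBEDDING_SIZE above):
import cv2
import numpy as np

def letterbox_chw(img: np.ndarray, size: int = 256) -> np.ndarray:
    h, w, c = img.shape
    if w > h:
        new_h = int(((h / w) * size) // 4 * 4)  # keep dims divisible by 4
        img = cv2.resize(img, (size, new_h))
    else:
        new_w = int(((w / h) * size) // 4 * 4)
        img = cv2.resize(img, (new_w, size))
    og_h, og_w = img.shape[:2]
    frame = np.zeros((size, size, c), dtype=np.float32)  # black square canvas
    x0, y0 = (size - og_w) // 2, (size - og_h) // 2
    frame[y0 : y0 + og_h, x0 : x0 + og_w] = img
    frame /= 255.0  # normalize to 0-1
    return np.expand_dims(np.transpose(frame, (2, 0, 1)), 0)  # HWC -> NCHW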
@ -1,10 +1,8 @@
"""Maintain embeddings in SQLite-vec."""

import base64
import datetime
import logging
import os
import re
import threading
from multiprocessing.synchronize import Event as MpEvent
from pathlib import Path
@ -12,7 +10,6 @@ from typing import Optional

import cv2
import numpy as np
import requests
from peewee import DoesNotExist
from playhouse.sqliteq import SqliteQueueDatabase

@ -26,20 +23,21 @@ from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import FrigateConfig
from frigate.const import (
CLIPS_DIR,
FRIGATE_LOCALHOST,
UPDATE_EVENT_DESCRIPTION,
)
from frigate.data_processing.real_time.api import RealTimeProcessorApi
from frigate.data_processing.real_time.bird_processor import BirdProcessor
from frigate.data_processing.real_time.face_processor import FaceProcessor
from frigate.data_processing.real_time.license_plate_processor import (
LicensePlateProcessor,
)
from frigate.data_processing.types import DataProcessorMetrics
from frigate.embeddings.lpr.lpr import LicensePlateRecognition
from frigate.events.types import EventTypeEnum
from frigate.genai import get_genai_client
from frigate.models import Event
from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import serialize
from frigate.util.image import SharedMemoryFrameManager, area, calculate_region
from frigate.util.image import SharedMemoryFrameManager, calculate_region

from .embeddings import Embeddings

@ -82,24 +80,15 @@ class EmbeddingMaintainer(threading.Thread):
if self.config.classification.bird.enabled:
self.processors.append(BirdProcessor(self.config, metrics))

if self.config.lpr.enabled:
self.processors.append(LicensePlateProcessor(self.config, metrics))

# create communication for updating event descriptions
self.requestor = InterProcessRequestor()
self.stop_event = stop_event
self.tracked_events: dict[str, list[any]] = {}
self.genai_client = get_genai_client(config)

# set license plate recognition conditions
self.lpr_config = self.config.lpr
self.requires_license_plate_detection = (
"license_plate" not in self.config.objects.all_objects
)
self.detected_license_plates: dict[str, dict[str, any]] = {}

if self.lpr_config.enabled:
self.license_plate_recognition = LicensePlateRecognition(
self.lpr_config, self.requestor, self.embeddings
)

def run(self) -> None:
"""Maintain a SQLite-vec database for semantic search."""
while not self.stop_event.is_set():
@ -164,11 +153,7 @@ class EmbeddingMaintainer(threading.Thread):
camera_config = self.config.cameras[camera]

# no need to process updated objects if face recognition, lpr, genai are disabled
if (
not camera_config.genai.enabled
and not self.lpr_config.enabled
and len(self.processors) == 0
):
if not camera_config.genai.enabled and len(self.processors) == 0:
return

# Create our own thumbnail based on the bounding box and the frame time
@ -188,16 +173,6 @@ class EmbeddingMaintainer(threading.Thread):
for processor in self.processors:
processor.process_frame(data, yuv_frame)

if self.lpr_config.enabled:
start = datetime.datetime.now().timestamp()
processed = self._process_license_plate(data, yuv_frame)

if processed:
duration = datetime.datetime.now().timestamp() - start
self.metrics.alpr_pps.value = (
self.metrics.alpr_pps.value * 9 + duration
) / 10

# no need to save our own thumbnails if genai is not enabled
# or if the object has become stationary
if self.genai_client is not None and not data["stationary"]:
@ -229,9 +204,6 @@ class EmbeddingMaintainer(threading.Thread):
for processor in self.processors:
processor.expire_object(event_id)

if event_id in self.detected_license_plates:
self.detected_license_plates.pop(event_id)

if updated_db:
try:
event: Event = Event.get(Event.id == event_id)
@ -354,199 +326,6 @@ class EmbeddingMaintainer(threading.Thread):
if event_id:
self.handle_regenerate_description(event_id, source)

def _detect_license_plate(self, input: np.ndarray) -> tuple[int, int, int, int]:
"""Return the dimensions of the input image as [x, y, width, height]."""
height, width = input.shape[:2]
return (0, 0, width, height)

def _process_license_plate(
self, obj_data: dict[str, any], frame: np.ndarray
) -> bool:
"""Look for license plates in image."""
id = obj_data["id"]

# don't run for non car objects
if obj_data.get("label") != "car":
logger.debug("Not processing license plate for non-car object.")
return False

# don't run for stationary car objects
if obj_data.get("stationary") == True:
logger.debug("Not processing license plate for a stationary car object.")
return False

# don't overwrite sub label for objects that have a sub label
# that is not a license plate
if obj_data.get("sub_label") and id not in self.detected_license_plates:
logger.debug(
f"Not processing license plate due to existing sub label: {obj_data.get('sub_label')}."
)
return False

license_plate: Optional[dict[str, any]] = None

if self.requires_license_plate_detection:
logger.debug("Running manual license_plate detection.")
car_box = obj_data.get("box")

if not car_box:
return False

rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
left, top, right, bottom = car_box
car = rgb[top:bottom, left:right]
license_plate = self._detect_license_plate(car)

if not license_plate:
logger.debug("Detected no license plates for car object.")
return False

license_plate_frame = car[
license_plate[1] : license_plate[3], license_plate[0] : license_plate[2]
]
license_plate_frame = cv2.cvtColor(license_plate_frame, cv2.COLOR_RGB2BGR)
else:
# don't run for object without attributes
if not obj_data.get("current_attributes"):
logger.debug("No attributes to parse.")
return False

attributes: list[dict[str, any]] = obj_data.get("current_attributes", [])
for attr in attributes:
if attr.get("label") != "license_plate":
continue

if license_plate is None or attr.get("score", 0.0) > license_plate.get(
"score", 0.0
):
license_plate = attr

# no license plates detected in this frame
if not license_plate:
return False

license_plate_box = license_plate.get("box")

# check that license plate is valid
if (
not license_plate_box
or area(license_plate_box) < self.config.lpr.min_area
):
logger.debug(f"Invalid license plate box {license_plate}")
return False

license_plate_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
license_plate_frame = license_plate_frame[
license_plate_box[1] : license_plate_box[3],
license_plate_box[0] : license_plate_box[2],
]

# run detection, returns results sorted by confidence, best first
license_plates, confidences, areas = (
self.license_plate_recognition.process_license_plate(license_plate_frame)
)

logger.debug(f"Text boxes: {license_plates}")
logger.debug(f"Confidences: {confidences}")
logger.debug(f"Areas: {areas}")

if license_plates:
for plate, confidence, text_area in zip(license_plates, confidences, areas):
avg_confidence = (
(sum(confidence) / len(confidence)) if confidence else 0
)

logger.debug(
f"Detected text: {plate} (average confidence: {avg_confidence:.2f}, area: {text_area} pixels)"
)
else:
# no plates found
logger.debug("No text detected")
return True

top_plate, top_char_confidences, top_area = (
license_plates[0],
confidences[0],
areas[0],
)
avg_confidence = (
(sum(top_char_confidences) / len(top_char_confidences))
if top_char_confidences
else 0
)

# Check if we have a previously detected plate for this ID
if id in self.detected_license_plates:
prev_plate = self.detected_license_plates[id]["plate"]
prev_char_confidences = self.detected_license_plates[id]["char_confidences"]
prev_area = self.detected_license_plates[id]["area"]
prev_avg_confidence = (
(sum(prev_char_confidences) / len(prev_char_confidences))
if prev_char_confidences
else 0
)

# Define conditions for keeping the previous plate
shorter_than_previous = len(top_plate) < len(prev_plate)
lower_avg_confidence = avg_confidence <= prev_avg_confidence
smaller_area = top_area < prev_area

# Compare character-by-character confidence where possible
min_length = min(len(top_plate), len(prev_plate))
char_confidence_comparison = sum(
1
for i in range(min_length)
if top_char_confidences[i] <= prev_char_confidences[i]
)
worse_char_confidences = char_confidence_comparison >= min_length / 2

if (shorter_than_previous or smaller_area) and (
lower_avg_confidence and worse_char_confidences
):
logger.debug(
f"Keeping previous plate. New plate stats: "
f"length={len(top_plate)}, avg_conf={avg_confidence:.2f}, area={top_area} "
f"vs Previous: length={len(prev_plate)}, avg_conf={prev_avg_confidence:.2f}, area={prev_area}"
)
return True

# Check against minimum confidence threshold
if avg_confidence < self.lpr_config.threshold:
logger.debug(
f"Average confidence {avg_confidence} is less than threshold ({self.lpr_config.threshold})"
)
return True

# Determine subLabel based on known plates, use regex matching
# Default to the detected plate, use label name if there's a match
sub_label = next(
(
label
for label, plates in self.lpr_config.known_plates.items()
if any(re.match(f"^{plate}$", top_plate) for plate in plates)
),
top_plate,
)

# Send the result to the API
resp = requests.post(
f"{FRIGATE_LOCALHOST}/api/events/{id}/sub_label",
json={
"camera": obj_data.get("camera"),
"subLabel": sub_label,
"subLabelScore": avg_confidence,
},
)

if resp.status_code == 200:
self.detected_license_plates[id] = {
"plate": top_plate,
"char_confidences": top_char_confidences,
"area": top_area,
}

return True

def _create_thumbnail(self, yuv_frame, box, height=500) -> Optional[bytes]:
"""Return jpg thumbnail of a region of the frame."""
frame = cv2.cvtColor(yuv_frame, cv2.COLOR_YUV2BGR_I420)

@ -187,7 +187,7 @@ class EventProcessor(threading.Thread):
)

# keep these from being set back to false because the event
# may have started while recordings and snapshots were enabled
# may have started while recordings/snapshots/alerts/detections were enabled
# this would be an issue for long running events
if self.events_in_process[event_data["id"]]["has_clip"]:
event_data["has_clip"] = True

@ -5,6 +5,7 @@ import imutils
import numpy as np
from scipy.ndimage import gaussian_filter

from frigate.camera import PTZMetrics
from frigate.comms.config_updater import ConfigSubscriber
from frigate.config import MotionConfig
from frigate.motion import MotionDetector
@ -18,6 +19,7 @@ class ImprovedMotionDetector(MotionDetector):
frame_shape,
config: MotionConfig,
fps: int,
ptz_metrics: PTZMetrics = None,
name="improved",
blur_radius=1,
interpolation=cv2.INTER_NEAREST,
@ -48,6 +50,8 @@ class ImprovedMotionDetector(MotionDetector):
self.contrast_values[:, 1:2] = 255
self.contrast_values_index = 0
self.config_subscriber = ConfigSubscriber(f"config/motion/{name}")
self.ptz_metrics = ptz_metrics
self.last_stop_time = None

def is_calibrating(self):
return self.calibrating
@ -64,6 +68,21 @@ class ImprovedMotionDetector(MotionDetector):
if not self.config.enabled:
return motion_boxes

# if ptz motor is moving from autotracking, quickly return
# a single box that is 80% of the frame
if (
self.ptz_metrics.autotracker_enabled.value
and not self.ptz_metrics.motor_stopped.is_set()
):
return [
(
int(self.frame_shape[1] * 0.1),
int(self.frame_shape[0] * 0.1),
int(self.frame_shape[1] * 0.9),
int(self.frame_shape[0] * 0.9),
)
]

gray = frame[0 : self.frame_shape[0], 0 : self.frame_shape[1]]

# resize frame
@ -151,6 +170,25 @@ class ImprovedMotionDetector(MotionDetector):
self.motion_frame_size[0] * self.motion_frame_size[1]
)

# check if the motor has just stopped from autotracking
# if so, reassign the average to the current frame so we begin with a new baseline
if (
# ensure we only do this for cameras with autotracking enabled
self.ptz_metrics.autotracker_enabled.value
and self.ptz_metrics.motor_stopped.is_set()
and (
self.last_stop_time is None
or self.ptz_metrics.stop_time.value != self.last_stop_time
)
# value is 0 on startup or when motor is moving
and self.ptz_metrics.stop_time.value != 0
):
self.last_stop_time = self.ptz_metrics.stop_time.value

self.avg_frame = resized_frame.astype(np.float32)
motion_boxes = []
pct_motion = 0

# once the motion is less than 5% and the number of contours is < 4, assume it's calibrated
if pct_motion < 0.05 and len(motion_boxes) <= 4:
self.calibrating = False

@ -406,7 +406,7 @@ class CameraState:

if current_frame is not None:
self.current_frame_time = frame_time
self._current_frame = current_frame
self._current_frame = np.copy(current_frame)

if self.previous_frame_id is not None:
self.frame_manager.close(self.previous_frame_id)

@ -465,7 +465,6 @@ class OnvifController:
return

self.cams[camera_name]["active"] = True
self.ptz_metrics[camera_name].motor_stopped.clear()
self.ptz_metrics[camera_name].start_time.value = 0
self.ptz_metrics[camera_name].stop_time.value = 0
move_request = self.cams[camera_name]["move_request"]

@ -148,7 +148,8 @@ class ReviewSegmentMaintainer(threading.Thread):

# create communication for review segments
self.requestor = InterProcessRequestor()
self.config_subscriber = ConfigSubscriber("config/record/")
self.record_config_subscriber = ConfigSubscriber("config/record/")
self.review_config_subscriber = ConfigSubscriber("config/review/")
self.detection_subscriber = DetectionSubscriber(DetectionTypeEnum.all)

# manual events
@ -226,6 +227,13 @@ class ReviewSegmentMaintainer(threading.Thread):
)
self.active_review_segments[segment.camera] = None

def end_segment(self, camera: str) -> None:
"""End the pending segment for a camera."""
segment = self.active_review_segments.get(camera)
if segment:
prev_data = segment.get_data(False)
self._publish_segment_end(segment, prev_data)

def update_existing_segment(
self,
segment: PendingReviewSegment,
@ -273,6 +281,7 @@ class ReviewSegmentMaintainer(threading.Thread):
& set(camera_config.review.alerts.required_zones)
)
)
and camera_config.review.alerts.enabled
):
segment.severity = SeverityEnum.alert
should_update = True
@ -369,13 +378,14 @@ class ReviewSegmentMaintainer(threading.Thread):
& set(camera_config.review.alerts.required_zones)
)
)
and camera_config.review.alerts.enabled
):
severity = SeverityEnum.alert

# if object is detection label
# and review is not already a detection or alert
# and has entered required zones or required zones is not set
# mark this review as alert
# mark this review as detection
if (
not severity
and (
@ -390,6 +400,7 @@ class ReviewSegmentMaintainer(threading.Thread):
& set(camera_config.review.detections.required_zones)
)
)
and camera_config.review.detections.enabled
):
severity = SeverityEnum.detection

@ -430,15 +441,25 @@ class ReviewSegmentMaintainer(threading.Thread):
# check if there is an updated config
while True:
(
updated_topic,
updated_record_topic,
updated_record_config,
) = self.config_subscriber.check_for_update()
) = self.record_config_subscriber.check_for_update()

if not updated_topic:
(
updated_review_topic,
updated_review_config,
) = self.review_config_subscriber.check_for_update()

if not updated_record_topic and not updated_review_topic:
break

camera_name = updated_topic.rpartition("/")[-1]
self.config.cameras[camera_name].record = updated_record_config
if updated_record_topic:
camera_name = updated_record_topic.rpartition("/")[-1]
self.config.cameras[camera_name].record = updated_record_config

if updated_review_topic:
camera_name = updated_review_topic.rpartition("/")[-1]
self.config.cameras[camera_name].review = updated_review_config
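# The loop above drains both config subscribers before touching detections:
# record updates are checked first, then review updates, until neither has a
# pending message. A minimal sketch of the same drain pattern (assuming the
# subscriber returns (None, None) when nothing is queued):

def drain_config_updates(record_sub, review_sub, cameras):
    while True:
        record_topic, record_config = record_sub.check_for_update()
        review_topic, review_config = (None, None)
        if not record_topic:
            review_topic, review_config = review_sub.check_for_update()
        if not record_topic and not review_topic:
            break
        if record_topic:
            cameras[record_topic.rpartition("/")[-1]].record = record_config
        if review_topic:
            cameras[review_topic.rpartition("/")[-1]].review = review_config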
(topic, data) = self.detection_subscriber.check_for_update(timeout=1)

@ -475,12 +496,22 @@ class ReviewSegmentMaintainer(threading.Thread):

if not self.config.cameras[camera].record.enabled:
if current_segment:
self.update_existing_segment(
current_segment, frame_name, frame_time, []
)

self.end_segment(camera)
continue

# Check if the current segment should be processed based on enabled settings
if current_segment:
if (
current_segment.severity == SeverityEnum.alert
and not self.config.cameras[camera].review.alerts.enabled
) or (
current_segment.severity == SeverityEnum.detection
and not self.config.cameras[camera].review.detections.enabled
):
self.end_segment(camera)
continue

# If we reach here, the segment can be processed (if it exists)
if current_segment is not None:
if topic == DetectionTypeEnum.video:
self.update_existing_segment(
@ -496,20 +527,24 @@ class ReviewSegmentMaintainer(threading.Thread):
current_segment.last_update = frame_time

for audio in audio_detections:
if audio in camera_config.review.alerts.labels:
if (
audio in camera_config.review.alerts.labels
and camera_config.review.alerts.enabled
):
current_segment.audio.add(audio)
current_segment.severity = SeverityEnum.alert
elif (
camera_config.review.detections.labels is None
or audio in camera_config.review.detections.labels
):
) and camera_config.review.detections.enabled:
current_segment.audio.add(audio)
elif topic == DetectionTypeEnum.api:
if manual_info["state"] == ManualEventState.complete:
current_segment.detections[manual_info["event_id"]] = (
manual_info["label"]
)
current_segment.severity = SeverityEnum.alert
if self.config.cameras[camera].review.alerts.enabled:
current_segment.severity = SeverityEnum.alert
current_segment.last_update = manual_info["end_time"]
elif manual_info["state"] == ManualEventState.start:
self.indefinite_events[camera][manual_info["event_id"]] = (
@ -518,7 +553,8 @@ class ReviewSegmentMaintainer(threading.Thread):
current_segment.detections[manual_info["event_id"]] = (
manual_info["label"]
)
current_segment.severity = SeverityEnum.alert
if self.config.cameras[camera].review.alerts.enabled:
current_segment.severity = SeverityEnum.alert

# temporarily make it so this event cannot end
current_segment.last_update = sys.maxsize
@ -536,12 +572,16 @@ class ReviewSegmentMaintainer(threading.Thread):
)
else:
if topic == DetectionTypeEnum.video:
self.check_if_new_segment(
camera,
frame_name,
frame_time,
current_tracked_objects,
)
if (
self.config.cameras[camera].review.alerts.enabled
or self.config.cameras[camera].review.detections.enabled
):
self.check_if_new_segment(
camera,
frame_name,
frame_time,
current_tracked_objects,
)
elif topic == DetectionTypeEnum.audio and len(audio_detections) > 0:
severity = None

@ -549,13 +589,16 @@ class ReviewSegmentMaintainer(threading.Thread):
detections = set()

for audio in audio_detections:
if audio in camera_config.review.alerts.labels:
if (
audio in camera_config.review.alerts.labels
and camera_config.review.alerts.enabled
):
detections.add(audio)
severity = SeverityEnum.alert
elif (
camera_config.review.detections.labels is None
or audio in camera_config.review.detections.labels
):
) and camera_config.review.detections.enabled:
detections.add(audio)

if not severity:
@ -572,28 +615,36 @@ class ReviewSegmentMaintainer(threading.Thread):
detections,
)
elif topic == DetectionTypeEnum.api:
self.active_review_segments[camera] = PendingReviewSegment(
camera,
frame_time,
SeverityEnum.alert,
{manual_info["event_id"]: manual_info["label"]},
{},
[],
set(),
)

if manual_info["state"] == ManualEventState.start:
self.indefinite_events[camera][manual_info["event_id"]] = (
manual_info["label"]
if self.config.cameras[camera].review.alerts.enabled:
self.active_review_segments[camera] = PendingReviewSegment(
camera,
frame_time,
SeverityEnum.alert,
{manual_info["event_id"]: manual_info["label"]},
{},
[],
set(),
)
# temporarily make it so this event cannot end
self.active_review_segments[camera].last_update = sys.maxsize
elif manual_info["state"] == ManualEventState.complete:
self.active_review_segments[camera].last_update = manual_info[
"end_time"
]

self.config_subscriber.stop()
if manual_info["state"] == ManualEventState.start:
self.indefinite_events[camera][manual_info["event_id"]] = (
manual_info["label"]
)
# temporarily make it so this event cannot end
self.active_review_segments[
camera
].last_update = sys.maxsize
elif manual_info["state"] == ManualEventState.complete:
self.active_review_segments[
camera
].last_update = manual_info["end_time"]
else:
logger.warning(
f"Manual event API has been called for {camera}, but alerts are disabled. This manual event will not appear as an alert."
)

self.record_config_subscriber.stop()
self.review_config_subscriber.stop()
self.requestor.stop()
self.detection_subscriber.stop()
logger.info("Exiting review maintainer...")

@ -1,7 +1,9 @@
import logging
import random
import string
from typing import Sequence

import cv2
import numpy as np
from norfair import (
Detection,
@ -11,12 +13,19 @@ from norfair import (
draw_boxes,
)
from norfair.drawing.drawer import Drawer
from rich import print
from rich.console import Console
from rich.table import Table

from frigate.camera import PTZMetrics
from frigate.config import CameraConfig
from frigate.ptz.autotrack import PtzMotionEstimator
from frigate.track import ObjectTracker
from frigate.util.image import intersection_over_union
from frigate.util.image import (
SharedMemoryFrameManager,
get_histogram,
intersection_over_union,
)
from frigate.util.object import average_boxes, median_of_boxes

logger = logging.getLogger(__name__)
@ -71,12 +80,36 @@ def frigate_distance(detection: Detection, tracked_object) -> float:
return distance(detection.points, tracked_object.estimate)


def histogram_distance(matched_not_init_trackers, unmatched_trackers):
snd_embedding = unmatched_trackers.last_detection.embedding

if snd_embedding is None:
for detection in reversed(unmatched_trackers.past_detections):
if detection.embedding is not None:
snd_embedding = detection.embedding
break
else:
return 1

for detection_fst in matched_not_init_trackers.past_detections:
if detection_fst.embedding is None:
continue

distance = 1 - cv2.compareHist(
snd_embedding, detection_fst.embedding, cv2.HISTCMP_CORREL
)
if distance < 0.5:
return distance
return 1
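# The re-id hook above scores 1 - correlation between color histograms and
# only accepts matches below 0.5. A sketch of producing comparable embeddings
# for two crops and scoring them with OpenCV (get_histogram's real
# implementation lives in frigate.util.image; this stand-in is an assumption):
import cv2
import numpy as np

def crop_histogram(img: np.ndarray, x1: int, y1: int, x2: int, y2: int) -> np.ndarray:
    crop = img[y1:y2, x1:x2]
    hist = cv2.calcHist([crop], [0], None, [32], [0, 256])  # 32-bin histogram of channel 0
    return cv2.normalize(hist, hist)  # scale so histograms of different crops compare fairly

img_a = np.random.randint(0, 255, (240, 320, 3), np.uint8)
img_b = np.random.randint(0, 255, (240, 320, 3), np.uint8)
a = crop_histogram(img_a, 10, 10, 100, 100)
b = crop_histogram(img_b, 10, 10, 100, 100)
dist = 1 - cv2.compareHist(a, b, cv2.HISTCMP_CORREL)  # 0 = identical, larger = less similar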

class NorfairTracker(ObjectTracker):
def __init__(
self,
config: CameraConfig,
ptz_metrics: PTZMetrics,
):
self.frame_manager = SharedMemoryFrameManager()
self.tracked_objects = {}
self.untracked_object_boxes: list[list[int]] = []
self.disappeared = {}
@ -88,26 +121,137 @@ class NorfairTracker(ObjectTracker):
self.ptz_motion_estimator = {}
self.camera_name = config.name
self.track_id_map = {}
# TODO: could also initialize a tracker per object class if there
# was a good reason to have different distance calculations
self.tracker = Tracker(
distance_function=frigate_distance,
distance_threshold=2.5,
initialization_delay=self.detect_config.min_initialized,
hit_counter_max=self.detect_config.max_disappeared,
# use default filter factory with custom values
# R is the multiplier for the sensor measurement noise matrix, default of 4.0
# lowering R means that we trust the position of the bounding boxes more
# testing shows that the prediction was being relied on a bit too much
# TODO: could use different kalman filter values along with
# the different tracker per object class
filter_factory=OptimizedKalmanFilterFactory(R=3.4),
)

# Define tracker configurations for static camera
self.object_type_configs = {
"car": {
"filter_factory": OptimizedKalmanFilterFactory(R=3.4, Q=0.03),
"distance_function": frigate_distance,
"distance_threshold": 2.5,
},
}

# Define autotracking PTZ-specific configurations
self.ptz_object_type_configs = {
"person": {
"filter_factory": OptimizedKalmanFilterFactory(
R=4.5,
Q=0.25,
),
"distance_function": frigate_distance,
"distance_threshold": 2,
"past_detections_length": 5,
"reid_distance_function": histogram_distance,
"reid_distance_threshold": 0.5,
"reid_hit_counter_max": 10,
},
}

# Default tracker configuration
# use default filter factory with custom values
# R is the multiplier for the sensor measurement noise matrix, default of 4.0
# lowering R means that we trust the position of the bounding boxes more
# testing shows that the prediction was being relied on a bit too much
self.default_tracker_config = {
"filter_factory": OptimizedKalmanFilterFactory(R=3.4),
"distance_function": frigate_distance,
"distance_threshold": 2.5,
}

self.default_ptz_tracker_config = {
"filter_factory": OptimizedKalmanFilterFactory(R=4, Q=0.2),
"distance_function": frigate_distance,
"distance_threshold": 3,
}

self.trackers = {}
# Handle static trackers
for obj_type, tracker_config in self.object_type_configs.items():
if obj_type in self.camera_config.objects.track:
if obj_type not in self.trackers:
self.trackers[obj_type] = {}
self.trackers[obj_type]["static"] = self._create_tracker(
obj_type, tracker_config
)

# Handle PTZ trackers
for obj_type, tracker_config in self.ptz_object_type_configs.items():
if (
obj_type in self.camera_config.onvif.autotracking.track
and self.camera_config.onvif.autotracking.enabled_in_config
):
if obj_type not in self.trackers:
self.trackers[obj_type] = {}
self.trackers[obj_type]["ptz"] = self._create_tracker(
obj_type, tracker_config
)

# Initialize default trackers
self.default_tracker = {
"static": Tracker(
distance_function=frigate_distance,
distance_threshold=self.default_tracker_config["distance_threshold"],
initialization_delay=self.detect_config.min_initialized,
hit_counter_max=self.detect_config.max_disappeared,
filter_factory=self.default_tracker_config["filter_factory"],
),
"ptz": Tracker(
distance_function=frigate_distance,
distance_threshold=self.default_ptz_tracker_config[
"distance_threshold"
],
initialization_delay=self.detect_config.min_initialized,
hit_counter_max=self.detect_config.max_disappeared,
filter_factory=self.default_ptz_tracker_config["filter_factory"],
),
}

if self.ptz_metrics.autotracker_enabled.value:
self.ptz_motion_estimator = PtzMotionEstimator(
self.camera_config, self.ptz_metrics
)

def _create_tracker(self, obj_type, tracker_config):
"""Helper function to create a tracker with given configuration."""
tracker_params = {
"distance_function": tracker_config["distance_function"],
"distance_threshold": tracker_config["distance_threshold"],
"initialization_delay": self.detect_config.min_initialized,
"hit_counter_max": self.detect_config.max_disappeared,
"filter_factory": tracker_config["filter_factory"],
}

# Add reid parameters if max_frames is None
if (
self.detect_config.stationary.max_frames.objects.get(
obj_type, self.detect_config.stationary.max_frames.default
)
is None
):
reid_keys = [
"past_detections_length",
"reid_distance_function",
"reid_distance_threshold",
"reid_hit_counter_max",
]
tracker_params.update(
{key: tracker_config[key] for key in reid_keys if key in tracker_config}
)

return Tracker(**tracker_params)

def get_tracker(self, object_type: str) -> Tracker:
"""Get the appropriate tracker based on object type and camera mode."""
mode = (
"ptz"
if self.camera_config.onvif.autotracking.enabled_in_config
and object_type in self.camera_config.onvif.autotracking.track
else "static"
)
if object_type in self.trackers:
return self.trackers[object_type][mode]
return self.default_tracker[mode]
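# Usage of the per-type dispatch above, in brief: every label is routed to a
# dedicated tracker when one was configured, otherwise to the shared default,
# with "ptz" vs "static" chosen by whether autotracking covers that label.
# An illustrative call sequence (labels are examples):
#
#   tracker = self.get_tracker("car")     # per-type static tracker (R=3.4, Q=0.03)
#   tracker = self.get_tracker("person")  # "ptz" variant if autotracking tracks person
#   tracker = self.get_tracker("dog")     # no per-type config -> default tracker
#
# Keeping one Tracker per label means Kalman noise parameters and re-id
# settings can be tuned per object class without affecting the others.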
    def register(self, track_id, obj):
        rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
        id = f"{obj['frame_time']}-{rand_id}"
@ -116,10 +260,13 @@ class NorfairTracker(ObjectTracker):
        obj["start_time"] = obj["frame_time"]
        obj["motionless_count"] = 0
        obj["position_changes"] = 0

        # Get the correct tracker for this object's label
        tracker = self.get_tracker(obj["label"])
        obj["score_history"] = [
            p.data["score"]
            for p in next(
                (o for o in self.tracker.tracked_objects if o.global_id == track_id)
                (o for o in tracker.tracked_objects if o.global_id == track_id)
            ).past_detections
        ]
        self.tracked_objects[id] = obj
@ -137,11 +284,25 @@ class NorfairTracker(ObjectTracker):
        self.stationary_box_history[id] = []

    def deregister(self, id, track_id):
        obj = self.tracked_objects[id]

        del self.tracked_objects[id]
        del self.disappeared[id]
        self.tracker.tracked_objects = [
            o for o in self.tracker.tracked_objects if o.global_id != track_id
        ]

        # only manually deregister objects from norfair's list if max_frames is defined
        if (
            self.detect_config.stationary.max_frames.objects.get(
                obj["label"], self.detect_config.stationary.max_frames.default
            )
            is not None
        ):
            tracker = self.get_tracker(obj["label"])
            tracker.tracked_objects = [
                o
                for o in tracker.tracked_objects
                if o.global_id != track_id and o.hit_counter < 0
            ]

        del self.track_id_map[track_id]

    # tracks the current position of the object based on the last N bounding boxes
@ -287,9 +448,13 @@ class NorfairTracker(ObjectTracker):
    def match_and_update(
        self, frame_name: str, frame_time: float, detections: list[dict[str, any]]
    ):
        norfair_detections = []

        # Group detections by object type
        detections_by_type = {}
        for obj in detections:
            label = obj[0]
            if label not in detections_by_type:
                detections_by_type[label] = []

            # centroid is used for other things downstream
            centroid_x = int((obj[2][0] + obj[2][2]) / 2.0)
            centroid_y = int((obj[2][1] + obj[2][3]) / 2.0)
@ -297,22 +462,32 @@ class NorfairTracker(ObjectTracker):
            # track based on top,left and bottom,right corners instead of centroid
            points = np.array([[obj[2][0], obj[2][1]], [obj[2][2], obj[2][3]]])

            norfair_detections.append(
                Detection(
                    points=points,
                    label=obj[0],
                    data={
                        "label": obj[0],
                        "score": obj[1],
                        "box": obj[2],
                        "area": obj[3],
                        "ratio": obj[4],
                        "region": obj[5],
                        "frame_time": frame_time,
                        "centroid": (centroid_x, centroid_y),
                    },
            embedding = None
            if self.ptz_metrics.autotracker_enabled.value:
                yuv_frame = self.frame_manager.get(
                    frame_name, self.camera_config.frame_shape_yuv
                )
                embedding = get_histogram(
                    yuv_frame, obj[2][0], obj[2][1], obj[2][2], obj[2][3]
                )

            detection = Detection(
                points=points,
                label=label,
                # TODO: stationary objects won't have embeddings
                embedding=embedding,
                data={
                    "label": label,
                    "score": obj[1],
                    "box": obj[2],
                    "area": obj[3],
                    "ratio": obj[4],
                    "region": obj[5],
                    "frame_time": frame_time,
                    "centroid": (centroid_x, centroid_y),
                },
            )
            detections_by_type[label].append(detection)

        coord_transformations = None

@ -327,13 +502,32 @@ class NorfairTracker(ObjectTracker):
                detections, frame_name, frame_time, self.camera_name
            )

        tracked_objects = self.tracker.update(
            detections=norfair_detections, coord_transformations=coord_transformations
        # Update all configured trackers
        all_tracked_objects = []
        for label in self.trackers:
            tracker = self.get_tracker(label)
            tracked_objects = tracker.update(
                detections=detections_by_type.get(label, []),
                coord_transformations=coord_transformations,
            )
            all_tracked_objects.extend(tracked_objects)

        # Collect detections for objects without specific trackers
        default_detections = []
        for label, dets in detections_by_type.items():
            if label not in self.trackers:
                default_detections.extend(dets)

        # Update default tracker with untracked detections
        mode = "ptz" if self.ptz_metrics.autotracker_enabled.value else "static"
        tracked_objects = self.default_tracker[mode].update(
            detections=default_detections, coord_transformations=coord_transformations
        )
        all_tracked_objects.extend(tracked_objects)

        # update or create new tracks
        active_ids = []
        for t in tracked_objects:
        for t in all_tracked_objects:
            estimate = tuple(t.estimate.flatten().astype(int))
            # keep the estimate within the bounds of the image
            estimate = (
@ -373,19 +567,55 @@ class NorfairTracker(ObjectTracker):
            o[2] for o in detections if o[2] not in tracked_object_boxes
        ]

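The reworked match_and_update above consumes detections as (label, score, box, area, ratio, region) tuples, buckets them by label, updates each configured tracker with only its own label's detections, and routes everything else to the per-mode default tracker. A small grouping sketch with made-up detection tuples:

    from collections import defaultdict

    # (label, score, box, area, ratio, region) - made-up values for illustration
    detections = [
        ("person", 0.92, (10, 20, 110, 220), 20000, 0.5, (0, 0, 320, 320)),
        ("car", 0.81, (200, 50, 400, 150), 20000, 2.0, (0, 0, 320, 320)),
        ("person", 0.75, (500, 40, 560, 180), 8400, 0.43, (320, 0, 640, 320)),
    ]

    detections_by_type = defaultdict(list)
    for det in detections:
        detections_by_type[det[0]].append(det)

    configured = {"person"}  # labels that have a dedicated tracker
    default_detections = [
        det
        for label, dets in detections_by_type.items()
        if label not in configured
        for det in dets
    ]
    print(sorted(detections_by_type))  # ['car', 'person']
    print(len(default_detections))     # 1: the car falls through to the default tracker
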
    def print_objects_as_table(self, tracked_objects: Sequence):
        """Used for helping in debugging"""
        print()
        console = Console()
        table = Table(show_header=True, header_style="bold magenta")
        table.add_column("Id", style="yellow", justify="center")
        table.add_column("Age", justify="right")
        table.add_column("Hit Counter", justify="right")
        table.add_column("Last distance", justify="right")
        table.add_column("Init Id", justify="center")
        for obj in tracked_objects:
            table.add_row(
                str(obj.id),
                str(obj.age),
                str(obj.hit_counter),
                f"{obj.last_distance:.4f}" if obj.last_distance is not None else "N/A",
                str(obj.initializing_id),
            )
        console.print(table)

    def debug_draw(self, frame, frame_time):
        # Collect all tracked objects from each tracker
        all_tracked_objects = []

        # print a table to the console with norfair tracked object info
        if False:
            self.print_objects_as_table(self.trackers["person"]["ptz"].tracked_objects)

        # Get tracked objects from type-specific trackers
        for object_trackers in self.trackers.values():
            for tracker in object_trackers.values():
                all_tracked_objects.extend(tracker.tracked_objects)

        # Get tracked objects from default trackers
        for tracker in self.default_tracker.values():
            all_tracked_objects.extend(tracker.tracked_objects)

        active_detections = [
            Drawable(id=obj.id, points=obj.last_detection.points, label=obj.label)
            for obj in self.tracker.tracked_objects
            for obj in all_tracked_objects
            if obj.last_detection.data["frame_time"] == frame_time
        ]
        missing_detections = [
            Drawable(id=obj.id, points=obj.last_detection.points, label=obj.label)
            for obj in self.tracker.tracked_objects
            for obj in all_tracked_objects
            if obj.last_detection.data["frame_time"] != frame_time
        ]
        # draw the estimated bounding box
        draw_boxes(frame, self.tracker.tracked_objects, color="green", draw_ids=True)
        draw_boxes(frame, all_tracked_objects, color="green", draw_ids=True)
        # draw the detections that were detected in the current frame
        draw_boxes(frame, active_detections, color="blue", draw_ids=True)
        # draw the detections that are missing in the current frame
@ -393,7 +623,7 @@ class NorfairTracker(ObjectTracker):

        # draw the distance calculation for the last detection
        # estimate vs detection
        for obj in self.tracker.tracked_objects:
        for obj in all_tracked_objects:
            ld = obj.last_detection
            # bottom right
            text_anchor = (

@ -72,18 +72,27 @@ class TrackedObject:
    def max_severity(self) -> Optional[str]:
        review_config = self.camera_config.review

        if self.obj_data["label"] in review_config.alerts.labels and (
            not review_config.alerts.required_zones
            or set(self.entered_zones) & set(review_config.alerts.required_zones)
        if (
            self.camera_config.review.alerts.enabled
            and self.obj_data["label"] in review_config.alerts.labels
            and (
                not review_config.alerts.required_zones
                or set(self.entered_zones) & set(review_config.alerts.required_zones)
            )
        ):
            return SeverityEnum.alert

        if (
            not review_config.detections.labels
            or self.obj_data["label"] in review_config.detections.labels
        ) and (
            not review_config.detections.required_zones
            or set(self.entered_zones) & set(review_config.detections.required_zones)
            self.camera_config.review.detections.enabled
            and (
                not review_config.detections.labels
                or self.obj_data["label"] in review_config.detections.labels
            )
            and (
                not review_config.detections.required_zones
                or set(self.entered_zones)
                & set(review_config.detections.required_zones)
            )
        ):
            return SeverityEnum.detection


@ -949,3 +949,13 @@ def get_image_from_recording(
        return process.stdout
    else:
        return None


def get_histogram(image, x_min, y_min, x_max, y_max):
    image_bgr = cv2.cvtColor(image, cv2.COLOR_YUV2BGR_I420)
    image_bgr = image_bgr[y_min:y_max, x_min:x_max]

    hist = cv2.calcHist(
        [image_bgr], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256]
    )
    return cv2.normalize(hist, hist).flatten()

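The new get_histogram helper converts the I420 frame to BGR, crops to the bounding box, and bins each of the three channels into 8 levels, so the flattened embedding is an 8x8x8 = 512-dimensional color histogram with unit L2 norm (cv2.normalize defaults to NORM_L2 with alpha=1). A quick check on a synthetic frame (frame contents and crop coordinates are arbitrary):

    import cv2
    import numpy as np

    # A flat gray 640x480 I420 frame: Y plane stacked over half-height chroma planes.
    h, w = 480, 640
    yuv = np.full((h * 3 // 2, w), 128, dtype=np.uint8)

    bgr = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR_I420)
    crop = bgr[100:300, 200:400]  # [y_min:y_max, x_min:x_max]
    hist = cv2.calcHist([crop], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
    embedding = cv2.normalize(hist, hist).flatten()

    print(embedding.shape)  # (512,)
    print(round(float(np.linalg.norm(embedding)), 3))  # 1.0
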
@ -435,7 +435,11 @@ def track_camera(
    object_filters = config.objects.filters

    motion_detector = ImprovedMotionDetector(
        frame_shape, config.motion, config.detect.fps, name=config.name
        frame_shape,
        config.motion,
        config.detect.fps,
        name=config.name,
        ptz_metrics=ptz_metrics,
    )
    object_detector = RemoteObjectDetector(
        name, labelmap, detection_queue, result_connection, model_config, stop_event
@ -506,14 +510,7 @@ def detect(
            height = y_max - y_min
            area = width * height
            ratio = width / max(1, height)
            det = (
                d[0],
                d[1],
                (x_min, y_min, x_max, y_max),
                area,
                ratio,
                region,
            )
            det = (d[0], d[1], (x_min, y_min, x_max, y_max), area, ratio, region)
            # apply object filters
            if is_object_filtered(det, objects_to_track, object_filters):
                continue

@ -61,6 +61,8 @@ function useValue(): useValueReturn {
        notifications,
        notifications_suspended,
        autotracking,
        alerts,
        detections,
      } =
        // @ts-expect-error we know this is correct
        state["config"];
@ -76,6 +78,10 @@ function useValue(): useValueReturn {
      cameraStates[`${name}/ptz_autotracker/state`] = autotracking
        ? "ON"
        : "OFF";
      cameraStates[`${name}/review_alerts/state`] = alerts ? "ON" : "OFF";
      cameraStates[`${name}/review_detections/state`] = detections
        ? "ON"
        : "OFF";
    });

    setWsState((prevState) => ({
@ -213,6 +219,31 @@ export function useAutotrackingState(camera: string): {
  return { payload: payload as ToggleableSetting, send };
}

export function useAlertsState(camera: string): {
  payload: ToggleableSetting;
  send: (payload: ToggleableSetting, retain?: boolean) => void;
} {
  const {
    value: { payload },
    send,
  } = useWs(`${camera}/review_alerts/state`, `${camera}/review_alerts/set`);
  return { payload: payload as ToggleableSetting, send };
}

export function useDetectionsState(camera: string): {
  payload: ToggleableSetting;
  send: (payload: ToggleableSetting, retain?: boolean) => void;
} {
  const {
    value: { payload },
    send,
  } = useWs(
    `${camera}/review_detections/state`,
    `${camera}/review_detections/set`,
  );
  return { payload: payload as ToggleableSetting, send };
}

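useAlertsState and useDetectionsState follow the same pattern as the existing toggle hooks: subscribe to <camera>/review_alerts/state or <camera>/review_detections/state and publish "ON"/"OFF" to the matching /set topic. Assuming these topics are also mirrored over MQTT under Frigate's usual frigate/<camera>/<feature>/set convention (an assumption; this hunk only shows the WebSocket side), toggling them from a script could look like:

    # Hedged sketch: broker address, camera name, and the MQTT topic mirroring
    # are all assumptions, not confirmed by this diff.
    import paho.mqtt.client as mqtt

    client = mqtt.Client()
    client.connect("mqtt.local", 1883)  # hypothetical broker

    camera = "front_door"  # hypothetical camera name
    client.publish(f"frigate/{camera}/review_alerts/set", "ON")
    client.publish(f"frigate/{camera}/review_detections/set", "OFF")
    client.disconnect()
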
export function usePtzCommand(camera: string): {
  payload: string;
  send: (payload: string, retain?: boolean) => void;

@ -82,16 +82,19 @@ export default function LiveContextMenu({
  );

  useEffect(() => {
    if (cameraGroup) {
    if (cameraGroup && cameraGroup != "default") {
      setGroupStreamingSettings(allGroupsStreamingSettings[cameraGroup]);
    }
    // set individual group when all groups changes
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [allGroupsStreamingSettings]);
  }, [allGroupsStreamingSettings, cameraGroup]);

  const onSave = useCallback(
    (settings: GroupStreamingSettings) => {
      if (!cameraGroup || !allGroupsStreamingSettings) {
      if (
        !cameraGroup ||
        !allGroupsStreamingSettings ||
        cameraGroup == "default" ||
        !settings
      ) {
        return;
      }


@ -45,6 +45,13 @@ import {
} from "@/components/ui/tooltip";
import { AnnotationSettingsPane } from "./AnnotationSettingsPane";
import { TooltipPortal } from "@radix-ui/react-tooltip";
import {
  ContextMenu,
  ContextMenuContent,
  ContextMenuItem,
  ContextMenuTrigger,
} from "@/components/ui/context-menu";
import { useNavigate } from "react-router-dom";

type ObjectLifecycleProps = {
  className?: string;
@ -68,6 +75,7 @@ export default function ObjectLifecycle({

  const { data: config } = useSWR<FrigateConfig>("config");
  const apiHost = useApiHost();
  const navigate = useNavigate();

  const [imgLoaded, setImgLoaded] = useState(false);
  const imgRef = useRef<HTMLImageElement>(null);
@ -293,62 +301,83 @@ export default function ObjectLifecycle({
            imgLoaded ? "visible" : "invisible",
          )}
        >
          <img
            key={event.id}
            ref={imgRef}
            className={cn(
              "max-h-[50dvh] max-w-full select-none rounded-lg object-contain",
            )}
            loading={isSafari ? "eager" : "lazy"}
            style={
              isIOS
                ? {
                    WebkitUserSelect: "none",
                    WebkitTouchCallout: "none",
                  }
                : undefined
            }
            draggable={false}
            src={src}
            onLoad={() => setImgLoaded(true)}
            onError={() => setHasError(true)}
          />
          {showZones &&
            lifecycleZones?.map((zone) => (
              <div
                className="absolute inset-0 flex items-center justify-center"
                style={{
                  width: imgRef.current?.clientWidth,
                  height: imgRef.current?.clientHeight,
                }}
                key={zone}
              >
                <svg
                  viewBox={`0 0 ${imgRef.current?.width} ${imgRef.current?.height}`}
                  className="absolute inset-0"
                >
                  <polygon
                    points={getZonePolygon(zone)}
                    className="fill-none stroke-2"
                    style={{
                      stroke: `rgb(${getZoneColor(zone)?.join(",")})`,
                      fill:
                        selectedZone == zone
                          ? `rgba(${getZoneColor(zone)?.join(",")}, 0.5)`
                          : `rgba(${getZoneColor(zone)?.join(",")}, 0.3)`,
                      strokeWidth: selectedZone == zone ? 4 : 2,
                    }}
                  />
                </svg>
              </div>
            ))}
          {boxStyle && (
            <div className="absolute border-2 border-red-600" style={boxStyle}>
              <div className="absolute bottom-[-3px] left-1/2 h-[5px] w-[5px] -translate-x-1/2 transform bg-yellow-500" />
            </div>
          )}
          <ContextMenu>
            <ContextMenuTrigger>
              <img
                key={event.id}
                ref={imgRef}
                className={cn(
                  "max-h-[50dvh] max-w-full select-none rounded-lg object-contain",
                )}
                loading={isSafari ? "eager" : "lazy"}
                style={
                  isIOS
                    ? {
                        WebkitUserSelect: "none",
                        WebkitTouchCallout: "none",
                      }
                    : undefined
                }
                draggable={false}
                src={src}
                onLoad={() => setImgLoaded(true)}
                onError={() => setHasError(true)}
              />

              {showZones &&
                lifecycleZones?.map((zone) => (
                  <div
                    className="absolute inset-0 flex items-center justify-center"
                    style={{
                      width: imgRef.current?.clientWidth,
                      height: imgRef.current?.clientHeight,
                    }}
                    key={zone}
                  >
                    <svg
                      viewBox={`0 0 ${imgRef.current?.width} ${imgRef.current?.height}`}
                      className="absolute inset-0"
                    >
                      <polygon
                        points={getZonePolygon(zone)}
                        className="fill-none stroke-2"
                        style={{
                          stroke: `rgb(${getZoneColor(zone)?.join(",")})`,
                          fill:
                            selectedZone == zone
                              ? `rgba(${getZoneColor(zone)?.join(",")}, 0.5)`
                              : `rgba(${getZoneColor(zone)?.join(",")}, 0.3)`,
                          strokeWidth: selectedZone == zone ? 4 : 2,
                        }}
                      />
                    </svg>
                  </div>
                ))}

              {boxStyle && (
                <div
                  className="absolute border-2 border-red-600"
                  style={boxStyle}
                >
                  <div className="absolute bottom-[-3px] left-1/2 h-[5px] w-[5px] -translate-x-1/2 transform bg-yellow-500" />
                </div>
              )}
            </ContextMenuTrigger>
            <ContextMenuContent>
              <ContextMenuItem>
                <div
                  className="flex w-full cursor-pointer items-center justify-start gap-2 p-2"
                  onClick={() =>
                    navigate(
                      `/settings?page=masks%20/%20zones&camera=${event.camera}&object_mask=${eventSequence?.[current].data.box}`,
                    )
                  }
                >
                  <div className="text-primary">Create Object Mask</div>
                </div>
              </ContextMenuItem>
            </ContextMenuContent>
          </ContextMenu>
        </div>
      </div>


@ -1,4 +1,4 @@
import React, { useState, useRef } from "react";
import React, { useState, useRef, useEffect } from "react";
import { useVideoDimensions } from "@/hooks/use-video-dimensions";
import HlsVideoPlayer from "./HlsVideoPlayer";
import ActivityIndicator from "../indicators/activity-indicator";
@ -15,37 +15,61 @@ export function GenericVideoPlayer({
  children,
}: GenericVideoPlayerProps) {
  const [isLoading, setIsLoading] = useState(true);
  const [sourceExists, setSourceExists] = useState(true);
  const videoRef = useRef<HTMLVideoElement | null>(null);
  const containerRef = useRef<HTMLDivElement | null>(null);
  const { videoDimensions, setVideoResolution } =
    useVideoDimensions(containerRef);

  useEffect(() => {
    const checkSourceExists = async (url: string) => {
      try {
        const response = await fetch(url, { method: "HEAD" });
        // nginx vod module returns 502 for non existent media
        // https://github.com/kaltura/nginx-vod-module/issues/468
        setSourceExists(response.status !== 502 && response.status !== 404);
      } catch (error) {
        setSourceExists(false);
      }
    };

    checkSourceExists(source);
  }, [source]);

  return (
    <div ref={containerRef} className="relative flex h-full w-full flex-col">
      <div className="relative flex flex-grow items-center justify-center">
        {isLoading && (
          <ActivityIndicator className="absolute left-1/2 top-1/2 z-10 -translate-x-1/2 -translate-y-1/2" />
        {!sourceExists ? (
          <div className="flex aspect-video w-full items-center justify-center bg-background_alt text-lg text-primary">
            Video not available
          </div>
        ) : (
          <>
            {isLoading && (
              <ActivityIndicator className="absolute left-1/2 top-1/2 z-10 -translate-x-1/2 -translate-y-1/2" />
            )}
            <div
              className="relative flex items-center justify-center"
              style={videoDimensions}
            >
              <HlsVideoPlayer
                videoRef={videoRef}
                currentSource={source}
                hotKeys
                visible
                frigateControls={false}
                fullscreen={false}
                supportsFullscreen={false}
                onPlaying={() => {
                  setIsLoading(false);
                  onPlaying?.();
                }}
                setFullResolution={setVideoResolution}
              />
              {!isLoading && children}
            </div>
          </>
        )}
        <div
          className="relative flex items-center justify-center"
          style={videoDimensions}
        >
          <HlsVideoPlayer
            videoRef={videoRef}
            currentSource={source}
            hotKeys
            visible
            frigateControls={false}
            fullscreen={false}
            supportsFullscreen={false}
            onPlaying={() => {
              setIsLoading(false);
              onPlaying?.();
            }}
            setFullResolution={setVideoResolution}
          />
          {!isLoading && children}
        </div>
      </div>
    </div>
  );

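The availability probe above relies on the nginx vod module answering 502 for missing media (per the linked issue) and treats 404 the same way. The equivalent check outside the browser, sketched with the requests library and a placeholder URL:

    import requests

    def source_exists(url: str) -> bool:
        # nginx vod module returns 502 for non-existent media; treat 404 the same
        try:
            response = requests.head(url, timeout=5)
            return response.status_code not in (502, 404)
        except requests.RequestException:
            return False

    # Placeholder host and path; substitute a real HLS source URL.
    print(source_exists("http://frigate.local:5000/vod/event/abc123/index.m3u8"))
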
@ -144,7 +144,7 @@ export default function HlsVideoPlayer({

  const [tallCamera, setTallCamera] = useState(false);
  const [isPlaying, setIsPlaying] = useState(true);
  const [muted, setMuted] = useOverlayState("playerMuted", true);
  const [muted, setMuted] = usePersistence("hlsPlayerMuted", true);
  const [volume, setVolume] = useOverlayState("playerVolume", 1.0);
  const [defaultPlaybackRate] = usePersistence("playbackRate", 1);
  const [playbackRate, setPlaybackRate] = useOverlayState(
@ -211,7 +211,7 @@ export default function HlsVideoPlayer({
            fullscreen: supportsFullscreen,
          }}
          setControlsOpen={setControlsOpen}
          setMuted={(muted) => setMuted(muted, true)}
          setMuted={(muted) => setMuted(muted)}
          playbackRate={playbackRate ?? 1}
          hotKeys={hotKeys}
          onPlayPause={onPlayPause}
@ -280,9 +280,12 @@ export default function HlsVideoPlayer({
              }
            : undefined
        }
        onVolumeChange={() =>
          setVolume(videoRef.current?.volume ?? 1.0, true)
        }
        onVolumeChange={() => {
          setVolume(videoRef.current?.volume ?? 1.0, true);
          if (!frigateControls) {
            setMuted(videoRef.current?.muted);
          }
        }}
        onPlay={() => {
          setIsPlaying(true);


@ -33,6 +33,8 @@ type MotionMaskEditPaneProps = {
  setIsLoading: React.Dispatch<React.SetStateAction<boolean>>;
  onSave?: () => void;
  onCancel?: () => void;
  snapPoints: boolean;
  setSnapPoints: React.Dispatch<React.SetStateAction<boolean>>;
};

export default function MotionMaskEditPane({
@ -45,6 +47,8 @@ export default function MotionMaskEditPane({
  setIsLoading,
  onSave,
  onCancel,
  snapPoints,
  setSnapPoints,
}: MotionMaskEditPaneProps) {
  const { data: config, mutate: updateConfig } =
    useSWR<FrigateConfig>("config");
@ -252,6 +256,8 @@ export default function MotionMaskEditPane({
            polygons={polygons}
            setPolygons={setPolygons}
            activePolygonIndex={activePolygonIndex}
            snapPoints={snapPoints}
            setSnapPoints={setSnapPoints}
          />
        </div>
      )}

@ -49,6 +49,8 @@ type ObjectMaskEditPaneProps = {
  setIsLoading: React.Dispatch<React.SetStateAction<boolean>>;
  onSave?: () => void;
  onCancel?: () => void;
  snapPoints: boolean;
  setSnapPoints: React.Dispatch<React.SetStateAction<boolean>>;
};

export default function ObjectMaskEditPane({
@ -61,6 +63,8 @@ export default function ObjectMaskEditPane({
  setIsLoading,
  onSave,
  onCancel,
  snapPoints,
  setSnapPoints,
}: ObjectMaskEditPaneProps) {
  const { data: config, mutate: updateConfig } =
    useSWR<FrigateConfig>("config");
@ -272,6 +276,8 @@ export default function ObjectMaskEditPane({
            polygons={polygons}
            setPolygons={setPolygons}
            activePolygonIndex={activePolygonIndex}
            snapPoints={snapPoints}
            setSnapPoints={setSnapPoints}
          />
        </div>
      )}

@ -6,6 +6,7 @@ import type { KonvaEventObject } from "konva/lib/Node";
import { Polygon, PolygonType } from "@/types/canvas";
import { useApiHost } from "@/api";
import ActivityIndicator from "@/components/indicators/activity-indicator";
import { snapPointToLines } from "@/utils/canvasUtil";

type PolygonCanvasProps = {
  containerRef: RefObject<HTMLDivElement>;
@ -18,6 +19,7 @@ type PolygonCanvasProps = {
  hoveredPolygonIndex: number | null;
  selectedZoneMask: PolygonType[] | undefined;
  activeLine?: number;
  snapPoints: boolean;
};

export function PolygonCanvas({
@ -31,6 +33,7 @@ export function PolygonCanvas({
  hoveredPolygonIndex,
  selectedZoneMask,
  activeLine,
  snapPoints,
}: PolygonCanvasProps) {
  const [isLoaded, setIsLoaded] = useState(false);
  const [image, setImage] = useState<HTMLImageElement | undefined>();
@ -156,9 +159,23 @@ export function PolygonCanvas({
        intersection?.getClassName() !== "Circle") ||
      (activePolygon.isFinished && intersection?.name() == "unfilled-line")
    ) {
      let newPoint = [mousePos.x, mousePos.y];

      if (snapPoints) {
        // Snap to other polygons' edges
        const otherPolygons = polygons.filter(
          (_, i) => i !== activePolygonIndex,
        );
        const snappedPos = snapPointToLines(newPoint, otherPolygons, 10);

        if (snappedPos) {
          newPoint = snappedPos;
        }
      }

      const { updatedPoints, updatedPointsOrder } = addPointToPolygon(
        activePolygon,
        [mousePos.x, mousePos.y],
        newPoint,
      );

      updatedPolygons[activePolygonIndex] = {
@ -184,11 +201,24 @@ export function PolygonCanvas({
    if (stage) {
      // we add an unfilled line for adding points when finished
      const index = e.target.index - (activePolygon.isFinished ? 2 : 1);
      const pos = [e.target._lastPos!.x, e.target._lastPos!.y];
      if (pos[0] < 0) pos[0] = 0;
      if (pos[1] < 0) pos[1] = 0;
      if (pos[0] > stage.width()) pos[0] = stage.width();
      if (pos[1] > stage.height()) pos[1] = stage.height();
      let pos = [e.target._lastPos!.x, e.target._lastPos!.y];

      if (snapPoints) {
        // Snap to other polygons' edges
        const otherPolygons = polygons.filter(
          (_, i) => i !== activePolygonIndex,
        );
        const snappedPos = snapPointToLines(pos, otherPolygons, 10); // 10 is the snap threshold

        if (snappedPos) {
          pos = snappedPos;
        }
      }

      // Constrain to stage boundaries
      pos[0] = Math.max(0, Math.min(pos[0], stage.width()));
      pos[1] = Math.max(0, Math.min(pos[1], stage.height()));

      updatedPolygons[activePolygonIndex] = {
        ...activePolygon,
        points: [
@ -291,6 +321,16 @@ export function PolygonCanvas({
              handlePointDragMove={handlePointDragMove}
              handleGroupDragEnd={handleGroupDragEnd}
              activeLine={activeLine}
              snapPoints={snapPoints}
              snapToLines={(point) =>
                snapPoints
                  ? snapPointToLines(
                      point,
                      polygons.filter((_, i) => i !== index),
                      10,
                    )
                  : null
              }
            />
          ),
        )}
@ -310,6 +350,16 @@ export function PolygonCanvas({
            handlePointDragMove={handlePointDragMove}
            handleGroupDragEnd={handleGroupDragEnd}
            activeLine={activeLine}
            snapPoints={snapPoints}
            snapToLines={(point) =>
              snapPoints
                ? snapPointToLines(
                    point,
                    polygons.filter((_, i) => i !== activePolygonIndex),
                    10,
                  )
                : null
            }
          />
        )}
      </Layer>

@ -28,6 +28,8 @@ type PolygonDrawerProps = {
  handlePointDragMove: (e: KonvaEventObject<MouseEvent | TouchEvent>) => void;
  handleGroupDragEnd: (e: KonvaEventObject<MouseEvent | TouchEvent>) => void;
  activeLine?: number;
  snapToLines: (point: number[]) => number[] | null;
  snapPoints: boolean;
};

export default function PolygonDrawer({
@ -41,6 +43,8 @@ export default function PolygonDrawer({
  handlePointDragMove,
  handleGroupDragEnd,
  activeLine,
  snapToLines,
  snapPoints,
}: PolygonDrawerProps) {
  const vertexRadius = 6;
  const flattenedPoints = useMemo(() => flattenPoints(points), [points]);
@ -218,15 +222,32 @@ export default function PolygonDrawer({
            onMouseOver={handleMouseOverPoint}
            onMouseOut={handleMouseOutPoint}
            draggable={isActive}
            onDragMove={isActive ? handlePointDragMove : undefined}
            onDragMove={(e) => {
              if (isActive) {
                if (snapPoints) {
                  const snappedPos = snapToLines([e.target.x(), e.target.y()]);
                  if (snappedPos) {
                    e.target.position({ x: snappedPos[0], y: snappedPos[1] });
                  }
                }
                handlePointDragMove(e);
              }
            }}
            dragBoundFunc={(pos) => {
              if (stageRef.current) {
                return dragBoundFunc(
                const boundPos = dragBoundFunc(
                  stageRef.current.width(),
                  stageRef.current.height(),
                  vertexRadius,
                  pos,
                );
                if (snapPoints) {
                  const snappedPos = snapToLines([boundPos.x, boundPos.y]);
                  return snappedPos
                    ? { x: snappedPos[0], y: snappedPos[1] }
                    : boundPos;
                }
                return boundPos;
              } else {
                return pos;
              }

@ -2,17 +2,23 @@ import { Polygon } from "@/types/canvas";
import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip";
import { MdOutlineRestartAlt, MdUndo } from "react-icons/md";
import { Button } from "../ui/button";
import { TbPolygon, TbPolygonOff } from "react-icons/tb";
import { cn } from "@/lib/utils";

type PolygonEditControlsProps = {
  polygons: Polygon[];
  setPolygons: React.Dispatch<React.SetStateAction<Polygon[]>>;
  activePolygonIndex: number | undefined;
  snapPoints: boolean;
  setSnapPoints: React.Dispatch<React.SetStateAction<boolean>>;
};

export default function PolygonEditControls({
  polygons,
  setPolygons,
  activePolygonIndex,
  snapPoints,
  setSnapPoints,
}: PolygonEditControlsProps) {
  const undo = () => {
    if (activePolygonIndex === undefined || !polygons) {
@ -97,6 +103,25 @@ export default function PolygonEditControls({
        </TooltipTrigger>
        <TooltipContent>Reset</TooltipContent>
      </Tooltip>
      <Tooltip>
        <TooltipTrigger asChild>
          <Button
            variant={snapPoints ? "select" : "default"}
            className={cn("size-6 rounded-md p-1")}
            aria-label="Snap points"
            onClick={() => setSnapPoints((prev) => !prev)}
          >
            {snapPoints ? (
              <TbPolygon className="text-primary" />
            ) : (
              <TbPolygonOff className="text-secondary-foreground" />
            )}
          </Button>
        </TooltipTrigger>
        <TooltipContent>
          {snapPoints ? "Don't snap points" : "Snap points"}
        </TooltipContent>
      </Tooltip>
    </div>
  );
}

@ -41,6 +41,8 @@ type ZoneEditPaneProps = {
  onSave?: () => void;
  onCancel?: () => void;
  setActiveLine: React.Dispatch<React.SetStateAction<number | undefined>>;
  snapPoints: boolean;
  setSnapPoints: React.Dispatch<React.SetStateAction<boolean>>;
};

export default function ZoneEditPane({
@ -54,6 +56,8 @@ export default function ZoneEditPane({
  onSave,
  onCancel,
  setActiveLine,
  snapPoints,
  setSnapPoints,
}: ZoneEditPaneProps) {
  const { data: config, mutate: updateConfig } =
    useSWR<FrigateConfig>("config");
@ -483,6 +487,8 @@ export default function ZoneEditPane({
            polygons={polygons}
            setPolygons={setPolygons}
            activePolygonIndex={activePolygonIndex}
            snapPoints={snapPoints}
            setSnapPoints={setSnapPoints}
          />
        </div>
      )}

@ -83,9 +83,11 @@ function Exports() {
  const onHandleRename = useCallback(
    (id: string, update: string) => {
      axios
        .patch(`export/${id}/${encodeURIComponent(update)}`)
        .patch(`export/${id}/rename`, {
          name: update,
        })
        .then((response) => {
          if (response.status == 200) {
          if (response.status === 200) {
            setDeleteClip(undefined);
            mutate();
          }

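The rename request now sends the new name in a JSON body to export/<id>/rename instead of packing it into the URL path, which avoids encoding problems for names containing slashes or other reserved characters. The same call from a script might look like this (host, the /api prefix, and the export id are assumptions, not shown in the diff):

    import requests

    export_id = "abc123"  # placeholder export id
    resp = requests.patch(
        f"http://frigate.local:5000/api/export/{export_id}/rename",
        json={"name": "Driveway - Tuesday night"},
        timeout=5,
    )
    print(resp.status_code)  # the UI treats 200 as success, as in the hunk above
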
@ -38,6 +38,7 @@ import NotificationView from "@/views/settings/NotificationsSettingsView";
import SearchSettingsView from "@/views/settings/SearchSettingsView";
import UiSettingsView from "@/views/settings/UiSettingsView";
import { useSearchEffect } from "@/hooks/use-overlay-state";
import { useSearchParams } from "react-router-dom";

const allSettingsViews = [
  "UI settings",
@ -58,6 +59,8 @@ export default function Settings() {

  const { data: config } = useSWR<FrigateConfig>("config");

  const [searchParams] = useSearchParams();

  // available settings views

  const settingsViews = useMemo(() => {
@ -124,7 +127,8 @@ export default function Settings() {
    if (allSettingsViews.includes(page as SettingsType)) {
      setPage(page as SettingsType);
    }
    return true;
    // don't clear url params if we're creating a new object mask
    return !searchParams.has("object_mask");
  });

  useSearchEffect("camera", (camera: string) => {
@ -132,7 +136,8 @@ export default function Settings() {
    if (cameraNames.includes(camera)) {
      setSelectedCamera(camera);
    }
    return true;
    // don't clear url params if we're creating a new object mask
    return !searchParams.has("object_mask");
  });

  useEffect(() => {

@ -179,6 +179,7 @@ export interface CameraConfig {
  };
  review: {
    alerts: {
      enabled: boolean;
      required_zones: string[];
      labels: string[];
      retain: {
@ -187,6 +188,7 @@ export interface CameraConfig {
      };
    };
    detections: {
      enabled: boolean;
      required_zones: string[];
      labels: string[];
      retain: {

@ -1,4 +1,5 @@
import { Vector2d } from "konva/lib/types";
import { Polygon } from "@/types/canvas";

export const getAveragePoint = (points: number[]): Vector2d => {
  let totalX = 0;
@ -100,3 +101,72 @@ export const masksAreIdentical = (arr1: string[], arr2: string[]): boolean => {
  }
  return true;
};

export function snapPointToLines(
  point: number[],
  polygons: Polygon[],
  threshold: number,
): number[] | null {
  for (const polygon of polygons) {
    if (!polygon.isFinished) continue;

    for (let i = 0; i < polygon.points.length; i++) {
      const start = polygon.points[i];
      const end = polygon.points[(i + 1) % polygon.points.length];

      const snappedPoint = snapPointToLine(point, start, end, threshold);
      if (snappedPoint) {
        return snappedPoint;
      }
    }
  }

  return null;
}

function snapPointToLine(
  point: number[],
  lineStart: number[],
  lineEnd: number[],
  threshold: number,
): number[] | null {
  const [x, y] = point;
  const [x1, y1] = lineStart;
  const [x2, y2] = lineEnd;

  const A = x - x1;
  const B = y - y1;
  const C = x2 - x1;
  const D = y2 - y1;

  const dot = A * C + B * D;
  const lenSq = C * C + D * D;
  let param = -1;

  if (lenSq !== 0) {
    param = dot / lenSq;
  }

  let xx, yy;

  if (param < 0) {
    xx = x1;
    yy = y1;
  } else if (param > 1) {
    xx = x2;
    yy = y2;
  } else {
    xx = x1 + param * C;
    yy = y1 + param * D;
  }

  const dx = x - xx;
  const dy = y - yy;
  const distance = Math.sqrt(dx * dx + dy * dy);

  if (distance <= threshold) {
    return [xx, yy];
  }

  return null;
}

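snapPointToLine is the standard closest-point-on-a-segment projection. With p the dragged point and the edge written as a + t(b - a), the code computes, in LaTeX terms:

    t = \frac{(p - a) \cdot (b - a)}{\lVert b - a \rVert^{2}}, \qquad
    t^{*} = \min(\max(t, 0),\, 1), \qquad
    q = a + t^{*}(b - a)

and snaps to q only when \lVert p - q \rVert \le \text{threshold}. In the code, (A, B) is p - a, (C, D) is b - a, param is t (clamped by the if/else chain), and (xx, yy) is q; the degenerate lenSq === 0 case (a zero-length edge) leaves param at -1 so the segment's start point is used.
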
@ -14,7 +14,11 @@ import {
  TooltipTrigger,
} from "@/components/ui/tooltip";
import { usePersistence } from "@/hooks/use-persistence";
import { CameraConfig, FrigateConfig } from "@/types/frigateConfig";
import {
  AllGroupsStreamingSettings,
  CameraConfig,
  FrigateConfig,
} from "@/types/frigateConfig";
import { ReviewSegment } from "@/types/review";
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import {
@ -38,6 +42,7 @@ import { FaCompress, FaExpand } from "react-icons/fa";
import useCameraLiveMode from "@/hooks/use-camera-live-mode";
import { useResizeObserver } from "@/hooks/resize-observer";
import LiveContextMenu from "@/components/menu/LiveContextMenu";
import { useStreamingSettings } from "@/context/streaming-settings-provider";

type LiveDashboardViewProps = {
  cameras: CameraConfig[];
@ -135,8 +140,6 @@ export default function LiveDashboardView({

  // camera live views

  const [autoLiveView] = usePersistence("autoLiveView", true);

  const [{ height: containerHeight }] = useResizeObserver(containerRef);

  const hasScrollbar = useMemo(() => {
@ -198,6 +201,17 @@ export default function LiveDashboardView({
    supportsAudioOutputStates,
  } = useCameraLiveMode(cameras, windowVisible);

  const [globalAutoLive] = usePersistence("autoLiveView", true);

  const { allGroupsStreamingSettings, setAllGroupsStreamingSettings } =
    useStreamingSettings();

  const currentGroupStreamingSettings = useMemo(() => {
    if (cameraGroup && cameraGroup != "default" && allGroupsStreamingSettings) {
      return allGroupsStreamingSettings[cameraGroup];
    }
  }, [allGroupsStreamingSettings, cameraGroup]);

  const cameraRef = useCallback(
    (node: HTMLElement | null) => {
      if (!visibleCameraObserver.current) {
@ -245,6 +259,25 @@ export default function LiveDashboardView({
    }));
  };

  useEffect(() => {
    if (!allGroupsStreamingSettings) {
      return;
    }

    const initialAudioStates: AudioState = {};
    const initialVolumeStates: VolumeState = {};

    Object.entries(allGroupsStreamingSettings).forEach(([_, groupSettings]) => {
      Object.entries(groupSettings).forEach(([camera, cameraSettings]) => {
        initialAudioStates[camera] = cameraSettings.playAudio ?? false;
        initialVolumeStates[camera] = cameraSettings.volume ?? 1;
      });
    });

    setAudioStates(initialAudioStates);
    setVolumeStates(initialVolumeStates);
  }, [allGroupsStreamingSettings]);

  const toggleAudio = (cameraName: string): void => {
    setAudioStates((prev) => ({
      ...prev,
@ -252,12 +285,53 @@ export default function LiveDashboardView({
    }));
  };

  const onSaveMuting = useCallback(
    (playAudio: boolean) => {
      if (
        !cameraGroup ||
        !allGroupsStreamingSettings ||
        cameraGroup == "default"
      ) {
        return;
      }

      const existingGroupSettings =
        allGroupsStreamingSettings[cameraGroup] || {};

      const updatedSettings: AllGroupsStreamingSettings = {
        ...Object.fromEntries(
          Object.entries(allGroupsStreamingSettings || {}).filter(
            ([key]) => key !== cameraGroup,
          ),
        ),
        [cameraGroup]: {
          ...existingGroupSettings,
          ...Object.fromEntries(
            Object.entries(existingGroupSettings).map(
              ([cameraName, settings]) => [
                cameraName,
                {
                  ...settings,
                  playAudio: playAudio,
                },
              ],
            ),
          ),
        },
      };

      setAllGroupsStreamingSettings?.(updatedSettings);
    },
    [cameraGroup, allGroupsStreamingSettings, setAllGroupsStreamingSettings],
  );

  const muteAll = (): void => {
    const updatedStates: Record<string, boolean> = {};
    visibleCameras.forEach((cameraName) => {
      updatedStates[cameraName] = false;
    });
    setAudioStates(updatedStates);
    onSaveMuting(false);
  };

  const unmuteAll = (): void => {
@ -266,6 +340,7 @@ export default function LiveDashboardView({
      updatedStates[cameraName] = true;
    });
    setAudioStates(updatedStates);
    onSaveMuting(true);
  };

  return (
@ -392,19 +467,30 @@ export default function LiveDashboardView({
            } else {
              grow = "aspect-video";
            }
            const streamName =
              currentGroupStreamingSettings?.[camera.name]?.streamName ||
              Object.values(camera.live.streams)?.[0];
            const autoLive =
              currentGroupStreamingSettings?.[camera.name]?.streamType !==
              "no-streaming";
            const showStillWithoutActivity =
              currentGroupStreamingSettings?.[camera.name]?.streamType !==
              "continuous";
            const useWebGL =
              currentGroupStreamingSettings?.[camera.name]
                ?.compatibilityMode || false;
            return (
              <LiveContextMenu
                className={grow}
                key={camera.name}
                camera={camera.name}
                cameraGroup={cameraGroup}
                streamName={Object.values(camera.live.streams)?.[0]}
                streamName={streamName}
                preferredLiveMode={preferredLiveModes[camera.name] ?? "mse"}
                isRestreamed={isRestreamedStates[camera.name]}
                supportsAudio={
                  supportsAudioOutputStates[
                    Object.values(camera.live.streams)?.[0]
                  ]?.supportsAudio ?? false
                  supportsAudioOutputStates[streamName]?.supportsAudio ??
                  false
                }
                audioState={audioStates[camera.name]}
                toggleAudio={() => toggleAudio(camera.name)}
@ -431,11 +517,12 @@ export default function LiveDashboardView({
                  }
                  cameraConfig={camera}
                  preferredLiveMode={preferredLiveModes[camera.name] ?? "mse"}
                  autoLive={autoLiveView}
                  useWebGL={false}
                  autoLive={autoLive ?? globalAutoLive}
                  showStillWithoutActivity={showStillWithoutActivity ?? true}
                  useWebGL={useWebGL}
                  playInBackground={false}
                  showStats={statsStates[camera.name]}
                  streamName={Object.values(camera.live.streams)[0]}
                  streamName={streamName}
                  onClick={() => onSelectCamera(camera.name)}
                  onError={(e) => handleError(camera.name, e)}
                  onResetLiveMode={() => resetPreferredLiveMode(camera.name)}

@ -27,6 +27,9 @@ import { LuExternalLink } from "react-icons/lu";
import { capitalizeFirstLetter } from "@/utils/stringUtil";
import { MdCircle } from "react-icons/md";
import { cn } from "@/lib/utils";
import { Switch } from "@/components/ui/switch";
import { Label } from "@/components/ui/label";
import { useAlertsState, useDetectionsState } from "@/api/ws";

type CameraSettingsViewProps = {
  selectedCamera: string;
@ -105,6 +108,11 @@ export default function CameraSettingsView({
  const watchedAlertsZones = form.watch("alerts_zones");
  const watchedDetectionsZones = form.watch("detections_zones");

  const { payload: alertsState, send: sendAlerts } =
    useAlertsState(selectedCamera);
  const { payload: detectionsState, send: sendDetections } =
    useDetectionsState(selectedCamera);

  const handleCheckedChange = useCallback(
    (isChecked: boolean) => {
      if (!isChecked) {
@ -244,6 +252,47 @@ export default function CameraSettingsView({

          <Separator className="my-2 flex bg-secondary" />

          <Heading as="h4" className="my-2">
            Review
          </Heading>

          <div className="mb-5 mt-2 flex max-w-5xl flex-col gap-2 space-y-3 text-sm text-primary-variant">
            <div className="flex flex-row items-center">
              <Switch
                id="alerts-enabled"
                className="mr-3"
                checked={alertsState == "ON"}
                onCheckedChange={(isChecked) => {
                  sendAlerts(isChecked ? "ON" : "OFF");
                }}
              />
              <div className="space-y-0.5">
                <Label htmlFor="alerts-enabled">Alerts</Label>
              </div>
            </div>
            <div className="flex flex-col">
              <div className="flex flex-row items-center">
                <Switch
                  id="detections-enabled"
                  className="mr-3"
                  checked={detectionsState == "ON"}
                  onCheckedChange={(isChecked) => {
                    sendDetections(isChecked ? "ON" : "OFF");
                  }}
                />
                <div className="space-y-0.5">
                  <Label htmlFor="detections-enabled">Detections</Label>
                </div>
              </div>
              <div className="mt-3 text-sm text-muted-foreground">
                Enable/disable alerts and detections for this camera. When
                disabled, no new review items will be generated.
              </div>
            </div>
          </div>

          <Separator className="my-2 flex bg-secondary" />

          <Heading as="h4" className="my-2">
            Review Classification
          </Heading>

@ -37,6 +37,7 @@ import PolygonItem from "@/components/settings/PolygonItem";
import { Link } from "react-router-dom";
import { isDesktop } from "react-device-detect";
import { StatusBarMessagesContext } from "@/context/statusbar-provider";
import { useSearchEffect } from "@/hooks/use-overlay-state";

type MasksAndZoneViewProps = {
  selectedCamera: string;
@ -62,6 +63,7 @@ export default function MasksAndZonesView({
  const containerRef = useRef<HTMLDivElement | null>(null);
  const [editPane, setEditPane] = useState<PolygonType | undefined>(undefined);
  const [activeLine, setActiveLine] = useState<number | undefined>();
  const [snapPoints, setSnapPoints] = useState(false);

  const { addMessage } = useContext(StatusBarMessagesContext)!;

@ -142,7 +144,7 @@ export default function MasksAndZonesView({
    }
  }, [scaledHeight, aspectRatio]);

  const handleNewPolygon = (type: PolygonType) => {
  const handleNewPolygon = (type: PolygonType, coordinates?: number[][]) => {
    if (!cameraConfig) {
      return;
    }
@ -161,9 +163,9 @@ export default function MasksAndZonesView({
    setEditingPolygons([
      ...(allPolygons || []),
      {
        points: [],
        points: coordinates ?? [],
        distances: [],
        isFinished: false,
        isFinished: coordinates ? true : false,
        type,
        typeIndex: 9999,
        name: "",
@ -373,6 +375,48 @@ export default function MasksAndZonesView({
    }
  }, [selectedCamera]);

  useSearchEffect("object_mask", (coordinates: string) => {
    if (!scaledWidth || !scaledHeight || isLoading) {
      return false;
    }
    // convert box points string to points array
    const points = coordinates.split(",").map((p) => parseFloat(p));

    const [x1, y1, w, h] = points;

    // bottom center
    const centerX = x1 + w / 2;
    const bottomY = y1 + h;

    const centerXAbs = centerX * scaledWidth;
    const bottomYAbs = bottomY * scaledHeight;

    // padding and clamp
    const minPadding = 0.1 * w * scaledWidth;
    const maxPadding = 0.3 * w * scaledWidth;
    const padding = Math.min(
      Math.max(minPadding, 0.15 * w * scaledWidth),
      maxPadding,
    );

    const top = Math.max(0, bottomYAbs - padding);
    const bottom = Math.min(scaledHeight, bottomYAbs + padding);
    const left = Math.max(0, centerXAbs - padding);
    const right = Math.min(scaledWidth, centerXAbs + padding);

    const paddedBox = [
      [left, top],
      [right, top],
      [right, bottom],
      [left, bottom],
    ];

    setEditPane("object_mask");
    setActivePolygonIndex(undefined);
    handleNewPolygon("object_mask", paddedBox);
    return true;
  });

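The object_mask search param carries a normalized [x, y, w, h] box (written into the URL by the Create Object Mask context-menu item above). The effect converts it to a square mask centered on the box's bottom center, padded by 15% of the box width, nominally clamped between 10% and 30%, and kept inside the canvas. The same arithmetic as a standalone sketch (canvas size is a placeholder):

    def padded_box(x, y, w, h, canvas_w, canvas_h):
        """Padded mask polygon around a normalized box's bottom center."""
        center_x_abs = (x + w / 2) * canvas_w
        bottom_y_abs = (y + h) * canvas_h

        # 15% of the box width, clamped to the [10%, 30%] range
        padding = min(max(0.1 * w * canvas_w, 0.15 * w * canvas_w), 0.3 * w * canvas_w)

        top = max(0, bottom_y_abs - padding)
        bottom = min(canvas_h, bottom_y_abs + padding)
        left = max(0, center_x_abs - padding)
        right = min(canvas_w, center_x_abs + padding)
        return [[left, top], [right, top], [right, bottom], [left, bottom]]

    print(padded_box(0.4, 0.3, 0.2, 0.5, 1280, 720))

(As written, the clamp is a no-op, since 15% of the width always lies between the 10% and 30% bounds; the sketch mirrors the source arithmetic.)
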
  useEffect(() => {
    document.title = "Mask and Zone Editor - Frigate";
  }, []);
@ -399,6 +443,8 @@ export default function MasksAndZonesView({
              onCancel={handleCancel}
              onSave={handleSave}
              setActiveLine={setActiveLine}
              snapPoints={snapPoints}
              setSnapPoints={setSnapPoints}
            />
          )}
          {editPane == "motion_mask" && (
@ -412,6 +458,8 @@ export default function MasksAndZonesView({
              setIsLoading={setIsLoading}
              onCancel={handleCancel}
              onSave={handleSave}
              snapPoints={snapPoints}
              setSnapPoints={setSnapPoints}
            />
          )}
          {editPane == "object_mask" && (
@ -425,6 +473,8 @@ export default function MasksAndZonesView({
              setIsLoading={setIsLoading}
              onCancel={handleCancel}
              onSave={handleSave}
              snapPoints={snapPoints}
              setSnapPoints={setSnapPoints}
            />
          )}
          {editPane === undefined && (
@ -662,6 +712,7 @@ export default function MasksAndZonesView({
              hoveredPolygonIndex={hoveredPolygonIndex}
              selectedZoneMask={selectedZoneMask}
              activeLine={activeLine}
              snapPoints={true}
            />
          ) : (
            <Skeleton className="size-full" />

@ -484,7 +484,7 @@ export default function NotificationView({
                }
              }}
            >
              {`${registration != null ? "Unregister" : "Register"} for notifications on this device`}
              {`${registration != null ? "Unregister" : "Register"} this device`}
            </Button>
            {registration != null && registration.active && (
              <Button