Merge branch 'blakeblackshear:dev' into dev-docs-i18n

This commit is contained in:
GuoQing Liu 2025-05-06 15:50:16 +08:00 committed by GitHub
commit 726f31c0ad
391 changed files with 12342 additions and 5355 deletions

View File

@ -39,14 +39,14 @@ jobs:
STABLE_TAG=${BASE}:stable
PULL_TAG=${BASE}:${BUILD_TAG}
docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${VERSION_TAG}
for variant in standard-arm64 tensorrt tensorrt-jp5 tensorrt-jp6 rk h8l rocm; do
for variant in standard-arm64 tensorrt tensorrt-jp6 rk rocm; do
docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${VERSION_TAG}-${variant}
done
# stable tag
if [[ "${BUILD_TYPE}" == "stable" ]]; then
docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${STABLE_TAG}
for variant in standard-arm64 tensorrt tensorrt-jp5 tensorrt-jp6 rk h8l rocm; do
for variant in standard-arm64 tensorrt tensorrt-jp6 rk rocm; do
docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${STABLE_TAG}-${variant}
done
fi

View File

@ -37,9 +37,9 @@ opencv-python-headless == 4.11.0.*
opencv-contrib-python == 4.11.0.*
scipy == 1.14.*
# OpenVino & ONNX
openvino == 2024.4.*
onnxruntime-openvino == 1.20.* ; platform_machine == 'x86_64'
onnxruntime == 1.20.* ; platform_machine == 'aarch64'
openvino == 2025.0.*
onnxruntime-openvino == 1.21.0 ; platform_machine == 'x86_64'
onnxruntime == 1.21.0 ; platform_machine == 'aarch64'
# Embeddings
transformers == 4.45.*
# Generative AI

View File

@ -4,6 +4,11 @@
set -o errexit -o nounset -o pipefail
# opt out of openvino telemetry
if [ -e /usr/local/bin/opt_in_out ]; then
/usr/local/bin/opt_in_out --opt_out
fi
# Logs should be sent to stdout so that s6 can collect them
# Tell S6-Overlay not to restart this service

View File

@ -53,7 +53,7 @@ elif go2rtc_config["api"].get("origin") is None:
# Need to set default location for HA config
if go2rtc_config.get("hass") is None:
go2rtc_config["hass"] = {"config": "/config"}
go2rtc_config["hass"] = {"config": "/homeassistant"}
# we want to ensure that logs are easy to read
if go2rtc_config.get("log") is None:

View File

@ -26,7 +26,7 @@ COPY --from=rootfs / /
COPY docker/rockchip/COCO /COCO
COPY docker/rockchip/conv2rknn.py /opt/conv2rknn.py
ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/librknnrt.so /usr/lib/
ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.2/librknnrt.so /usr/lib/
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-7/ffmpeg /usr/lib/ffmpeg/6.0/bin/
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-7/ffprobe /usr/lib/ffmpeg/6.0/bin/

View File

@ -1,2 +1,2 @@
rknn-toolkit2 == 2.3.0
rknn-toolkit-lite2 == 2.3.0
rknn-toolkit2 == 2.3.2
rknn-toolkit-lite2 == 2.3.2

View File

@ -13,5 +13,5 @@ nvidia-cudnn-cu12 == 9.5.0.*; platform_machine == 'x86_64'
nvidia-cufft-cu11==10.*; platform_machine == 'x86_64'
nvidia-cufft-cu12==11.*; platform_machine == 'x86_64'
onnx==1.16.*; platform_machine == 'x86_64'
onnxruntime-gpu==1.20.*; platform_machine == 'x86_64'
onnxruntime-gpu==1.21.0; platform_machine == 'x86_64'
protobuf==3.20.3; platform_machine == 'x86_64'

View File

@ -77,7 +77,7 @@ Changing the secret will invalidate current tokens.
Frigate can be configured to leverage features of common upstream authentication proxies such as Authelia, Authentik, oauth2_proxy, or traefik-forward-auth.
If you are leveraging the authentication of an upstream proxy, you likely want to disable Frigate's authentication. Optionally, if communication between the reverse proxy and Frigate is over an untrusted network, you should set an `auth_secret` in the `proxy` config and configure the proxy to send the secret value as a header named `X-Proxy-Secret`. Assuming this is an untrusted network, you will also want to [configure a real TLS certificate](tls.md) to ensure the traffic can't simply be sniffed to steal the secret.
If you are leveraging the authentication of an upstream proxy, you likely want to disable Frigate's authentication as there is no correspondence between users in Frigate's database and users authenticated via the proxy. Optionally, if communication between the reverse proxy and Frigate is over an untrusted network, you should set an `auth_secret` in the `proxy` config and configure the proxy to send the secret value as a header named `X-Proxy-Secret`. Assuming this is an untrusted network, you will also want to [configure a real TLS certificate](tls.md) to ensure the traffic can't simply be sniffed to steal the secret.
Here is an example of how to disable Frigate's authentication and also ensure the requests come only from your known proxy.
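A minimal sketch of that setup, assuming the reference config layout (the secret value is a placeholder and must match what the proxy sends in the `X-Proxy-Secret` header):
```yaml
auth:
  # disable Frigate's internal authentication and trust the upstream proxy
  enabled: False

proxy:
  # must match the X-Proxy-Secret header value configured on the proxy
  auth_secret: "<long-random-string>"
```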
@ -109,6 +109,14 @@ proxy:
Frigate supports both `admin` and `viewer` roles (see below). When using port `8971`, Frigate validates these headers and subsequent requests use the headers `remote-user` and `remote-role` for authorization.
A default role can be provided. Any value in the mapped `role` header will override the default.
```yaml
proxy:
...
default_role: viewer
```
#### Port Considerations
**Authenticated Port (8971)**

View File

@ -15,6 +15,17 @@ Many cameras support encoding options which greatly affect the live view experie
:::
## H.265 Cameras via Safari
Some cameras support h265 with different formats, but Safari only supports the annexb format. When using h265 camera streams for recording with devices that use the Safari browser, the `apple_compatibility` option should be used.
```yaml
cameras:
h265_cam: # <------ Doesn't matter what the camera is called
ffmpeg:
apple_compatibility: true # <- Adds compatibility with MacOS and iPhone
```
## MJPEG Cameras
Note that MJPEG cameras require encoding the video into h264 for the record and restream roles. This will use significantly more CPU than if the cameras supported h264 feeds directly. It is recommended to use the restream role to create an h264 restream and then use that as the source for ffmpeg.
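A rough sketch of that restream approach, assuming a go2rtc stream named `mjpeg_cam` and a placeholder MJPEG source URL:
```yaml
go2rtc:
  streams:
    # re-encode the MJPEG feed to h264 via go2rtc's ffmpeg source
    mjpeg_cam: "ffmpeg:http://192.0.2.10/mjpeg#video=h264"

cameras:
  mjpeg_cam:
    ffmpeg:
      inputs:
        - path: rtsp://127.0.0.1:8554/mjpeg_cam
          input_args: preset-rtsp-restream
          roles:
            - detect
            - record
```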

View File

@ -3,7 +3,7 @@ id: face_recognition
title: Face Recognition
---
Face recognition identifies known individuals by matching detected faces with previously learned facial data. When a known person is recognized, their name will be added as a `sub_label`. This information is included in the UI, filters, as well as in notifications.
Face recognition identifies known individuals by matching detected faces with previously learned facial data. When a known `person` is recognized, their name will be added as a `sub_label`. This information is included in the UI, filters, as well as in notifications.
## Model Requirements
@ -13,6 +13,12 @@ When running a Frigate+ model (or any custom model that natively detects faces)
When running a default COCO model or another model that does not include `face` as a detectable label, face detection will run via CV2 using a lightweight DNN model that runs on the CPU. In this case, you should _not_ define `face` in your list of objects to track.
:::note
Frigate needs to first detect a `person` before it can detect and recognize a face.
:::
### Face Recognition
Frigate has support for two face recognition model types:
@ -22,11 +28,13 @@ Frigate has support for two face recognition model types:
In both cases, a lightweight face landmark detection model is also used to align faces before running recognition.
All of these features run locally on your system.
## Minimum System Requirements
The `small` model is optimized for efficiency and runs on the CPU; most CPUs should run the model efficiently.
The `large` model is optimized for accuracy, an integrated or discrete GPU is highly recommended.
The `large` model is optimized for accuracy; an integrated or discrete GPU is highly recommended. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.
## Configuration
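A minimal example of enabling the feature (the `model_size` value mirrors the default shown in the reference config):
```yaml
face_recognition:
  enabled: True
  model_size: small
```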
@ -39,7 +47,7 @@ face_recognition:
## Advanced Configuration
Fine-tune face recognition with these optional parameters:
Fine-tune face recognition with these optional parameters at the global level of your config. The only optional parameters that can be set at the camera level are `enabled` and `min_area`.
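For example, a camera-level override might look like the following sketch (the camera name is illustrative):
```yaml
face_recognition:
  enabled: True

cameras:
  back_yard:
    face_recognition:
      # only enabled and min_area are honored at the camera level
      enabled: False
```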
### Detection
@ -62,6 +70,13 @@ Fine-tune face recognition with these optional parameters:
- `blur_confidence_filter`: Enables a filter that calculates how blurry the face is and adjusts the confidence based on this.
- Default: `True`.
## Usage
1. **Enable face recognition** in your configuration file and restart Frigate.
2. **Upload your face** using the **Add Face** button's wizard in the Face Library section of the Frigate UI.
3. When Frigate detects and attempts to recognize a face, it will appear in the **Train** tab of the Face Library, along with its associated recognition confidence.
4. From the **Train** tab, you can **assign the face** to a new or existing person to improve recognition accuracy for the future.
## Creating a Robust Training Set
The number of images needed for a sufficient training set for face recognition varies depending on several factors:
@ -133,6 +148,7 @@ No, using another face recognition service will interfere with Frigate's built i
### Does face recognition run on the recording stream?
Face recognition does not run on the recording stream; this would be suboptimal for several reasons:
1. The latency of accessing the recordings means the notifications would not include the names of recognized people because recognition would not complete until after the notification had already been sent.
2. The embedding models used run on a set image size, so larger images will be scaled down to match this anyway.
3. Motion clarity is much more important than extra pixels; over-compression and motion blur are much more detrimental to results than resolution.

View File

@ -9,7 +9,7 @@ Some presets of FFmpeg args are provided by default to make the configuration ea
It is highly recommended to use hwaccel presets in the config. These presets not only replace the longer args, but they also give Frigate hints about what hardware is available and allow Frigate to make other optimizations using the GPU, such as when encoding the birdseye restream or when scaling a stream that has a size different from the native stream size.
See [the hwaccel docs](/configuration/hardware_acceleration.md) for more info on how to setup hwaccel for your GPU / iGPU.
See [the hwaccel docs](/configuration/hardware_acceleration_video.md) for more info on how to setup hwaccel for your GPU / iGPU.
| Preset | Usage | Other Notes |
| --------------------- | ------------------------------ | ----------------------------------------------------- |

View File

@ -0,0 +1,32 @@
---
id: hardware_acceleration_enrichments
title: Enrichments
---
# Enrichments
Some of Frigate's enrichments can use a discrete GPU for accelerated processing.
## Requirements
Object detection and enrichments (like Semantic Search, Face Recognition, and License Plate Recognition) are independent features. To use a GPU for object detection, see the [Object Detectors](/configuration/object_detectors.md) documentation. If you want to use your GPU for any supported enrichments, you must choose the appropriate Frigate Docker image for your GPU and configure the enrichment according to its specific documentation.
- **AMD**
- ROCm will automatically be detected and used for enrichments in the `-rocm` Frigate image.
- **Intel**
- OpenVINO will automatically be detected and used for enrichments in the default Frigate image.
- **Nvidia**
- Nvidia GPUs will automatically be detected and used for enrichments in the `-tensorrt` Frigate image.
- Jetson devices will automatically be detected and used for enrichments in the `-tensorrt-jp6` Frigate image.
Utilizing a GPU for enrichments does not require you to use the same GPU for object detection. For example, you can run the `tensorrt` Docker image for enrichments and still use other dedicated hardware for object detection.
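As a sketch, the image variant is selected where the container is defined, for example in a Docker Compose file (the tag shown is illustrative):
```yaml
services:
  frigate:
    # -tensorrt for Nvidia, -rocm for AMD, or the default image for Intel/OpenVINO
    image: ghcr.io/blakeblackshear/frigate:stable-tensorrt
```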
:::note
A Google Coral is a TPU (Tensor Processing Unit), not a dedicated GPU (Graphics Processing Unit) and therefore does not provide any kind of acceleration for Frigate's enrichments.
:::

View File

@ -1,15 +1,15 @@
---
id: hardware_acceleration
title: Hardware Acceleration
id: hardware_acceleration_video
title: Video Decoding
---
# Hardware Acceleration
# Video Decoding
It is highly recommended to use a GPU for hardware acceleration in Frigate. Some types of hardware acceleration are detected and used automatically, but you may need to update your configuration to enable hardware accelerated decoding in ffmpeg.
It is highly recommended to use a GPU for hardware accelerated video decoding in Frigate. Some types of hardware acceleration are detected and used automatically, but you may need to update your configuration to enable hardware accelerated decoding in ffmpeg.
Depending on your system, these parameters may not be compatible. More information on hardware accelerated decoding for ffmpeg can be found here: https://trac.ffmpeg.org/wiki/HWAccelIntro
# Officially Supported
# Object Detection
## Raspberry Pi 3/4
@ -69,12 +69,12 @@ Or map in all the `/dev/video*` devices.
**Recommended hwaccel Preset**
| CPU Generation | Intel Driver | Recommended Preset | Notes |
| -------------- | ------------ | ------------------ | ----------------------------------- |
| gen1 - gen7 | i965 | preset-vaapi | qsv is not supported |
| gen8 - gen12 | iHD | preset-vaapi | preset-intel-qsv-* can also be used |
| gen13+ | iHD / Xe | preset-intel-qsv-* | |
| Intel Arc GPU | iHD / Xe | preset-intel-qsv-* | |
| CPU Generation | Intel Driver | Recommended Preset | Notes |
| -------------- | ------------ | ------------------- | ------------------------------------ |
| gen1 - gen7 | i965 | preset-vaapi | qsv is not supported |
| gen8 - gen12 | iHD | preset-vaapi | preset-intel-qsv-\* can also be used |
| gen13+ | iHD / Xe | preset-intel-qsv-\* | |
| Intel Arc GPU | iHD / Xe | preset-intel-qsv-\* | |
:::

View File

@ -3,17 +3,18 @@ id: license_plate_recognition
title: License Plate Recognition (LPR)
---
Frigate can recognize license plates on vehicles and automatically add the detected characters to the `recognized_license_plate` field or a known name as a `sub_label` to tracked objects of type `car`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street.
Frigate can recognize license plates on vehicles and automatically add the detected characters to the `recognized_license_plate` field or a known name as a `sub_label` to tracked objects of type `car` or `motorcycle`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street.
LPR works best when the license plate is clearly visible to the camera. For moving vehicles, Frigate continuously refines the recognition process, keeping the most confident result. However, LPR does not run on stationary vehicles.
When a plate is recognized, the recognized name is:
When a plate is recognized, the details are:
- Added as a `sub_label` (if known) or the `recognized_license_plate` field (if unknown) to a tracked object.
- Viewable in the Review Item Details pane in Review (sub labels).
- Viewable in the Tracked Object Details pane in Explore (sub labels and recognized license plates).
- Filterable through the More Filters menu in Explore.
- Published via the `frigate/events` MQTT topic as a `sub_label` (known) or `recognized_license_plate` (unknown) for the `car` tracked object.
- Published via the `frigate/events` MQTT topic as a `sub_label` (known) or `recognized_license_plate` (unknown) for the `car` or `motorcycle` tracked object.
- Published via the `frigate/tracked_object_update` MQTT topic with `name` (if known) and `plate`.
## Model Requirements
@ -23,7 +24,7 @@ Users without a model that detects license plates can still run LPR. Frigate use
:::note
In the default mode, Frigate's LPR needs to first detect a `car` before it can recognize a license plate. If you're using a dedicated LPR camera and have a zoomed-in view where a `car` will not be detected, you can still run LPR, but the configuration parameters will differ from the default mode. See the [Dedicated LPR Cameras](#dedicated-lpr-cameras) section below.
In the default mode, Frigate's LPR needs to first detect a `car` or `motorcycle` before it can recognize a license plate. If you're using a dedicated LPR camera and have a zoomed-in view where a `car` or `motorcycle` will not be detected, you can still run LPR, but the configuration parameters will differ from the default mode. See the [Dedicated LPR Cameras](#dedicated-lpr-cameras) section below.
:::
@ -50,13 +51,13 @@ cameras:
enabled: False
```
For non-dedicated LPR cameras, ensure that your camera is configured to detect objects of type `car`, and that a car is actually being detected by Frigate. Otherwise, LPR will not run.
For non-dedicated LPR cameras, ensure that your camera is configured to detect objects of type `car` or `motorcycle`, and that a car or motorcycle is actually being detected by Frigate. Otherwise, LPR will not run.
Like the other real-time processors in Frigate, license plate recognition runs on the camera stream defined by the `detect` role in your config. To ensure optimal performance, select a suitable resolution for this stream in your camera's firmware that fits your specific scene and requirements.
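A sketch of such a configuration, assuming a hypothetical camera named `driveway`:
```yaml
lpr:
  enabled: True

cameras:
  driveway:
    objects:
      track:
        - person
        - car
        - motorcycle
```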
## Advanced Configuration
Fine-tune the LPR feature using these optional parameters at the global level of your config. The only optional parameters that should be set at the camera level are `enabled`, `min_area`, and `enhancement`.
Fine-tune the LPR feature using these optional parameters at the global level of your config. The only optional parameters that can be set at the camera level are `enabled`, `min_area`, and `enhancement`.
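For example, a camera-level override might look like the following sketch (camera name and values are illustrative):
```yaml
lpr:
  enabled: True

cameras:
  street_cam:
    lpr:
      # only enabled, min_area, and enhancement are honored at the camera level
      min_area: 1500
      enhancement: 3
```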
### Detection
@ -68,10 +69,10 @@ Fine-tune the LPR feature using these optional parameters at the global level of
- Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant plates.
- **`device`**: Device to use to run license plate recognition models.
- Default: `CPU`
- This can be `CPU` or `GPU`. For users without a model that detects license plates natively, using a GPU may increase performance of the models, especially the YOLOv9 license plate detector model.
- This can be `CPU` or `GPU`. For users without a model that detects license plates natively, using a GPU may increase performance of the models, especially the YOLOv9 license plate detector model. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.
- **`model_size`**: The size of the model used to detect text on plates.
- Default: `small`
- This can be `small` or `large`. The `large` model uses an enhanced text detector and is more accurate at finding text on plates but slower than the `small` model. For most users, the small model is recommended. For users in countries with multiple lines of text on plates, the large model is recommended. Note that using the large does not improve _text recognition_, but it may improve _text detection_.
- This can be `small` or `large`. The `large` model uses an enhanced text detector and is more accurate at finding text on plates but slower than the `small` model. For most users, the small model is recommended. For users in countries with multiple lines of text on plates, the large model is recommended. Note that using the large model does not improve _text recognition_, but it may improve _text detection_.
### Recognition
@ -86,7 +87,7 @@ Fine-tune the LPR feature using these optional parameters at the global level of
### Matching
- **`known_plates`**: List of strings or regular expressions that assign custom a `sub_label` to `car` objects when a recognized plate matches a known value.
- **`known_plates`**: List of strings or regular expressions that assign a custom `sub_label` to `car` and `motorcycle` objects when a recognized plate matches a known value.
- These labels appear in the UI, filters, and notifications.
- Unknown plates are still saved but are added to the `recognized_license_plate` field rather than the `sub_label`.
- **`match_distance`**: Allows for minor variations (missing/incorrect characters) when matching a detected plate to a known plate.
@ -184,7 +185,7 @@ cameras:
ffmpeg: ... # add your streams
detect:
enabled: True
fps: 5 # increase to 10 if vehicles move quickly across your frame. Higher than 15 is unnecessary and is not recommended.
fps: 5 # increase to 10 if vehicles move quickly across your frame. Higher than 10 is unnecessary and is not recommended.
min_initialized: 2
width: 1920
height: 1080
@ -216,7 +217,7 @@ With this setup:
- Snapshots will have license plate bounding boxes on them.
- The `frigate/events` MQTT topic will publish tracked object updates.
- Debug view will display `license_plate` bounding boxes.
- If you are using a Frigate+ model and want to submit images from your dedicated LPR camera for model training and fine-tuning, annotate both the `car` and the `license_plate` in the snapshots on the Frigate+ website, even if the car is barely visible.
- If you are using a Frigate+ model and want to submit images from your dedicated LPR camera for model training and fine-tuning, annotate both the `car` / `motorcycle` and the `license_plate` in the snapshots on the Frigate+ website, even if the car is barely visible.
### Using the Secondary LPR Pipeline (Without Frigate+)
@ -228,7 +229,7 @@ An example configuration for a dedicated LPR camera using the secondary pipeline
# LPR global configuration
lpr:
enabled: True
device: CPU # can also be GPU if available
device: CPU # can also be GPU if available and correct Docker image is used
detection_threshold: 0.7 # change if necessary
# Dedicated LPR camera configuration
@ -310,9 +311,9 @@ Recognized plates will show as object labels in the debug view and will appear i
If you are still having issues detecting plates, start with a basic configuration and see the debugging tips below.
### Can I run LPR without detecting `car` objects?
### Can I run LPR without detecting `car` or `motorcycle` objects?
In normal LPR mode, Frigate requires a `car` to be detected first before recognizing a license plate. If you have a dedicated LPR camera, you can change the camera `type` to `"lpr"` to use the Dedicated LPR Camera algorithm. This comes with important caveats, though. See the [Dedicated LPR Cameras](#dedicated-lpr-cameras) section above.
In normal LPR mode, Frigate requires a `car` or `motorcycle` to be detected first before recognizing a license plate. If you have a dedicated LPR camera, you can change the camera `type` to `"lpr"` to use the Dedicated LPR Camera algorithm. This comes with important caveats, though. See the [Dedicated LPR Cameras](#dedicated-lpr-cameras) section above.
### How can I improve detection accuracy?
@ -335,8 +336,8 @@ Use `match_distance` to allow small character mismatches. Alternatively, define
### How do I debug LPR issues?
- View MQTT messages for `frigate/events` to verify detected plates.
- If you are using a Frigate+ model or a model that detects license plates, watch the debug view (Settings --> Debug) to ensure that `license_plate` is being detected with a `car`.
- Watch the debug view to see plates recognized in real-time. For non-dedicated LPR cameras, the `car` label will change to the recognized plate when LPR is enabled and working.
- If you are using a Frigate+ model or a model that detects license plates, watch the debug view (Settings --> Debug) to ensure that `license_plate` is being detected with a `car` or `motorcycle`.
- Watch the debug view to see plates recognized in real-time. For non-dedicated LPR cameras, the `car` or `motorcycle` label will change to the recognized plate when LPR is enabled and working.
- Adjust `detection_threshold` and `recognition_threshold` settings per the suggestions [above](#advanced-configuration).
- Enable `debug_save_plates` to save images of detected text on plates to the clips directory (`/media/frigate/clips/lpr`). Ensure these images are readable and the text is clear.
- Enable debug logs for LPR by adding `frigate.data_processing.common.license_plate: debug` to your `logger` configuration. These logs are _very_ verbose, so only enable this when necessary.
@ -356,4 +357,12 @@ LPR's performance impact depends on your hardware. Ensure you have at least 4GB
The YOLOv9 license plate detector model will run (and the metric will appear) if you've enabled LPR but haven't defined `license_plate` as an object to track, either at the global or camera level.
If you are detecting `car` on cameras where you don't want to run LPR, make sure you disable LPR it at the camera level. And if you do want to run LPR on those cameras, make sure you define `license_plate` as an object to track.
If you are detecting `car` or `motorcycle` on cameras where you don't want to run LPR, make sure you disable LPR at the camera level. And if you do want to run LPR on those cameras, make sure you define `license_plate` as an object to track.
### It looks like Frigate picked up my camera's timestamp as the license plate. How can I prevent this?
This could happen if cars or motorcycles travel close to your camera's timestamp. You could either move the timestamp through your camera's firmware, or apply a mask to it in Frigate.
If you are using a model that natively detects `license_plate`, add an _object mask_ of type `license_plate` and a _motion mask_ over your timestamp.
If you are using dedicated LPR camera mode, only a _motion mask_ over your timestamp is required.
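A sketch of both approaches, assuming a hypothetical camera named `driveway` and placeholder mask coordinates covering the timestamp overlay:
```yaml
cameras:
  driveway:
    motion:
      # motion mask over the timestamp area (coordinates are placeholders)
      mask: 0,0,0.25,0,0.25,0.05,0,0.05
    objects:
      filters:
        license_plate:
          # object mask, only needed when the model natively detects license_plate
          mask: 0,0,0.25,0,0.25,0.05,0,0.05
```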

View File

@ -152,7 +152,7 @@ Use this configuration for YOLO-based models. When no custom model path or URL i
```yaml
detectors:
hailo8l:
hailo:
type: hailo8l
device: PCIe
@ -185,7 +185,7 @@ For SSD-based models, provide either a model path or URL to your compiled SSD mo
```yaml
detectors:
hailo8l:
hailo:
type: hailo8l
device: PCIe
@ -209,7 +209,7 @@ The Hailo detector supports all YOLO models compiled for Hailo hardware that inc
```yaml
detectors:
hailo8l:
hailo:
type: hailo8l
device: PCIe
@ -484,7 +484,7 @@ frigate:
### Configuration Parameters
The TensorRT detector can be selected by specifying `tensorrt` as the model type. The GPU will need to be passed through to the docker container using the same methods described in the [Hardware Acceleration](hardware_acceleration.md#nvidia-gpus) section. If you pass through multiple GPUs, you can select which GPU is used for a detector with the `device` configuration parameter. The `device` parameter is an integer value of the GPU index, as shown by `nvidia-smi` within the container.
The TensorRT detector can be selected by specifying `tensorrt` as the model type. The GPU will need to be passed through to the docker container using the same methods described in the [Hardware Acceleration](hardware_acceleration_video.md#nvidia-gpus) section. If you pass through multiple GPUs, you can select which GPU is used for a detector with the `device` configuration parameter. The `device` parameter is an integer value of the GPU index, as shown by `nvidia-smi` within the container.
The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. These model path and dimensions used will depend on which model you have generated.
@ -610,7 +610,7 @@ If the correct build is used for your GPU then the GPU will be detected and used
- **Nvidia**
- Nvidia GPUs will automatically be detected and used with the ONNX detector in the `-tensorrt` Frigate image.
- Jetson devices will automatically be detected and used with the ONNX detector in the `-tensorrt-jp(4/5)` Frigate image.
- Jetson devices will automatically be detected and used with the ONNX detector in the `-tensorrt-jp6` Frigate image.
:::
@ -659,7 +659,7 @@ YOLOv3, YOLOv4, YOLOv7, and [YOLOv9](https://github.com/WongKinYiu/yolov9) model
:::tip
The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv9 models, but may support other YOLO model architectures as well. See [the models section](#downloading-yolo-models) for more information on downloading YOLO models for use in Frigate.
The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv9 models, but may support other YOLO model architectures as well. See [the models section](#downloading-yolo-models) for more information on downloading YOLO models for use in Frigate.
:::
@ -811,7 +811,23 @@ Hardware accelerated object detection is supported on the following SoCs:
- RK3576
- RK3588
This implementation uses the [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.3.0. Currently, only [Yolo-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) is supported as object detection model.
This implementation uses [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.3.2.
:::tip
When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming NPU resources are available. An example configuration would be:
```yaml
detectors:
rknn_0:
type: rknn
num_cores: 0
rknn_1:
type: rknn
num_cores: 0
```
:::
### Prerequisites
@ -844,14 +860,14 @@ detectors: # required
The inference time was determined on a rk3588 with 3 NPU cores.
| Model | Size in mb | Inference time in ms |
| ------------------- | ---------- | -------------------- |
| deci-fp16-yolonas_s | 24 | 25 |
| deci-fp16-yolonas_m | 62 | 35 |
| deci-fp16-yolonas_l | 81 | 45 |
| yolov9_tiny | 8 | 35 |
| yolox_nano | 3 | 16 |
| yolox_tiny | 6 | 20 |
| Model | Size in mb | Inference time in ms |
| --------------------- | ---------- | -------------------- |
| deci-fp16-yolonas_s | 24 | 25 |
| deci-fp16-yolonas_m | 62 | 35 |
| deci-fp16-yolonas_l | 81 | 45 |
| frigate-fp16-yolov9-t | 6 | 35 |
| rock-i8-yolox_nano | 3 | 14 |
| rock-i8_yolox_tiny | 6 | 18 |
- All models are automatically downloaded and stored in the folder `config/model_cache/rknn_cache`. After upgrading Frigate, you should remove older models to free up space.
- You can also provide your own `.rknn` model. You should not save your own models in the `rknn_cache` folder; store them directly in the `model_cache` folder or another subfolder. To convert a model to `.rknn` format, see the `rknn-toolkit2` documentation (requires an x86 machine). Note that there is only post-processing for the supported models.
@ -887,10 +903,13 @@ The pre-trained YOLO-NAS weights from DeciAI are subject to their license and ca
model: # required
# name of model (will be automatically downloaded) or path to your own .rknn model file
# possible values are:
# - yolov9-t
# - yolov9-s
# - frigate-fp16-yolov9-t
# - frigate-fp16-yolov9-s
# - frigate-fp16-yolov9-m
# - frigate-fp16-yolov9-c
# - frigate-fp16-yolov9-e
# your yolo_model.rknn
path: /config/model_cache/rknn_cache/yolov9-t.rknn
path: frigate-fp16-yolov9-t
model_type: yolo-generic
width: 320
height: 320
@ -905,10 +924,12 @@ model: # required
model: # required
# name of model (will be automatically downloaded) or path to your own .rknn model file
# possible values are:
# - yolox_nano
# - yolox_tiny
# - rock-i8-yolox_nano
# - rock-i8-yolox_tiny
# - rock-fp16-yolox_nano
# - rock-fp16-yolox_tiny
# your yolox_model.rknn
path: yolox_tiny
path: rock-i8-yolox_nano
model_type: yolox
width: 416
height: 416
@ -948,7 +969,7 @@ Explanation of the parameters:
- `soc`: the SoC this model was built for (e.g. "rk3588")
- `tk_version`: Version of `rknn-toolkit2` (e.g. "2.3.0")
- **example**: Specifying `output_name = "frigate-{quant}-{input_basename}-{soc}-v{tk_version}"` could result in a model called `frigate-i8-my_model-rk3588-v2.3.0.rknn`.
- `config`: Configuration passed to `rknn-toolkit2` for model conversion. For an explanation of all available parameters have a look at section "2.2. Model configuration" of [this manual](https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/03_Rockchip_RKNPU_API_Reference_RKNN_Toolkit2_V2.3.0_EN.pdf).
- `config`: Configuration passed to `rknn-toolkit2` for model conversion. For an explanation of all available parameters have a look at section "2.2. Model configuration" of [this manual](https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.2/03_Rockchip_RKNPU_API_Reference_RKNN_Toolkit2_V2.3.2_EN.pdf).
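An illustrative sketch only; the key names follow the parameters described above, while `mean_values`/`std_values` are standard `rknn-toolkit2` config options with placeholder values, so check the conversion manual for the exact schema:
```yaml
# conversion settings passed to rknn-toolkit2 (values are placeholders)
soc: ["rk3588"]
output_name: "frigate-{quant}-{input_basename}-{soc}-v{tk_version}"
config:
  mean_values: [[0, 0, 0]]
  std_values: [[255, 255, 255]]
```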
# Models

View File

@ -174,6 +174,10 @@ To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (whe
:::
## Apple Compatibility with H.265 Streams
Apple devices running the Safari browser may fail to play back h.265 recordings. The [apple compatibility option](../configuration/camera_specific.md#h265-cameras-via-safari) should be used to ensure seamless playback on Apple devices.
## Syncing Recordings With Disk
In some cases the recording files may be deleted but Frigate will not know this has happened. Recordings sync can be enabled, which tells Frigate to check the file system and delete any database entries for files that don't exist.
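A minimal sketch of enabling it:
```yaml
record:
  sync_recordings: True
```
This check walks the recordings on disk, so it can be I/O heavy on large recording directories and is typically only enabled when needed.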

View File

@ -78,16 +78,19 @@ proxy:
# Optional: Mapping for headers from upstream proxies. Only used if Frigate's auth
# is disabled.
# NOTE: Many authentication proxies pass a header downstream with the authenticated
# user name. Not all values are supported. It must be a whitelisted header.
# user name and role. Not all values are supported. It must be a whitelisted header.
# See the docs for more info.
header_map:
user: x-forwarded-user
role: x-forwarded-role
# Optional: Url for logging out a user. This sets the location of the logout url in
# the UI.
logout_url: /api/logout
# Optional: Auth secret that is checked against the X-Proxy-Secret header sent from
# the proxy. If not set, all requests are trusted regardless of origin.
auth_secret: None
# Optional: The default role to use for proxy auth. Must be "admin" or "viewer"
default_role: viewer
# Optional: Authentication configuration
auth:
@ -543,9 +546,9 @@ semantic_search:
model_size: "small"
# Optional: Configuration for face recognition capability
# NOTE: Can (enabled, min_area) be overridden at the camera level
# NOTE: enabled, min_area can be overridden at the camera level
face_recognition:
# Optional: Enable semantic search (default: shown below)
# Optional: Enable face recognition (default: shown below)
enabled: False
# Optional: Minimum face distance score required to mark as a potential match (default: shown below)
unknown_score: 0.8
@ -560,6 +563,8 @@ face_recognition:
save_attempts: 100
# Optional: Apply a blur quality filter to adjust confidence based on the blur level of the image (default: shown below)
blur_confidence_filter: True
# Optional: Set the model size used for face recognition. (default: shown below)
model_size: small
# Optional: Configuration for license plate recognition capability
# NOTE: enabled, min_area, and enhancement can be overridden at the camera level

View File

@ -90,19 +90,7 @@ semantic_search:
If the correct build is used for your GPU and the `large` model is configured, then the GPU will be detected and used automatically.
**NOTE:** Object detection and Semantic Search are independent features. If you want to use your GPU with Semantic Search, you must choose the appropriate Frigate Docker image for your GPU.
- **AMD**
- ROCm will automatically be detected and used for Semantic Search in the `-rocm` Frigate image.
- **Intel**
- OpenVINO will automatically be detected and used for Semantic Search in the default Frigate image.
- **Nvidia**
- Nvidia GPUs will automatically be detected and used for Semantic Search in the `-tensorrt` Frigate image.
- Jetson devices will automatically be detected and used for Semantic Search in the `-tensorrt-jp(4/5)` Frigate image.
See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.
:::

View File

@ -84,7 +84,13 @@ Only car objects can trigger the `front_yard_street` zone and only person can tr
### Zone Loitering
Sometimes objects are expected to be passing through a zone, but an object loitering in an area is unexpected. Zones can be configured to have a minimum loitering time before the object will be considered in the zone.
Sometimes objects are expected to be passing through a zone, but an object loitering in an area is unexpected. Zones can be configured to have a minimum loitering time after which the object will be considered in the zone.
:::note
When using loitering zones, a review item will remain active until the object leaves. Loitering zones are only meant to be used in areas where loitering is not expected behavior.
:::
```yaml
cameras:

View File

@ -91,4 +91,4 @@ The `CODEOWNERS` file should be updated to include the `docker/board` along with
# Docs
At a minimum the `installation`, `object_detectors`, `hardware_acceleration`, and `ffmpeg-presets` docs should be updated (if applicable) to reflect the configuration of this community board.
At a minimum the `installation`, `object_detectors`, `hardware_acceleration_video`, and `ffmpeg-presets` docs should be updated (if applicable) to reflect the configuration of this community board.

View File

@ -38,6 +38,7 @@ Frigate supports multiple different detectors that work on different types of ha
**Most Hardware**
- [Hailo](#hailo-8): The Hailo8 and Hailo8L AI Acceleration module is available in m.2 format with a HAT for RPi devices offering a wide range of compatibility with devices.
- [Supports many model architectures](../../configuration/object_detectors#configuration)
- Runs best with tiny or small size models
@ -73,10 +74,10 @@ Frigate supports multiple different detectors that work on different types of ha
### Hailo-8
Frigate supports both the Hailo-8 and Hailo-8L AI Acceleration Modules on compatible hardware platforms, including the Raspberry Pi 5 with the PCIe hat from the AI kit. The Hailo detector integration in Frigate automatically identifies your hardware type and selects the appropriate default model when a custom model isn't provided.
**Default Model Configuration:**
- **Hailo-8L:** Default model is **YOLOv6n**.
- **Hailo-8:** Default model is **YOLOv6n**.
@ -90,6 +91,7 @@ In real-world deployments, even with multiple cameras running concurrently, Frig
### Google Coral TPU
Frigate supports both the USB and M.2 versions of the Google Coral.
- The USB version is compatible with the widest variety of hardware and does not require a driver on the host machine. However, it does lack the automatic throttling features of the other versions.
- The PCIe and M.2 versions require installation of a driver on the host. Follow the instructions for your version from https://coral.ai
@ -107,20 +109,17 @@ More information is available [in the detector docs](/configuration/object_detec
Inference speeds vary greatly depending on the CPU or GPU used; some known examples of GPU inference times are below:
| Name | MobileNetV2 Inference Time | YOLO-NAS Inference Time | RF-DETR Inference Time | Notes |
| -------------------- | -------------------------- | ------------------------- | ------------------------- | -------------------------------------- |
| Intel i3 6100T | 15 - 35 ms | | | Can only run one detector instance |
| Intel i5 6500 | ~ 15 ms | | | |
| Intel i5 7200u | 15 - 25 ms | | | |
| Intel i5 7500 | ~ 15 ms | | | |
| Intel i3 8100 | ~ 15 ms | | | |
| Intel i5 1135G7 | 10 - 15 ms | | | |
| Intel i3 12000 | | 320: ~ 19 ms 640: ~ 54 ms | | |
| Intel i5 12600K | ~ 15 ms | 320: ~ 20 ms 640: ~ 46 ms | | |
| Intel i7 12650H | ~ 15 ms | 320: ~ 20 ms 640: ~ 42 ms | 336: 50 ms | |
| Intel N100 | ~ 15 ms | 320: ~ 20 ms | | |
| Intel Arc A380 | ~ 6 ms | 320: ~ 10 ms 640: ~ 22 ms | 336: 20 ms 448: 27 ms | |
| Intel Arc A750 | ~ 4 ms | 320: ~ 8 ms | | |
| Name | MobileNetV2 Inference Time | YOLO-NAS Inference Time | RF-DETR Inference Time | Notes |
| -------------- | -------------------------- | ------------------------- | ---------------------- | ---------------------------------- |
| Intel HD 530 | 15 - 35 ms | | | Can only run one detector instance |
| Intel HD 620 | 15 - 25 ms | 320: ~ 35 ms | | |
| Intel HD 630 | ~ 15 ms | 320: ~ 30 ms | | |
| Intel UHD 730 | ~ 10 ms | 320: ~ 19 ms 640: ~ 54 ms | | |
| Intel UHD 770 | ~ 15 ms | 320: ~ 20 ms 640: ~ 46 ms | | |
| Intel N100 | ~ 15 ms | 320: ~ 20 ms | | |
| Intel Iris XE | ~ 10 ms | 320: ~ 18 ms 640: ~ 50 ms | | |
| Intel Arc A380 | ~ 6 ms | 320: ~ 10 ms 640: ~ 22 ms | 336: 20 ms 448: 27 ms | |
| Intel Arc A750 | ~ 4 ms | 320: ~ 8 ms | | |
### TensorRT - Nvidia GPU
@ -144,15 +143,15 @@ Inference speeds will vary greatly depending on the GPU and the model used.
With the [rocm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many discrete AMD GPUs.
| Name | YOLOv9 Inference Time | YOLO-NAS Inference Time |
| --------------- | --------------------- | ------------------------- |
| AMD 780M | ~ 14 ms | 320: ~ 30 ms 640: ~ 60 ms |
| Name | YOLOv9 Inference Time | YOLO-NAS Inference Time |
| -------- | --------------------- | ------------------------- |
| AMD 780M | ~ 14 ms | 320: ~ 30 ms 640: ~ 60 ms |
## Community Supported Detectors
### Nvidia Jetson
Frigate supports all Jetson boards, from the inexpensive Jetson Nano to the powerful Jetson Orin AGX. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector).
Frigate supports all Jetson boards, from the inexpensive Jetson Nano to the powerful Jetson Orin AGX. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration_video#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector).
Inference speed will vary depending on the YOLO model, Jetson platform, and Jetson nvpmodel (GPU/DLA/EMC clock speed). It is typically 20-40 ms for most models. The DLA is more efficient than the GPU, but not faster, so using the DLA will reduce power consumption but will slightly increase inference time.
@ -166,11 +165,10 @@ Frigate supports hardware video processing on all Rockchip boards. However, hard
- RK3576
- RK3588
| Name | YOLOv9 Inference Time | YOLO-NAS Inference Time | YOLOx Inference Time |
| --------------- | --------------------- | --------------------------- | ------------------------- |
| rk3588 3 cores | ~ 35 ms | small: ~ 20 ms med: ~ 30 ms | nano: 18 ms tiny: 20 ms |
| rk3566 1 core | | small: ~ 96 ms | |
| Name | YOLOv9 Inference Time | YOLO-NAS Inference Time | YOLOx Inference Time |
| -------------- | --------------------- | --------------------------- | ----------------------- |
| rk3588 3 cores | tiny: ~ 35 ms | small: ~ 20 ms med: ~ 30 ms | nano: 14 ms tiny: 18 ms |
| rk3566 1 core | | small: ~ 96 ms | |
The inference time of a rk3588 with all 3 cores enabled is typically 25-30 ms for yolo-nas s.

View File

@ -183,7 +183,7 @@ or add these options to your `docker run` command:
#### Configuration
Next, you should configure [hardware object detection](/configuration/object_detectors#rockchip-platform) and [hardware video processing](/configuration/hardware_acceleration#rockchip-platform).
Next, you should configure [hardware object detection](/configuration/object_detectors#rockchip-platform) and [hardware video processing](/configuration/hardware_acceleration_video#rockchip-platform).
## Docker
@ -316,7 +316,8 @@ If you choose to run Frigate via LXC in Proxmox the setup can be complex so be p
:::
Suggestions include:
Suggestions include:
- For Intel-based hardware acceleration, to allow access to the `/dev/dri/renderD128` device with major number 226 and minor number 128, add the following lines to the `/etc/pve/lxc/<id>.conf` LXC configuration:
- `lxc.cgroup2.devices.allow: c 226:128 rwm`
- `lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file`
@ -407,7 +408,7 @@ mkdir -p /share/share_vol2/frigate/media
# Also replace the time zone value for 'TZ' in the sample command.
# Example command will create a docker container that uses at most 2 CPUs and 4G RAM.
# You may need to add "--env=LIBVA_DRIVER_NAME=i965 \" to the following docker run command if you
# have certain CPU (e.g., J4125). See https://docs.frigate.video/configuration/hardware_acceleration.
# have certain CPU (e.g., J4125). See https://docs.frigate.video/configuration/hardware_acceleration_video.
docker run \
--name=frigate \
--shm-size=256m \

View File

@ -162,7 +162,7 @@ FFmpeg arguments for other types of cameras can be found [here](../configuration
### Step 3: Configure hardware acceleration (recommended)
Now that you have a working camera configuration, you want to setup hardware acceleration to minimize the CPU required to decode your video streams. See the [hardware acceleration](../configuration/hardware_acceleration.md) config reference for examples applicable to your hardware.
Now that you have a working camera configuration, you want to setup hardware acceleration to minimize the CPU required to decode your video streams. See the [hardware acceleration](../configuration/hardware_acceleration_video.md) config reference for examples applicable to your hardware.
Here is an example configuration with hardware acceleration configured to work with most Intel processors with an integrated GPU using the [preset](../configuration/ffmpeg_presets.md):
@ -303,6 +303,7 @@ By default, Frigate will retain video of all tracked objects for 10 days. The fu
### Step 7: Complete config
At this point you have a complete config with basic functionality.
- View [common configuration examples](../configuration/index.md#common-configuration-examples) for a list of common configuration examples.
- View [full config reference](../configuration/reference.md) for a complete list of configuration options.

View File

@ -104,7 +104,9 @@ Message published for each changed tracked object. The first message is publishe
### `frigate/tracked_object_update`
Message published for updates to tracked object metadata, for example when GenAI runs and returns a tracked object description.
Message published for updates to tracked object metadata, for example:
#### Generative AI Description Update
```json
{
@ -114,6 +116,33 @@ Message published for updates to tracked object metadata, for example when GenAI
}
```
#### Face Recognition Update
```json
{
"type": "face",
"id": "1607123955.475377-mxklsc",
"name": "John",
"score": 0.95,
"camera": "front_door_cam",
"timestamp": 1607123958.748393,
}
```
#### License Plate Recognition Update
```json
{
"type": "lpr",
"id": "1607123955.475377-mxklsc",
"name": "John's Car",
"plate": "123ABC",
"score": 0.95,
"camera": "driveway_cam",
"timestamp": 1607123958.748393,
}
```
### `frigate/reviews`
Message published for each changed review item. The first message is published when the `detection` or `alert` is initiated. When additional objects are detected or when a zone change occurs, it will publish an `update` message with the same id. When the review activity has ended, a final `end` message is published.

View File

@ -34,7 +34,7 @@ Frigate generally [recommends cameras with configurable sub streams](/frigate/ha
To do this efficiently the following setup is required:
1. A GPU or iGPU must be available to do the scaling.
2. [ffmpeg presets for hwaccel](/configuration/hardware_acceleration.md) must be used
2. [ffmpeg presets for hwaccel](/configuration/hardware_acceleration_video.md) must be used
3. Set the desired detection resolution for `detect -> width` and `detect -> height`.
When this is done correctly, the GPU will do the decoding and scaling which will result in a small increase in CPU usage but with better results.
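A sketch of that setup, assuming an Intel iGPU and a placeholder main-stream URL:
```yaml
cameras:
  front:
    ffmpeg:
      # pick the hwaccel preset that matches your GPU
      hwaccel_args: preset-vaapi
      inputs:
        - path: rtsp://192.0.2.5:554/main
          roles:
            - detect
            - record
    detect:
      # the GPU scales the main stream down to this detection resolution
      width: 1280
      height: 720
```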

docs/package-lock.json (generated, 7818 lines changed): diff suppressed because it is too large.

View File

@ -17,10 +17,10 @@
"write-heading-ids": "docusaurus write-heading-ids"
},
"dependencies": {
"@docusaurus/core": "^3.6.3",
"@docusaurus/preset-classic": "^3.6.3",
"@docusaurus/theme-mermaid": "^3.6.3",
"@docusaurus/core": "^3.7.0",
"@docusaurus/plugin-content-docs": "^3.6.3",
"@docusaurus/preset-classic": "^3.7.0",
"@docusaurus/theme-mermaid": "^3.6.3",
"@mdx-js/react": "^3.1.0",
"clsx": "^2.1.1",
"docusaurus-plugin-openapi-docs": "^4.3.1",

View File

@ -59,10 +59,13 @@ const sidebars: SidebarsConfig = {
"configuration/objects",
"configuration/stationary_objects",
],
"Hardware Acceleration": [
"configuration/hardware_acceleration_video",
"configuration/hardware_acceleration_enrichments",
],
"Extra Configuration": [
"configuration/authentication",
"configuration/notifications",
"configuration/hardware_acceleration",
"configuration/ffmpeg_presets",
"configuration/pwa",
"configuration/tls",

View File

@ -74,7 +74,7 @@ def go2rtc_streams():
)
stream_data = r.json()
for data in stream_data.values():
for producer in data.get("producers", []):
for producer in data.get("producers") or []:
producer["url"] = clean_camera_user_pass(producer.get("url", ""))
return JSONResponse(content=stream_data)

View File

@ -261,14 +261,14 @@ def auth(request: Request):
role_header = proxy_config.header_map.role
role = (
request.headers.get(role_header, default="viewer")
request.headers.get(role_header, default=proxy_config.default_role)
if role_header
else "viewer"
else proxy_config.default_role
)
# if comma-separated with "admin", use "admin", else "viewer"
# if comma-separated with "admin", use "admin", else use default role
success_response.headers["remote-role"] = (
"admin" if role and "admin" in role else "viewer"
"admin" if role and "admin" in role else proxy_config.default_role
)
return success_response

View File

@ -14,6 +14,7 @@ from peewee import DoesNotExist
from playhouse.shortcuts import model_to_dict
from frigate.api.auth import require_role
from frigate.api.defs.request.classification_body import RenameFaceBody
from frigate.api.defs.tags import Tags
from frigate.config.camera import DetectConfig
from frigate.const import FACE_DIR
@ -260,6 +261,35 @@ def deregister_faces(request: Request, name: str, body: dict = None):
)
@router.put("/faces/{old_name}/rename", dependencies=[Depends(require_role(["admin"]))])
def rename_face(request: Request, old_name: str, body: RenameFaceBody):
if not request.app.frigate_config.face_recognition.enabled:
return JSONResponse(
status_code=400,
content={"message": "Face recognition is not enabled.", "success": False},
)
context: EmbeddingsContext = request.app.embeddings
try:
context.rename_face(old_name, body.new_name)
return JSONResponse(
content={
"success": True,
"message": f"Successfully renamed face to {body.new_name}.",
},
status_code=200,
)
except ValueError as e:
logger.error(e)
return JSONResponse(
status_code=400,
content={
"message": "Error renaming face. Check Frigate logs.",
"success": False,
},
)
@router.put("/lpr/reprocess")
def reprocess_license_plate(request: Request, event_id: str):
if not request.app.frigate_config.lpr.enabled:

View File

@ -0,0 +1,5 @@
from pydantic import BaseModel
class RenameFaceBody(BaseModel):
new_name: str

View File

@ -58,13 +58,9 @@ async def review(
)
clauses = [
(
(ReviewSegment.start_time > after)
& (
(ReviewSegment.end_time.is_null(True))
| (ReviewSegment.end_time < before)
)
)
(ReviewSegment.start_time > after)
& (ReviewSegment.start_time < before)
& ((ReviewSegment.end_time.is_null(True)) | (ReviewSegment.end_time < before))
]
if cameras != "all":

View File

@ -135,6 +135,7 @@ class Dispatcher:
"type": TrackedObjectUpdateTypesEnum.description,
"id": event.id,
"description": event.data["description"],
"camera": event.camera,
}
),
)

View File

@ -39,9 +39,6 @@ class EventMetadataSubscriber(Subscriber):
def __init__(self, topic: EventMetadataTypeEnum) -> None:
super().__init__(topic.value)
def check_for_update(self, timeout: float = 1) -> tuple | None:
return super().check_for_update(timeout)
def _return_object(self, topic: str, payload: tuple) -> tuple:
if payload is None:
return (None, None)

View File

@ -6,6 +6,8 @@ from typing import Optional
import zmq
from frigate.const import FAST_QUEUE_TIMEOUT
SOCKET_PUB = "ipc:///tmp/cache/proxy_pub"
SOCKET_SUB = "ipc:///tmp/cache/proxy_sub"
@ -77,7 +79,9 @@ class Subscriber:
self.socket.setsockopt_string(zmq.SUBSCRIBE, self.topic)
self.socket.connect(SOCKET_SUB)
def check_for_update(self, timeout: float = 1) -> Optional[tuple[str, any]]:
def check_for_update(
self, timeout: float = FAST_QUEUE_TIMEOUT
) -> Optional[tuple[str, any]]:
"""Returns message or None if no update."""
try:
has_update, _, _ = zmq.select([self.socket], [], [], timeout)

View File

@ -63,9 +63,9 @@ class PtzAutotrackConfig(FrigateBaseModel):
else:
raise ValueError("Invalid type for movement_weights")
if len(weights) != 5:
if len(weights) != 6:
raise ValueError(
"movement_weights must have exactly 5 floats, remove this line from your config and run autotracking calibration"
"movement_weights must have exactly 6 floats, remove this line from your config and run autotracking calibration"
)
return weights

View File

@ -94,7 +94,7 @@ class CameraFaceRecognitionConfig(FrigateBaseModel):
default=500, title="Min area of face box to consider running face recognition."
)
model_config = ConfigDict(extra="ignore", protected_namespaces=())
model_config = ConfigDict(extra="forbid", protected_namespaces=())
class LicensePlateRecognitionConfig(FrigateBaseModel):
@ -168,4 +168,4 @@ class CameraLicensePlateRecognitionConfig(FrigateBaseModel):
le=10,
)
model_config = ConfigDict(extra="ignore", protected_namespaces=())
model_config = ConfigDict(extra="forbid", protected_namespaces=())

View File

@ -472,8 +472,24 @@ class FrigateConfig(FrigateBaseModel):
)
for name, camera in self.cameras.items():
modified_global_config = global_config.copy()
# only populate some fields down to the camera level for specific keys
allowed_fields_map = {
"face_recognition": ["enabled", "min_area"],
"lpr": ["enabled", "expire_time", "min_area", "enhancement"],
}
for section in allowed_fields_map:
if section in modified_global_config:
modified_global_config[section] = {
k: v
for k, v in modified_global_config[section].items()
if k in allowed_fields_map[section]
}
merged_config = deep_merge(
camera.model_dump(exclude_unset=True), global_config
camera.model_dump(exclude_unset=True), modified_global_config
)
camera_config: CameraConfig = CameraConfig.model_validate(
{"name": name, **merged_config}

View File

@ -30,3 +30,6 @@ class ProxyConfig(FrigateBaseModel):
default=None,
title="Secret value for proxy authentication.",
)
default_role: Optional[str] = Field(
default="viewer", title="Default role for proxy users."
)

View File

@ -129,3 +129,7 @@ AUTOTRACKING_ZOOM_EDGE_THRESHOLD = 0.05
JWT_SECRET_ENV_VAR = "FRIGATE_JWT_SECRET"
PASSWORD_HASH_ALGORITHM = "pbkdf2_sha256"
# Queues
FAST_QUEUE_TIMEOUT = 0.00001 # seconds

View File

@ -2,6 +2,7 @@
import base64
import datetime
import json
import logging
import math
import os
@ -23,6 +24,7 @@ from frigate.comms.event_metadata_updater import (
)
from frigate.const import CLIPS_DIR
from frigate.embeddings.onnx.lpr_embedding import LPR_EMBEDDING_SIZE
from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import EventsPerSecond
from frigate.util.image import area
@ -1221,7 +1223,7 @@ class LicensePlateProcessingMixin:
license_plate_area = (license_plate[2] - license_plate[0]) * (
license_plate[3] - license_plate[1]
)
if license_plate_area < self.lpr_config.min_area:
if license_plate_area < self.config.cameras[camera].lpr.min_area:
logger.debug(f"{camera}: License plate area below minimum threshold.")
return
@ -1315,10 +1317,7 @@ class LicensePlateProcessingMixin:
# check that license plate is valid
# double the value because we've doubled the size of the car
if (
license_plate_area
< self.config.cameras[obj_data["camera"]].lpr.min_area * 2
):
if license_plate_area < self.config.cameras[camera].lpr.min_area * 2:
logger.debug(f"{camera}: License plate is less than min_area")
return
@ -1362,10 +1361,10 @@ class LicensePlateProcessingMixin:
if (
not license_plate_box
or area(license_plate_box)
< self.config.cameras[obj_data["camera"]].lpr.min_area
< self.config.cameras[camera].lpr.min_area
):
logger.debug(
f"{camera}: Area for license plate box {area(license_plate_box)} is less than min_area {self.config.cameras[obj_data['camera']].lpr.min_area}"
f"{camera}: Area for license plate box {area(license_plate_box)} is less than min_area {self.config.cameras[camera].lpr.min_area}"
)
return
@ -1513,6 +1512,20 @@ class LicensePlateProcessingMixin:
)
# always publish to recognized_license_plate field
self.requestor.send_data(
"tracked_object_update",
json.dumps(
{
"type": TrackedObjectUpdateTypesEnum.lpr,
"name": sub_label,
"plate": top_plate,
"score": avg_confidence,
"id": id,
"camera": camera,
"timestamp": start,
}
),
)
self.sub_label_publisher.publish(
EventMetadataTypeEnum.recognized_license_plate,
(id, top_plate, avg_confidence),

View File

@ -9,6 +9,7 @@ from peewee import DoesNotExist
from frigate.comms.embeddings_updater import EmbeddingsRequestEnum
from frigate.comms.event_metadata_updater import EventMetadataPublisher
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import FrigateConfig
from frigate.data_processing.common.license_plate.mixin import (
WRITE_DEBUG_IMAGES,
@ -31,11 +32,13 @@ class LicensePlatePostProcessor(LicensePlateProcessingMixin, PostProcessorApi):
def __init__(
self,
config: FrigateConfig,
requestor: InterProcessRequestor,
sub_label_publisher: EventMetadataPublisher,
metrics: DataProcessorMetrics,
model_runner: LicensePlateModelRunner,
detected_license_plates: dict[str, dict[str, any]],
):
self.requestor = requestor
self.detected_license_plates = detected_license_plates
self.model_runner = model_runner
self.lpr_config = config.lpr

View File

@ -2,6 +2,7 @@
import base64
import datetime
import json
import logging
import os
import random
@ -17,6 +18,7 @@ from frigate.comms.event_metadata_updater import (
EventMetadataPublisher,
EventMetadataTypeEnum,
)
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import FrigateConfig
from frigate.const import FACE_DIR, MODEL_CACHE_DIR
from frigate.data_processing.common.face.model import (
@ -24,6 +26,7 @@ from frigate.data_processing.common.face.model import (
FaceNetRecognizer,
FaceRecognizer,
)
from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import EventsPerSecond
from frigate.util.image import area
@ -42,11 +45,13 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
def __init__(
self,
config: FrigateConfig,
requestor: InterProcessRequestor,
sub_label_publisher: EventMetadataPublisher,
metrics: DataProcessorMetrics,
):
super().__init__(config, metrics)
self.face_config = config.face_recognition
self.requestor = requestor
self.sub_label_publisher = sub_label_publisher
self.face_detector: cv2.FaceDetectorYN = None
self.requires_face_detection = "face" not in self.config.objects.all_objects
@ -157,8 +162,9 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
def process_frame(self, obj_data: dict[str, any], frame: np.ndarray):
"""Look for faces in image."""
self.metrics.face_rec_fps.value = self.faces_per_second.eps()
camera = obj_data["camera"]
if not self.config.cameras[obj_data["camera"]].face_recognition.enabled:
if not self.config.cameras[camera].face_recognition.enabled:
return
start = datetime.datetime.now().timestamp()
@ -245,7 +251,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
if (
not face_box
or area(face_box)
< self.config.cameras[obj_data["camera"]].face_recognition.min_area
< self.config.cameras[camera].face_recognition.min_area
):
logger.debug(f"Invalid face box {face}")
return
@ -286,6 +292,20 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
self.person_face_history[id]
)
self.requestor.send_data(
"tracked_object_update",
json.dumps(
{
"type": TrackedObjectUpdateTypesEnum.face,
"name": weighted_sub_label,
"score": weighted_score,
"id": id,
"camera": camera,
"timestamp": start,
}
),
)
if weighted_score >= self.face_config.recognition_threshold:
self.sub_label_publisher.publish(
EventMetadataTypeEnum.sub_label,

View File

@ -5,6 +5,7 @@ import logging
import numpy as np
from frigate.comms.event_metadata_updater import EventMetadataPublisher
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import FrigateConfig
from frigate.data_processing.common.license_plate.mixin import (
LicensePlateProcessingMixin,
@ -23,11 +24,13 @@ class LicensePlateRealTimeProcessor(LicensePlateProcessingMixin, RealTimeProcess
def __init__(
self,
config: FrigateConfig,
requestor: InterProcessRequestor,
sub_label_publisher: EventMetadataPublisher,
metrics: DataProcessorMetrics,
model_runner: LicensePlateModelRunner,
detected_license_plates: dict[str, dict[str, any]],
):
self.requestor = requestor
self.detected_license_plates = detected_license_plates
self.model_runner = model_runner
self.lpr_config = config.lpr

View File

@ -19,7 +19,11 @@ DETECTOR_KEY = "rknn"
supported_socs = ["rk3562", "rk3566", "rk3568", "rk3576", "rk3588"]
supported_models = {ModelTypeEnum.yolonas: "^deci-fp16-yolonas_[sml]$"}
supported_models = {
ModelTypeEnum.yologeneric: "^frigate-fp16-yolov9-[cemst]$",
ModelTypeEnum.yolonas: "^deci-fp16-yolonas_[sml]$",
ModelTypeEnum.yolox: "^rock-(fp16|i8)-yolox_(nano|tiny)$",
}
model_cache_dir = os.path.join(MODEL_CACHE_DIR, "rknn_cache/")
@ -115,7 +119,7 @@ class Rknn(DetectionApi):
model_props["model_type"] = model_type
if model_matched:
model_props["filename"] = model_path + f"-{soc}-v2.3.0-1.rknn"
model_props["filename"] = model_path + f"-{soc}-v2.3.2-1.rknn"
model_props["path"] = model_cache_dir + model_props["filename"]
@ -136,7 +140,7 @@ class Rknn(DetectionApi):
os.mkdir(model_cache_dir)
urllib.request.urlretrieve(
f"https://github.com/MarcA711/rknn-models/releases/download/v2.3.0/{filename}",
f"https://github.com/MarcA711/rknn-models/releases/download/v2.3.2/{filename}",
model_cache_dir + filename,
)
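For reference, a small sketch of how the model-name patterns above act as a gate. The plain-string keys and sample model names are illustrative; the detector itself keys the dict by ModelTypeEnum members.

# Illustrative check of which model names match the supported RKNN patterns.
import re

supported_model_patterns = {
    "yolo-generic": r"^frigate-fp16-yolov9-[cemst]$",
    "yolonas": r"^deci-fp16-yolonas_[sml]$",
    "yolox": r"^rock-(fp16|i8)-yolox_(nano|tiny)$",
}

for name in ("frigate-fp16-yolov9-s", "rock-i8-yolox_tiny", "yolov8n"):
    matched = any(re.match(p, name) for p in supported_model_patterns.values())
    print(name, "->", "auto-download supported" if matched else "no prebuilt model")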

View File

@ -5,11 +5,13 @@ import json
import logging
import multiprocessing as mp
import os
import re
import signal
import threading
from types import FrameType
from typing import Optional, Union
from pathvalidate import ValidationError, sanitize_filename
from setproctitle import setproctitle
from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsRequestor
@ -240,6 +242,42 @@ class EmbeddingsContext:
EmbeddingsRequestEnum.clear_face_classifier.value, None
)
def rename_face(self, old_name: str, new_name: str) -> None:
valid_name_pattern = r"^[a-zA-Z0-9\s_-]{1,50}$"
try:
sanitized_old_name = sanitize_filename(old_name, replacement_text="_")
sanitized_new_name = sanitize_filename(new_name, replacement_text="_")
except ValidationError as e:
raise ValueError(f"Invalid face name: {str(e)}")
if not re.match(valid_name_pattern, old_name):
raise ValueError(f"Invalid old face name: {old_name}")
if not re.match(valid_name_pattern, new_name):
raise ValueError(f"Invalid new face name: {new_name}")
if sanitized_old_name != old_name:
raise ValueError(f"Old face name contains invalid characters: {old_name}")
if sanitized_new_name != new_name:
raise ValueError(f"New face name contains invalid characters: {new_name}")
old_path = os.path.normpath(os.path.join(FACE_DIR, old_name))
new_path = os.path.normpath(os.path.join(FACE_DIR, new_name))
# Prevent path traversal
if not old_path.startswith(
os.path.normpath(FACE_DIR)
) or not new_path.startswith(os.path.normpath(FACE_DIR)):
raise ValueError("Invalid path detected")
if not os.path.exists(old_path):
raise ValueError(f"Face {old_name} not found.")
os.rename(old_path, new_path)
self.requestor.send_data(
EmbeddingsRequestEnum.clear_face_classifier.value, None
)
def update_description(self, event_id: str, description: str) -> None:
self.requestor.send_data(
EmbeddingsRequestEnum.embed_description.value,
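Stepping outside the diff for a moment, the path-traversal guard used by rename_face above can be exercised on its own. A minimal sketch, assuming FACE_DIR resolves to its usual clips location; the face names are examples.

# Standalone sketch of the normpath/startswith guard from rename_face above.
# FACE_DIR and the face names are illustrative values.
import os

FACE_DIR = "/media/frigate/clips/faces"

def is_safe_face_path(name: str) -> bool:
    path = os.path.normpath(os.path.join(FACE_DIR, name))
    return path.startswith(os.path.normpath(FACE_DIR))

print(is_safe_face_path("john"))        # True
print(is_safe_face_path("../secrets"))  # False -> rejected as "Invalid path detected"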

View File

@ -120,7 +120,7 @@ class EmbeddingMaintainer(threading.Thread):
if self.config.face_recognition.enabled:
self.realtime_processors.append(
FaceRealTimeProcessor(
self.config, self.event_metadata_publisher, metrics
self.config, self.requestor, self.event_metadata_publisher, metrics
)
)
@ -135,6 +135,7 @@ class EmbeddingMaintainer(threading.Thread):
self.realtime_processors.append(
LicensePlateRealTimeProcessor(
self.config,
self.requestor,
self.event_metadata_publisher,
metrics,
lpr_model_runner,
@ -149,6 +150,7 @@ class EmbeddingMaintainer(threading.Thread):
self.post_processors.append(
LicensePlatePostProcessor(
self.config,
self.requestor,
self.event_metadata_publisher,
metrics,
lpr_model_runner,
@ -229,7 +231,7 @@ class EmbeddingMaintainer(threading.Thread):
def _process_updates(self) -> None:
"""Process event updates"""
update = self.event_subscriber.check_for_update(timeout=0.01)
update = self.event_subscriber.check_for_update()
if update is None:
return
@ -322,7 +324,7 @@ class EmbeddingMaintainer(threading.Thread):
def _process_finalized(self) -> None:
"""Process the end of an event."""
while True:
ended = self.event_end_subscriber.check_for_update(timeout=0.01)
ended = self.event_end_subscriber.check_for_update()
if ended == None:
break
@ -418,7 +420,7 @@ class EmbeddingMaintainer(threading.Thread):
def _process_recordings_updates(self) -> None:
"""Process recordings updates."""
while True:
recordings_data = self.recordings_subscriber.check_for_update(timeout=0.01)
recordings_data = self.recordings_subscriber.check_for_update()
if recordings_data == None:
break
@ -435,7 +437,7 @@ class EmbeddingMaintainer(threading.Thread):
def _process_event_metadata(self):
# Check for regenerate description requests
(topic, payload) = self.event_metadata_subscriber.check_for_update(timeout=0.01)
(topic, payload) = self.event_metadata_subscriber.check_for_update()
if topic is None:
return
@ -449,7 +451,7 @@ class EmbeddingMaintainer(threading.Thread):
def _process_dedicated_lpr(self) -> None:
"""Process event updates"""
(topic, data) = self.detection_subscriber.check_for_update(timeout=0.01)
(topic, data) = self.detection_subscriber.check_for_update()
if topic is None:
return
@ -583,6 +585,7 @@ class EmbeddingMaintainer(threading.Thread):
"type": TrackedObjectUpdateTypesEnum.description,
"id": event.id,
"description": description,
"camera": event.camera,
},
)

View File

@ -36,11 +36,12 @@ class JinaV1TextEmbedding(BaseEmbedding):
requestor: InterProcessRequestor,
device: str = "AUTO",
):
HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
super().__init__(
model_name="jinaai/jina-clip-v1",
model_file="text_model_fp16.onnx",
download_urls={
"text_model_fp16.onnx": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/text_model_fp16.onnx",
"text_model_fp16.onnx": f"{HF_ENDPOINT}/jinaai/jina-clip-v1/resolve/main/onnx/text_model_fp16.onnx",
},
)
self.tokenizer_file = "tokenizer"
@ -156,12 +157,13 @@ class JinaV1ImageEmbedding(BaseEmbedding):
if model_size == "large"
else "vision_model_quantized.onnx"
)
HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
super().__init__(
model_name="jinaai/jina-clip-v1",
model_file=model_file,
download_urls={
model_file: f"https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/{model_file}",
"preprocessor_config.json": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/preprocessor_config.json",
model_file: f"{HF_ENDPOINT}/jinaai/jina-clip-v1/resolve/main/onnx/{model_file}",
"preprocessor_config.json": f"{HF_ENDPOINT}/jinaai/jina-clip-v1/resolve/main/preprocessor_config.json",
},
)
self.requestor = requestor

View File

@ -34,12 +34,13 @@ class JinaV2Embedding(BaseEmbedding):
model_file = (
"model_fp16.onnx" if model_size == "large" else "model_quantized.onnx"
)
HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
super().__init__(
model_name="jinaai/jina-clip-v2",
model_file=model_file,
download_urls={
model_file: f"https://huggingface.co/jinaai/jina-clip-v2/resolve/main/onnx/{model_file}",
"preprocessor_config.json": "https://huggingface.co/jinaai/jina-clip-v2/resolve/main/preprocessor_config.json",
model_file: f"{HF_ENDPOINT}/jinaai/jina-clip-v2/resolve/main/onnx/{model_file}",
"preprocessor_config.json": f"{HF_ENDPOINT}/jinaai/jina-clip-v2/resolve/main/preprocessor_config.json",
},
)
self.tokenizer_file = "tokenizer"
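The HF_ENDPOINT fallback added above lets model downloads be redirected to a Hugging Face mirror. A small sketch of how the URL is assembled; the mirror hostname is only an example value, not a recommendation.

# Sketch of the HF_ENDPOINT override used above; the mirror URL is an example.
import os

os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"  # e.g. set via container env

HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
model_file = "model_fp16.onnx"
url = f"{HF_ENDPOINT}/jinaai/jina-clip-v2/resolve/main/onnx/{model_file}"
print(url)
# https://hf-mirror.com/jinaai/jina-clip-v2/resolve/main/onnx/model_fp16.onnx

When HF_ENDPOINT is unset, the default https://huggingface.co host is used, so existing deployments keep their current download behavior.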

View File

@ -75,7 +75,7 @@ class EventProcessor(threading.Thread):
).execute()
while not self.stop_event.is_set():
update = self.event_receiver.check_for_update()
update = self.event_receiver.check_for_update(timeout=1)
if update == None:
continue

View File

@ -6,6 +6,8 @@ import queue
import signal
import threading
from abc import ABC, abstractmethod
from multiprocessing import Queue, Value
from multiprocessing.synchronize import Event as MpEvent
import numpy as np
from setproctitle import setproctitle
@ -15,6 +17,7 @@ from frigate.detectors import create_detector
from frigate.detectors.detector_config import (
BaseDetectorConfig,
InputDTypeEnum,
ModelConfig,
)
from frigate.util.builtin import EventsPerSecond, load_labels
from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory
@ -85,11 +88,11 @@ class LocalObjectDetector(ObjectDetector):
def run_detector(
name: str,
detection_queue: mp.Queue,
out_events: dict[str, mp.Event],
avg_speed,
start,
detector_config,
detection_queue: Queue,
out_events: dict[str, MpEvent],
avg_speed: Value,
start: Value,
detector_config: BaseDetectorConfig,
):
threading.current_thread().name = f"detector:{name}"
logger = logging.getLogger(f"detector.{name}")
@ -97,7 +100,7 @@ def run_detector(
setproctitle(f"frigate.detector.{name}")
listen()
stop_event = mp.Event()
stop_event: MpEvent = mp.Event()
def receiveSignal(signalNumber, frame):
stop_event.set()
@ -145,17 +148,17 @@ def run_detector(
class ObjectDetectProcess:
def __init__(
self,
name,
detection_queue,
out_events,
detector_config,
name: str,
detection_queue: Queue,
out_events: dict[str, MpEvent],
detector_config: BaseDetectorConfig,
):
self.name = name
self.out_events = out_events
self.detection_queue = detection_queue
self.avg_inference_speed = mp.Value("d", 0.01)
self.detection_start = mp.Value("d", 0.0)
self.detect_process = None
self.avg_inference_speed = Value("d", 0.01)
self.detection_start = Value("d", 0.0)
self.detect_process: util.Process | None = None
self.detector_config = detector_config
self.start_or_restart()
@ -193,7 +196,15 @@ class ObjectDetectProcess:
class RemoteObjectDetector:
def __init__(self, name, labels, detection_queue, event, model_config, stop_event):
def __init__(
self,
name: str,
labels: dict[int, str],
detection_queue: Queue,
event: MpEvent,
model_config: ModelConfig,
stop_event: MpEvent,
):
self.labels = labels
self.name = name
self.fps = EventsPerSecond()

View File

@ -206,6 +206,7 @@ class PtzAutoTracker:
self.calibrating: dict[str, object] = {}
self.intercept: dict[str, object] = {}
self.move_coefficients: dict[str, object] = {}
self.zoom_time: dict[str, float] = {}
self.zoom_factor: dict[str, object] = {}
# if cam is set to autotrack, onvif should be set up
@ -272,7 +273,12 @@ class PtzAutoTracker:
move_status_supported = self.onvif.get_service_capabilities(camera)
if move_status_supported is None or move_status_supported.lower() != "true":
if not (
isinstance(move_status_supported, bool) and move_status_supported
) and not (
isinstance(move_status_supported, str)
and move_status_supported.lower() == "true"
):
logger.warning(
f"Disabling autotracking for {camera}: ONVIF MoveStatus not supported"
)
@ -292,7 +298,7 @@ class PtzAutoTracker:
self.move_threads[camera].start()
if camera_config.onvif.autotracking.movement_weights:
if len(camera_config.onvif.autotracking.movement_weights) == 5:
if len(camera_config.onvif.autotracking.movement_weights) == 6:
camera_config.onvif.autotracking.movement_weights = [
float(val)
for val in camera_config.onvif.autotracking.movement_weights
@ -311,7 +317,10 @@ class PtzAutoTracker:
camera_config.onvif.autotracking.movement_weights[2]
)
self.move_coefficients[camera] = (
camera_config.onvif.autotracking.movement_weights[3:]
camera_config.onvif.autotracking.movement_weights[3:5]
)
self.zoom_time[camera] = (
camera_config.onvif.autotracking.movement_weights[5]
)
else:
camera_config.onvif.autotracking.enabled = False
@ -360,6 +369,7 @@ class PtzAutoTracker:
!= ZoomingModeEnum.disabled
):
logger.info(f"Calibration for {camera} in progress: 0% complete")
self.zoom_time[camera] = 0
for i in range(2):
# absolute move to 0 - fully zoomed out
@ -403,6 +413,7 @@ class PtzAutoTracker:
zoom_out_values.append(self.ptz_metrics[camera].zoom_level.value)
zoom_start_time = time.time()
# relative move to 0.01
self.onvif._move_relative(
camera,
@ -415,13 +426,45 @@ class PtzAutoTracker:
while not self.ptz_metrics[camera].motor_stopped.is_set():
self.onvif.get_camera_status(camera)
zoom_stop_time = time.time()
full_relative_start_time = time.time()
self.onvif._move_relative(
camera,
-1,
-1,
-1e-2,
1,
)
while not self.ptz_metrics[camera].motor_stopped.is_set():
self.onvif.get_camera_status(camera)
full_relative_stop_time = time.time()
self.onvif._move_relative(
camera,
1,
1,
1e-2,
1,
)
while not self.ptz_metrics[camera].motor_stopped.is_set():
self.onvif.get_camera_status(camera)
self.zoom_time[camera] = (
full_relative_stop_time - full_relative_start_time
) - (zoom_stop_time - zoom_start_time)
zoom_in_values.append(self.ptz_metrics[camera].zoom_level.value)
self.ptz_metrics[camera].max_zoom.value = max(zoom_in_values)
self.ptz_metrics[camera].min_zoom.value = min(zoom_out_values)
logger.debug(
f"{camera}: Calibration values: max zoom: {self.ptz_metrics[camera].max_zoom.value}, min zoom: {self.ptz_metrics[camera].min_zoom.value}"
f"{camera}: Calibration values: max zoom: {self.ptz_metrics[camera].max_zoom.value}, min zoom: {self.ptz_metrics[camera].min_zoom.value}, zoom time: {self.zoom_time[camera]}"
)
else:
@ -537,6 +580,7 @@ class PtzAutoTracker:
self.ptz_metrics[camera].max_zoom.value,
self.intercept[camera],
*self.move_coefficients[camera],
self.zoom_time[camera],
]
)
@ -1061,6 +1105,7 @@ class PtzAutoTracker:
average_velocity = np.zeros((4,))
predicted_box = obj.obj_data["box"]
zoom_predicted_box = obj.obj_data["box"]
centroid_x = obj.obj_data["centroid"][0]
centroid_y = obj.obj_data["centroid"][1]
@ -1069,20 +1114,20 @@ class PtzAutoTracker:
pan = ((centroid_x / camera_width) - 0.5) * 2
tilt = (0.5 - (centroid_y / camera_height)) * 2
_, average_velocity = (
self._get_valid_velocity(camera, obj)
if "velocity" not in self.tracked_object_metrics[camera]
else (
self.tracked_object_metrics[camera]["valid_velocity"],
self.tracked_object_metrics[camera]["velocity"],
)
)
if (
camera_config.onvif.autotracking.movement_weights
): # use estimates if we have available coefficients
predicted_movement_time = self._predict_movement_time(camera, pan, tilt)
_, average_velocity = (
self._get_valid_velocity(camera, obj)
if "velocity" not in self.tracked_object_metrics[camera]
else (
self.tracked_object_metrics[camera]["valid_velocity"],
self.tracked_object_metrics[camera]["velocity"],
)
)
if np.any(average_velocity):
# this box could exceed the frame boundaries if velocity is high
# but we'll handle that in _enqueue_move() as two separate moves
@ -1111,6 +1156,34 @@ class PtzAutoTracker:
camera, obj, predicted_box, predicted_movement_time, debug_zoom=True
)
if (
camera_config.onvif.autotracking.movement_weights
and camera_config.onvif.autotracking.zooming == ZoomingModeEnum.relative
and zoom != 0
):
zoom_predicted_movement_time = 0
if np.any(average_velocity):
zoom_predicted_movement_time = abs(zoom) * self.zoom_time[camera]
zoom_predicted_box = (
predicted_box
+ camera_fps * zoom_predicted_movement_time * average_velocity
)
zoom_predicted_box = np.round(zoom_predicted_box).astype(int)
centroid_x = round((zoom_predicted_box[0] + zoom_predicted_box[2]) / 2)
centroid_y = round((zoom_predicted_box[1] + zoom_predicted_box[3]) / 2)
# recalculate pan and tilt with new centroid
pan = ((centroid_x / camera_width) - 0.5) * 2
tilt = (0.5 - (centroid_y / camera_height)) * 2
logger.debug(
f"{camera}: Zoom amount: {zoom}, zoom predicted time: {zoom_predicted_movement_time}, zoom predicted box: {tuple(zoom_predicted_box)}"
)
self._enqueue_move(camera, obj.obj_data["frame_time"], pan, tilt, zoom)
def _autotrack_move_zoom_only(self, camera, obj):
@ -1242,7 +1315,7 @@ class PtzAutoTracker:
return
# this is a brand new object that's on our camera, has our label, entered the zone,
# is not a false positive, and is not initially motionless
# is not a false positive, and is active
if (
# new object
self.tracked_object[camera] is None
@ -1252,7 +1325,7 @@ class PtzAutoTracker:
and not obj.previous["false_positive"]
and not obj.false_positive
and not self.tracked_object_history[camera]
and obj.obj_data["motionless_count"] == 0
and obj.active
):
logger.debug(
f"{camera}: New object: {obj.obj_data['id']} {obj.obj_data['box']} {obj.obj_data['frame_time']}"

View File

@ -74,7 +74,7 @@ class OnvifController:
"features": [],
"presets": {},
}
except ONVIFError as e:
except (Fault, ONVIFError, TransportError, Exception) as e:
logger.error(f"Failed to create ONVIF camera instance for {cam_name}: {e}")
# track initial failures
self.failed_cams[cam_name] = {
@ -100,7 +100,7 @@ class OnvifController:
# this will fire an exception if camera is not a ptz
capabilities = onvif.get_definition("ptz")
logger.debug(f"Onvif capabilities for {camera_name}: {capabilities}")
except (ONVIFError, Fault, TransportError) as e:
except (Fault, ONVIFError, TransportError, Exception) as e:
logger.error(
f"Unable to get Onvif capabilities for camera: {camera_name}: {e}"
)
@ -109,7 +109,7 @@ class OnvifController:
try:
profiles = await media.GetProfiles()
logger.debug(f"Onvif profiles for {camera_name}: {profiles}")
except (ONVIFError, Fault, TransportError) as e:
except (Fault, ONVIFError, TransportError, Exception) as e:
logger.error(
f"Unable to get Onvif media profiles for camera: {camera_name}: {e}"
)
@ -263,7 +263,7 @@ class OnvifController:
# setup existing presets
try:
presets: list[dict] = await ptz.GetPresets({"ProfileToken": profile.token})
except ONVIFError as e:
except (Fault, ONVIFError, TransportError, Exception) as e:
logger.warning(f"Unable to get presets from camera: {camera_name}: {e}")
presets = []
@ -392,7 +392,7 @@ class OnvifController:
try:
asyncio.run(self.cams[camera_name]["ptz"].ContinuousMove(move_request))
except ONVIFError as e:
except (Fault, ONVIFError, TransportError, Exception) as e:
logger.warning(f"Onvif sending move request to {camera_name} failed: {e}")
def _move_relative(self, camera_name: str, pan, tilt, zoom, speed) -> None:
@ -593,7 +593,7 @@ class OnvifController:
self._zoom(camera_name, command)
else:
self._move(camera_name, command)
except ONVIFError as e:
except (Fault, ONVIFError, TransportError, Exception) as e:
logger.error(f"Unable to handle onvif command: {e}")
async def get_camera_info(self, camera_name: str) -> dict[str, any]:

View File

@ -27,6 +27,7 @@ from frigate.config import FrigateConfig, RetainModeEnum
from frigate.const import (
CACHE_DIR,
CACHE_SEGMENT_FORMAT,
FAST_QUEUE_TIMEOUT,
INSERT_MANY_RECORDINGS,
MAX_SEGMENT_DURATION,
MAX_SEGMENTS_IN_CACHE,
@ -38,8 +39,6 @@ from frigate.util.services import get_video_properties
logger = logging.getLogger(__name__)
QUEUE_READ_TIMEOUT = 0.00001 # seconds
class SegmentInfo:
def __init__(
@ -536,7 +535,7 @@ class RecordingMaintainer(threading.Thread):
# empty the object recordings info queue
while True:
(topic, data) = self.detection_subscriber.check_for_update(
timeout=QUEUE_READ_TIMEOUT
timeout=FAST_QUEUE_TIMEOUT
)
if not topic:

View File

@ -1491,7 +1491,9 @@ class TestConfig(unittest.TestCase):
"fps": 5,
},
"onvif": {
"autotracking": {"movement_weights": "0, 1, 1.23, 2.34, 0.50"}
"autotracking": {
"movement_weights": "0, 1, 1.23, 2.34, 0.50, 1"
}
},
}
},
@ -1504,6 +1506,7 @@ class TestConfig(unittest.TestCase):
"1.23",
"2.34",
"0.5",
"1.0",
]
def test_fails_invalid_movement_weights(self):

View File

@ -28,7 +28,7 @@ from frigate.config import (
RecordConfig,
SnapshotsConfig,
)
from frigate.const import UPDATE_CAMERA_ACTIVITY
from frigate.const import FAST_QUEUE_TIMEOUT, UPDATE_CAMERA_ACTIVITY
from frigate.events.types import EventStateEnum, EventTypeEnum
from frigate.models import Event, Timeline
from frigate.track.tracked_object import TrackedObject
@ -682,7 +682,9 @@ class TrackedObjectProcessor(threading.Thread):
# cleanup event finished queue
while not self.stop_event.is_set():
update = self.event_end_subscriber.check_for_update(timeout=0.01)
update = self.event_end_subscriber.check_for_update(
timeout=FAST_QUEUE_TIMEOUT
)
if not update:
break

View File

@ -25,3 +25,5 @@ class ModelStatusTypesEnum(str, Enum):
class TrackedObjectUpdateTypesEnum(str, Enum):
description = "description"
face = "face"
lpr = "lpr"

View File

@ -319,6 +319,21 @@ def migrate_016_0(config: dict[str, dict[str, any]]) -> dict[str, dict[str, any]
camera_config["live"] = live_config
# add another value to movement_weights for autotracking cams
onvif_config = camera_config.get("onvif", {})
if "autotracking" in onvif_config:
movement_weights = (
camera_config.get("onvif", {})
.get("autotracking")
.get("movement_weights", {})
)
if movement_weights and len(movement_weights.split(",")) == 5:
onvif_config["autotracking"]["movement_weights"] = (
movement_weights + ", 0"
)
camera_config["onvif"] = onvif_config
new_config["cameras"][name] = camera_config
new_config["version"] = "0.16-0"

View File

@ -340,7 +340,6 @@ def get_ort_providers(
providers.append(provider)
options.append(
{
"arena_extend_strategy": "kSameAsRequested",
"cache_dir": os.path.join(MODEL_CACHE_DIR, "openvino/ort"),
"device_type": device,
}

View File

@ -408,7 +408,13 @@ def get_rockchip_npu_stats() -> dict[str, str]:
try:
with open("/sys/kernel/debug/rknpu/load", "r") as f:
npu_output = f.read()
core_loads = re.findall(r"Core\d+:\s*(\d+)%", npu_output)
if "Core0:" in npu_output:
# multi core NPU
core_loads = re.findall(r"Core\d+:\s*(\d+)%", npu_output)
else:
# single core NPU
core_loads = re.findall(r"NPU load:\s+(\d+)%", npu_output)
except FileNotFoundError:
core_loads = None
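To make the two branches above concrete, an isolated sketch with assumed sample contents of /sys/kernel/debug/rknpu/load for a multi-core and a single-core NPU; the sample strings are assumptions, not captured output.

# Illustrative parsing of the two rknpu load formats handled above.
import re

multi_core_output = "NPU load:  Core0: 12%, Core1:  0%, Core2:  3%,"
single_core_output = "NPU load:  35%,"

for npu_output in (multi_core_output, single_core_output):
    if "Core0:" in npu_output:
        core_loads = re.findall(r"Core\d+:\s*(\d+)%", npu_output)   # multi core
    else:
        core_loads = re.findall(r"NPU load:\s+(\d+)%", npu_output)  # single core
    print(core_loads)

# ['12', '0', '3']
# ['35']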

View File

@ -7,6 +7,9 @@ import signal
import subprocess as sp
import threading
import time
from multiprocessing import Queue, Value
from multiprocessing.synchronize import Event as MpEvent
from typing import Any
import cv2
from setproctitle import setproctitle
@ -99,10 +102,10 @@ def capture_frames(
frame_shape: tuple[int, int],
frame_manager: FrameManager,
frame_queue,
fps: mp.Value,
skipped_fps: mp.Value,
current_frame: mp.Value,
stop_event: mp.Event,
fps: Value,
skipped_fps: Value,
current_frame: Value,
stop_event: MpEvent,
):
frame_size = frame_shape[0] * frame_shape[1]
frame_rate = EventsPerSecond()
@ -167,7 +170,7 @@ class CameraWatchdog(threading.Thread):
camera_name,
config: CameraConfig,
shm_frame_count: int,
frame_queue: mp.Queue,
frame_queue: Queue,
camera_fps,
skipped_fps,
ffmpeg_pid,
@ -402,10 +405,10 @@ class CameraCapture(threading.Thread):
frame_index: int,
ffmpeg_process,
frame_shape: tuple[int, int],
frame_queue: mp.Queue,
fps,
skipped_fps,
stop_event,
frame_queue: Queue,
fps: Value,
skipped_fps: Value,
stop_event: MpEvent,
):
threading.Thread.__init__(self)
self.name = f"capture:{config.name}"
@ -419,7 +422,7 @@ class CameraCapture(threading.Thread):
self.skipped_fps = skipped_fps
self.frame_manager = SharedMemoryFrameManager()
self.ffmpeg_process = ffmpeg_process
self.current_frame = mp.Value("d", 0.0)
self.current_frame = Value("d", 0.0)
self.last_frame = 0
def run(self):
@ -469,14 +472,14 @@ def capture_camera(
def track_camera(
name,
config: CameraConfig,
model_config,
labelmap,
detection_queue,
result_connection,
model_config: ModelConfig,
labelmap: dict[int, str],
detection_queue: Queue,
result_connection: MpEvent,
detected_objects_queue,
camera_metrics: CameraMetrics,
ptz_metrics: PTZMetrics,
region_grid,
region_grid: list[list[dict[str, Any]]],
):
stop_event = mp.Event()
@ -584,8 +587,8 @@ def detect(
def process_frames(
camera_name: str,
requestor: InterProcessRequestor,
frame_queue: mp.Queue,
frame_shape,
frame_queue: Queue,
frame_shape: tuple[int, int],
model_config: ModelConfig,
camera_config: CameraConfig,
detect_config: DetectConfig,
@ -593,13 +596,13 @@ def process_frames(
motion_detector: MotionDetector,
object_detector: RemoteObjectDetector,
object_tracker: ObjectTracker,
detected_objects_queue: mp.Queue,
detected_objects_queue: Queue,
camera_metrics: CameraMetrics,
objects_to_track: list[str],
object_filters,
stop_event,
stop_event: MpEvent,
ptz_metrics: PTZMetrics,
region_grid,
region_grid: list[list[dict[str, Any]]],
exit_on_empty: bool = False,
):
next_region_update = get_tomorrow_at_time(2)

View File

@ -0,0 +1 @@
{}

View File

@ -0,0 +1 @@
{}

View File

@ -0,0 +1 @@
{}

View File

@ -0,0 +1 @@
{}

View File

@ -0,0 +1 @@
{}

View File

@ -0,0 +1 @@
{}

View File

@ -0,0 +1 @@
{}

View File

@ -0,0 +1 @@
{}

View File

@ -0,0 +1 @@
{}

View File

@ -0,0 +1 @@
{}

View File

@ -0,0 +1 @@
{}

View File

@ -0,0 +1 @@
{}

View File

@ -0,0 +1 @@
{}

View File

@ -0,0 +1 @@
{}

View File

@ -0,0 +1 @@
{}

View File

@ -0,0 +1 @@
{}

View File

@ -0,0 +1 @@
{}

View File

@ -0,0 +1 @@
{}

View File

@ -0,0 +1 @@
{}

View File

@ -0,0 +1 @@
{}

View File

@ -54,7 +54,7 @@
"disabled": "Vypnuto",
"disable": "Vypni",
"save": "Uložit",
"saving": "Ukládám...",
"saving": "Ukládám",
"cancel": "Zrušit",
"close": "Zavři",
"copy": "Zkopíruj",

View File

@ -2,7 +2,7 @@
"iconPicker": {
"selectIcon": "Zvolte ikonu",
"search": {
"placeholder": "Hledejte ikonu...."
"placeholder": "Hledejte ikonu"
}
}
}

View File

@ -26,7 +26,7 @@
"value": "{{seconds}} sekund",
"short": {
"title": "Latence",
"value": "{{seconds}} sek."
"value": "{{seconds}} sek"
}
},
"totalFrames": "Celkový počet snímků:",

View File

@ -56,6 +56,27 @@
"formattedTimestampExcludeSeconds": {
"24hour": "%-d %b, %H:%M",
"12hour": "%-d %b, %H:%M"
},
"formattedTimestampHourMinute": {
"12hour": "h:mm aaa",
"24hour": "HH:mm"
},
"formattedTimestampHourMinuteSecond": {
"12hour": "h:mm:ss aaa",
"24hour": "HH:mm:ss"
},
"formattedTimestampMonthDayHourMinute": {
"12hour": "MMM d, h:mm aaa",
"24hour": "MMM d, HH:mm"
},
"formattedTimestampMonthDayYearHourMinute": {
"12hour": "MMM d yyyy, h:mm aaa",
"24hour": "MMM d yyyy, HH:mm"
},
"formattedTimestampMonthDay": "MMM d",
"formattedTimestampFilename": {
"12hour": "MM-dd-yy-h-mm-ss-a",
"24hour": "MM-dd-yy-HH-mm-ss"
}
},
"button": {
@ -66,7 +87,7 @@
"enable": "Aktivieren",
"disabled": "deaktiviert",
"disable": "deaktivieren",
"saving": "Speichere...",
"saving": "Speichere",
"close": "Schließen",
"back": "Zurück",
"history": "Historie",
@ -132,7 +153,8 @@
"fa": "Persisch",
"uk": "Ukrainisch",
"he": "Hebräisch",
"sk": "Slowakisch"
"sk": "Slowakisch",
"yue": "粵語 (Kantonesisch)"
},
"appearance": "Erscheinung",
"theme": {
@ -142,7 +164,8 @@
"default": "Standard",
"nord": "Norden",
"red": "Rot",
"contrast": "Hoher Kontrast"
"contrast": "Hoher Kontrast",
"highcontrast": "Hoher Kontrast"
},
"help": "Hilfe",
"documentation": {

View File

@ -9,7 +9,7 @@
},
"name": {
"label": "Name",
"placeholder": "Gib einen Namen ein...",
"placeholder": "Gib einen Namen ein",
"errorMessage": {
"exists": "Name der Kameragruppe bereits vorhanden.",
"nameMustNotPeriod": "Name einer Kameragruppe darf keinen Punkt enthalten.",

View File

@ -23,6 +23,12 @@
"false_one": "Das ist kein(e) {{label}}",
"false_other": "Das sind kein(e) {{label}}",
"label": "Bestätige dieses Label nicht für Frigate Plus"
},
"question": {
"label": "Bestätige diese Beschriftung für Frigate Plus",
"ask_a": "Ist dieses Objekt ein <code>{{label}}</code>?",
"ask_an": "Ist dieses Objekt ein <code>{{label}}</code>?",
"ask_full": "Ist dieses Objekt ein <code>{{untranslatedLabel}}</code> ({{translatedLabel}})?"
}
},
"submitToPlus": {
@ -72,7 +78,7 @@
"restreaming": {
"disabled": "Für diese Kamera ist das Restreaming nicht aktiviert.",
"desc": {
"readTheDocumentation": "Weitere Informationen in der Dokumentation ",
"readTheDocumentation": "Weitere Informationen in der Dokumentation",
"title": "Konfiguriere go2rtc, um erweiterte Live-Ansichtsoptionen und Audio für diese Kamera zu nutzen."
}
},

View File

@ -118,8 +118,8 @@
"noLicensePlatesFound": "Keine Kennzeichen gefunden.",
"title": "Bekannte Kennzeichen",
"loadFailed": "Bekannte Nummernschilder konnten nicht geladen werden.",
"loading": "Lade bekannte Nummernschilder...",
"placeholder": "Tippe, um Kennzeichen zu suchen...",
"loading": "Lade bekannte Nummernschilder",
"placeholder": "Tippe, um Kennzeichen zu suchen",
"selectPlatesFromList": "Wählen eine oder mehrere Kennzeichen aus der Liste aus."
}
}

View File

@ -1,7 +1,7 @@
{
"iconPicker": {
"search": {
"placeholder": "Suche nach einem Icon..."
"placeholder": "Suche nach einem Icon"
},
"selectIcon": "Wähle ein Icon"
}

View File

@ -31,5 +31,7 @@
"markTheseItemsAsReviewed": "Diese Objekte als geprüft kennzeichnen",
"camera": "Kamera",
"allCameras": "Alle Kameras",
"markAsReviewed": "Als geprüft kennzeichnen"
"markAsReviewed": "Als geprüft kennzeichnen",
"selected_one": "{{count}} ausgewählt",
"selected_other": "{{count}} ausgewählt"
}

View File

@ -75,7 +75,7 @@
"title": "Erkunden ist nicht Verfügbar",
"embeddingsReindexing": {
"context": "Erkunden kann nach der Re-Indexierung der verfolgten Objekte verwendet werden.",
"startingUp": "Startet...",
"startingUp": "Startet",
"estimatedTime": "Voraussichtlich verbleibende Zeit:",
"finishingShortly": "Bald erledigt",
"step": {

View File

@ -24,7 +24,7 @@
"selectItem": "Wähle {{item}}",
"selectFace": "Wähle Gesicht",
"imageEntry": {
"dropActive": "Ziehe das Bild hierher ...",
"dropActive": "Ziehe das Bild hierher",
"dropInstructions": "Ziehe ein Bild hier her oder klicke um eines auszuwählen",
"maxSize": "Maximale Größe: {{size}} MB",
"validation": {
@ -35,7 +35,9 @@
"addFace": "Gesicht hinzufügen",
"uploadImage": "Bild hochladen",
"deleteFaceAttempts": "Lösche Gesichtsversuche",
"reprocessFace": "Gesichter erneut verarbeiten"
"reprocessFace": "Gesichter erneut verarbeiten",
"renameFace": "Gesicht umbenennen",
"deleteFace": "Lösche Gesicht"
},
"train": {
"title": "Trainiere",
@ -72,5 +74,9 @@
"uploadFace": "Lade Bild des Gesichts hoch",
"nextSteps": "Nächste Schritte",
"faceName": "Gib den Namen zum Gesicht ein"
},
"renameFace": {
"title": "Gesicht umbenennen",
"desc": "Gib den neuen Namen für das Gesicht von {{name}} ein."
}
}

View File

@ -62,6 +62,6 @@
},
"search": "Suche",
"placeholder": {
"search": "Suchen..."
"search": "Suchen"
}
}

View File

@ -86,7 +86,7 @@
"semanticSearch": {
"title": "Semantische Suche",
"desc": "Die semantische Suche in Frigate ermöglicht es, verfolgte Objekte innerhalb der Überprüfungselemente zu finden, indem entweder das Bild selbst, eine benutzerdefinierte Textbeschreibung oder eine automatisch generierte Beschreibung verwendet wird.",
"readTheDocumentation": "Lesen Sie die Dokumentation.",
"readTheDocumentation": "Lesen Sie die Dokumentation",
"reindexNow": {
"alreadyInProgress": "Neu-Indizierung läufts bereits.",
"label": "Neuindizieren",
@ -221,7 +221,7 @@
"add": "Zone hinzufügen",
"name": {
"title": "Name",
"inputPlaceHolder": "Geben Sie einen Namen ein...",
"inputPlaceHolder": "Geben Sie einen Namen ein",
"tips": "Der Name muss aus mindestens 2 Zeichen bestehen und sollte nicht den Namen einer Kamera oder anderen Zone entsprechen."
},
"objects": {

View File

@ -29,14 +29,17 @@
"gpuUsage": "GPU Auslastung",
"gpuMemory": "Grafikspeicher",
"gpuDecoder": "GPU Decoder",
"gpuEncoder": "GPU Encoder"
"gpuEncoder": "GPU Encoder",
"npuUsage": "NPU Verwendung",
"npuMemory": "NPU Speicher"
},
"title": "Allgemein",
"detector": {
"title": "Detektoren",
"cpuUsage": "CPU-Auslastung des Detektors",
"memoryUsage": "Arbeitsspeichernutzung des Detektors",
"inferenceSpeed": "Detektoren Inferenzgeschwindigkeit"
"inferenceSpeed": "Detektoren Inferenzgeschwindigkeit",
"temperature": "Temperatur des Detektors"
},
"otherProcesses": {
"title": "Andere Prozesse",
@ -142,7 +145,11 @@
"image_embedding_speed": "Geschwindigkeit der Bildeinbettung",
"face_embedding_speed": "Geschwindigkeit der Gesichtseinbettung",
"plate_recognition_speed": "Geschwindigkeit der Kennzeichenerkennung",
"text_embedding_speed": "Geschwindigkeit der Texteinbettung"
"text_embedding_speed": "Geschwindigkeit der Texteinbettung",
"plate_recognition": "Kennzeichen Erkennung",
"face_recognition_speed": "Gesichts Erkennungs Geschwindigkeit",
"text_embedding": "Einbettung von Bildern",
"face_recognition": "Gesichts Erkennung"
},
"title": "Optimierungen",
"infPerSecond": "Rückschlüsse pro Sekunde"
@ -151,7 +158,10 @@
"healthy": "Das System läuft problemlos",
"ffmpegHighCpuUsage": "{{camera}} hat eine hohe FFMPEG CPU Auslastung ({{ffmpegAvg}}%)",
"detectHighCpuUsage": "{{camera}} hat eine hohe CPU Auslastung bei der Erkennung ({{detectAvg}}%)",
"reindexingEmbeddings": "Neuindizierung von Einbettungen ({{processed}}% erledigt)"
"reindexingEmbeddings": "Neuindizierung von Einbettungen ({{processed}}% erledigt)",
"detectIsSlow": "{{detect}} ist langsam ({{speed}} ms)",
"detectIsVerySlow": "{{detect}} ist sehr langsam ({{speed}} ms)",
"cameraIsOffline": "{{camera}} ist offline"
},
"lastRefreshed": "Zuletzt aktualisiert: "
}

View File

@ -88,7 +88,7 @@
"disabled": "Disabled",
"disable": "Disable",
"save": "Save",
"saving": "Saving...",
"saving": "Saving",
"cancel": "Cancel",
"close": "Close",
"copy": "Copy",
@ -153,6 +153,7 @@
"fi": "Suomi (Finnish)",
"da": "Dansk (Danish)",
"sk": "Slovenčina (Slovak)",
"yue": "粵語 (Cantonese)",
"withSystem": {
"label": "Use the system settings for language"
}
@ -173,7 +174,7 @@
"green": "Green",
"nord": "Nord",
"red": "Red",
"contrast": "High Contrast",
"highcontrast": "High Contrast",
"default": "Default"
},
"help": "Help",

View File

@ -12,7 +12,7 @@
},
"name": {
"label": "Name",
"placeholder": "Enter a name...",
"placeholder": "Enter a name",
"errorMessage": {
"mustLeastCharacters": "Camera group name must be at least 2 characters.",
"exists": "Camera group name already exists.",

View File

@ -70,7 +70,7 @@
"disabled": "Restreaming is not enabled for this camera.",
"desc": {
"title": "Set up go2rtc for additional live view options and audio for this camera.",
"readTheDocumentation": "Read the documentation "
"readTheDocumentation": "Read the documentation"
}
},
"showStats": {

View File

@ -117,8 +117,8 @@
"recognizedLicensePlates": {
"title": "Recognized License Plates",
"loadFailed": "Failed to load recognized license plates.",
"loading": "Loading recognized license plates...",
"placeholder": "Type to search license plates...",
"loading": "Loading recognized license plates",
"placeholder": "Type to search license plates",
"noLicensePlatesFound": "No license plates found.",
"selectPlatesFromList": "Select one or more plates from the list."
}

Some files were not shown because too many files have changed in this diff.