Merge branch 'dev' into motion_improvements

This commit is contained in:
p-boon 2025-04-03 20:42:53 +02:00
commit 8f04624eee
151 changed files with 5441 additions and 1421 deletions

View File

@ -108,7 +108,6 @@ imagestream
imdecode
imencode
imread
imutils
imwrite
interp
iostat

View File

@ -73,7 +73,7 @@ body:
attributes:
label: Operating system
options:
- HassOS
- Home Assistant OS
- Debian
- Other Linux
- Proxmox
@ -87,7 +87,7 @@ body:
attributes:
label: Install method
options:
- HassOS Addon
- Home Assistant Add-on
- Docker Compose
- Docker CLI
- Proxmox via Docker

View File

@ -59,7 +59,7 @@ body:
attributes:
label: Operating system
options:
- HassOS
- Home Assistant OS
- Debian
- Other Linux
- Proxmox
@ -73,7 +73,7 @@ body:
attributes:
label: Install method
options:
- HassOS Addon
- Home Assistant Add-on
- Docker Compose
- Docker CLI
- Proxmox via Docker

View File

@ -53,7 +53,7 @@ body:
attributes:
label: Install method
options:
- HassOS Addon
- Home Assistant Add-on
- Docker Compose
- Docker CLI
- Proxmox via Docker

View File

@ -73,7 +73,7 @@ body:
attributes:
label: Install method
options:
- HassOS Addon
- Home Assistant Add-on
- Docker Compose
- Docker CLI
- Proxmox via Docker

View File

@ -69,7 +69,7 @@ body:
attributes:
label: Install method
options:
- HassOS Addon
- Home Assistant Add-on
- Docker Compose
- Docker CLI
- Proxmox via Docker

View File

@ -97,7 +97,7 @@ body:
attributes:
label: Operating system
options:
- HassOS
- Home Assistant OS
- Debian
- Other Linux
- Proxmox
@ -111,7 +111,7 @@ body:
attributes:
label: Install method
options:
- HassOS Addon
- Home Assistant Add-on
- Docker Compose
- Docker CLI
validations:

View File

@ -1,8 +1,8 @@
version: "3"
services:
devcontainer:
container_name: frigate-devcontainer
# add groups from host for render, plugdev, video
# Check host system's actual render/video/plugdev group IDs with 'getent group render', 'getent group video', and 'getent group plugdev'
# Must add these exact IDs in container's group_add section or OpenVINO GPU acceleration will fail
group_add:
- "109" # render
- "110" # render
@ -24,8 +24,8 @@ services:
# capabilities: [gpu]
environment:
YOLO_MODELS: ""
devices:
- /dev/bus/usb:/dev/bus/usb
# devices:
# - /dev/bus/usb:/dev/bus/usb # Uncomment for Google Coral USB
# - /dev/dri:/dev/dri # for intel hwaccel, needs to be updated for your hardware
volumes:
- .:/workspace/frigate:cached
@ -33,9 +33,10 @@ services:
- /etc/localtime:/etc/localtime:ro
- ./config:/config
- ./debug:/media/frigate
- /dev/bus/usb:/dev/bus/usb
# - /dev/bus/usb:/dev/bus/usb # Uncomment for Google Coral USB
mqtt:
container_name: mqtt
image: eclipse-mosquitto:1.6
image: eclipse-mosquitto:2.0
command: mosquitto -c /mosquitto-no-auth.conf # enable no-auth mode
ports:
- "1883:1883"

View File

@ -78,8 +78,9 @@ COPY docker/main/requirements-ov.txt /requirements-ov.txt
RUN apt-get -qq update \
&& apt-get -qq install -y wget python3 python3-dev python3-distutils gcc pkg-config libhdf5-dev \
&& wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
&& python3 get-pip.py "pip" \
&& pip install -r /requirements-ov.txt
&& pip3 install -r /requirements-ov.txt
# Get OpenVino Model
RUN --mount=type=bind,source=docker/main/build_ov_model.py,target=/build_ov_model.py \
@ -172,6 +173,7 @@ RUN apt-get -qq update \
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1
RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
&& python3 get-pip.py "pip"
COPY docker/main/requirements.txt /requirements.txt
@ -235,6 +237,7 @@ ENV DEFAULT_FFMPEG_VERSION="7.0"
ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:5.0"
RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
&& python3 get-pip.py "pip"
RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
@ -262,7 +265,7 @@ HEALTHCHECK --start-period=300s --start-interval=5s --interval=15s --timeout=5s
# Frigate deps with Node.js and NPM for devcontainer
FROM deps AS devcontainer
# Do not start the actual Frigate service on devcontainer as it will be started by VSCode
# Do not start the actual Frigate service on devcontainer as it will be started by VS Code
# But start a fake service for simulating the logs
COPY docker/main/fake_frigate_run /etc/s6-overlay/s6-rc.d/frigate/run

View File

@ -2,7 +2,7 @@
set -euxo pipefail
NGINX_VERSION="1.25.3"
NGINX_VERSION="1.27.4"
VOD_MODULE_VERSION="1.31"
SECURE_TOKEN_MODULE_VERSION="1.5"
SET_MISC_MODULE_VERSION="v0.33"

View File

@ -7,7 +7,6 @@ starlette-context == 0.3.6
fastapi == 0.115.*
uvicorn == 0.30.*
slowapi == 0.1.*
imutils == 0.5.*
joserfc == 1.0.*
pathvalidate == 3.2.*
markupsafe == 3.0.*

View File

@ -9,39 +9,6 @@ set -o errexit -o nounset -o pipefail
# Tell S6-Overlay not to restart this service
s6-svc -O .
function migrate_db_path() {
# Find config file in yaml or yml, but prefer yaml
local config_file="${CONFIG_FILE:-"/config/config.yml"}"
local config_file_yaml="${config_file//.yml/.yaml}"
if [[ -f "${config_file_yaml}" ]]; then
config_file="${config_file_yaml}"
elif [[ ! -f "${config_file}" ]]; then
# Frigate will create the config file on startup
return 0
fi
unset config_file_yaml
# Use yq to check if database.path is set
local user_db_path
user_db_path=$(yq eval '.database.path' "${config_file}")
if [[ "${user_db_path}" == "null" ]]; then
local previous_db_path="/media/frigate/frigate.db"
local new_db_dir="/config"
if [[ -f "${previous_db_path}" ]]; then
if mountpoint --quiet "${new_db_dir}"; then
# /config is a mount point, move the db
echo "[INFO] Moving db from '${previous_db_path}' to the '${new_db_dir}' dir..."
# Move all files that starts with frigate.db to the new directory
mv -vf "${previous_db_path}"* "${new_db_dir}"
else
echo "[ERROR] Trying to migrate the db path from '${previous_db_path}' to the '${new_db_dir}' dir, but '${new_db_dir}' is not a mountpoint, please mount the '${new_db_dir}' dir"
return 1
fi
fi
fi
}
function set_libva_version() {
local ffmpeg_path
ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
@ -50,8 +17,8 @@ function set_libva_version() {
}
echo "[INFO] Preparing Frigate..."
migrate_db_path
set_libva_version
echo "[INFO] Starting Frigate..."
cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate"

View File

@ -61,7 +61,7 @@ if [[ ! -f "/dev/shm/go2rtc.yaml" ]]; then
echo "[INFO] Preparing new go2rtc config..."
if [[ -n "${SUPERVISOR_TOKEN:-}" ]]; then
# Running as a Home Assistant add-on, infer the IP address and port
# Running as a Home Assistant Add-on, infer the IP address and port
get_ip_and_port_from_supervisor
fi

View File

@ -0,0 +1,142 @@
#!/command/with-contenv bash
# shellcheck shell=bash
# Do preparation tasks before starting the main services
set -o errexit -o nounset -o pipefail
function migrate_addon_config_dir() {
local home_assistant_config_dir="/homeassistant"
if ! mountpoint --quiet "${home_assistant_config_dir}"; then
# Not running as a Home Assistant Add-on
return 0
fi
local config_dir="/config"
local new_config_file="${config_dir}/config.yml"
local new_config_file_yaml="${new_config_file//.yml/.yaml}"
if [[ -f "${new_config_file_yaml}" || -f "${new_config_file}" ]]; then
# Already migrated
return 0
fi
local old_config_file="${home_assistant_config_dir}/frigate.yml"
local old_config_file_yaml="${old_config_file//.yml/.yaml}"
if [[ -f "${old_config_file}" ]]; then
:
elif [[ -f "${old_config_file_yaml}" ]]; then
old_config_file="${old_config_file_yaml}"
new_config_file="${new_config_file_yaml}"
else
# Nothing to migrate
return 0
fi
unset old_config_file_yaml new_config_file_yaml
echo "[INFO] Starting migration from Home Assistant config dir to Add-on config dir..." >&2
local db_path
db_path=$(yq -r '.database.path' "${old_config_file}")
if [[ "${db_path}" == "null" ]]; then
db_path="${config_dir}/frigate.db"
fi
if [[ "${db_path}" == "${config_dir}/"* ]]; then
# replace /config/ prefix with /homeassistant/
local old_db_path="${home_assistant_config_dir}/${db_path:8}"
if [[ -f "${old_db_path}" ]]; then
local new_db_dir
new_db_dir="$(dirname "${db_path}")"
echo "[INFO] Migrating database from '${old_db_path}' to '${new_db_dir}' dir..." >&2
mkdir -vp "${new_db_dir}"
mv -vf "${old_db_path}" "${new_db_dir}"
local db_file
for db_file in "${old_db_path}"-shm "${old_db_path}"-wal; do
if [[ -f "${db_file}" ]]; then
mv -vf "${db_file}" "${new_db_dir}"
fi
done
unset db_file
fi
fi
local config_entry
for config_entry in .model.path .model.labelmap_path .ffmpeg.path .mqtt.tls_ca_certs .mqtt.tls_client_cert .mqtt.tls_client_key; do
local config_entry_path
config_entry_path=$(yq -r "${config_entry}" "${old_config_file}")
if [[ "${config_entry_path}" == "${config_dir}/"* ]]; then
# replace /config/ prefix with /homeassistant/
local old_config_entry_path="${home_assistant_config_dir}/${config_entry_path:8}"
if [[ -f "${old_config_entry_path}" ]]; then
local new_config_entry_entry
new_config_entry_entry="$(dirname "${config_entry_path}")"
echo "[INFO] Migrating ${config_entry} from '${old_config_entry_path}' to '${config_entry_path}'..." >&2
mkdir -vp "${new_config_entry_entry}"
mv -vf "${old_config_entry_path}" "${config_entry_path}"
fi
fi
done
local old_model_cache_path="${home_assistant_config_dir}/model_cache"
if [[ -d "${old_model_cache_path}" ]]; then
echo "[INFO] Migrating '${old_model_cache_path}' to '${config_dir}'..." >&2
mv -f "${old_model_cache_path}" "${config_dir}"
fi
echo "[INFO] Migrating other files from '${home_assistant_config_dir}' to '${config_dir}'..." >&2
local file
for file in .exports .jwt_secret .timeline .vacuum go2rtc; do
file="${home_assistant_config_dir}/${file}"
if [[ -f "${file}" ]]; then
mv -vf "${file}" "${config_dir}"
fi
done
echo "[INFO] Migrating config file from '${old_config_file}' to '${new_config_file}'..." >&2
mv -vf "${old_config_file}" "${new_config_file}"
echo "[INFO] Migration from Home Assistant config dir to Add-on config dir completed." >&2
}
function migrate_db_from_media_to_config() {
# Find config file in yml or yaml, but prefer yml
local config_file="${CONFIG_FILE:-"/config/config.yml"}"
local config_file_yaml="${config_file//.yml/.yaml}"
if [[ -f "${config_file}" ]]; then
:
elif [[ -f "${config_file_yaml}" ]]; then
config_file="${config_file_yaml}"
else
# Frigate will create the config file on startup
return 0
fi
unset config_file_yaml
local user_db_path
user_db_path=$(yq -r '.database.path' "${config_file}")
if [[ "${user_db_path}" == "null" ]]; then
local old_db_path="/media/frigate/frigate.db"
local new_db_dir="/config"
if [[ -f "${old_db_path}" ]]; then
echo "[INFO] Migrating database from '${old_db_path}' to '${new_db_dir}' dir..." >&2
if mountpoint --quiet "${new_db_dir}"; then
# /config is a mount point, move the db
mv -vf "${old_db_path}" "${new_db_dir}"
local db_file
for db_file in "${old_db_path}"-shm "${old_db_path}"-wal; do
if [[ -f "${db_file}" ]]; then
mv -vf "${db_file}" "${new_db_dir}"
fi
done
unset db_file
else
echo "[ERROR] Trying to migrate the database path from '${old_db_path}' to '${new_db_dir}' dir, but '${new_db_dir}' is not a mountpoint, please mount the '${new_db_dir}' dir" >&2
return 1
fi
fi
fi
}
migrate_addon_config_dir
migrate_db_from_media_to_config

View File

@ -0,0 +1 @@
oneshot

View File

@ -0,0 +1 @@
/etc/s6-overlay/s6-rc.d/prepare/run

View File

@ -1,5 +1,4 @@
import json
import os
import sys
from ruamel.yaml import YAML
@ -9,17 +8,13 @@ from frigate.const import (
DEFAULT_FFMPEG_VERSION,
INCLUDED_FFMPEG_VERSIONS,
)
from frigate.util.config import find_config_file
sys.path.remove("/opt/frigate")
yaml = YAML()
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
# Check if we can use .yaml instead of .yml
config_file_yaml = config_file.replace(".yml", ".yaml")
if os.path.isfile(config_file_yaml):
config_file = config_file_yaml
config_file = find_config_file()
try:
with open(config_file) as f:

View File

@ -15,6 +15,7 @@ from frigate.const import (
LIBAVFORMAT_VERSION_MAJOR,
)
from frigate.ffmpeg_presets import parse_preset_hardware_acceleration_encode
from frigate.util.config import find_config_file
sys.path.remove("/opt/frigate")
@ -29,12 +30,7 @@ if os.path.isdir("/run/secrets"):
Path(os.path.join("/run/secrets", secret_file)).read_text().strip()
)
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
# Check if we can use .yaml instead of .yml
config_file_yaml = config_file.replace(".yml", ".yaml")
if os.path.isfile(config_file_yaml):
config_file = config_file_yaml
config_file = find_config_file()
try:
with open(config_file) as f:

View File

@ -30,7 +30,7 @@ http {
gzip on;
gzip_comp_level 6;
gzip_types text/plain text/css application/json application/x-javascript application/javascript text/javascript image/svg+xml image/x-icon image/bmp image/png image/gif image/jpeg image/jpg;
gzip_types text/plain text/css application/json application/x-javascript application/javascript text/javascript image/svg+xml image/x-icon image/bmp;
gzip_proxied no-cache no-store private expired auth;
gzip_vary on;
@ -300,6 +300,11 @@ http {
add_header Cache-Control "public";
}
location /locales/ {
access_log off;
add_header Cache-Control "public";
}
location ~ ^/.*-([A-Za-z0-9]+)\.webmanifest$ {
access_log off;
expires 1y;
@ -308,8 +313,8 @@ http {
proxy_set_header Accept-Encoding "";
sub_filter_once off;
sub_filter_types application/json;
sub_filter '"start_url": "/"' '"start_url" : "$http_x_ingress_path"';
sub_filter '"src": "/' '"src": "$http_x_ingress_path/';
sub_filter '"start_url": "/BASE_PATH/"' '"start_url" : "$http_x_ingress_path/"';
sub_filter '"src": "/BASE_PATH/' '"src": "$http_x_ingress_path/';
}
sub_filter 'href="/BASE_PATH/' 'href="$http_x_ingress_path/';
@ -317,6 +322,7 @@ http {
sub_filter '"/BASE_PATH/dist/' '"$http_x_ingress_path/dist/';
sub_filter '"/BASE_PATH/js/' '"$http_x_ingress_path/js/';
sub_filter '"/BASE_PATH/assets/' '"$http_x_ingress_path/assets/';
sub_filter '"/BASE_PATH/locales/' '"$http_x_ingress_path/locales/';
sub_filter '"/BASE_PATH/monacoeditorwork/' '"$http_x_ingress_path/assets/';
sub_filter 'return"/BASE_PATH/"' 'return window.baseUrl';
sub_filter '<body>' '<body><script>window.baseUrl="$http_x_ingress_path/";</script>';

View File

@ -1,18 +1,18 @@
"""Prints the tls config as json to stdout."""
import json
import os
import sys
from ruamel.yaml import YAML
sys.path.insert(0, "/opt/frigate")
from frigate.util.config import find_config_file
sys.path.remove("/opt/frigate")
yaml = YAML()
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
# Check if we can use .yaml instead of .yml
config_file_yaml = config_file.replace(".yml", ".yaml")
if os.path.isfile(config_file_yaml):
config_file = config_file_yaml
config_file = find_config_file()
try:
with open(config_file) as f:

View File

@ -14,7 +14,7 @@ try:
with open("/config/conv2rknn.yaml", "r") as config_file:
configuration = yaml.safe_load(config_file)
except FileNotFoundError:
raise Exception("Please place a config.yaml file in /config/conv2rknn.yaml")
raise Exception("Please place a config file at /config/conv2rknn.yaml")
if configuration["config"] != None:
rknn_config = configuration["config"]

View File

@ -39,6 +39,7 @@ WORKDIR /opt/frigate
COPY --from=rootfs / /
RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
&& python3 get-pip.py "pip" --break-system-packages
RUN python3 -m pip config set global.break-system-packages true

View File

@ -9,9 +9,9 @@ ARG DEBIAN_FRONTEND
# Add deadsnakes PPA for python3.11
RUN apt-get -qq update && \
apt-get -qq install -y --no-install-recommends \
software-properties-common \
&& add-apt-repository ppa:deadsnakes/ppa
apt-get -qq install -y --no-install-recommends \
software-properties-common \
&& add-apt-repository ppa:deadsnakes/ppa
# Use a separate container to build wheels to prevent build dependencies in final image
RUN apt-get -qq update \
@ -24,6 +24,7 @@ RUN apt-get -qq update \
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1
RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
&& python3 get-pip.py "pip"
FROM build-wheels AS trt-wheels

View File

@ -21,7 +21,20 @@ RUN --mount=type=bind,source=docker/tensorrt/detector/tensorrt_libyolo.sh,target
RUN mkdir -p /usr/local/cuda-deps
RUN if [ "$TARGETARCH" = "amd64" ]; then \
cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libcurand.so.* /usr/local/cuda-deps/ && \
cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libnvrtc.so.* /usr/local/cuda-deps/ ; \
cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libnvrtc.so.* /usr/local/cuda-deps/ && \
cd /usr/local/cuda-deps/ && \
for lib in libnvrtc.so.*; do \
if [[ "$lib" =~ libnvrtc.so\.([0-9]+\.[0-9]+\.[0-9]+) ]]; then \
version="${BASH_REMATCH[1]}"; \
ln -sf "libnvrtc.so.$version" libnvrtc.so; \
fi; \
done && \
for lib in libcurand.so.*; do \
if [[ "$lib" =~ libcurand.so\.([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+) ]]; then \
version="${BASH_REMATCH[1]}"; \
ln -sf "libcurand.so.$version" libcurand.so; \
fi; \
done; \
fi
# Frigate w/ TensorRT Support as separate image

View File

@ -1,8 +1,7 @@
/usr/local/lib
/usr/local/cuda
/usr/local/lib/python3.11/dist-packages/tensorrt
/usr/local/lib/python3.11/dist-packages/nvidia/cudnn/lib
/usr/local/lib/python3.11/dist-packages/nvidia/cuda_runtime/lib
/usr/local/lib/python3.11/dist-packages/nvidia/cublas/lib
/usr/local/lib/python3.11/dist-packages/nvidia/cuda_nvrtc/lib
/usr/local/lib/python3.11/dist-packages/tensorrt
/usr/local/lib/python3.11/dist-packages/nvidia/cufft/lib

View File

@ -44,7 +44,7 @@ go2rtc:
### `environment_vars`
This section can be used to set environment variables for those unable to modify the environment of the container (ie. within HassOS)
This section can be used to set environment variables for those unable to modify the environment of the container, like within Home Assistant OS.
Example:
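A minimal sketch of what this can look like (the variable name and value below are placeholders, not options Frigate itself defines):

```yaml
environment_vars:
  EXAMPLE_VARIABLE: example_value
```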

View File

@ -43,13 +43,13 @@ Restarting Frigate will reset the rate limits.
If you are running Frigate behind a proxy, you will want to set `trusted_proxies` or these rate limits will apply to the upstream proxy IP address. This means that a brute force attack will rate limit login attempts from other devices and could temporarily lock you out of your instance. In order to ensure rate limits only apply to the actual IP address where the requests are coming from, you will need to list the upstream networks that you want to trust. These trusted proxies are checked against the `X-Forwarded-For` header when looking for the IP address where the request originated.
If you are running a reverse proxy in the same docker compose file as Frigate, here is an example of how your auth config might look:
If you are running a reverse proxy in the same Docker Compose file as Frigate, here is an example of how your auth config might look:
```yaml
auth:
  failed_login_rate_limit: "1/second;5/minute;20/hour"
  trusted_proxies:
    - 172.18.0.0/16 # <---- this is the subnet for the internal docker compose network
    - 172.18.0.0/16 # <---- this is the subnet for the internal Docker Compose network
```
## JWT Token Secret
@ -66,7 +66,7 @@ Frigate looks for a JWT token secret in the following order:
1. An environment variable named `FRIGATE_JWT_SECRET`
2. A docker secret named `FRIGATE_JWT_SECRET` in `/run/secrets/`
3. A `jwt_secret` option from the Home Assistant Addon options
3. A `jwt_secret` option from the Home Assistant Add-on options
4. A `.jwt_secret` file in the config directory
If no secret is found on startup, Frigate generates one and stores it in a `.jwt_secret` file in the config directory.
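For illustration, supplying the secret through an environment variable in Docker Compose might look like the sketch below (the value is a placeholder; use your own long, random string):

```yaml
services:
  frigate:
    environment:
      FRIGATE_JWT_SECRET: "replace-with-a-long-random-string"
```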

View File

@ -0,0 +1,31 @@
---
id: bird_classification
title: Bird Classification
---
Bird classification identifies known birds using a quantized Tensorflow model. When a known bird is recognized, its common name will be added as a `sub_label`. This information is included in the UI, in filters, and in notifications.
## Minimum System Requirements
Bird classification runs a lightweight tflite model on the CPU, so the system requirements are not significantly different from running Frigate itself.
## Model
The classification model used is the MobileNet INat Bird Classification model; [the available identifiers can be found here.](https://raw.githubusercontent.com/google-coral/test_data/master/inat_bird_labels.txt)
## Configuration
Bird classification is disabled by default and must be enabled in your config file before it can be used. Bird classification is a global configuration setting.
```yaml
classification:
  bird:
    enabled: true
```
## Advanced Configuration
Fine-tune bird classification with these optional parameters:
- `threshold`: Classification confidence score required to set the sub label on the object.
- Default: `0.9`.
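For example, a config that raises the confidence requirement could look like this (a sketch; it assumes `threshold` sits alongside `enabled` under `classification.bird`):

```yaml
classification:
  bird:
    enabled: true
    threshold: 0.95
```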

View File

@ -4,7 +4,7 @@ In addition to Frigate's Live camera dashboard, Birdseye allows a portable heads
Birdseye can be viewed by adding the "Birdseye" camera to a Camera Group in the Web UI. Add a Camera Group by pressing the "+" icon on the Live page, and choose "Birdseye" as one of the cameras.
Birdseye can also be used in HomeAssistant dashboards, cast to media devices, etc.
Birdseye can also be used in Home Assistant dashboards, cast to media devices, etc.
## Birdseye Behavior

View File

@ -7,21 +7,26 @@ Face recognition identifies known individuals by matching detected faces with pr
## Model Requirements
Frigate has support for CV2 Local Binary Pattern Face Recognizer to recognize faces, which runs locally. A lightweight face landmark detection model is also used to align faces before running them through the face recognizer.
### Face Detection
Users running a Frigate+ model (or any custom model that natively detects faces) should ensure that `face` is added to the [list of objects to track](../plus/#available-label-types) either globally or for a specific camera. This will allow face detection to run at the same time as object detection and be more efficient.
When running a Frigate+ model (or any custom model that natively detects faces), ensure that `face` is added to the [list of objects to track](../plus/#available-label-types) either globally or for a specific camera. This will allow face detection to run at the same time as object detection and be more efficient.
Users without a model that detects faces can still run face recognition. Frigate uses a lightweight DNN face detection model that runs on the CPU. In this case, you should _not_ define `face` in your list of objects to track.
When running a default COCO model or another model that does not include `face` as a detectable label, face detection will run via CV2 using a lightweight DNN model that runs on the CPU. In this case, you should _not_ define `face` in your list of objects to track.
:::note
### Face Recognition
Frigate needs to first detect a `face` before it can recognize a face.
Frigate has support for two face recognition model types:
:::
- **small**: Frigate will run a FaceNet embedding model to recognize faces, which runs locally on the CPU. This model is optimized for efficiency and is not as accurate.
- **large**: Frigate will run a large ArcFace embedding model that is optimized for accuracy. It is only recommended to be run when an integrated or dedicated GPU is available.
In both cases, a lightweight face landmark detection model is also used to align faces before running recognition.
## Minimum System Requirements
Face recognition is lightweight and runs on the CPU, there are no significantly different system requirements than running Frigate itself.
The `small` model is optimized for efficiency and runs on the CPU; most CPUs should run the model efficiently.
The `large` model is optimized for accuracy; an integrated or discrete GPU is highly recommended.
## Configuration
@ -47,12 +52,17 @@ Fine-tune face recognition with these optional parameters:
### Recognition
- `model_size`: Which model size to use, options are `small` or `large`
- `unknown_score`: Min score to mark a person as a potential match; matches at or below this will be marked as unknown.
- Default: `0.8`.
- `recognition_threshold`: Recognition confidence score required to add the face to the object as a sub label.
- Default: `0.9`.
- `save_attempts`: Number of images of recognized faces to save for training.
- Default: `100`.
- `blur_confidence_filter`: Enables a filter that calculates how blurry the face is and adjusts the confidence based on this.
- Default: `True`.
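As an illustration, a config using these options might look like the following sketch (it assumes the options live under a top-level `face_recognition` section; check the reference config for your version):

```yaml
face_recognition:
  enabled: true
  model_size: large
  recognition_threshold: 0.9
  save_attempts: 100
  blur_confidence_filter: true
```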
## Dataset
## Creating a Robust Training Set
The number of images needed for a sufficient training set for face recognition varies depending on several factors:
@ -61,11 +71,9 @@ The number of images needed for a sufficient training set for face recognition v
However, here are some general guidelines:
- Minimum: For basic face recognition tasks, a minimum of 10-20 images per person is often recommended.
- Recommended: For more robust and accurate systems, 30-50 images per person is a good starting point.
- Ideal: For optimal performance, especially in challenging conditions, 100 or more images per person can be beneficial.
## Creating a Robust Training Set
- Minimum: For basic face recognition tasks, a minimum of 5-10 images per person is often recommended.
- Recommended: For more robust and accurate systems, 20-30 images per person is a good starting point.
- Ideal: For optimal performance, especially in challenging conditions, 50-100 images per person can be beneficial.
The accuracy of face recognition is heavily dependent on the quality of data given to it for training. It is recommended to build the face training library in phases.
@ -74,19 +82,46 @@ The accuracy of face recognition is heavily dependent on the quality of data giv
When choosing images to include in the face training set, it is recommended to always follow these guidelines:
- If it is difficult to make out details in a person's face, it will not be helpful in training.
- Avoid images with under/over-exposure.
- Avoid images with extreme under/over-exposure.
- Avoid blurry / pixelated images.
- Be careful when uploading images of people when they are wearing clothing that covers a lot of their face as this may confuse the training.
- Do not upload too many images at the same time, it is recommended to train 4-6 images for each person each day so it is easier to know if the previously added images helped or hurt performance.
- Avoid training on infrared (gray-scale) images. The models are trained on color images and will not be able to extract features from gray-scale images as well.
- Using images of people wearing hats / sunglasses may confuse the model.
- Do not upload too many similar images at the same time; it is recommended to train no more than 4-6 similar images for each person to avoid over-fitting.
:::
### Step 1 - Building a Strong Foundation
When first enabling face recognition it is important to build a foundation of strong images. It is recommended to start by uploading 1-2 photos taken by a smartphone for each person. It is important that the person's face in the photo is straight-on and not turned which will ensure a good starting point.
When first enabling face recognition it is important to build a foundation of strong images. It is recommended to start by uploading 1-5 "portrait" photos for each person. It is important that the person's face in the photo is straight-on and not turned, which will ensure a good starting point.
Then it is recommended to use the `Face Library` tab in Frigate to select and train images for each person as they are detected. When building a strong foundation it is strongly recommended to only train on images that are straight-on. Ignore images from cameras that recognize faces from an angle. Once a person starts to be consistently recognized correctly on images that are straight-on, it is time to move on to the next step.
Then it is recommended to use the `Face Library` tab in Frigate to select and train images for each person as they are detected. When building a strong foundation it is strongly recommended to only train on images that are straight-on. Ignore images from cameras that recognize faces from an angle.
Aim to strike a balance between image quality and a range of conditions (day / night, different weather conditions, different times of day, etc.) so that the images used for each person are diverse and do not lead to over-fitting.
Once a person starts to be consistently recognized correctly on images that are straight-on, it is time to move on to the next step.
### Step 2 - Expanding The Dataset
Once straight-on images are performing well, start choosing slightly off-angle images to include for training. It is important to still choose images where enough face detail is visible to recognize someone.
## FAQ
### Why can't I bulk upload photos?
It is important to methodically add photos to the library; bulk importing photos (especially from a general photo library) will lead to over-fitting in that particular scenario and hurt recognition performance.
### Why can't I bulk reprocess faces?
Face embedding models work by breaking apart faces into different features. This means that when reprocessing an image, only images from a similar angle will have their scores affected.
### Why do unknown people score similarly to known people?
This can happen for a few different reasons, but this is usually an indicator that the training set needs to be improved. This is often related to over-fitting:
- If you train with only a few images per person, especially if those images are very similar, the recognition model becomes overly specialized to those specific images.
- When you provide images with different poses, lighting, and expressions, the algorithm extracts features that are consistent across those variations.
- By training on a diverse set of images, the algorithm becomes less sensitive to minor variations and noise in the input image.
### I see scores above the threshold in the train tab, but a sub label wasn't assigned?
Frigate considers the recognition scores across all recognition attempts for each person object. The scores are continually weighted based on the area of the face, and a sub label will only be assigned to a person if they are consistently recognized with high confidence. This avoids cases where a single high confidence recognition would throw off the results.

View File

@ -14,7 +14,7 @@ Depending on your system, these parameters may not be compatible. More informati
## Raspberry Pi 3/4
Ensure you increase the allocated RAM for your GPU to at least 128 (`raspi-config` > Performance Options > GPU Memory).
If you are using the HA addon, you may need to use the full access variant and turn off `Protection mode` for hardware acceleration.
If you are using the HA Add-on, you may need to use the full access variant and turn off _Protection mode_ for hardware acceleration.
```yaml
# if you want to decode a h264 stream
@ -28,8 +28,8 @@ ffmpeg:
:::note
If running Frigate in Docker, you either need to run in privileged mode or
map the `/dev/video*` devices to Frigate. With Docker compose add:
If running Frigate through Docker, you either need to run in privileged mode or
map the `/dev/video*` devices to Frigate. With Docker Compose add:
```yaml
services:
@ -80,7 +80,7 @@ Or map in all the `/dev/video*` devices.
:::note
The default driver is `iHD`. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars).
The default driver is `iHD`. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `config.yml` for HA Add-on users](advanced.md#environment_vars).
See [The Intel Docs](https://www.intel.com/content/www/us/en/support/articles/000005505/processors.html) to figure out what generation your CPU is.
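As a sketch (adapt the service name and remaining settings to your own compose file), the driver override can be added to the container environment like this:

```yaml
services:
  frigate:
    environment:
      - LIBVA_DRIVER_NAME=i965
```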
@ -191,7 +191,7 @@ VAAPI supports automatic profile selection so it will work automatically with bo
:::note
You need to change the driver to `radeonsi` by adding the following environment variable `LIBVA_DRIVER_NAME=radeonsi` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars).
You need to change the driver to `radeonsi` by adding the following environment variable `LIBVA_DRIVER_NAME=radeonsi` to your docker-compose file or [in the `config.yml` for HA Add-on users](advanced.md#environment_vars).
:::
@ -312,7 +312,6 @@ docker run -d \
### Docker Compose - Jetson
```yaml
version: '2.4'
services:
frigate:
...

View File

@ -3,10 +3,12 @@ id: index
title: Frigate Configuration
---
For Home Assistant Addon installations, the config file needs to be in the root of your Home Assistant config directory (same location as `configuration.yaml`). It can be named `frigate.yaml` or `frigate.yml`, but if both files exist `frigate.yaml` will be preferred and `frigate.yml` will be ignored.
For Home Assistant Add-on installations, the config file should be at `/addon_configs/<addon_directory>/config.yml`, where `<addon_directory>` is specific to the variant of the Frigate Add-on you are running. See the list of directories [here](#accessing-add-on-config-dir).
For all other installation types, the config file should be mapped to `/config/config.yml` inside the container.
It can be named `config.yml` or `config.yaml`, but if both files exist `config.yml` will be preferred and `config.yaml` will be ignored.
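For example, with Docker Compose the mapping might look like this sketch (the host path is a placeholder):

```yaml
services:
  frigate:
    volumes:
      - /path/to/your/frigate/config:/config
```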
It is recommended to start with a minimal configuration and add to it as described in [this guide](../guides/getting_started.md) and use the built in configuration editor in Frigate's UI which supports validation.
```yaml
@ -23,9 +25,24 @@ cameras:
- detect
```
## VSCode Configuration Schema
## Accessing the Home Assistant Add-on configuration directory {#accessing-add-on-config-dir}
VSCode supports JSON schemas for automatically validating configuration files. You can enable this feature by adding `# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json` to the beginning of the configuration file. Replace `frigate_host` with the IP address or hostname of your Frigate server. If you're using both VSCode and Frigate as an add-on, you should use `ccab4aaf-frigate` instead. Make sure to expose the internal unauthenticated port `5000` when accessing the config from VSCode on another machine.
When running Frigate through the HA Add-on, the Frigate `/config` directory is mapped to `/addon_configs/<addon_directory>` in the host, where `<addon_directory>` is specific to the variant of the Frigate Add-on you are running.
| Add-on Variant | Configuration directory |
| -------------------------- | -------------------------------------------- |
| Frigate | `/addon_configs/ccab4aaf_frigate` |
| Frigate (Full Access) | `/addon_configs/ccab4aaf_frigate-fa` |
| Frigate Beta | `/addon_configs/ccab4aaf_frigate-beta` |
| Frigate Beta (Full Access) | `/addon_configs/ccab4aaf_frigate-fa-beta` |
**Whenever you see `/config` in the documentation, it refers to this directory.**
If for example you are running the standard Add-on variant and use the [VS Code Add-on](https://github.com/hassio-addons/addon-vscode) to browse your files, you can click _File_ > _Open folder..._ and navigate to `/addon_configs/ccab4aaf_frigate` to access the Frigate `/config` directory and edit the `config.yaml` file. You can also use the built-in file editor in the Frigate UI to edit the configuration file.
## VS Code Configuration Schema
VS Code supports JSON schemas for automatically validating configuration files. You can enable this feature by adding `# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json` to the beginning of the configuration file. Replace `frigate_host` with the IP address or hostname of your Frigate server. If you're using both VS Code and Frigate as an Add-on, you should use `ccab4aaf-frigate` instead. Make sure to expose the internal unauthenticated port `5000` when accessing the config from VS Code on another machine.
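For example, the first lines of a config file could look like this (a sketch; `frigate_host` and the mqtt host are placeholders):

```yaml
# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json
mqtt:
  host: 192.168.1.10
```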
## Environment Variable Substitution
@ -65,10 +82,10 @@ genai:
Here are some common starter configuration examples. Refer to the [reference config](./reference.md) for detailed information about all the config values.
### Raspberry Pi Home Assistant Addon with USB Coral
### Raspberry Pi Home Assistant Add-on with USB Coral
- Single camera with 720p, 5fps stream for detect
- MQTT connected to home assistant mosquitto addon
- MQTT connected to the Home Assistant Mosquitto Add-on
- Hardware acceleration for decoding video
- USB Coral detector
- Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not

View File

@ -3,16 +3,17 @@ id: license_plate_recognition
title: License Plate Recognition (LPR)
---
Frigate can recognize license plates on vehicles and automatically add the detected characters to the `recognized_license_plate` field or a known name as a `sub_label` to objects that are of type `car`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street.
Frigate can recognize license plates on vehicles and automatically add the detected characters to the `recognized_license_plate` field or a known name as a `sub_label` to tracked objects of type `car`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street.
LPR works best when the license plate is clearly visible to the camera. For moving vehicles, Frigate continuously refines the recognition process, keeping the most confident result. However, LPR does not run on stationary vehicles.
When a plate is recognized, the recognized name is:
- Added to the `car` tracked object as a `sub_label` (if known) or the `recognized_license_plate` field (if unknown)
- Viewable in the Review Item Details pane in Review and the Tracked Object Details pane in Explore.
- Added as a `sub_label` (if known) or the `recognized_license_plate` field (if unknown) to a tracked object.
- Viewable in the Review Item Details pane in Review (sub labels).
- Viewable in the Tracked Object Details pane in Explore (sub labels and recognized license plates).
- Filterable through the More Filters menu in Explore.
- Published via the `frigate/events` MQTT topic as a `sub_label` (known) or `recognized_license_plate` (unknown) for the tracked object.
- Published via the `frigate/events` MQTT topic as a `sub_label` (known) or `recognized_license_plate` (unknown) for the `car` tracked object.
## Model Requirements
@ -22,7 +23,7 @@ Users without a model that detects license plates can still run LPR. Frigate use
:::note
Frigate needs to first detect a `car` before it can recognize a license plate. If you're using a dedicated LPR camera or have a zoomed-in view, make sure the camera captures enough of the `car` for Frigate to detect it reliably.
In the default mode, Frigate's LPR needs to first detect a `car` before it can recognize a license plate. If you're using a dedicated LPR camera and have a zoomed-in view where a `car` will not be detected, you can still run LPR, but the configuration parameters will differ from the default mode. See the [Dedicated LPR Cameras](#dedicated-lpr-cameras) section below.
:::
@ -39,7 +40,17 @@ lpr:
enabled: True
```
Ensure that your camera is configured to detect objects of type `car`, and that a car is actually being detected by Frigate. Otherwise, LPR will not run.
Like other enrichments in Frigate, LPR **must be enabled globally** to use the feature. You can disable it for specific cameras at the camera level:
```yaml
cameras:
  driveway:
    ...
    lpr:
      enabled: False
```
For non-dedicated LPR cameras, ensure that your camera is configured to detect objects of type `car`, and that a car is actually being detected by Frigate. Otherwise, LPR will not run.
Like the other real-time processors in Frigate, license plate recognition runs on the camera stream defined by the `detect` role in your config. To ensure optimal performance, select a suitable resolution for this stream in your camera's firmware that fits your specific scene and requirements.
@ -51,9 +62,9 @@ Fine-tune the LPR feature using these optional parameters:
- **`detection_threshold`**: License plate object detection confidence score required before recognition runs.
- Default: `0.7`
- Note: This field only applies to the standalone license plate detection model; `min_score` should be used to filter for models that have license plate detection built in.
- **`min_area`**: Defines the minimum size (in pixels) a license plate must be before recognition runs.
- Default: `1000` pixels.
- Note: This field only applies to the standalone license plate detection model; `threshold` and `min_score` object filters should be used for models like Frigate+ that have license plate detection built in.
- **`min_area`**: Defines the minimum area (in pixels) a license plate must be before recognition runs.
- Default: `1000` pixels. Note: this is intentionally set very low as it is an _area_ measurement (length x width). For reference, 1000 pixels represents a ~32x32 pixel square in your camera image.
- Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant plates.
### Recognition
@ -76,12 +87,28 @@ Fine-tune the LPR feature using these optional parameters:
- For example, setting `match_distance: 1` allows a plate `ABCDE` to match `ABCBE` or `ABCD`.
- This parameter will _not_ operate on known plates that are defined as regular expressions. You should define the full string of your plate in `known_plates` in order to use `match_distance`.
### Image Enhancement
- **`enhancement`**: A value between **0 and 10** that adjusts the level of image enhancement applied to captured license plates before they are processed for recognition. This preprocessing step can sometimes improve accuracy but may also have the opposite effect.
- **Default:** `0` (no enhancement)
- Higher values increase contrast, sharpen details, and reduce noise, but excessive enhancement can blur or distort characters, actually making them much harder for Frigate to recognize.
- This setting is best adjusted **at the camera level** if running LPR on multiple cameras.
- If Frigate is already recognizing plates correctly, leave this setting at the default of `0`. However, if you're experiencing frequent character issues or incomplete plates and you can already easily read the plates yourself, try increasing the value gradually, starting at **5** and adjusting as needed. To preview how different enhancement levels affect your plates, use the `debug_save_plates` configuration option (see below).
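For instance, applying a moderate enhancement level on a single camera could look like this sketch (it assumes `enhancement` is set under the camera-level `lpr` section; the camera name is a placeholder):

```yaml
cameras:
  driveway:
    lpr:
      enhancement: 5
```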
### Debugging
- **`debug_save_plates`**: Set to `True` to save captured text on plates for debugging. These images are stored in `/media/frigate/clips/lpr`, organized into subdirectories by `<camera>/<event_id>`, and named based on the capture timestamp.
- These saved images are not full plates but rather the specific areas of text detected on the plates. It is normal for the text detection model to sometimes find multiple areas of text on the plate. Use them to analyze what text Frigate recognized and how image enhancement affects detection.
- **Note:** Frigate does **not** automatically delete these debug images. Once LPR is functioning correctly, you should disable this option and manually remove the saved files to free up storage.
## Configuration Examples
These configuration parameters are available at the global level of your config. The only optional parameters that should be set at the camera level are `enabled`, `min_area`, and `enhancement`.
```yaml
lpr:
enabled: True
min_area: 1500 # Ignore plates smaller than 1500 pixels
min_area: 1500 # Ignore plates with an area (length x width) smaller than 1500 pixels
min_plate_length: 4 # Only recognize plates with 4 or more characters
known_plates:
Wife's Car:
@ -98,7 +125,7 @@ lpr:
```yaml
lpr:
enabled: True
min_area: 4000 # Run recognition on larger plates only
min_area: 4000 # Run recognition on larger plates only (4000 pixels represents a 63x63 pixel square in your image)
recognition_threshold: 0.85
format: "^[A-Z]{2} [A-Z][0-9]{4}$" # Only recognize plates that are two letters, followed by a space, followed by a single letter and 4 numbers
match_distance: 1 # Allow one character variation in plate matching
@ -110,22 +137,171 @@ lpr:
- "MN D3163"
```
:::note
If you want to detect cars on cameras but don't want to use resources to run LPR on those cars, you should disable LPR for those specific cameras.
```yaml
cameras:
  side_yard:
    lpr:
      enabled: False
    ...
```
:::
## Dedicated LPR Cameras
Dedicated LPR cameras are single-purpose cameras with powerful optical zoom to capture license plates on distant vehicles, often with fine-tuned settings to capture plates at night.
Users can configure Frigate's LPR in two different ways depending on whether they are using a Frigate+ model:
### Using a Frigate+ Model
Users running a Frigate+ model (or any model that natively detects `license_plate`) can take advantage of `license_plate` detection. This allows license plates to be treated as standard objects in dedicated LPR mode, meaning that alerts, detections, snapshots, zones, and other Frigate features work as usual, and plates are detected efficiently through your configured object detector.
An example configuration for a dedicated LPR camera using a Frigate+ model:
```yaml
# LPR global configuration
lpr:
enabled: True
# Dedicated LPR camera configuration
cameras:
dedicated_lpr_camera:
type: "lpr" # required to use dedicated LPR camera mode
detect:
enabled: True
fps: 5 # increase to 10 if vehicles move quickly across your frame
min_initialized: 2
width: 1920
height: 1080
objects:
track:
- license_plate
filters:
license_plate:
threshold: 0.7
motion:
threshold: 30
contour_area: 60 # use an increased value to tune out small motion changes
improve_contrast: false
mask: 0.704,0.007,0.709,0.052,0.989,0.055,0.993,0.001 # ensure your camera's timestamp is masked
record:
enabled: True # disable recording if you only want snapshots
snapshots:
enabled: True
review:
detections:
labels:
- license_plate
```
With this setup:
- License plates are treated as normal objects in Frigate.
- Scores, alerts, detections, snapshots, zones, and object masks work as expected.
- Snapshots will have license plate bounding boxes on them.
- The `frigate/events` MQTT topic will publish tracked object updates.
- Debug view will display `license_plate` bounding boxes.
- If you are using a Frigate+ model and want to submit images from your dedicated LPR camera for model training and fine-tuning, annotate both the `car` and the `license_plate` in the snapshots on the Frigate+ website, even if the car is barely visible.
### Using the Secondary LPR Pipeline (Without Frigate+)
If you are not running a Frigate+ model, you can use Frigate's built-in secondary dedicated LPR pipeline. In this mode, Frigate bypasses the standard object detection pipeline and runs a local license plate detector model on the full frame whenever motion activity occurs.
An example configuration for a dedicated LPR camera using the secondary pipeline:
```yaml
# LPR global configuration
lpr:
enabled: True
detection_threshold: 0.7 # change if necessary
# Dedicated LPR camera configuration
cameras:
dedicated_lpr_camera:
type: "lpr" # required to use dedicated LPR camera mode
lpr:
enabled: True
enhancement: 3 # optional, enhance the image before trying to recognize characters
ffmpeg: ...
detect:
enabled: False # disable Frigate's standard object detection pipeline
fps: 5 # increase if necessary, though high values may slow down Frigate's enrichments pipeline and use considerable CPU
width: 1920
height: 1080
objects:
track: [] # required when not using a Frigate+ model for dedicated LPR mode
motion:
threshold: 30
contour_area: 60 # use an increased value here to tune out small motion changes
improve_contrast: false
mask: 0.704,0.007,0.709,0.052,0.989,0.055,0.993,0.001 # ensure your camera's timestamp is masked
record:
enabled: True # disable recording if you only want snapshots
review:
detections:
enabled: True
retain:
default: 7
```
With this setup:
- The standard object detection pipeline is bypassed. Any detected license plates on dedicated LPR cameras are treated similarly to manual events in Frigate. You must **not** specify `license_plate` as an object to track.
- The license plate detector runs on the full frame whenever motion is detected and processes frames according to your detect `fps` setting.
- Review items will always be classified as a `detection`.
- Snapshots will always be saved.
- Zones and object masks are **not** used.
- The `frigate/events` MQTT topic will **not** publish tracked object updates, though `frigate/reviews` will if recordings are enabled.
- License plate snapshots are saved at the highest-scoring moment and appear in Explore.
- Debug view will not show `license_plate` bounding boxes.
### Summary
| Feature | Native `license_plate` detecting Model (like Frigate+) | Secondary Pipeline (without native model or Frigate+) |
| ----------------------- | ------------------------------------------------------ | --------------------------------------------------------------- |
| License Plate Detection | Uses `license_plate` as a tracked object | Runs a dedicated LPR pipeline |
| FPS Setting | 5 (increase for fast-moving cars) | 5 (increase for fast-moving cars, but it may use much more CPU) |
| Object Detection | Standard Frigate+ detection applies | Bypasses standard object detection |
| Zones & Object Masks | Supported | Not supported |
| Debug View | May show `license_plate` bounding boxes | May **not** show `license_plate` bounding boxes |
| MQTT `frigate/events` | Publishes tracked object updates | Does **not** publish tracked object updates |
| Explore | Recognized plates available in More Filters | Recognized plates available in More Filters |
By selecting the appropriate configuration, users can optimize their dedicated LPR cameras based on whether they are using a Frigate+ model or the secondary LPR pipeline.
### Best practices for using Dedicated LPR camera mode
- Tune your motion detection and increase the `contour_area` until you see only larger motion boxes being created as cars pass through the frame (likely somewhere between 50-90 for a 1920x1080 detect stream). Increasing the `contour_area` filters out small areas of motion and will prevent excessive resource use from looking for license plates in frames that don't even have a car passing through them.
- Disable the `improve_contrast` motion setting, especially if you are running LPR at night and the frame is mostly dark. This will prevent small pixel changes and smaller areas of motion from triggering license plate detection.
- Ensure your camera's timestamp is covered with a motion mask so that it's not incorrectly detected as a license plate.
- For non-Frigate+ users, you may need to change your camera settings for a clearer image or decrease your global `recognition_threshold` config if your plates are not being accurately recognized at night.
- The secondary pipeline mode runs a local AI model on your CPU to detect plates. Increasing detect `fps` will increase CPU usage proportionally.
## FAQ
### Why isn't my license plate being detected and recognized?
Ensure that:
- Your camera has a clear, human-readable, well-lit view of the plate. If you can't read the plate, Frigate certainly won't be able to. This may require changing video size, quality, or frame rate settings on your camera, depending on your scene and how fast the vehicles are traveling.
- Your camera has a clear, human-readable, well-lit view of the plate. If you can't read the plate's characters, Frigate certainly won't be able to, even if the model is recognizing a `license_plate`. This may require changing video size, quality, or frame rate settings on your camera, depending on your scene and how fast the vehicles are traveling.
- The plate is large enough in the image (try adjusting `min_area`) or increasing the resolution of your camera's stream.
- A `car` is detected first, as LPR only runs on recognized vehicles.
- Your `enhancement` level (if you've changed it from the default of `0`) is not too high. Too much enhancement will run too much denoising and cause the plate characters to become blurry and unreadable.
If you are using a Frigate+ model or a custom model that detects license plates, ensure that `license_plate` is added to your list of objects to track.
If you are using the free model that ships with Frigate, you should _not_ add `license_plate` to the list of objects to track.
Recognized plates will show as object labels in the debug view and will appear in the "Recognized License Plates" select box in the More Filters popout in Explore.
If you are still having issues detecting plates, start with a basic configuration and see the debugging tips below.
### Can I run LPR without detecting `car` objects?
No, Frigate requires a `car` to be detected first before recognizing a license plate.
In normal LPR mode, Frigate requires a `car` to be detected first before recognizing a license plate. If you have a dedicated LPR camera, you can change the camera `type` to `"lpr"` to use the Dedicated LPR Camera algorithm. This comes with important caveats, though. See the [Dedicated LPR Cameras](#dedicated-lpr-cameras) section above.
### How can I improve detection accuracy?
@ -144,10 +320,19 @@ Use `match_distance` to allow small character mismatches. Alternatively, define
### How do I debug LPR issues?
- View MQTT messages for `frigate/events` to verify detected plates.
- Adjust `detection_threshold` and `recognition_threshold` settings.
- If you are using a Frigate+ model or a model that detects license plates, watch the debug view (Settings --> Debug) to ensure that `license_plate` is being detected with a `car`.
- Watch the debug view to see plates recognized in real-time. For non-dedicated LPR cameras, the `car` label will change to the recognized plate when LPR is enabled and working.
- Adjust `detection_threshold` and `recognition_threshold` settings per the suggestions [above](#advanced-configuration).
- Enable `debug_save_plates` to save images of detected text on plates to the clips directory (`/media/frigate/clips/lpr`).
- Enable debug logs for LPR by adding `frigate.data_processing.common.license_plate: debug` to your `logger` configuration. These logs are _very_ verbose, so only enable this when necessary.
```yaml
logger:
  default: info
  logs:
    frigate.data_processing.common.license_plate: debug
```
### Will LPR slow down my system?
LPR runs on the CPU, so performance impact depends on your hardware. Ensure you have at least 4GB RAM and a capable CPU for optimal results.
LPR runs on the CPU, so performance impact depends on your hardware. Ensure you have at least 4GB RAM and a capable CPU for optimal results. If you are running the Dedicated LPR Camera mode, resource usage will be higher compared to users who run a model that natively detects license plates. Tune your motion detection settings for your dedicated LPR camera so that the license plate detection model runs only when necessary.

View File

@ -104,9 +104,9 @@ cameras:
WebRTC works by creating a TCP or UDP connection on port `8555`. However, it requires additional configuration:
- For external access, over the internet, set up your router to forward port `8555` to port `8555` on the Frigate device, for both TCP and UDP.
- For internal/local access, unless you are running through the add-on, you will also need to set the WebRTC candidates list in the go2rtc config. For example, if `192.168.1.10` is the local IP of the device running Frigate:
- For internal/local access, unless you are running through the HA Add-on, you will also need to set the WebRTC candidates list in the go2rtc config. For example, if `192.168.1.10` is the local IP of the device running Frigate:
```yaml title="/config/frigate.yaml"
```yaml title="config.yml"
go2rtc:
streams:
test_cam: ...
@ -121,9 +121,9 @@ WebRTC works by creating a TCP or UDP connection on port `8555`. However, it req
:::tip
This extra configuration may not be required if Frigate has been installed as a Home Assistant add-on, as Frigate uses the Supervisor's API to generate a WebRTC candidate.
This extra configuration may not be required if Frigate has been installed as a Home Assistant Add-on, as Frigate uses the Supervisor's API to generate a WebRTC candidate.
However, it is recommended if issues occur to define the candidates manually. You should do this if the Frigate add-on fails to generate a valid candidate. If an error occurs you will see some warnings like the below in the add-on logs page during the initialization:
However, it is recommended if issues occur to define the candidates manually. You should do this if the Frigate Add-on fails to generate a valid candidate. If an error occurs you will see some warnings like the below in the Add-on logs page during the initialization:
```log
[WARN] Failed to get IP address from supervisor
@ -203,9 +203,11 @@ Note that disabling a camera through the config file (`enabled: False`) removes
Frigate intelligently selects the live streaming technology based on a number of factors (user-selected modes like two-way talk, camera settings, browser capabilities, available bandwidth) and prioritizes showing an actual up-to-date live view of your camera's stream as quickly as possible.
When you have go2rtc configured, Live view initially attempts to load and play back your stream with a clearer, fluent stream technology (MSE). An initial timeout, a low bandwidth condition that would cause buffering of the stream, or decoding errors in the stream will cause Frigate to switch to the stream defined by the `detect` role, using the jsmpeg format. This is what the UI labels as "low bandwidth mode". On Live dashboards, the mode will automatically reset when smart streaming is configured and activity stops. You can also try using the _Reset_ button to force a reload of your stream.
When you have go2rtc configured, Live view initially attempts to load and play back your stream with a clearer, fluent stream technology (MSE). An initial timeout, a low bandwidth condition that would cause buffering of the stream, or decoding errors in the stream will cause Frigate to switch to the stream defined by the `detect` role, using the jsmpeg format. This is what the UI labels as "low bandwidth mode". On Live dashboards, the mode will automatically reset when smart streaming is configured and activity stops. Continuous streaming mode does not have an automatic reset mechanism, but you can use the _Reset_ option to force a reload of your stream.
If you are still experiencing Frigate falling back to low bandwidth mode, you may need to adjust your camera's settings per the recommendations above or ensure you have enough bandwidth available.
If you are using continuous streaming or you are loading more than a few high resolution streams at once on the dashboard, your browser may struggle to begin playback of your streams before the timeout. Frigate always prioritizes showing a live stream as quickly as possible, even if it is a lower quality jsmpeg stream. You can use the "Reset" link/button to try loading your high resolution stream again.
If you are still experiencing Frigate falling back to low bandwidth mode, you may need to adjust your camera's settings per the [recommendations above](#camera_settings_recommendations).
3. **It doesn't seem like my cameras are streaming on the Live dashboard. Why?**
@ -221,6 +223,8 @@ Note that disabling a camera through the config file (`enabled: False`) removes
This static image is pulled from the stream defined in your config with the `detect` role. When activity is detected, images from the `detect` stream immediately begin updating at ~5 frames per second so you can see the activity until the live player is loaded and begins playing. This usually only takes a second or two. If the live player times out, buffers, or has streaming errors, the jsmpeg player is loaded and plays a video-only stream from the `detect` role. When activity ends, the players are destroyed and a static image is displayed until activity is detected again, and the process repeats.
Smart streaming depends on having your camera's motion `threshold` and `contour_area` config values dialed in. Use the Motion Tuner in Settings in the UI to tune these values in real-time.
This is Frigate's default and recommended setting because it results in a significant bandwidth savings, especially for high resolution cameras.
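As a rough illustration of the motion values mentioned above, they live in the per-camera config (the camera name and numbers here are placeholders; use the Motion Tuner to find values that suit your scene):

```yaml
cameras:
  front_door:
    motion:
      # higher threshold = a pixel must change more to count as motion
      threshold: 30
      # higher contour_area = larger contiguous areas of change are required
      contour_area: 10
```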
6. **I have unmuted some cameras on my dashboard, but I do not hear sound. Why?**

View File

@ -77,7 +77,7 @@ At this point if motion is working as desired there is no reason to continue wit
Once daytime motion detection is tuned, there is a chance that the settings will work well for motion detection during the night as well. If this is the case then the preferred settings can be written to the config file and left alone.
However, if the preferred day settings do not work well at night it is recommended to use HomeAssistant or some other solution to automate changing the settings. That way completely separate sets of motion settings can be used for optimal day and night motion detection.
However, if the preferred day settings do not work well at night it is recommended to use Home Assistant or some other solution to automate changing the settings. That way completely separate sets of motion settings can be used for optimal day and night motion detection.
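For example, a Home Assistant automation along these lines could switch to a separate night profile at sunset (topic names follow Frigate's MQTT API; the camera name and values are placeholders):

```yaml
automation:
  - alias: "Night motion settings for front_door"
    trigger:
      - platform: sun
        event: sunset
    action:
      # publish new motion values to Frigate over MQTT
      - service: mqtt.publish
        data:
          topic: frigate/front_door/motion_threshold/set
          payload: "40"
      - service: mqtt.publish
        data:
          topic: frigate/front_door/motion_contour_area/set
          payload: "15"
```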
## Tuning For Large Changes In Motion

View File

@ -129,8 +129,8 @@ detectors:
type: edgetpu
device: pci
```
---
---
## Hailo-8
@ -146,6 +146,7 @@ If both are provided, the detector will first check for the model at the given l
#### YOLO
Use this configuration for YOLO-based models. When no custom model path or URL is provided, the detector automatically downloads the default model based on the detected hardware:
- **Hailo-8 hardware:** Uses **YOLOv6n** (default: `yolov6n.hef`)
- **Hailo-8L hardware:** Uses **YOLOv6n** (default: `yolov6n.hef`)
@ -224,6 +225,7 @@ model:
# Alternatively, or as a fallback, provide a custom URL:
# path: https://custom-model-url.com/path/to/model.hef
```
For additional ready-to-use models, please visit: https://github.com/hailo-ai/hailo_model_zoo
Hailo8 supports all models in the Hailo Model Zoo that include HailoRT post-processing. You're welcome to choose any of these pre-configured models for your implementation.
@ -233,8 +235,6 @@ Hailo8 supports all models in the Hailo Model Zoo that include HailoRT post-proc
---
## OpenVINO Detector
The OpenVINO detector type runs an OpenVINO IR model on AMD and Intel CPUs, Intel GPUs and Intel VPU hardware. To configure an OpenVINO detector, set the `"type"` attribute to `"openvino"`.
@ -340,6 +340,63 @@ model:
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
#### RF-DETR
[RF-DETR](https://github.com/roboflow/rf-detr) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-rf-detr-model) for more information on downloading the RF-DETR model for use in Frigate.
:::warning
Due to the size and complexity of the RF-DETR model, it is only recommended to be run with discrete Arc Graphics Cards.
:::
After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration:
```yaml
detectors:
ov:
type: openvino
device: GPU
model:
model_type: rfdetr
width: 560
height: 560
input_tensor: nchw
input_dtype: float
path: /config/model_cache/rfdetr.onnx
```
#### D-FINE
[D-FINE](https://github.com/Peterande/D-FINE) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate.
:::warning
Currently, D-FINE models only run on OpenVINO in CPU mode; GPUs fail to compile the model.
:::
After placing the downloaded onnx model in your config/model_cache folder, you can use the following configuration:
```yaml
detectors:
ov:
type: openvino
device: CPU
model:
model_type: dfine
width: 640
height: 640
input_tensor: nchw
input_dtype: float
path: /config/model_cache/dfine_s_obj2coco.onnx
labelmap_path: /labelmap/coco-80.txt
```
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
## NVidia TensorRT Detector
Nvidia GPUs may be used for object detection using the TensorRT libraries. Due to the size of the additional libraries, this detector is only provided in images with the `-tensorrt` tag suffix, e.g. `ghcr.io/blakeblackshear/frigate:stable-tensorrt`. This detector is designed to work with Yolo models for object detection.
@ -462,7 +519,7 @@ $ docker run --device=/dev/kfd --device=/dev/dri \
...
```
When using docker compose:
When using Docker Compose:
```yaml
services:
@ -494,7 +551,7 @@ $ docker run -e HSA_OVERRIDE_GFX_VERSION=9.0.0 \
...
```
When using docker compose:
When using Docker Compose:
```yaml
services:
@ -529,6 +586,7 @@ $ docker exec -it frigate /bin/bash -c '(unset HSA_OVERRIDE_GFX_VERSION && /opt/
### Supported Models
See [ONNX supported models](#supported-models) for supported models, there are some caveats:
- D-FINE models are not supported
- YOLO-NAS models are known to not run well on integrated GPUs
@ -622,17 +680,31 @@ model:
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
#### RF-DETR
[RF-DETR](https://github.com/roboflow/rf-detr) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-rf-detr-model) for more information on downloading the RF-DETR model for use in Frigate.
After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration:
```yaml
detectors:
onnx:
type: onnx
model:
model_type: rfdetr
width: 560
height: 560
input_tensor: nchw
input_dtype: float
path: /config/model_cache/rfdetr.onnx
```
#### D-FINE
[D-FINE](https://github.com/Peterande/D-FINE) is the [current state of the art](https://paperswithcode.com/sota/real-time-object-detection-on-coco?p=d-fine-redefine-regression-task-in-detrs-as) at the time of writing. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate.
[D-FINE](https://github.com/Peterande/D-FINE) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate.
:::warning
D-FINE is currently not supported on OpenVINO
:::
After placing the downloaded onnx model in your config/model_cache folder, you can use the following configuration:
After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration:
```yaml
detectors:
@ -854,6 +926,26 @@ Make sure you change the batch size to 1 before exporting.
:::
### Download RF-DETR Model
To export as ONNX:
1. `pip3 install rfdetr`
2. `python3`
3. `from rfdetr import RFDETRBase`
4. `x = RFDETRBase()`
5. `x.export()`
#### Additional Configuration
The input tensor resolution can be customized:
```python
from rfdetr import RFDETRBase
x = RFDETRBase(resolution=560) # resolution must be a multiple of 56
x.export()
```
### Downloading YOLO-NAS Model
You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb).

View File

@ -146,7 +146,7 @@ The above configuration example can be added globally or on a per camera basis.
## Can I have "continuous" recordings, but only at certain times?
Using Frigate UI, HomeAssistant, or MQTT, cameras can be automated to only record in certain situations or at certain times.
Using Frigate UI, Home Assistant, or MQTT, cameras can be automated to only record in certain situations or at certain times.
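For instance, a Home Assistant automation could publish to Frigate's MQTT API to enable recordings for a camera on a schedule (the camera name and time are placeholders):

```yaml
automation:
  - alias: "Record back_yard overnight only"
    trigger:
      - platform: time
        at: "22:00:00"
    action:
      # turn recordings on for this camera; a matching morning automation would publish "OFF"
      - service: mqtt.publish
        data:
          topic: frigate/back_yard/recordings/set
          payload: "ON"
```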
## How do I export recordings?

View File

@ -125,7 +125,7 @@ auth:
# NOTE: The default values are for the EdgeTPU detector.
# Other detectors will require the model config to be set.
model:
# Required: path to the model (default: automatic based on detector)
# Required: path to the model. Frigate+ models use plus://<model_id> (default: automatic based on detector)
path: /edgetpu_model.tflite
# Required: path to the labelmap (default: shown below)
labelmap_path: /labelmap.txt
@ -543,14 +543,26 @@ semantic_search:
model_size: "small"
# Optional: Configuration for face recognition capability
# NOTE: enabled and min_area can be overridden at the camera level
face_recognition:
# Optional: Enable semantic search (default: shown below)
enabled: False
# Optional: Set the model size used for embeddings. (default: shown below)
# NOTE: small model runs on CPU and large model runs on GPU
model_size: "small"
# Optional: Minimum face distance score required to mark as a potential match (default: shown below)
unknown_score: 0.8
# Optional: Minimum face detection score required to detect a face (default: shown below)
# NOTE: This only applies when not running a Frigate+ model
detection_threshold: 0.7
# Optional: Minimum face distance score required to be considered a match (default: shown below)
recognition_threshold: 0.9
# Optional: Min area of detected face box to consider running face recognition (default: shown below)
min_area: 500
# Optional: Number of images of recognized faces to save for training (default: shown below)
save_attempts: 100
# Optional: Apply a blur quality filter to adjust confidence based on the blur level of the image (default: shown below)
blur_confidence_filter: True
# Optional: Configuration for license plate recognition capability
# NOTE: enabled, min_area, and enhancement can be overridden at the camera level
lpr:
# Optional: Enable license plate recognition (default: shown below)
enabled: False
@ -568,6 +580,11 @@ lpr:
match_distance: 1
# Optional: Known plates to track (strings or regular expressions) (default: shown below)
known_plates: {}
# Optional: Enhance the detected plate image with contrast adjustment and denoising (default: shown below)
# A value between 0 and 10. Higher values are not always better and may perform worse than lower values.
enhancement: 0
# Optional: Save plate images to /media/frigate/clips/lpr for debugging purposes (default: shown below)
debug_save_plates: False
# Optional: Configuration for AI generated tracked object descriptions
# WARNING: Depending on the provider, this will send thumbnails over the internet
@ -645,6 +662,9 @@ cameras:
# If disabled: config is used but no live stream and no capture etc.
# Events/Recordings are still viewable.
enabled: True
# Optional: camera type used for some Frigate features (default: shown below)
# Options are "generic" and "lpr"
type: "generic"
# Required: ffmpeg settings for the camera
ffmpeg:
# Required: A list of input streams for the camera. See documentation for more information.
@ -875,7 +895,7 @@ telemetry:
# NOTE: The container must either be privileged or have cap_net_admin, cap_net_raw capabilities enabled.
network_bandwidth: False
# Optional: Enable the latest version outbound check (default: shown below)
# NOTE: If you use the HomeAssistant integration, disabling this will prevent it from reporting new versions
# NOTE: If you use the Home Assistant integration, disabling this will prevent it from reporting new versions
version_check: True
# Optional: Camera groups (default: no groups are setup)

View File

@ -17,15 +17,15 @@ From here, follow the guides for:
- [Web Interface](#web-interface)
- [Documentation](#documentation)
### Frigate Home Assistant Addon
### Frigate Home Assistant Add-on
This repository holds the Home Assistant Addon, for use with Home Assistant OS and compatible installations. It is the piece that allows you to run Frigate from your Home Assistant Supervisor tab.
This repository holds the Home Assistant Add-on, for use with Home Assistant OS and compatible installations. It is the piece that allows you to run Frigate from your Home Assistant Supervisor tab.
Fork [blakeblackshear/frigate-hass-addons](https://github.com/blakeblackshear/frigate-hass-addons) to your own Github profile, then clone the forked repo to your local machine.
### Frigate Home Assistant Integration
This repository holds the custom integration that allows your Home Assistant installation to automatically create entities for your Frigate instance, whether you run that with the [addon](#frigate-home-assistant-addon) or in a separate Docker instance.
This repository holds the custom integration that allows your Home Assistant installation to automatically create entities for your Frigate instance, whether you are running Frigate as a standalone Docker container or as a [Home Assistant Add-on](#frigate-home-assistant-add-on).
Fork [blakeblackshear/frigate-hass-integration](https://github.com/blakeblackshear/frigate-hass-integration) to your own GitHub profile, then clone the forked repo to your local machine.
@ -77,14 +77,15 @@ Create and place these files in a `debug` folder in the root of the repo. This i
#### 4. Run Frigate from the command line
VSCode will start the docker compose file for you and open a terminal window connected to `frigate-dev`.
VS Code will start the Docker Compose file for you and open a terminal window connected to `frigate-dev`.
- Depending on what hardware you're developing on, you may need to amend `docker-compose.yml` in the project root to pass through a USB Coral or GPU for hardware acceleration.
- Run `python3 -m frigate` to start the backend.
- In a separate terminal window inside VS Code, change into the `web` directory and run `npm install && npm run dev` to start the frontend.
#### 5. Teardown
After closing VSCode, you may still have containers running. To close everything down, just run `docker-compose down -v` to cleanup all containers.
After closing VS Code, you may still have containers running. To close everything down, just run `docker-compose down -v` to clean up all containers.
### Testing

View File

@ -107,23 +107,19 @@ More information is available [in the detector docs](/configuration/object_detec
Inference speeds vary greatly depending on the CPU or GPU used, some known examples of GPU inference times are below:
| Name | MobileNetV2 Inference Time | YOLO-NAS Inference Time | Notes |
| -------------------- | -------------------------- | ------------------------- | -------------------------------------- |
| Intel Celeron J4105 | ~ 25 ms | | Can only run one detector instance |
| Intel Celeron N3060 | 130 - 150 ms | | Can only run one detector instance |
| Intel Celeron N3205U | ~ 120 ms | | Can only run one detector instance |
| Intel Celeron N4020 | 50 - 200 ms | | Inference speed depends on other loads |
| Intel i3 6100T | 15 - 35 ms | | Can only run one detector instance |
| Intel i3 8100 | ~ 15 ms | | |
| Intel i5 4590 | ~ 20 ms | | |
| Intel i5 6500 | ~ 15 ms | | |
| Intel i5 7200u | 15 - 25 ms | | |
| Intel i5 7500 | ~ 15 ms | | |
| Intel i5 1135G7 | 10 - 15 ms | | |
| Intel i3 12000 | | 320: ~ 19 ms 640: ~ 54 ms | |
| Intel i5 12600K | ~ 15 ms | 320: ~ 20 ms 640: ~ 46 ms | |
| Intel Arc A380 | ~ 6 ms | 320: ~ 10 ms | |
| Intel Arc A750 | ~ 4 ms | 320: ~ 8 ms | |
| Name | MobileNetV2 Inference Time | YOLO-NAS Inference Time | RF-DETR Inference Time | Notes |
| -------------------- | -------------------------- | ------------------------- | ------------------------- | -------------------------------------- |
| Intel i3 6100T | 15 - 35 ms | | | Can only run one detector instance |
| Intel i5 6500 | ~ 15 ms | | | |
| Intel i5 7200u | 15 - 25 ms | | | |
| Intel i5 7500 | ~ 15 ms | | | |
| Intel i3 8100 | ~ 15 ms | | | |
| Intel i5 1135G7 | 10 - 15 ms | | | |
| Intel i3 12000 | | 320: ~ 19 ms 640: ~ 54 ms | | |
| Intel i5 12600K | ~ 15 ms | 320: ~ 20 ms 640: ~ 46 ms | | |
| Intel i7 12650H | ~ 15 ms | 320: ~ 20 ms 640: ~ 42 ms | 336: 50 ms | |
| Intel Arc A380 | ~ 6 ms | 320: ~ 10 ms | | |
| Intel Arc A750 | ~ 4 ms | 320: ~ 8 ms | | |
### TensorRT - Nvidia GPU
@ -132,15 +128,16 @@ The TensortRT detector is able to run on x86 hosts that have an Nvidia GPU which
Inference speeds will vary greatly depending on the GPU and the model used.
`tiny` variants are faster than the equivalent non-tiny model, some known examples are below:
| Name | YoloV7 Inference Time | YOLO-NAS Inference Time |
| --------------- | --------------------- | ------------------------- |
| GTX 1060 6GB | ~ 7 ms | |
| GTX 1070 | ~ 6 ms | |
| GTX 1660 SUPER | ~ 4 ms | |
| RTX 3050 | 5 - 7 ms | 320: ~ 10 ms 640: ~ 16 ms |
| RTX 3070 Mobile | ~ 5 ms | |
| Quadro P400 2GB | 20 - 25 ms | |
| Quadro P2000 | ~ 12 ms | |
| Name | YoloV7 Inference Time | YOLO-NAS Inference Time | RF-DETR Inference Time |
| --------------- | --------------------- | ------------------------- | ------------------------- |
| GTX 1060 6GB | ~ 7 ms | | |
| GTX 1070 | ~ 6 ms | | |
| GTX 1660 SUPER | ~ 4 ms | | |
| RTX 3050 | 5 - 7 ms | 320: ~ 10 ms 640: ~ 16 ms | 336: ~ 16 ms 560: ~ 40 ms |
| RTX 3070 Mobile | ~ 5 ms | | |
| RTX 3070 | 4 - 6 ms | 320: ~ 6 ms 640: ~ 12 ms | 336: ~ 14 ms 560: ~ 36 ms |
| Quadro P400 2GB | 20 - 25 ms | | |
| Quadro P2000 | ~ 12 ms | | |
### AMD GPUs

View File

@ -3,11 +3,11 @@ id: installation
title: Installation
---
Frigate is a Docker container that can be run on any Docker host including as a [HassOS Addon](https://www.home-assistant.io/addons/). Note that a Home Assistant Addon is **not** the same thing as the integration. The [integration](/integrations/home-assistant) is required to integrate Frigate into Home Assistant.
Frigate is a Docker container that can be run on any Docker host including as a [Home Assistant Add-on](https://www.home-assistant.io/addons/). Note that the Home Assistant Add-on is **not** the same thing as the integration. The [integration](/integrations/home-assistant) is required to integrate Frigate into Home Assistant, whether you are running Frigate as a standalone Docker container or as a Home Assistant Add-on.
:::tip
If you already have Frigate installed as a Home Assistant addon, check out the [getting started guide](../guides/getting_started#configuring-frigate) to configure Frigate.
If you already have Frigate installed as a Home Assistant Add-on, check out the [getting started guide](../guides/getting_started#configuring-frigate) to configure Frigate.
:::
@ -45,7 +45,7 @@ The following ports are used by Frigate and can be mapped via docker as required
| `8554` | RTSP restreaming. By default, these streams are unauthenticated. Authentication can be configured in go2rtc section of config. |
| `8555` | WebRTC connections for low latency live views. |
#### Common docker compose storage configurations
#### Common Docker Compose storage configurations
Writing to a local disk or external USB drive:
@ -73,7 +73,7 @@ Users of the Snapcraft build of Docker cannot use storage locations outside your
Frigate utilizes shared memory to store frames during processing. The default `shm-size` provided by Docker is **64MB**.
The default shm size of **128MB** is fine for setups with **2 cameras** detecting at **720p**. If Frigate is exiting with "Bus error" messages, it is likely because you have too many high resolution cameras and you need to specify a higher shm size, using [`--shm-size`](https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources) (or [`service.shm_size`](https://docs.docker.com/compose/compose-file/compose-file-v2/#shm_size) in docker-compose).
The default shm size of **128MB** is fine for setups with **2 cameras** detecting at **720p**. If Frigate is exiting with "Bus error" messages, it is likely because you have too many high resolution cameras and you need to specify a higher shm size, using [`--shm-size`](https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources) (or [`service.shm_size`](https://docs.docker.com/compose/compose-file/compose-file-v2/#shm_size) in Docker Compose).
The Frigate container also stores logs in shm, which can take up to **40MB**, so make sure to take this into account in your math as well.
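For example, in Docker Compose the shm size can be raised like this (the 256mb figure is only an illustration; calculate the value from your own camera count and resolutions):

```yaml
services:
  frigate:
    ...
    # raise from the default if Frigate exits with "Bus error" messages
    shm_size: "256mb"
```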
@ -184,10 +184,9 @@ Next, you should configure [hardware object detection](/configuration/object_det
## Docker
Running in Docker with compose is the recommended install method.
Running with Docker Compose is the recommended install method.
```yaml
version: "3.9"
services:
frigate:
container_name: frigate
@ -219,7 +218,7 @@ services:
FRIGATE_RTSP_PASSWORD: "password"
```
If you can't use docker compose, you can run the container with something similar to this:
If you can't use Docker Compose, you can run the container with something similar to this:
```bash
docker run -d \
@ -243,25 +242,23 @@ docker run -d \
The official docker image tags for the current stable version are:
- `stable` - Standard Frigate build for amd64 & RPi Optimized Frigate build for arm64
- `stable` - Standard Frigate build for amd64 & RPi Optimized Frigate build for arm64. This build includes support for Hailo devices as well.
- `stable-standard-arm64` - Standard Frigate build for arm64
- `stable-tensorrt` - Frigate build specific for amd64 devices running an nvidia GPU
- `stable-rocm` - Frigate build for [AMD GPUs](../configuration/object_detectors.md#amdrocm-gpu-detector)
The community supported docker image tags for the current stable version are:
- `stable-tensorrt-jp5` - Frigate build optimized for nvidia Jetson devices running Jetpack 5
- `stable-tensorrt-jp6` - Frigate build optimized for nvidia Jetson devices running Jetpack 6
- `stable-rk` - Frigate build for SBCs with Rockchip SoC
- `stable-rocm` - Frigate build for [AMD GPUs](../configuration/object_detectors.md#amdrocm-gpu-detector)
- `stable-h8l` - Frigate build for the Hailo-8L M.2 PICe Raspberry Pi 5 hat
## Home Assistant Addon
## Home Assistant Add-on
:::warning
As of HomeAssistant OS 10.2 and Core 2023.6 defining separate network storage for media is supported.
As of Home Assistant Operating System 10.2 and Home Assistant 2023.6 defining separate network storage for media is supported.
There are important limitations in Home Assistant Operating System to be aware of:
There are important limitations in HA OS to be aware of:
- Separate local storage for media is not yet supported by Home Assistant
- AMD GPUs are not supported because HA OS does not include the mesa driver.
@ -275,24 +272,27 @@ See [the network storage guide](/guides/ha_network_storage.md) for instructions
:::
HassOS users can install via the addon repository.
Home Assistant OS users can install via the Add-on repository.
1. Navigate to Supervisor > Add-on Store > Repositories
2. Add https://github.com/blakeblackshear/frigate-hass-addons
3. Install your desired Frigate NVR Addon and navigate to it's page
1. In Home Assistant, navigate to _Settings_ > _Add-ons_ > _Add-on Store_ > _Repositories_
2. Add `https://github.com/blakeblackshear/frigate-hass-addons`
3. Install the desired variant of the Frigate Add-on (see below)
4. Setup your network configuration in the `Configuration` tab
5. (not for proxy addon) Create the file `frigate.yaml` in your `config` directory with your detailed Frigate configuration
6. Start the addon container
7. (not for proxy addon) If you are using hardware acceleration for ffmpeg, you may need to disable "Protection mode"
5. Start the Add-on
6. Use the _Open Web UI_ button to access the Frigate UI, then click in the _cog icon_ > _Configuration editor_ and configure Frigate to your liking
There are several versions of the addon available:
There are several variants of the Add-on available:
| Addon Version | Description |
| ------------------------------ | ---------------------------------------------------------- |
| Frigate NVR | Current release with protection mode on |
| Frigate NVR (Full Access) | Current release with the option to disable protection mode |
| Frigate NVR Beta | Beta release with protection mode on |
| Frigate NVR Beta (Full Access) | Beta release with the option to disable protection mode |
| Add-on Variant | Description |
| -------------------------- | ---------------------------------------------------------- |
| Frigate | Current release with protection mode on |
| Frigate (Full Access) | Current release with the option to disable protection mode |
| Frigate Beta | Beta release with protection mode on |
| Frigate Beta (Full Access) | Beta release with the option to disable protection mode |
If you are using hardware acceleration for ffmpeg, you **may** need to use the _Full Access_ variant of the Add-on. This is because the Frigate Add-on runs in a container with limited access to the host system. The _Full Access_ variant allows you to disable _Protection mode_ and give Frigate full access to the host system.
You can also edit the Frigate configuration file through the [VS Code Add-on](https://github.com/hassio-addons/addon-vscode) or similar. In that case, the configuration file will be at `/addon_configs/<addon_directory>/config.yml`, where `<addon_directory>` is specific to the variant of the Frigate Add-on you are running. See the list of directories [here](../configuration/index.md#accessing-add-on-config-dir).
## Kubernetes

View File

@ -115,3 +115,7 @@ section.
1. If the stream you added to go2rtc is also used by Frigate for the `record` or `detect` role, you can migrate your config to pull from the RTSP restream to reduce the number of connections to your camera as shown [here](/configuration/restream#reduce-connections-to-camera).
2. You may also prefer to [setup WebRTC](/configuration/live#webrtc-extra-configuration) for slightly lower latency than MSE. Note that WebRTC only supports h264 and specific audio formats and may require opening ports on your router.
## Important considerations
If you are configuring go2rtc to publish HomeKit camera streams, the configuration is written to the `/dev/shm/go2rtc.yaml` file inside the container when the camera is paired. These changes must be manually copied across to the `go2rtc` section of your Frigate configuration in order to persist through restarts.

View File

@ -9,7 +9,7 @@ title: Getting started
If you already have an environment with Linux and Docker installed, you can continue to [Installing Frigate](#installing-frigate) below.
If you already have Frigate installed in Docker or as a Home Assistant addon, you can continue to [Configuring Frigate](#configuring-frigate) below.
If you already have Frigate installed through Docker or through a Home Assistant Add-on, you can continue to [Configuring Frigate](#configuring-frigate) below.
:::
@ -81,7 +81,7 @@ Now you have a minimal Debian server that requires very little maintenance.
## Installing Frigate
This section shows how to create a minimal directory structure for a Docker installation on Debian. If you have installed Frigate as a Home Assistant addon or another way, you can continue to [Configuring Frigate](#configuring-frigate).
This section shows how to create a minimal directory structure for a Docker installation on Debian. If you have installed Frigate as a Home Assistant Add-on or another way, you can continue to [Configuring Frigate](#configuring-frigate).
### Setup directories
@ -110,7 +110,6 @@ This `docker-compose.yml` file is just a starter for amd64 devices. You will nee
`docker-compose.yml`
```yaml
version: "3.9"
services:
frigate:
container_name: frigate
@ -170,7 +169,6 @@ Here is an example configuration with hardware acceleration configured to work w
`docker-compose.yml` (after modifying, you will need to run `docker compose up -d` to apply changes)
```yaml
version: "3.9"
services:
frigate:
...
@ -199,7 +197,6 @@ By default, Frigate will use a single CPU detector. If you have a USB Coral, you
`docker-compose.yml` (after modifying, you will need to run `docker compose up -d` to apply changes)
```yaml
version: "3.9"
services:
frigate:
...

View File

@ -3,24 +3,18 @@ id: ha_network_storage
title: Home Assistant network storage
---
As of Home Assistant Core 2023.6, Network Mounted Storage is supported for addons.
As of Home Assistant 2023.6, Network Mounted Storage is supported for Add-ons.
## Setting Up Remote Storage For Frigate
### Prerequisites
- HA Core 2023.6 or newer is installed
- Running HA OS 10.2 or newer OR Running Supervised with latest os-agent installed (this is required for supervised install)
- Home Assistant 2023.6 or newer is installed
- Running Home Assistant Operating System 10.2 or newer OR Running Supervised with latest os-agent installed (this is required for supervised install)
### Initial Setup
1. Stop the Frigate addon
2. Update your [config](configuration/index.md) so the DB is stored in the /config directory by adding:
```yaml
database:
path: /config/frigate.db
```
1. Stop the Frigate Add-on
### Move current data
@ -43,4 +37,4 @@ Keeping the current data is optional, but the data will need to be moved regardl
4. Fill out the additional required info for your particular NAS
5. Connect
6. Move files from `/media/frigate_tmp` to `/media/frigate` if they were kept in previous step
7. Start the Frigate addon
7. Start the Frigate Add-on

View File

@ -51,7 +51,7 @@ When configuring the integration, you will be asked for the `URL` of your Frigat
### Docker Compose Examples
If you are running Home Assistant Core and Frigate with Docker Compose on the same device, here are some examples.
If you are running Home Assistant and Frigate with Docker Compose on the same device, here are some examples.
#### Home Assistant running with host networking
@ -60,7 +60,6 @@ It is not recommended to run Frigate in host networking mode. In this example, y
```yaml
services:
homeassistant:
container_name: hass
image: ghcr.io/home-assistant/home-assistant:stable
network_mode: host
...
@ -80,7 +79,6 @@ In this example, it is recommended to connect to the authenticated port, for exa
```yaml
services:
homeassistant:
container_name: hass
image: ghcr.io/home-assistant/home-assistant:stable
# network_mode: host
...
@ -93,17 +91,16 @@ services:
...
```
### HassOS Addon
### Home Assistant Add-on
If you are using HassOS with the addon, the URL should be one of the following depending on which addon version you are using. Note that if you are using the Proxy Addon, you do NOT point the integration at the proxy URL. Just enter the URL used to access Frigate directly from your network.
If you are using the Home Assistant Add-on, the URL should be one of the following depending on which Add-on variant you are using. Note that if you are using the Proxy Add-on, you should NOT point the integration at the proxy URL. Just enter the same URL used to access Frigate directly from your network.
| Addon Version | URL |
| ------------------------------ | ----------------------------------------- |
| Frigate NVR | `http://ccab4aaf-frigate:5000` |
| Frigate NVR (Full Access) | `http://ccab4aaf-frigate-fa:5000` |
| Frigate NVR Beta | `http://ccab4aaf-frigate-beta:5000` |
| Frigate NVR Beta (Full Access) | `http://ccab4aaf-frigate-fa-beta:5000` |
| Frigate NVR HailoRT Beta | `http://ccab4aaf-frigate-hailo-beta:5000` |
| Add-on Variant | URL |
| -------------------------- | ----------------------------------------- |
| Frigate | `http://ccab4aaf-frigate:5000` |
| Frigate (Full Access) | `http://ccab4aaf-frigate-fa:5000` |
| Frigate Beta | `http://ccab4aaf-frigate-beta:5000` |
| Frigate Beta (Full Access) | `http://ccab4aaf-frigate-fa-beta:5000` |
### Frigate running on a separate machine

View File

@ -305,6 +305,10 @@ Topic to adjust motion contour area for a camera. Expected value is an integer.
Topic with current motion contour area for a camera. Published value is an integer.
### `frigate/<camera_name>/review_status`
Topic with current activity status of the camera. Possible values are `NONE`, `DETECTION`, or `ALERT`.
### `frigate/<camera_name>/ptz`
Topic to send PTZ commands to camera.
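As an illustration, a Home Assistant service call like the following could recall a preset over this topic (assumes an ONVIF-capable camera named `back_yard` with a preset called `driveway` configured in Frigate):

```yaml
service: mqtt.publish
data:
  topic: frigate/back_yard/ptz
  # preset_<name> recalls a configured preset
  payload: "preset_driveway"
```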

View File

@ -19,11 +19,11 @@ Once logged in, you can generate an API key for Frigate in Settings.
### Set your API key
In Frigate, you can use an environment variable or a docker secret named `PLUS_API_KEY` to enable the `Frigate+` buttons on the Explore page. Home Assistant Addon users can set it under Settings > Addons > Frigate NVR > Configuration > Options (be sure to toggle the "Show unused optional configuration options" switch).
In Frigate, you can use an environment variable or a docker secret named `PLUS_API_KEY` to enable the `Frigate+` buttons on the Explore page. Home Assistant Add-on users can set it under Settings > Add-ons > Frigate > Configuration > Options (be sure to toggle the "Show unused optional configuration options" switch).
:::warning
You cannot use the `environment_vars` section of your Frigate configuration file to set this environment variable. It must be defined as an environment variable in the docker config or HA addon config.
You cannot use the `environment_vars` section of your Frigate configuration file to set this environment variable. It must be defined as an environment variable in the docker config or Home Assistant Add-on config.
:::
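For a standalone Docker install, one way to pass the key is via the Compose environment (the host variable reference here is illustrative; a Docker secret works as well):

```yaml
services:
  frigate:
    ...
    environment:
      # expects PLUS_API_KEY to be exported on the host or defined in an .env file
      PLUS_API_KEY: "${PLUS_API_KEY}"
```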
@ -51,6 +51,8 @@ You can view all of your submitted images at [https://plus.frigate.video](https:
Once you have [requested your first model](../plus/first_model.md) and gotten your own model ID, it can be used with a special model path. No other information needs to be configured for Frigate+ models because it fetches the remaining config from Frigate+ automatically.
You can either choose the new model from the Frigate+ pane in the Settings page of the Frigate UI, or manually set the model at the root level in your config:
```yaml
model:
path: plus://<your_model_id>

View File

@ -21,7 +21,7 @@ This is a fork (with fixed errors and new features) of [original Double Take](ht
## [Frigate Notify](https://github.com/0x2142/frigate-notify)
[Frigate Notify](https://github.com/0x2142/frigate-notify) is a simple app designed to send notifications from Frigate NVR to your favorite platforms. Intended to be used with standalone Frigate installations - Home Assistant not required, MQTT is optional but recommended.
[Frigate Notify](https://github.com/0x2142/frigate-notify) is a simple app designed to send notifications from Frigate to your favorite platforms. Intended to be used with standalone Frigate installations - Home Assistant not required, MQTT is optional but recommended.
## [Frigate telegram](https://github.com/OldTyT/frigate-telegram)

View File

@ -22,3 +22,13 @@ Yes. Models and metadata are stored in the `model_cache` directory within the co
### Can I keep using my Frigate+ models even if I do not renew my subscription?
Yes. Subscriptions to Frigate+ provide access to the infrastructure used to train the models. Models trained with your subscription are yours to keep and use forever. However, do note that the terms and conditions prohibit you from sharing, reselling, or creating derivative products from the models.
### Why can't I submit images to Frigate+?
If you've configured your API key and the Frigate+ Settings page in the UI shows that the key is active, you need to ensure that you've enabled both snapshots and `clean_copy` snapshots for the cameras you'd like to submit images for. Note that `clean_copy` is enabled by default when snapshots are enabled.
```yaml
snapshots:
enabled: true
clean_copy: true
```

View File

@ -3,7 +3,7 @@ id: index
title: Models
---
<a href="https://frigate.video/plus" target="_blank" rel="nofollow">Frigate+</a> offers models trained on images submitted by Frigate+ users from their security cameras and is specifically designed for the way Frigate NVR analyzes video footage. These models offer higher accuracy with less resources. The images you upload are used to fine tune a baseline model trained from images uploaded by all Frigate+ users. This fine tuning process results in a model that is optimized for accuracy in your specific conditions.
<a href="https://frigate.video/plus" target="_blank" rel="nofollow">Frigate+</a> offers models trained on images submitted by Frigate+ users from their security cameras and is specifically designed for the way Frigate analyzes video footage. These models offer higher accuracy with less resources. The images you upload are used to fine tune a baseline model trained from images uploaded by all Frigate+ users. This fine tuning process results in a model that is optimized for accuracy in your specific conditions.
:::info

View File

@ -32,7 +32,7 @@ The USB coral can draw up to 900mA and this can be too much for some on-device U
The USB coral has different IDs when it is uninitialized and initialized.
- When running Frigate in a VM, Proxmox lxc, etc. you must ensure both device IDs are mapped.
- When running HA OS you may need to run the Full Access version of the Frigate addon with the `Protected Mode` switch disabled so that the coral can be accessed.
- When running through the Home Assistant OS you may need to run the Full Access variant of the Frigate Add-on with the _Protection mode_ switch disabled so that the coral can be accessed.
### Synology 716+II running DSM 7.2.1-69057 Update 5

View File

@ -47,10 +47,9 @@ On linux, some helpful tools/commands in diagnosing would be:
On modern linux kernels, the system will utilize some swap if enabled. Setting vm.swappiness=1 no longer means that the kernel will only swap in order to avoid OOM. To prevent any swapping inside a container, set allocations memory and memory+swap to be the same and disable swapping by setting the following docker/podman run parameters:
**Compose example**
**Docker Compose example**
```yaml
version: "3.9"
services:
frigate:
...

View File

@ -33,11 +33,12 @@ const sidebars: SidebarsConfig = {
"configuration/object_detectors",
"configuration/audio_detectors",
],
Classifiers: [
Enrichments: [
"configuration/semantic_search",
"configuration/genai",
"configuration/face_recognition",
"configuration/license_plate_recognition",
"configuration/bird_classification",
],
Cameras: [
"configuration/cameras",

View File

@ -9,6 +9,7 @@ import traceback
from datetime import datetime, timedelta
from functools import reduce
from io import StringIO
from pathlib import Path as FilePath
from typing import Any, Optional
import aiofiles
@ -79,12 +80,16 @@ def go2rtc_streams():
@router.get("/go2rtc/streams/{camera_name}")
def go2rtc_camera_stream(camera_name: str):
def go2rtc_camera_stream(request: Request, camera_name: str):
r = requests.get(
f"http://127.0.0.1:1984/api/streams?src={camera_name}&video=all&audio=all&microphone"
)
if not r.ok:
logger.error("Failed to fetch streams from go2rtc")
camera_config = request.app.frigate_config.cameras.get(camera_name)
if camera_config and camera_config.enabled:
logger.error("Failed to fetch streams from go2rtc")
return JSONResponse(
content=({"success": False, "message": "Error fetching stream data"}),
status_code=500,
@ -174,6 +179,22 @@ def config(request: Request):
config["model"]["all_attributes"] = config_obj.model.all_attributes
config["model"]["non_logo_attributes"] = config_obj.model.non_logo_attributes
# Add model plus data if plus is enabled
if config["plus"]["enabled"]:
model_path = config.get("model", {}).get("path")
if model_path:
model_json_path = FilePath(model_path).with_suffix(".json")
try:
with open(model_json_path, "r") as f:
model_plus_data = json.load(f)
config["model"]["plus"] = model_plus_data
except FileNotFoundError:
config["model"]["plus"] = None
except json.JSONDecodeError:
config["model"]["plus"] = None
else:
config["model"]["plus"] = None
# use merged labelmap
for detector_config in config["detectors"].values():
detector_config["model"]["labelmap"] = (
@ -619,6 +640,48 @@ def get_sub_labels(split_joined: Optional[int] = None):
return JSONResponse(content=sub_labels)
@router.get("/plus/models")
def plusModels(request: Request, filterByCurrentModelDetector: bool = False):
if not request.app.frigate_config.plus_api.is_active():
return JSONResponse(
content=({"success": False, "message": "Frigate+ is not enabled"}),
status_code=400,
)
models: dict[any, any] = request.app.frigate_config.plus_api.get_models()
if not models["list"]:
return JSONResponse(
content=({"success": False, "message": "No models found"}),
status_code=400,
)
modelList = models["list"]
# current model type
modelType = request.app.frigate_config.model.model_type
# current detectorType for comparing to supportedDetectors
detectorType = list(request.app.frigate_config.detectors.values())[0].type
validModels = []
for model in sorted(
filter(
lambda m: (
not filterByCurrentModelDetector
or (detectorType in m["supportedDetectors"] and modelType in m["type"])
),
modelList,
),
key=(lambda m: m["trainDate"]),
reverse=True,
):
validModels.append(model)
return JSONResponse(content=validModels)
@router.get("/recognized_license_plates")
def get_recognized_license_plates(split_joined: Optional[int] = None):
try:

View File

@ -109,11 +109,11 @@ def get_jwt_secret() -> str:
jwt_secret = (
Path(os.path.join("/run/secrets", JWT_SECRET_ENV_VAR)).read_text().strip()
)
# check for the addon options file
# check for the add-on options file
elif os.path.isfile("/data/options.json"):
with open("/data/options.json") as f:
raw_options = f.read()
logger.debug("Using jwt secret from Home Assistant addon options file.")
logger.debug("Using jwt secret from Home Assistant Add-on options file.")
options = json.loads(raw_options)
jwt_secret = options.get("jwt_secret")

View File

@ -6,6 +6,7 @@ import random
import shutil
import string
import cv2
from fastapi import APIRouter, Depends, Request, UploadFile
from fastapi.responses import JSONResponse
from pathvalidate import sanitize_filename
@ -14,9 +15,11 @@ from playhouse.shortcuts import model_to_dict
from frigate.api.auth import require_role
from frigate.api.defs.tags import Tags
from frigate.config.camera import DetectConfig
from frigate.const import FACE_DIR
from frigate.embeddings import EmbeddingsContext
from frigate.models import Event
from frigate.util.path import get_event_snapshot
logger = logging.getLogger(__name__)
@ -27,6 +30,9 @@ router = APIRouter(tags=[Tags.events])
def get_faces():
face_dict: dict[str, list[str]] = {}
if not os.path.exists(FACE_DIR):
return JSONResponse(status_code=200, content={})
for name in os.listdir(FACE_DIR):
face_dir = os.path.join(FACE_DIR, name)
@ -35,10 +41,9 @@ def get_faces():
face_dict[name] = []
for file in sorted(
for file in filter(
lambda f: (f.lower().endswith((".webp", ".png", ".jpg", ".jpeg"))),
os.listdir(face_dir),
key=lambda f: os.path.getctime(os.path.join(face_dir, f)),
reverse=True,
):
face_dict[name].append(file)
@ -87,16 +92,27 @@ def train_face(request: Request, name: str, body: dict = None):
)
json: dict[str, any] = body or {}
training_file = os.path.join(
FACE_DIR, f"train/{sanitize_filename(json.get('training_file', ''))}"
)
training_file_name = sanitize_filename(json.get("training_file", ""))
training_file = os.path.join(FACE_DIR, f"train/{training_file_name}")
event_id = json.get("event_id")
if not training_file or not os.path.isfile(training_file):
if not training_file_name and not event_id:
return JSONResponse(
content=(
{
"success": False,
"message": f"Invalid filename or no file exists: {training_file}",
"message": "A training file or event_id must be passed.",
}
),
status_code=400,
)
if training_file_name and not os.path.isfile(training_file):
return JSONResponse(
content=(
{
"success": False,
"message": f"Invalid filename or no file exists: {training_file_name}",
}
),
status_code=404,
@ -105,8 +121,40 @@ def train_face(request: Request, name: str, body: dict = None):
sanitized_name = sanitize_filename(name)
rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
new_name = f"{sanitized_name}-{rand_id}.webp"
new_file = os.path.join(FACE_DIR, f"{sanitized_name}/{new_name}")
shutil.move(training_file, new_file)
new_file_folder = os.path.join(FACE_DIR, f"{sanitized_name}")
if not os.path.exists(new_file_folder):
os.mkdir(new_file_folder)
if training_file_name:
shutil.move(training_file, os.path.join(new_file_folder, new_name))
else:
try:
event: Event = Event.get(Event.id == event_id)
except DoesNotExist:
return JSONResponse(
content=(
{
"success": False,
"message": f"Invalid event_id or no event exists: {event_id}",
}
),
status_code=404,
)
snapshot = get_event_snapshot(event)
face_box = event.data["attributes"][0]["box"]
detect_config: DetectConfig = request.app.frigate_config.cameras[
event.camera
].detect
# crop the snapshot to the face box, inset slightly to exclude the drawn bounding box
x1 = int(face_box[0] * detect_config.width) + 2
y1 = int(face_box[1] * detect_config.height) + 2
x2 = x1 + int(face_box[2] * detect_config.width) - 4
y2 = y1 + int(face_box[3] * detect_config.height) - 4
face = snapshot[y1:y2, x1:x2]
cv2.imwrite(os.path.join(new_file_folder, new_name), face)
context: EmbeddingsContext = request.app.embeddings
context.clear_face_classifier()
@ -115,7 +163,7 @@ def train_face(request: Request, name: str, body: dict = None):
content=(
{
"success": True,
"message": f"Successfully saved {training_file} as {new_name}.",
"message": f"Successfully saved {training_file_name} as {new_name}.",
}
),
status_code=200,
@ -149,6 +197,42 @@ async def register_face(request: Request, name: str, file: UploadFile):
context: EmbeddingsContext = request.app.embeddings
result = context.register_face(name, await file.read())
if not isinstance(result, dict):
return JSONResponse(
status_code=500,
content={
"success": False,
"message": "Could not process request. Try restarting Frigate.",
},
)
return JSONResponse(
status_code=200 if result.get("success", True) else 400,
content=result,
)
@router.post("/faces/recognize")
async def recognize_face(request: Request, file: UploadFile):
if not request.app.frigate_config.face_recognition.enabled:
return JSONResponse(
status_code=400,
content={"message": "Face recognition is not enabled.", "success": False},
)
context: EmbeddingsContext = request.app.embeddings
result = context.recognize_face(await file.read())
if not isinstance(result, dict):
return JSONResponse(
status_code=500,
content={
"success": False,
"message": "Could not process request. Try restarting Frigate.",
},
)
return JSONResponse(
status_code=200 if result.get("success", True) else 400,
content=result,
@ -213,3 +297,49 @@ def reprocess_license_plate(request: Request, event_id: str):
content=response,
status_code=200,
)
@router.put("/reindex", dependencies=[Depends(require_role(["admin"]))])
def reindex_embeddings(request: Request):
if not request.app.frigate_config.semantic_search.enabled:
message = (
"Cannot reindex tracked object embeddings, Semantic Search is not enabled."
)
logger.error(message)
return JSONResponse(
content=(
{
"success": False,
"message": message,
}
),
status_code=400,
)
context: EmbeddingsContext = request.app.embeddings
response = context.reindex_embeddings()
if response == "started":
return JSONResponse(
content={
"success": True,
"message": "Embeddings reindexing has started.",
},
status_code=202, # 202 Accepted
)
elif response == "in_progress":
return JSONResponse(
content={
"success": False,
"message": "Embeddings reindexing is already in progress.",
},
status_code=409, # 409 Conflict
)
else:
return JSONResponse(
content={
"success": False,
"message": "Failed to start reindexing.",
},
status_code=500,
)

View File

@ -701,6 +701,7 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
for k, v in event["data"].items()
if k
in [
"attributes",
"type",
"score",
"top_score",

View File

@ -2,6 +2,7 @@
import glob
import logging
import math
import os
import subprocess as sp
import time
@ -240,25 +241,50 @@ def get_snapshot_from_recording(
content={"success": False, "message": "Camera not found"},
status_code=404,
)
recording_query = (
Recordings.select(
Recordings.path,
Recordings.start_time,
)
.where(
(
(frame_time >= Recordings.start_time)
& (frame_time <= Recordings.end_time)
)
)
.where(Recordings.camera == camera_name)
.order_by(Recordings.start_time.desc())
.limit(1)
)
recording: Recordings | None = None
try:
recording: Recordings = recording_query.get()
recording = (
Recordings.select(
Recordings.path,
Recordings.start_time,
)
.where(
(
(frame_time >= Recordings.start_time)
& (frame_time <= Recordings.end_time)
)
)
.where(Recordings.camera == camera_name)
.order_by(Recordings.start_time.desc())
.limit(1)
.get()
)
except DoesNotExist:
# try again with the frame time rounded up, as it may fall just before
# a segment's rounded start time
frame_time = math.ceil(frame_time)
try:
recording = (
Recordings.select(
Recordings.path,
Recordings.start_time,
)
.where(
(
(frame_time >= Recordings.start_time)
& (frame_time <= Recordings.end_time)
)
)
.where(Recordings.camera == camera_name)
.order_by(Recordings.start_time.desc())
.limit(1)
.get()
)
except DoesNotExist:
pass
if recording is not None:
time_in_segment = frame_time - recording.start_time
codec = "png" if format == "png" else "mjpeg"
mime_type = "png" if format == "png" else "jpeg"
@ -279,7 +305,7 @@ def get_snapshot_from_recording(
status_code=404,
)
return Response(image_data, headers={"Content-Type": f"image/{mime_type}"})
except DoesNotExist:
else:
return JSONResponse(
content={
"success": False,

View File

@ -5,7 +5,7 @@ import logging
import os
import threading
from collections import defaultdict
from typing import Callable
from typing import Any, Callable
import cv2
import numpy as np
@ -53,8 +53,19 @@ class CameraState:
self.callbacks = defaultdict(list)
self.ptz_autotracker_thread = ptz_autotracker_thread
self.prev_enabled = self.camera_config.enabled
self.requires_face_detection = (
self.config.face_recognition.enabled
and "face" not in self.config.objects.all_objects
)
def get_current_frame(self, draw_options={}):
def get_max_update_frequency(self, obj: TrackedObject) -> int:
return (
1
if self.requires_face_detection and obj.obj_data["label"] == "person"
else 5
)
def get_current_frame(self, draw_options: dict[str, Any] = {}):
with self.current_frame_lock:
frame_copy = np.copy(self._current_frame)
frame_time = self.current_frame_time
@ -77,7 +88,9 @@ class CameraState:
thickness = 1
else:
thickness = 2
color = self.config.model.colormap[obj["label"]]
color = self.config.model.colormap.get(
obj["label"], (255, 255, 255)
)
else:
thickness = 1
color = (255, 0, 0)
@ -99,7 +112,9 @@ class CameraState:
and obj["frame_time"] == frame_time
):
thickness = 5
color = self.config.model.colormap[obj["label"]]
color = self.config.model.colormap.get(
obj["label"], (255, 255, 255)
)
# debug autotracking zooming - show the zoom factor box
if (
@ -283,11 +298,12 @@ class CameraState:
updated_obj.last_updated = frame_time
# if it has been more than 5 seconds since the last thumb update
# if it has been more than max_update_frequency seconds since the last thumb update
# and the last update is greater than the last publish or
# the object has changed significantly
if (
frame_time - updated_obj.last_published > 5
frame_time - updated_obj.last_published
> self.get_max_update_frequency(updated_obj)
and updated_obj.last_updated > updated_obj.last_published
) or significant_update:
# call event handlers
@ -306,7 +322,6 @@ class CameraState:
# TODO: can i switch to looking this up and only changing when an event ends?
# maintain best objects
camera_activity: dict[str, list[any]] = {
"enabled": True,
"motion": len(motion_boxes) > 0,
"objects": [],
}
@ -410,9 +425,13 @@ class CameraState:
self.previous_frame_id = frame_name
def save_manual_event_image(
self, event_id: str, label: str, draw: dict[str, list[dict]]
self,
frame: np.ndarray | None,
event_id: str,
label: str,
draw: dict[str, list[dict]],
) -> None:
img_frame = self.get_current_frame()
img_frame = frame if frame is not None else self.get_current_frame()
# write clean snapshot if enabled
if self.camera_config.snapshots.clean_copy:
@ -458,9 +477,9 @@ class CameraState:
# create thumbnail with max height of 175 and save
width = int(175 * img_frame.shape[1] / img_frame.shape[0])
thumb = cv2.resize(img_frame, dsize=(width, 175), interpolation=cv2.INTER_AREA)
cv2.imwrite(
os.path.join(THUMB_DIR, self.camera_config.name, f"{event_id}.webp"), thumb
)
thumb_path = os.path.join(THUMB_DIR, self.camera_config.name)
os.makedirs(thumb_path, exist_ok=True)
cv2.imwrite(os.path.join(thumb_path, f"{event_id}.webp"), thumb)
def shutdown(self) -> None:
for obj in self.tracked_objects.values():

View File

@ -11,6 +11,7 @@ class DetectionTypeEnum(str, Enum):
api = "api"
video = "video"
audio = "audio"
lpr = "lpr"
class DetectionPublisher(Publisher):

View File

@ -164,8 +164,12 @@ class Dispatcher:
def handle_on_connect():
camera_status = self.camera_activity.last_camera_activity.copy()
cameras_with_status = camera_status.keys()
for camera in self.config.cameras.keys():
if camera not in cameras_with_status:
camera_status[camera] = {}
for camera in camera_status.keys():
camera_status[camera]["config"] = {
"detect": self.config.cameras[camera].detect.enabled,
"enabled": self.config.cameras[camera].enabled,

View File

@ -13,9 +13,11 @@ class EmbeddingsRequestEnum(Enum):
embed_description = "embed_description"
embed_thumbnail = "embed_thumbnail"
generate_search = "generate_search"
recognize_face = "recognize_face"
register_face = "register_face"
reprocess_face = "reprocess_face"
reprocess_plate = "reprocess_plate"
reindex = "reindex"
class EmbeddingsResponder:

View File

@ -15,6 +15,8 @@ class EventMetadataTypeEnum(str, Enum):
regenerate_description = "regenerate_description"
sub_label = "sub_label"
recognized_license_plate = "recognized_license_plate"
lpr_event_create = "lpr_event_create"
save_lpr_snapshot = "save_lpr_snapshot"
class EventMetadataPublisher(Publisher):

View File

@ -1,4 +1,5 @@
import os
from enum import Enum
from typing import Optional
from pydantic import Field, PrivateAttr
@ -17,6 +18,10 @@ from frigate.util.builtin import (
)
from ..base import FrigateBaseModel
from ..classification import (
CameraFaceRecognitionConfig,
CameraLicensePlateRecognitionConfig,
)
from .audio import AudioConfig
from .birdseye import BirdseyeCameraConfig
from .detect import DetectConfig
@ -38,6 +43,11 @@ from .zone import ZoneConfig
__all__ = ["CameraConfig"]
class CameraTypeEnum(str, Enum):
generic = "generic"
lpr = "lpr"
class CameraConfig(FrigateBaseModel):
name: Optional[str] = Field(None, title="Camera name.", pattern=REGEX_CAMERA_NAME)
enabled: bool = Field(default=True, title="Enable camera.")
@ -52,6 +62,9 @@ class CameraConfig(FrigateBaseModel):
detect: DetectConfig = Field(
default_factory=DetectConfig, title="Object detection configuration."
)
face_recognition: CameraFaceRecognitionConfig = Field(
default_factory=CameraFaceRecognitionConfig, title="Face recognition config."
)
ffmpeg: CameraFfmpegConfig = Field(title="FFmpeg configuration for the camera.")
genai: GenAICameraConfig = Field(
default_factory=GenAICameraConfig, title="Generative AI configuration."
@ -59,6 +72,9 @@ class CameraConfig(FrigateBaseModel):
live: CameraLiveConfig = Field(
default_factory=CameraLiveConfig, title="Live playback settings."
)
lpr: CameraLicensePlateRecognitionConfig = Field(
default_factory=CameraLicensePlateRecognitionConfig, title="LPR config."
)
motion: Optional[MotionConfig] = Field(
None, title="Motion detection configuration."
)
@ -92,6 +108,7 @@ class CameraConfig(FrigateBaseModel):
onvif: OnvifConfig = Field(
default_factory=OnvifConfig, title="Camera Onvif Configuration."
)
type: CameraTypeEnum = Field(default=CameraTypeEnum.generic, title="Camera Type")
ui: CameraUiConfig = Field(
default_factory=CameraUiConfig, title="Camera UI Modifications."
)

View File

@ -1,11 +1,13 @@
from enum import Enum
from typing import Dict, List, Optional
from pydantic import Field
from pydantic import ConfigDict, Field
from .base import FrigateBaseModel
__all__ = [
"CameraFaceRecognitionConfig",
"CameraLicensePlateRecognitionConfig",
"FaceRecognitionConfig",
"SemanticSearchConfig",
"LicensePlateRecognitionConfig",
@ -49,8 +51,11 @@ class SemanticSearchConfig(FrigateBaseModel):
class FaceRecognitionConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable face recognition.")
min_score: float = Field(
title="Minimum face distance score required to save the attempt.",
model_size: str = Field(
default="small", title="The size of the embeddings model used."
)
unknown_score: float = Field(
title="Minimum face distance score required to be marked as a potential match.",
default=0.8,
gt=0.0,
le=1.0,
@ -70,14 +75,23 @@ class FaceRecognitionConfig(FrigateBaseModel):
min_area: int = Field(
default=500, title="Min area of face box to consider running face recognition."
)
save_attempts: bool = Field(
default=True, title="Save images of face detections for training."
save_attempts: int = Field(
default=100, ge=0, title="Number of face attempts to save in the train tab."
)
blur_confidence_filter: bool = Field(
default=True, title="Apply blur quality filter to face confidence."
)
class CameraFaceRecognitionConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable face recognition.")
min_area: int = Field(
default=500, title="Min area of face box to consider running face recognition."
)
model_config = ConfigDict(extra="ignore", protected_namespaces=())
class LicensePlateRecognitionConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable license plate recognition.")
detection_threshold: float = Field(
@ -112,3 +126,34 @@ class LicensePlateRecognitionConfig(FrigateBaseModel):
known_plates: Optional[Dict[str, List[str]]] = Field(
default={}, title="Known plates to track (strings or regular expressions)."
)
enhancement: int = Field(
default=0,
title="Amount of contrast adjustment and denoising to apply to license plate images before recognition.",
ge=0,
le=10,
)
debug_save_plates: bool = Field(
default=False,
title="Save plates captured for LPR for debugging purposes.",
)
class CameraLicensePlateRecognitionConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable license plate recognition.")
expire_time: int = Field(
default=3,
title="Expire plates not seen after number of seconds (for dedicated LPR cameras only).",
gt=0,
)
min_area: int = Field(
default=1000,
title="Minimum area of license plate to begin running recognition.",
)
enhancement: int = Field(
default=0,
title="Amount of contrast adjustment and denoising to apply to license plate images before recognition.",
ge=0,
le=10,
)
model_config = ConfigDict(extra="ignore", protected_namespaces=())
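# Illustrative sketch: instantiating the camera-level override defined above with
# hypothetical values (assumes the CameraLicensePlateRecognitionConfig class above is in scope).
example_camera_lpr = CameraLicensePlateRecognitionConfig(
    enabled=True,
    expire_time=5,  # seconds before an unseen plate expires on dedicated LPR cameras
    min_area=1500,  # minimum plate area in pixels before recognition runs
    enhancement=2,  # 0-10 contrast/denoise level, validated by the ge/le bounds above
)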

View File

@ -292,13 +292,30 @@ def verify_autotrack_zones(camera_config: CameraConfig) -> ValueError | None:
def verify_motion_and_detect(camera_config: CameraConfig) -> ValueError | None:
"""Verify that required_zones are specified when autotracking is enabled."""
"""Verify that motion detection is not disabled and object detection is enabled."""
if camera_config.detect.enabled and not camera_config.motion.enabled:
raise ValueError(
f"Camera {camera_config.name} has motion detection disabled and object detection enabled but object detection requires motion detection."
)
def verify_lpr_and_face(
frigate_config: FrigateConfig, camera_config: CameraConfig
) -> ValueError | None:
"""Verify that lpr and face are enabled at the global level if enabled at the camera level."""
if camera_config.lpr.enabled and not frigate_config.lpr.enabled:
raise ValueError(
f"Camera {camera_config.name} has lpr enabled but lpr is disabled at the global level of the config. You must enable lpr at the global level."
)
if (
camera_config.face_recognition.enabled
and not frigate_config.face_recognition.enabled
):
raise ValueError(
f"Camera {camera_config.name} has face_recognition enabled but face_recognition is disabled at the global level of the config. You must enable face_recognition at the global level."
)
class FrigateConfig(FrigateBaseModel):
version: Optional[str] = Field(default=None, title="Current config version.")
@ -331,19 +348,6 @@ class FrigateConfig(FrigateBaseModel):
default_factory=TelemetryConfig, title="Telemetry configuration."
)
tls: TlsConfig = Field(default_factory=TlsConfig, title="TLS configuration.")
classification: ClassificationConfig = Field(
default_factory=ClassificationConfig, title="Object classification config."
)
semantic_search: SemanticSearchConfig = Field(
default_factory=SemanticSearchConfig, title="Semantic search configuration."
)
face_recognition: FaceRecognitionConfig = Field(
default_factory=FaceRecognitionConfig, title="Face recognition config."
)
lpr: LicensePlateRecognitionConfig = Field(
default_factory=LicensePlateRecognitionConfig,
title="License Plate recognition config.",
)
ui: UIConfig = Field(default_factory=UIConfig, title="UI configuration.")
# Detector config
@ -395,6 +399,21 @@ class FrigateConfig(FrigateBaseModel):
title="Global timestamp style configuration.",
)
# Classification Config
classification: ClassificationConfig = Field(
default_factory=ClassificationConfig, title="Object classification config."
)
semantic_search: SemanticSearchConfig = Field(
default_factory=SemanticSearchConfig, title="Semantic search configuration."
)
face_recognition: FaceRecognitionConfig = Field(
default_factory=FaceRecognitionConfig, title="Face recognition config."
)
lpr: LicensePlateRecognitionConfig = Field(
default_factory=LicensePlateRecognitionConfig,
title="License Plate recognition config.",
)
camera_groups: Dict[str, CameraGroupConfig] = Field(
default_factory=dict, title="Camera group configuration"
)
@ -435,6 +454,8 @@ class FrigateConfig(FrigateBaseModel):
include={
"audio": ...,
"birdseye": ...,
"face_recognition": ...,
"lpr": ...,
"record": ...,
"snapshots": ...,
"live": ...,
@ -603,6 +624,7 @@ class FrigateConfig(FrigateBaseModel):
verify_required_zones_exist(camera_config)
verify_autotrack_zones(camera_config)
verify_motion_and_detect(camera_config)
verify_lpr_and_face(self, camera_config)
self.objects.parse_all_objects(self.cameras)
self.model.create_colormap(sorted(self.objects.all_objects))

View File

@ -0,0 +1,370 @@
import logging
import os
import queue
import threading
from abc import ABC, abstractmethod
import cv2
import numpy as np
from scipy import stats
from frigate.config import FrigateConfig
from frigate.const import MODEL_CACHE_DIR
from frigate.embeddings.onnx.face_embedding import ArcfaceEmbedding, FaceNetEmbedding
logger = logging.getLogger(__name__)
class FaceRecognizer(ABC):
"""Face recognition runner."""
def __init__(self, config: FrigateConfig) -> None:
self.config = config
self.landmark_detector: cv2.face.FacemarkLBF = None
self.init_landmark_detector()
@abstractmethod
def build(self) -> None:
"""Build face recognition model."""
pass
@abstractmethod
def clear(self) -> None:
"""Clear current built model."""
pass
@abstractmethod
def classify(self, face_image: np.ndarray) -> tuple[str, float] | None:
pass
def init_landmark_detector(self) -> None:
landmark_model = os.path.join(MODEL_CACHE_DIR, "facedet/landmarkdet.yaml")
if os.path.exists(landmark_model):
self.landmark_detector = cv2.face.createFacemarkLBF()
self.landmark_detector.loadModel(landmark_model)
def align_face(
self,
image: np.ndarray,
output_width: int,
output_height: int,
) -> np.ndarray:
# landmark is run on grayscale images
if image.ndim == 3:
land_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
land_image = image
_, lands = self.landmark_detector.fit(
land_image, np.array([(0, 0, land_image.shape[1], land_image.shape[0])])
)
landmarks: np.ndarray = lands[0][0]
# get landmarks for eyes
leftEyePts = landmarks[42:48]
rightEyePts = landmarks[36:42]
# compute the center of mass for each eye
leftEyeCenter = leftEyePts.mean(axis=0).astype("int")
rightEyeCenter = rightEyePts.mean(axis=0).astype("int")
# compute the angle between the eye centroids
dY = rightEyeCenter[1] - leftEyeCenter[1]
dX = rightEyeCenter[0] - leftEyeCenter[0]
angle = np.degrees(np.arctan2(dY, dX)) - 180
# compute the desired right eye x-coordinate based on the
# desired x-coordinate of the left eye
desiredRightEyeX = 1.0 - 0.35
# determine the scale of the new resulting image by taking
# the ratio of the distance between eyes in the *current*
# image to the ratio of distance between eyes in the
# *desired* image
dist = np.sqrt((dX**2) + (dY**2))
desiredDist = desiredRightEyeX - 0.35
desiredDist *= output_width
scale = desiredDist / dist
# compute center (x, y)-coordinates (i.e., the median point)
# between the two eyes in the input image
# grab the rotation matrix for rotating and scaling the face
eyesCenter = (
int((leftEyeCenter[0] + rightEyeCenter[0]) // 2),
int((leftEyeCenter[1] + rightEyeCenter[1]) // 2),
)
M = cv2.getRotationMatrix2D(eyesCenter, angle, scale)
# update the translation component of the matrix
tX = output_width * 0.5
tY = output_height * 0.35
M[0, 2] += tX - eyesCenter[0]
M[1, 2] += tY - eyesCenter[1]
# apply the affine transformation
return cv2.warpAffine(
image, M, (output_width, output_height), flags=cv2.INTER_CUBIC
)
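# Worked example of the alignment math above with hypothetical eye centers in a
# 160x160 crop: left eye at (120, 80), right eye at (60, 84).
#   dY = 84 - 80 = 4, dX = 60 - 120 = -60
#   angle = degrees(atan2(4, -60)) - 180 ~ 176.2 - 180 = -3.8 degrees of roll to correct
#   dist = sqrt(60^2 + 4^2) ~ 60.1 px, desiredDist = (0.65 - 0.35) * 160 = 48 px
#   scale = 48 / 60.1 ~ 0.80, so the face is rotated ~3.8 degrees and scaled down ~20%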
def get_blur_factor(self, input: np.ndarray) -> float:
"""Calculates the factor for the confidence based on the blur of the image."""
if not self.config.face_recognition.blur_confidence_filter:
return 1.0
variance = cv2.Laplacian(input, cv2.CV_64F).var()
if variance < 60: # image is very blurry
return 0.96
elif variance < 70: # image moderately blurry
return 0.98
elif variance < 80: # image is slightly blurry
return 0.99
else:
return 1.0
def similarity_to_confidence(
cosine_similarity: float, median=0.3, range_width=0.6, slope_factor=12
):
"""
Default sigmoid function to map cosine similarity to confidence.
Args:
cosine_similarity (float): The input cosine similarity.
median (float): Assumed median of cosine similarity distribution.
range_width (float): Assumed range of cosine similarity distribution (90th percentile - 10th percentile).
slope_factor (float): Adjusts the steepness of the curve.
Returns:
float: The confidence score.
"""
# Calculate slope and bias
slope = slope_factor / range_width
bias = median
# Calculate confidence
confidence = 1 / (1 + np.exp(-slope * (cosine_similarity - bias)))
return confidence
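# Quick sanity check of the mapping above with the default parameters
# (median=0.3, range_width=0.6, slope_factor=12, giving a slope of 20):
#   similarity_to_confidence(0.3) = 0.5    (at the assumed median)
#   similarity_to_confidence(0.6) ~ 0.998  (well above the median)
#   similarity_to_confidence(0.1) ~ 0.018  (well below the median)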
class FaceNetRecognizer(FaceRecognizer):
def __init__(self, config: FrigateConfig):
super().__init__(config)
self.mean_embs: dict[int, np.ndarray] = {}
self.face_embedder: FaceNetEmbedding = FaceNetEmbedding()
self.model_builder_queue: queue.Queue | None = None
def clear(self) -> None:
self.mean_embs = {}
def run_build_task(self) -> None:
self.model_builder_queue = queue.Queue()
def build_model():
face_embeddings_map: dict[str, list[np.ndarray]] = {}
idx = 0
dir = "/media/frigate/clips/faces"
for name in os.listdir(dir):
if name == "train":
continue
face_folder = os.path.join(dir, name)
if not os.path.isdir(face_folder):
continue
face_embeddings_map[name] = []
for image in os.listdir(face_folder):
img = cv2.imread(os.path.join(face_folder, image))
if img is None:
continue
img = self.align_face(img, img.shape[1], img.shape[0])
emb = self.face_embedder([img])[0].squeeze()
face_embeddings_map[name].append(emb)
idx += 1
self.model_builder_queue.put(face_embeddings_map)
thread = threading.Thread(target=build_model, daemon=True)
thread.start()
def build(self):
if not self.landmark_detector:
self.init_landmark_detector()
return None
if self.model_builder_queue is not None:
try:
face_embeddings_map: dict[str, list[np.ndarray]] = (
self.model_builder_queue.get(timeout=0.1)
)
self.model_builder_queue = None
except queue.Empty:
return
else:
self.run_build_task()
return
if not face_embeddings_map:
return
for name, embs in face_embeddings_map.items():
if embs:
self.mean_embs[name] = stats.trim_mean(embs, 0.15)
logger.debug("Finished building ArcFace model")
def classify(self, face_image):
if not self.landmark_detector:
return None
if not self.mean_embs:
self.build()
if not self.mean_embs:
return None
# face recognition is best run on grayscale images
# get blur factor before aligning face
blur_factor = self.get_blur_factor(face_image)
logger.debug(f"face detected with blurriness {blur_factor}")
# align face and run recognition
img = self.align_face(face_image, face_image.shape[1], face_image.shape[0])
embedding = self.face_embedder([img])[0].squeeze()
score = 0
label = ""
for name, mean_emb in self.mean_embs.items():
dot_product = np.dot(embedding, mean_emb)
magnitude_A = np.linalg.norm(embedding)
magnitude_B = np.linalg.norm(mean_emb)
cosine_similarity = dot_product / (magnitude_A * magnitude_B)
confidence = similarity_to_confidence(
cosine_similarity, median=0.5, range_width=0.6
)
if confidence > score:
score = confidence
label = name
return label, round(score * blur_factor, 2)
class ArcFaceRecognizer(FaceRecognizer):
def __init__(self, config: FrigateConfig):
super().__init__(config)
self.mean_embs: dict[int, np.ndarray] = {}
self.face_embedder: ArcfaceEmbedding = ArcfaceEmbedding()
self.model_builder_queue: queue.Queue | None = None
def clear(self) -> None:
self.mean_embs = {}
def run_build_task(self) -> None:
self.model_builder_queue = queue.Queue()
def build_model():
face_embeddings_map: dict[str, list[np.ndarray]] = {}
idx = 0
dir = "/media/frigate/clips/faces"
for name in os.listdir(dir):
if name == "train":
continue
face_folder = os.path.join(dir, name)
if not os.path.isdir(face_folder):
continue
face_embeddings_map[name] = []
for image in os.listdir(face_folder):
img = cv2.imread(os.path.join(face_folder, image))
if img is None:
continue
img = self.align_face(img, img.shape[1], img.shape[0])
emb = self.face_embedder([img])[0].squeeze()
face_embeddings_map[name].append(emb)
idx += 1
self.model_builder_queue.put(face_embeddings_map)
thread = threading.Thread(target=build_model, daemon=True)
thread.start()
def build(self):
if not self.landmark_detector:
self.init_landmark_detector()
return None
if self.model_builder_queue is not None:
try:
face_embeddings_map: dict[str, list[np.ndarray]] = (
self.model_builder_queue.get(timeout=0.1)
)
self.model_builder_queue = None
except queue.Empty:
return
else:
self.run_build_task()
return
if not face_embeddings_map:
return
for name, embs in face_embeddings_map.items():
if embs:
self.mean_embs[name] = stats.trim_mean(embs, 0.15)
logger.debug("Finished building ArcFace model")
def classify(self, face_image):
if not self.landmark_detector:
return None
if not self.mean_embs:
self.build()
if not self.mean_embs:
return None
# face recognition is best run on grayscale images
# get blur factor before aligning face
blur_factor = self.get_blur_factor(face_image)
logger.debug(f"face detected with blurriness {blur_factor}")
# align face and run recognition
img = self.align_face(face_image, face_image.shape[1], face_image.shape[0])
embedding = self.face_embedder([img])[0].squeeze()
score = 0
label = ""
for name, mean_emb in self.mean_embs.items():
dot_product = np.dot(embedding, mean_emb)
magnitude_A = np.linalg.norm(embedding)
magnitude_B = np.linalg.norm(mean_emb)
cosine_similarity = dot_product / (magnitude_A * magnitude_B)
confidence = similarity_to_confidence(cosine_similarity)
if confidence > score:
score = confidence
label = name
return label, round(score * blur_factor, 2)

View File

@ -1,18 +1,29 @@
"""Handle processing images for face detection and recognition."""
import base64
import datetime
import logging
import math
import os
import random
import re
import string
from pathlib import Path
from typing import List, Optional, Tuple
import cv2
import numpy as np
from Levenshtein import distance
from Levenshtein import distance, jaro_winkler
from pyclipper import ET_CLOSEDPOLYGON, JT_ROUND, PyclipperOffset
from shapely.geometry import Polygon
from frigate.comms.event_metadata_updater import EventMetadataTypeEnum
from frigate.comms.event_metadata_updater import (
EventMetadataPublisher,
EventMetadataTypeEnum,
)
from frigate.const import CLIPS_DIR
from frigate.embeddings.onnx.lpr_embedding import LPR_EMBEDDING_SIZE
from frigate.util.builtin import EventsPerSecond
from frigate.util.image import area
logger = logging.getLogger(__name__)
@ -23,13 +34,12 @@ WRITE_DEBUG_IMAGES = False
class LicensePlateProcessingMixin:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.requires_license_plate_detection = (
"license_plate" not in self.config.objects.all_objects
)
self.plates_rec_second = EventsPerSecond()
self.plates_rec_second.start()
self.plates_det_second = EventsPerSecond()
self.plates_det_second.start()
self.event_metadata_publisher = EventMetadataPublisher()
self.ctc_decoder = CTCDecoder()
self.batch_size = 6
# Detection specific parameters
@ -38,6 +48,9 @@ class LicensePlateProcessingMixin:
self.box_thresh = 0.6
self.mask_thresh = 0.6
# matching
self.similarity_threshold = 0.8
def _detect(self, image: np.ndarray) -> List[np.ndarray]:
"""
Detect possible license plates in the input image by first resizing and normalizing it,
@ -98,7 +111,7 @@ class LicensePlateProcessingMixin:
return self._process_classification_output(images, outputs)
def _recognize(
self, images: List[np.ndarray]
self, camera: str, images: List[np.ndarray]
) -> Tuple[List[str], List[List[float]]]:
"""
Recognize the characters on the detected license plates using the recognition model.
@ -128,7 +141,7 @@ class LicensePlateProcessingMixin:
# preprocess the images based on the max aspect ratio
for i in range(index, min(num_images, index + self.batch_size)):
norm_image = self._preprocess_recognition_image(
images[indices[i]], max_wh_ratio
camera, images[indices[i]], max_wh_ratio
)
norm_image = norm_image[np.newaxis, :]
norm_images.append(norm_image)
@ -137,7 +150,7 @@ class LicensePlateProcessingMixin:
return self.ctc_decoder(outputs)
def _process_license_plate(
self, image: np.ndarray
self, camera: str, id: str, image: np.ndarray
) -> Tuple[List[str], List[float], List[int]]:
"""
Complete pipeline for detecting, classifying, and recognizing license plates in the input image.
@ -165,21 +178,37 @@ class LicensePlateProcessingMixin:
boxes = self._sort_boxes(list(boxes))
plate_images = [self._crop_license_plate(image, x) for x in boxes]
current_time = int(datetime.datetime.now().timestamp())
if WRITE_DEBUG_IMAGES:
current_time = int(datetime.datetime.now().timestamp())
for i, img in enumerate(plate_images):
cv2.imwrite(
f"debug/frames/license_plate_cropped_{current_time}_{i + 1}.jpg",
img,
)
if self.config.lpr.debug_save_plates:
logger.debug(f"{camera}: Saving plates for event {id}")
Path(os.path.join(CLIPS_DIR, f"lpr/{camera}/{id}")).mkdir(
parents=True, exist_ok=True
)
for i, img in enumerate(plate_images):
cv2.imwrite(
os.path.join(
CLIPS_DIR, f"lpr/{camera}/{id}/{current_time}_{i + 1}.jpg"
),
img,
)
# keep track of the index of each image for correct area calc later
sorted_indices = np.argsort([x.shape[1] / x.shape[0] for x in plate_images])
reverse_mapping = {
idx: original_idx for original_idx, idx in enumerate(sorted_indices)
}
results, confidences = self._recognize(plate_images)
results, confidences = self._recognize(camera, plate_images)
if results:
license_plates = [""] * len(plate_images)
@ -197,11 +226,8 @@ class LicensePlateProcessingMixin:
# set to True to write each cropped image for debugging
if False:
save_image = cv2.cvtColor(
plate_images[original_idx], cv2.COLOR_RGB2BGR
)
filename = f"debug/frames/plate_{original_idx}_{plate}_{area}.jpg"
cv2.imwrite(filename, save_image)
cv2.imwrite(filename, plate_images[original_idx])
license_plates[original_idx] = plate
average_confidences[original_idx] = average_confidence
@ -302,7 +328,6 @@ class LicensePlateProcessingMixin:
# get minimum bounding box (rotated rectangle) around the contour and the smallest side length.
points, min_side = self._get_min_boxes(contour)
logger.debug(f"min side {index}, {min_side}")
if min_side < self.min_size:
continue
@ -310,7 +335,6 @@ class LicensePlateProcessingMixin:
points = np.array(points)
score = self._box_score(output, contour)
logger.debug(f"box score {index}, {score}")
if self.box_thresh > score:
continue
@ -320,7 +344,7 @@ class LicensePlateProcessingMixin:
# Use pyclipper to shrink the polygon slightly based on the computed distance.
offset = PyclipperOffset()
offset.AddPath(points, JT_ROUND, ET_CLOSEDPOLYGON)
points = np.array(offset.Execute(distance * 1.75)).reshape((-1, 1, 2))
points = np.array(offset.Execute(distance * 1.5)).reshape((-1, 1, 2))
# get the minimum bounding box around the shrunken polygon.
box, min_side = self._get_min_boxes(points)
@ -602,7 +626,7 @@ class LicensePlateProcessingMixin:
return images, results
def _preprocess_recognition_image(
self, image: np.ndarray, max_wh_ratio: float
self, camera: str, image: np.ndarray, max_wh_ratio: float
) -> np.ndarray:
"""
Preprocess an image for recognition by dynamically adjusting its width.
@ -624,6 +648,48 @@ class LicensePlateProcessingMixin:
assert image.shape[2] == input_shape[0], "Unexpected number of image channels."
# convert to grayscale
if image.shape[2] == 3:
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
else:
gray = image
if self.config.cameras[camera].lpr.enhancement > 3:
# denoise using a configurable pixel neighborhood value
logger.debug(
f"{camera}: Denoising recognition image (level: {self.config.cameras[camera].lpr.enhancement})"
)
smoothed = cv2.bilateralFilter(
gray,
d=5 + self.config.cameras[camera].lpr.enhancement,
sigmaColor=10 * self.config.cameras[camera].lpr.enhancement,
sigmaSpace=10 * self.config.cameras[camera].lpr.enhancement,
)
sharpening_kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
processed = cv2.filter2D(smoothed, -1, sharpening_kernel)
else:
processed = gray
if self.config.cameras[camera].lpr.enhancement > 0:
# apply CLAHE for contrast enhancement whenever any enhancement level is set
logger.debug(
f"{camera}: Enhancing contrast for recognition image (level: {self.config.cameras[camera].lpr.enhancement})"
)
grid_size = (
max(4, input_w // 40),
max(4, input_h // 40),
)
clahe = cv2.createCLAHE(
clipLimit=2 if self.config.cameras[camera].lpr.enhancement > 5 else 1.5,
tileGridSize=grid_size,
)
enhanced = clahe.apply(processed)
else:
enhanced = processed
# Convert back to 3-channel for model compatibility
image = cv2.cvtColor(enhanced, cv2.COLOR_GRAY2RGB)
# dynamically adjust input width based on max_wh_ratio
input_w = int(input_h * max_wh_ratio)
@ -649,6 +715,13 @@ class LicensePlateProcessingMixin:
)
padded_image[:, :, :resized_w] = resized_image
if False:
current_time = int(datetime.datetime.now().timestamp() * 1000)
cv2.imwrite(
f"debug/frames/preprocessed_recognition_{current_time}.jpg",
image,
)
return padded_image
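# Standalone sketch of the enhancement pipeline above for a hypothetical plate crop;
# the file path and enhancement level are illustrative only.
#   import cv2
#   import numpy as np
#   plate = cv2.imread("plate_crop.jpg", cv2.IMREAD_GRAYSCALE)
#   level = 5  # camera's lpr.enhancement setting
#   smoothed = cv2.bilateralFilter(
#       plate, d=5 + level, sigmaColor=10 * level, sigmaSpace=10 * level
#   )
#   kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
#   sharpened = cv2.filter2D(smoothed, -1, kernel)
#   clahe = cv2.createCLAHE(clipLimit=1.5, tileGridSize=(8, 8))
#   enhanced = clahe.apply(sharpened)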
@staticmethod
@ -710,18 +783,38 @@ class LicensePlateProcessingMixin:
top_score = -1
top_box = None
img_h, img_w = input.shape[0], input.shape[1]
# Calculate resized dimensions and padding based on _preprocess_inputs
if img_w > img_h:
resized_h = int(((img_h / img_w) * LPR_EMBEDDING_SIZE) // 4 * 4)
resized_w = LPR_EMBEDDING_SIZE
x_offset = (LPR_EMBEDDING_SIZE - resized_w) // 2
y_offset = (LPR_EMBEDDING_SIZE - resized_h) // 2
scale_x = img_w / resized_w
scale_y = img_h / resized_h
else:
resized_w = int(((img_w / img_h) * LPR_EMBEDDING_SIZE) // 4 * 4)
resized_h = LPR_EMBEDDING_SIZE
x_offset = (LPR_EMBEDDING_SIZE - resized_w) // 2
y_offset = (LPR_EMBEDDING_SIZE - resized_h) // 2
scale_x = img_w / resized_w
scale_y = img_h / resized_h
# Loop over predictions
for prediction in predictions:
score = prediction[6]
if score >= confidence_threshold:
bbox = prediction[1:5]
# Scale boxes back to original image size
scale_x = input.shape[1] / 256
scale_y = input.shape[0] / 256
bbox[0] *= scale_x
bbox[1] *= scale_y
bbox[2] *= scale_x
bbox[3] *= scale_y
# Adjust for padding and scale to original image
bbox[0] = (bbox[0] - x_offset) * scale_x
bbox[1] = (bbox[1] - y_offset) * scale_y
bbox[2] = (bbox[2] - x_offset) * scale_x
bbox[3] = (bbox[3] - y_offset) * scale_y
if score > top_score:
top_score = score
top_box = bbox
if score > top_score:
top_score = score
@ -729,8 +822,8 @@ class LicensePlateProcessingMixin:
# Return the top scoring bounding box if found
if top_box is not None:
# expand box by 30% to help with OCR
expansion = (top_box[2:] - top_box[:2]) * 0.30
# expand box by 5% to help with OCR
expansion = (top_box[2:] - top_box[:2]) * 0.05
# Expand box
expanded_box = np.array(
@ -742,7 +835,9 @@ class LicensePlateProcessingMixin:
]
).clip(0, [input.shape[1], input.shape[0]] * 2)
logger.debug(f"Found license plate: {expanded_box.astype(int)}")
logger.debug(
f"Found license plate. Bounding box: {expanded_box.astype(int)}"
)
return tuple(expanded_box.astype(int))
else:
return None # No detection above the threshold
@ -750,6 +845,7 @@ class LicensePlateProcessingMixin:
def _should_keep_previous_plate(
self, id, top_plate, top_char_confidences, top_area, avg_confidence
):
"""Determine if the previous plate should be kept over the current one."""
if id not in self.detected_license_plates:
return False
@ -764,238 +860,364 @@ class LicensePlateProcessingMixin:
)
# 1. Normalize metrics
# Length score - use relative comparison
# If lengths are equal, score is 0.5 for both
# If one is longer, it gets a higher score up to 1.0
max_length_diff = 4 # Maximum expected difference in plate lengths
# Length score: Equal lengths = 0.5, penalize extra characters if low confidence
length_diff = len(top_plate) - len(prev_plate)
curr_length_score = 0.5 + (
length_diff / (2 * max_length_diff)
) # Normalize to 0-1
curr_length_score = max(0, min(1, curr_length_score)) # Clamp to 0-1
prev_length_score = 1 - curr_length_score # Inverse relationship
max_length_diff = 3
curr_length_score = 0.5 + (length_diff / (2 * max_length_diff))
curr_length_score = max(0, min(1, curr_length_score))
prev_length_score = 0.5 - (length_diff / (2 * max_length_diff))
prev_length_score = max(0, min(1, prev_length_score))
# Area score (normalize based on max of current and previous)
# Adjust length score based on confidence of extra characters
conf_threshold = 0.75 # Minimum confidence for a character to be "trusted"
if len(top_plate) > len(prev_plate):
extra_conf = min(
top_char_confidences[len(prev_plate) :]
) # Lowest extra char confidence
if extra_conf < conf_threshold:
curr_length_score *= extra_conf / conf_threshold # Penalize if weak
elif len(prev_plate) > len(top_plate):
extra_conf = min(prev_char_confidences[len(top_plate) :])
if extra_conf < conf_threshold:
prev_length_score *= extra_conf / conf_threshold
# Area score: Normalize by max area
max_area = max(top_area, prev_area)
curr_area_score = top_area / max_area
prev_area_score = prev_area / max_area
curr_area_score = top_area / max_area if max_area > 0 else 0
prev_area_score = prev_area / max_area if max_area > 0 else 0
# Average confidence score (already normalized 0-1)
# Confidence scores
curr_conf_score = avg_confidence
prev_conf_score = prev_avg_confidence
# Character confidence comparison score
# Character confidence comparison (average over shared length)
min_length = min(len(top_plate), len(prev_plate))
if min_length > 0:
curr_char_conf = sum(top_char_confidences[:min_length]) / min_length
prev_char_conf = sum(prev_char_confidences[:min_length]) / min_length
else:
curr_char_conf = 0
prev_char_conf = 0
curr_char_conf = prev_char_conf = 0
# 2. Define weights
# Penalize any character below threshold
curr_min_conf = min(top_char_confidences) if top_char_confidences else 0
prev_min_conf = min(prev_char_confidences) if prev_char_confidences else 0
curr_conf_penalty = (
1.0 if curr_min_conf >= conf_threshold else (curr_min_conf / conf_threshold)
)
prev_conf_penalty = (
1.0 if prev_min_conf >= conf_threshold else (prev_min_conf / conf_threshold)
)
# 2. Define weights (boost confidence importance)
weights = {
"length": 0.4,
"area": 0.3,
"avg_confidence": 0.2,
"char_confidence": 0.1,
"length": 0.2,
"area": 0.2,
"avg_confidence": 0.35,
"char_confidence": 0.25,
}
# 3. Calculate weighted scores
# 3. Calculate weighted scores with penalty
curr_score = (
curr_length_score * weights["length"]
+ curr_area_score * weights["area"]
+ curr_conf_score * weights["avg_confidence"]
+ curr_char_conf * weights["char_confidence"]
)
) * curr_conf_penalty
prev_score = (
prev_length_score * weights["length"]
+ prev_area_score * weights["area"]
+ prev_conf_score * weights["avg_confidence"]
+ prev_char_conf * weights["char_confidence"]
)
) * prev_conf_penalty
# 4. Log the comparison for debugging
# 4. Log the comparison
logger.debug(
f"Plate comparison - Current plate: {top_plate} (score: {curr_score:.3f}) vs "
f"Previous plate: {prev_plate} (score: {prev_score:.3f})\n"
f"Plate comparison - Current: {top_plate} (score: {curr_score:.3f}, min_conf: {curr_min_conf:.2f}) vs "
f"Previous: {prev_plate} (score: {prev_score:.3f}, min_conf: {prev_min_conf:.2f})\n"
f"Metrics - Length: {len(top_plate)} vs {len(prev_plate)} (scores: {curr_length_score:.2f} vs {prev_length_score:.2f}), "
f"Area: {top_area} vs {prev_area}, "
f"Avg Conf: {avg_confidence:.2f} vs {prev_avg_confidence:.2f}"
f"Avg Conf: {avg_confidence:.2f} vs {prev_avg_confidence:.2f}, "
f"Char Conf: {curr_char_conf:.2f} vs {prev_char_conf:.2f}"
)
# 5. Return True if we should keep the previous plate (i.e., if it scores higher)
# 5. Return True if previous plate scores higher
return prev_score > curr_score
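# Worked example with hypothetical numbers, using the weights above (length 0.2,
# area 0.2, avg_confidence 0.35, char_confidence 0.25) and the 0.75 penalty threshold:
#   current "ABC123" (6 chars): raw length score 0.67, reduced to 0.49 because the
#     extra character's confidence (0.55) is below 0.75; area 1.00, avg_conf 0.82, char_conf 0.85
#     -> (0.49*0.2 + 1.0*0.2 + 0.82*0.35 + 0.85*0.25) * (0.55/0.75) ~ 0.80 * 0.73 ~ 0.58
#   previous "ABC12" (5 chars): length score 0.33, area 0.90, avg_conf 0.88,
#     char_conf 0.88, min char conf 0.86 so no penalty
#     -> 0.33*0.2 + 0.9*0.2 + 0.88*0.35 + 0.88*0.25 ~ 0.77
#   the low-confidence extra character drags the current read down, so the previous plate is kept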
def __update_yolov9_metrics(self, duration: float) -> None:
"""
Update inference metrics.
"""
self.metrics.yolov9_lpr_fps.value = (
self.metrics.yolov9_lpr_fps.value * 9 + duration
self.metrics.yolov9_lpr_speed.value = (
self.metrics.yolov9_lpr_speed.value * 9 + duration
) / 10
def __update_lpr_metrics(self, duration: float) -> None:
"""
Update inference metrics.
"""
self.metrics.alpr_pps.value = (self.metrics.alpr_pps.value * 9 + duration) / 10
self.metrics.alpr_speed.value = (
self.metrics.alpr_speed.value * 9 + duration
) / 10
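# Both updates above keep a running exponential moving average: each new duration
# contributes 10% and the previous average 90%, so with an average of 0.06s a single
# slow 0.40s inference only moves it to (0.06 * 9 + 0.40) / 10 = 0.094s.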
def lpr_process(self, obj_data: dict[str, any], frame: np.ndarray):
def _generate_plate_event(self, camera: str, plate: str, plate_score: float) -> str:
"""Generate a unique ID for a plate event based on camera and text."""
now = datetime.datetime.now().timestamp()
rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
event_id = f"{now}-{rand_id}"
self.event_metadata_publisher.publish(
EventMetadataTypeEnum.lpr_event_create,
(
now,
camera,
"license_plate",
event_id,
True,
plate_score,
None,
plate,
),
)
return event_id
def lpr_process(
self, obj_data: dict[str, any], frame: np.ndarray, dedicated_lpr: bool = False
):
"""Look for license plates in image."""
self.metrics.alpr_pps.value = self.plates_rec_second.eps()
self.metrics.yolov9_lpr_pps.value = self.plates_det_second.eps()
camera = obj_data if dedicated_lpr else obj_data["camera"]
current_time = int(datetime.datetime.now().timestamp())
id = obj_data["id"]
# don't run for non car objects
if obj_data.get("label") != "car":
logger.debug("Not a processing license plate for non car object.")
if not self.config.cameras[camera].lpr.enabled:
return
# don't run for stationary car objects
if obj_data.get("stationary") == True:
logger.debug("Not a processing license plate for a stationary car object.")
return
# don't overwrite sub label for objects that have a sub label
# that is not a license plate
if obj_data.get("sub_label") and id not in self.detected_license_plates:
logger.debug(
f"Not processing license plate due to existing sub label: {obj_data.get('sub_label')}."
)
return
license_plate: Optional[dict[str, any]] = None
if self.requires_license_plate_detection:
logger.debug("Running manual license_plate detection.")
car_box = obj_data.get("box")
if not car_box:
return
# dedicated LPR cam without frigate+
if dedicated_lpr:
id = "dedicated-lpr"
rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
left, top, right, bottom = car_box
car = rgb[top:bottom, left:right]
# double the size of the car for better box detection
car = cv2.resize(car, (int(2 * car.shape[1]), int(2 * car.shape[0])))
# apply motion mask
rgb[self.config.cameras[obj_data].motion.mask == 0] = [0, 0, 0]
if WRITE_DEBUG_IMAGES:
current_time = int(datetime.datetime.now().timestamp())
cv2.imwrite(
f"debug/frames/car_frame_{current_time}.jpg",
car,
f"debug/frames/dedicated_lpr_masked_{current_time}.jpg",
rgb,
)
yolov9_start = datetime.datetime.now().timestamp()
license_plate = self._detect_license_plate(car)
license_plate = self._detect_license_plate(rgb)
logger.debug(
f"YOLOv9 LPD inference time: {(datetime.datetime.now().timestamp() - yolov9_start) * 1000:.2f} ms"
f"{camera}: YOLOv9 LPD inference time: {(datetime.datetime.now().timestamp() - yolov9_start) * 1000:.2f} ms"
)
self.plates_det_second.update()
self.__update_yolov9_metrics(
datetime.datetime.now().timestamp() - yolov9_start
)
if not license_plate:
logger.debug("Detected no license plates for car object.")
logger.debug(f"{camera}: Detected no license plates in full frame.")
return
license_plate_area = max(
0,
(license_plate[2] - license_plate[0])
* (license_plate[3] - license_plate[1]),
license_plate_area = (license_plate[2] - license_plate[0]) * (
license_plate[3] - license_plate[1]
)
# check that license plate is valid
# double the value because we've doubled the size of the car
if license_plate_area < self.lpr_config.min_area * 2:
logger.debug("License plate is less than min_area")
if license_plate_area < self.lpr_config.min_area:
logger.debug(f"{camera}: License plate area below minimum threshold.")
return
license_plate_frame = car[
license_plate[1] : license_plate[3], license_plate[0] : license_plate[2]
]
else:
# don't run for object without attributes
if not obj_data.get("current_attributes"):
logger.debug("No attributes to parse.")
return
attributes: list[dict[str, any]] = obj_data.get("current_attributes", [])
for attr in attributes:
if attr.get("label") != "license_plate":
continue
if license_plate is None or attr.get("score", 0.0) > license_plate.get(
"score", 0.0
):
license_plate = attr
# no license plates detected in this frame
if not license_plate:
return
license_plate_box = license_plate.get("box")
# check that license plate is valid
if (
not license_plate_box
or area(license_plate_box) < self.lpr_config.min_area
):
logger.debug(f"Invalid license plate box {license_plate}")
return
license_plate_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
# Expand the license_plate_box by 30%
box_array = np.array(license_plate_box)
expansion = (box_array[2:] - box_array[:2]) * 0.30
expanded_box = np.array(
[
license_plate_box[0] - expansion[0],
license_plate_box[1] - expansion[1],
license_plate_box[2] + expansion[0],
license_plate_box[3] + expansion[1],
]
).clip(0, [license_plate_frame.shape[1], license_plate_frame.shape[0]] * 2)
# Crop using the expanded box
license_plate_frame = license_plate_frame[
int(expanded_box[1]) : int(expanded_box[3]),
int(expanded_box[0]) : int(expanded_box[2]),
license_plate_frame = rgb[
license_plate[1] : license_plate[3],
license_plate[0] : license_plate[2],
]
# double the size of the license plate frame for better OCR
license_plate_frame = cv2.resize(
license_plate_frame,
(
int(2 * license_plate_frame.shape[1]),
int(2 * license_plate_frame.shape[0]),
),
)
if WRITE_DEBUG_IMAGES:
current_time = int(datetime.datetime.now().timestamp())
cv2.imwrite(
f"debug/frames/license_plate_frame_{current_time}.jpg",
# Double the size for better OCR
license_plate_frame = cv2.resize(
license_plate_frame,
(
int(2 * license_plate_frame.shape[1]),
int(2 * license_plate_frame.shape[0]),
),
)
start = datetime.datetime.now().timestamp()
else:
id = obj_data["id"]
# don't run for non car or non license plate (dedicated lpr with frigate+) objects
if (
obj_data.get("label") != "car"
and obj_data.get("label") != "license_plate"
):
logger.debug(
f"{camera}: Not a processing license plate for non car object."
)
return
# don't run for stationary car objects
if obj_data.get("stationary") == True:
logger.debug(
f"{camera}: Not a processing license plate for a stationary car object."
)
return
# don't overwrite sub label for objects that have a sub label
# that is not a license plate
if obj_data.get("sub_label") and id not in self.detected_license_plates:
logger.debug(
f"{camera}: Not processing license plate due to existing sub label: {obj_data.get('sub_label')}."
)
return
license_plate: Optional[dict[str, any]] = None
if "license_plate" not in self.config.cameras[camera].objects.track:
logger.debug(f"{camera}: Running manual license_plate detection.")
car_box = obj_data.get("box")
if not car_box:
return
rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
left, top, right, bottom = car_box
car = rgb[top:bottom, left:right]
# double the size of the car for better box detection
car = cv2.resize(car, (int(2 * car.shape[1]), int(2 * car.shape[0])))
if WRITE_DEBUG_IMAGES:
cv2.imwrite(
f"debug/frames/car_frame_{current_time}.jpg",
car,
)
yolov9_start = datetime.datetime.now().timestamp()
license_plate = self._detect_license_plate(car)
logger.debug(
f"{camera}: YOLOv9 LPD inference time: {(datetime.datetime.now().timestamp() - yolov9_start) * 1000:.2f} ms"
)
self.plates_det_second.update()
self.__update_yolov9_metrics(
datetime.datetime.now().timestamp() - yolov9_start
)
if not license_plate:
logger.debug(
f"{camera}: Detected no license plates for car object."
)
return
license_plate_area = max(
0,
(license_plate[2] - license_plate[0])
* (license_plate[3] - license_plate[1]),
)
# check that license plate is valid
# double the value because we've doubled the size of the car
if (
license_plate_area
< self.config.cameras[obj_data["camera"]].lpr.min_area * 2
):
logger.debug(f"{camera}: License plate is less than min_area")
return
license_plate_frame = car[
license_plate[1] : license_plate[3],
license_plate[0] : license_plate[2],
]
else:
# don't run for object without attributes if this isn't dedicated lpr with frigate+
if (
not obj_data.get("current_attributes")
and obj_data.get("label") != "license_plate"
):
logger.debug(f"{camera}: No attributes to parse.")
return
if obj_data.get("label") == "car":
attributes: list[dict[str, any]] = obj_data.get(
"current_attributes", []
)
for attr in attributes:
if attr.get("label") != "license_plate":
continue
if license_plate is None or attr.get(
"score", 0.0
) > license_plate.get("score", 0.0):
license_plate = attr
# no license plates detected in this frame
if not license_plate:
return
# we are using dedicated lpr with frigate+
if obj_data.get("label") == "license_plate":
license_plate = obj_data
license_plate_box = license_plate.get("box")
# check that license plate is valid
if (
not license_plate_box
or area(license_plate_box)
< self.config.cameras[obj_data["camera"]].lpr.min_area
):
logger.debug(
f"{camera}: Area for license plate box {area(license_plate_box)} is less than min_area {self.config.cameras[obj_data['camera']].lpr.min_area}"
)
return
license_plate_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
# Expand the license_plate_box by 10%
box_array = np.array(license_plate_box)
expansion = (box_array[2:] - box_array[:2]) * 0.10
expanded_box = np.array(
[
license_plate_box[0] - expansion[0],
license_plate_box[1] - expansion[1],
license_plate_box[2] + expansion[0],
license_plate_box[3] + expansion[1],
]
).clip(
0, [license_plate_frame.shape[1], license_plate_frame.shape[0]] * 2
)
# Crop using the expanded box
license_plate_frame = license_plate_frame[
int(expanded_box[1]) : int(expanded_box[3]),
int(expanded_box[0]) : int(expanded_box[2]),
]
# double the size of the license plate frame for better OCR
license_plate_frame = cv2.resize(
license_plate_frame,
(
int(2 * license_plate_frame.shape[1]),
int(2 * license_plate_frame.shape[0]),
),
)
if WRITE_DEBUG_IMAGES:
cv2.imwrite(
f"debug/frames/license_plate_frame_{current_time}.jpg",
license_plate_frame,
)
# run detection, returns results sorted by confidence, best first
start = datetime.datetime.now().timestamp()
license_plates, confidences, areas = self._process_license_plate(
license_plate_frame
camera, id, license_plate_frame
)
self.plates_rec_second.update()
self.__update_lpr_metrics(datetime.datetime.now().timestamp() - start)
logger.debug(f"Text boxes: {license_plates}")
logger.debug(f"Confidences: {confidences}")
logger.debug(f"Areas: {areas}")
if license_plates:
for plate, confidence, text_area in zip(license_plates, confidences, areas):
avg_confidence = (
@ -1003,10 +1225,9 @@ class LicensePlateProcessingMixin:
)
logger.debug(
f"Detected text: {plate} (average confidence: {avg_confidence:.2f}, area: {text_area} pixels)"
f"{camera}: Detected text: {plate} (average confidence: {avg_confidence:.2f}, area: {text_area} pixels)"
)
else:
# no plates found
logger.debug("No text detected")
return
@ -1021,21 +1242,57 @@ class LicensePlateProcessingMixin:
else 0
)
# Check against minimum confidence threshold
if avg_confidence < self.lpr_config.recognition_threshold:
logger.debug(
f"{camera}: Average confidence {avg_confidence} is less than threshold ({self.lpr_config.recognition_threshold})"
)
return
# For dedicated LPR cameras, match or assign plate ID using Jaro-Winkler distance
if (
dedicated_lpr
and "license_plate" not in self.config.cameras[camera].objects.track
):
plate_id = None
for existing_id, data in self.detected_license_plates.items():
if (
data["camera"] == camera
and data["last_seen"] is not None
and current_time - data["last_seen"]
<= self.config.cameras[camera].lpr.expire_time
):
similarity = jaro_winkler(data["plate"], top_plate)
if similarity >= self.similarity_threshold:
plate_id = existing_id
logger.debug(
f"{camera}: Matched plate {top_plate} to {data['plate']} (similarity: {similarity:.3f})"
)
break
if plate_id is None:
plate_id = self._generate_plate_event(
obj_data, top_plate, avg_confidence
)
logger.debug(
f"{camera}: New plate event for dedicated LPR camera {plate_id}: {top_plate}"
)
else:
logger.debug(
f"{camera}: Matched existing plate event for dedicated LPR camera {plate_id}: {top_plate}"
)
self.detected_license_plates[plate_id]["last_seen"] = current_time
id = plate_id
# Check if we have a previously detected plate for this ID
if id in self.detected_license_plates:
if self._should_keep_previous_plate(
id, top_plate, top_char_confidences, top_area, avg_confidence
):
logger.debug("Keeping previous plate")
logger.debug(f"{camera}: Keeping previous plate")
return
# Check against minimum confidence threshold
if avg_confidence < self.lpr_config.recognition_threshold:
logger.debug(
f"Average confidence {avg_confidence} is less than threshold ({self.lpr_config.recognition_threshold})"
)
return
# Determine subLabel based on known plates, use regex matching
# Default to the detected plate, use label name if there's a match
sub_label = next(
@ -1062,11 +1319,28 @@ class LicensePlateProcessingMixin:
(id, top_plate, avg_confidence),
)
# save the best snapshot for dedicated lpr cams not using frigate+
if (
dedicated_lpr
and "license_plate" not in self.config.cameras[camera].objects.track
):
logger.debug(
f"{camera}: Writing snapshot for {id}, {top_plate}, {current_time}"
)
frame_bgr = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
_, encoded_img = cv2.imencode(".jpg", frame_bgr)
self.sub_label_publisher.publish(
EventMetadataTypeEnum.save_lpr_snapshot,
(base64.b64encode(encoded_img).decode("ASCII"), id, camera),
)
self.detected_license_plates[id] = {
"plate": top_plate,
"char_confidences": top_char_confidences,
"area": top_area,
"obj_data": obj_data,
"camera": camera,
"last_seen": current_time if dedicated_lpr else None,
}
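# Illustrative sketch of the dedicated-LPR fuzzy matching above, using the
# jaro_winkler import from python-Levenshtein; the plate strings are hypothetical.
#   from Levenshtein import jaro_winkler
#   jaro_winkler("7ABC123", "7ABC128")  # ~0.94, one OCR'd character differs
#   # >= similarity_threshold (0.8), so this read would be folded into the existing
#   # plate event rather than creating a new one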
def handle_request(self, topic, request_data) -> dict[str, any] | None:

View File

@ -139,7 +139,7 @@ class LicensePlatePostProcessor(LicensePlateProcessingMixin, PostProcessorApi):
scale_y = image.shape[0] / detect_height
# Determine which box to enlarge based on detection mode
if self.requires_license_plate_detection:
if "license_plate" not in self.config.cameras[camera_name].objects.track:
# Scale and enlarge the car box
box = obj_data.get("box")
if not box:
@ -189,7 +189,7 @@ class LicensePlatePostProcessor(LicensePlateProcessingMixin, PostProcessorApi):
)
keyframe_obj_data = obj_data.copy()
if self.requires_license_plate_detection:
if "license_plate" not in self.config.cameras[camera_name].objects.track:
# car box
keyframe_obj_data["box"] = [new_left, new_top, new_right, new_bottom]
else:

View File

@ -19,6 +19,12 @@ from frigate.comms.event_metadata_updater import (
)
from frigate.config import FrigateConfig
from frigate.const import FACE_DIR, MODEL_CACHE_DIR
from frigate.data_processing.common.face.model import (
ArcFaceRecognizer,
FaceNetRecognizer,
FaceRecognizer,
)
from frigate.util.builtin import EventsPerSecond
from frigate.util.image import area
from ..types import DataProcessorMetrics
@ -28,7 +34,8 @@ logger = logging.getLogger(__name__)
MAX_DETECTION_HEIGHT = 1080
MIN_MATCHING_FACES = 2
MAX_FACES_ATTEMPTS_AFTER_REC = 6
MAX_FACE_ATTEMPTS = 12
class FaceRealTimeProcessor(RealTimeProcessorApi):
@ -42,10 +49,10 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
self.face_config = config.face_recognition
self.sub_label_publisher = sub_label_publisher
self.face_detector: cv2.FaceDetectorYN = None
self.landmark_detector: cv2.face.FacemarkLBF = None
self.recognizer: cv2.face.LBPHFaceRecognizer = None
self.requires_face_detection = "face" not in self.config.objects.all_objects
self.detected_faces: dict[str, float] = {}
self.person_face_history: dict[str, list[tuple[str, float, int]]] = {}
self.recognizer: FaceRecognizer | None = None
self.faces_per_second = EventsPerSecond()
download_path = os.path.join(MODEL_CACHE_DIR, "facedet")
self.model_files = {
@ -72,7 +79,13 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
self.__build_detector()
self.label_map: dict[int, str] = {}
self.__build_classifier()
if self.face_config.model_size == "small":
self.recognizer = FaceNetRecognizer(self.config)
else:
self.recognizer = ArcFaceRecognizer(self.config)
self.recognizer.build()
def __download_models(self, path: str) -> None:
try:
@ -92,126 +105,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
score_threshold=0.5,
nms_threshold=0.3,
)
self.landmark_detector = cv2.face.createFacemarkLBF()
self.landmark_detector.loadModel(
os.path.join(MODEL_CACHE_DIR, "facedet/landmarkdet.yaml")
)
def __build_classifier(self) -> None:
if not self.landmark_detector:
return None
labels = []
faces = []
dir = "/media/frigate/clips/faces"
for idx, name in enumerate(os.listdir(dir)):
if name == "train":
continue
face_folder = os.path.join(dir, name)
if not os.path.isdir(face_folder):
continue
self.label_map[idx] = name
for image in os.listdir(face_folder):
img = cv2.imread(os.path.join(face_folder, image))
if img is None:
continue
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = self.__align_face(img, img.shape[1], img.shape[0])
faces.append(img)
labels.append(idx)
if not faces:
return
self.recognizer: cv2.face.LBPHFaceRecognizer = (
cv2.face.LBPHFaceRecognizer_create(
radius=2, threshold=(1 - self.face_config.min_score) * 1000
)
)
self.recognizer.train(faces, np.array(labels))
def __align_face(
self,
image: np.ndarray,
output_width: int,
output_height: int,
) -> np.ndarray:
_, lands = self.landmark_detector.fit(
image, np.array([(0, 0, image.shape[1], image.shape[0])])
)
landmarks: np.ndarray = lands[0][0]
# get landmarks for eyes
leftEyePts = landmarks[42:48]
rightEyePts = landmarks[36:42]
# compute the center of mass for each eye
leftEyeCenter = leftEyePts.mean(axis=0).astype("int")
rightEyeCenter = rightEyePts.mean(axis=0).astype("int")
# compute the angle between the eye centroids
dY = rightEyeCenter[1] - leftEyeCenter[1]
dX = rightEyeCenter[0] - leftEyeCenter[0]
angle = np.degrees(np.arctan2(dY, dX)) - 180
# compute the desired right eye x-coordinate based on the
# desired x-coordinate of the left eye
desiredRightEyeX = 1.0 - 0.35
# determine the scale of the new resulting image by taking
# the ratio of the distance between eyes in the *current*
# image to the ratio of distance between eyes in the
# *desired* image
dist = np.sqrt((dX**2) + (dY**2))
desiredDist = desiredRightEyeX - 0.35
desiredDist *= output_width
scale = desiredDist / dist
# compute center (x, y)-coordinates (i.e., the median point)
# between the two eyes in the input image
# grab the rotation matrix for rotating and scaling the face
eyesCenter = (
int((leftEyeCenter[0] + rightEyeCenter[0]) // 2),
int((leftEyeCenter[1] + rightEyeCenter[1]) // 2),
)
M = cv2.getRotationMatrix2D(eyesCenter, angle, scale)
# update the translation component of the matrix
tX = output_width * 0.5
tY = output_height * 0.35
M[0, 2] += tX - eyesCenter[0]
M[1, 2] += tY - eyesCenter[1]
# apply the affine transformation
return cv2.warpAffine(
image, M, (output_width, output_height), flags=cv2.INTER_CUBIC
)
def __get_blur_factor(self, input: np.ndarray) -> float:
"""Calculates the factor for the confidence based on the blur of the image."""
if not self.face_config.blur_confidence_filter:
return 1.0
variance = cv2.Laplacian(input, cv2.CV_64F).var()
if variance < 60: # image is very blurry
return 0.96
elif variance < 70: # image moderately blurry
return 0.98
elif variance < 80: # image is slightly blurry
return 0.99
else:
return 1.0
def __clear_classifier(self) -> None:
self.face_recognizer = None
self.label_map = {}
self.faces_per_second.start()
def __detect_face(
self, input: np.ndarray, threshold: float
@ -227,6 +121,8 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
scale_factor = MAX_DETECTION_HEIGHT / input.shape[0]
new_width = int(scale_factor * input.shape[1])
input = cv2.resize(input, (new_width, MAX_DETECTION_HEIGHT))
else:
scale_factor = 1
self.face_detector.setInputSize((input.shape[1], input.shape[0]))
faces = self.face_detector.detect(input)
@ -241,10 +137,10 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
continue
raw_bbox = potential_face[0:4].astype(np.uint16)
x: int = max(raw_bbox[0], 0)
y: int = max(raw_bbox[1], 0)
w: int = raw_bbox[2]
h: int = raw_bbox[3]
x: int = int(max(raw_bbox[0], 0) / scale_factor)
y: int = int(max(raw_bbox[1], 0) / scale_factor)
w: int = int(raw_bbox[2] / scale_factor)
h: int = int(raw_bbox[3] / scale_factor)
bbox = (x, y, x + w, y + h)
if face is None or area(bbox) > area(face):
@ -252,40 +148,19 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
return face
def __classify_face(self, face_image: np.ndarray) -> tuple[str, float] | None:
if not self.landmark_detector:
return None
if not self.label_map or not self.recognizer:
self.__build_classifier()
if not self.recognizer:
return None
# face recognition is best run on grayscale images
img = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
# get blur factor before aligning face
blur_factor = self.__get_blur_factor(img)
logger.debug(f"face detected with bluriness {blur_factor}")
# align face and run recognition
img = self.__align_face(img, img.shape[1], img.shape[0])
index, distance = self.recognizer.predict(img)
if index == -1:
return None
score = (1.0 - (distance / 1000)) * blur_factor
return self.label_map[index], round(score, 2)
def __update_metrics(self, duration: float) -> None:
self.metrics.face_rec_fps.value = (
self.metrics.face_rec_fps.value * 9 + duration
self.faces_per_second.update()
self.metrics.face_rec_speed.value = (
self.metrics.face_rec_speed.value * 9 + duration
) / 10
def process_frame(self, obj_data: dict[str, any], frame: np.ndarray):
"""Look for faces in image."""
self.metrics.face_rec_fps.value = self.faces_per_second.eps()
if not self.config.cameras[obj_data["camera"]].face_recognition.enabled:
return
start = datetime.datetime.now().timestamp()
id = obj_data["id"]
@ -296,12 +171,29 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
# don't overwrite sub label for objects that have a sub label
# that is not a face
if obj_data.get("sub_label") and id not in self.detected_faces:
if obj_data.get("sub_label") and id not in self.person_face_history:
logger.debug(
f"Not processing face due to existing sub label: {obj_data.get('sub_label')}."
)
return
# check if we have hit limits
if (
id in self.person_face_history
and len(self.person_face_history[id]) >= MAX_FACES_ATTEMPTS_AFTER_REC
):
# if we are at max attempts after rec and we have a rec
if obj_data.get("sub_label"):
logger.debug(
"Not processing due to hitting max attempts after true recognition."
)
return
# if we don't have a rec and are at max attempts
if len(self.person_face_history[id]) >= MAX_FACE_ATTEMPTS:
logger.debug("Not processing due to hitting max rec attempts.")
return
face: Optional[dict[str, any]] = None
if self.requires_face_detection:
@ -324,7 +216,11 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
max(0, face_box[1]) : min(frame.shape[0], face_box[3]),
max(0, face_box[0]) : min(frame.shape[1], face_box[2]),
]
face_frame = cv2.cvtColor(face_frame, cv2.COLOR_RGB2BGR)
try:
face_frame = cv2.cvtColor(face_frame, cv2.COLOR_RGB2BGR)
except Exception:
return
else:
# don't run for object without attributes
if not obj_data.get("current_attributes"):
@ -346,7 +242,11 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
face_box = face.get("box")
# check that face is valid
if not face_box or area(face_box) < self.config.face_recognition.min_area:
if (
not face_box
or area(face_box)
< self.config.cameras[obj_data["camera"]].face_recognition.min_area
):
logger.debug(f"Invalid face box {face}")
return
@ -357,53 +257,71 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
max(0, face_box[0]) : min(frame.shape[1], face_box[2]),
]
res = self.__classify_face(face_frame)
res = self.recognizer.classify(face_frame)
if not res:
self.__update_metrics(datetime.datetime.now().timestamp() - start)
return
sub_label, score = res
# calculate the overall face score as the probability * area of face
# this will help to reduce false positives from small side-angle faces
# if a large front-on face image may have scored slightly lower but
# is more likely to be accurate due to the larger face area
face_score = round(score * face_frame.shape[0] * face_frame.shape[1], 2)
if score <= self.face_config.unknown_score:
sub_label = "unknown"
logger.debug(
f"Detected best face for person as: {sub_label} with probability {score} and overall face score {face_score}"
f"Detected best face for person as: {sub_label} with probability {score}"
)
if self.config.face_recognition.save_attempts:
# write face to library
folder = os.path.join(FACE_DIR, "train")
file = os.path.join(folder, f"{id}-{sub_label}-{score}-{face_score}.webp")
os.makedirs(folder, exist_ok=True)
cv2.imwrite(file, face_frame)
if score < self.config.face_recognition.recognition_threshold:
logger.debug(
f"Recognized face distance {score} is less than threshold {self.config.face_recognition.recognition_threshold}"
)
self.__update_metrics(datetime.datetime.now().timestamp() - start)
return
if id in self.detected_faces and face_score <= self.detected_faces[id]:
logger.debug(
f"Recognized face distance {score} and overall score {face_score} is less than previous overall face score ({self.detected_faces.get(id)})."
)
self.__update_metrics(datetime.datetime.now().timestamp() - start)
return
self.sub_label_publisher.publish(
EventMetadataTypeEnum.sub_label, (id, sub_label, score)
self.write_face_attempt(
face_frame, id, datetime.datetime.now().timestamp(), sub_label, score
)
self.detected_faces[id] = face_score
if id not in self.person_face_history:
self.person_face_history[id] = []
self.person_face_history[id].append(
(sub_label, score, face_frame.shape[0] * face_frame.shape[1])
)
(weighted_sub_label, weighted_score) = self.weighted_average(
self.person_face_history[id]
)
if weighted_score >= self.face_config.recognition_threshold:
self.sub_label_publisher.publish(
EventMetadataTypeEnum.sub_label,
(id, weighted_sub_label, weighted_score),
)
self.__update_metrics(datetime.datetime.now().timestamp() - start)
def handle_request(self, topic, request_data) -> dict[str, any] | None:
if topic == EmbeddingsRequestEnum.clear_face_classifier.value:
self.__clear_classifier()
self.recognizer.clear()
elif topic == EmbeddingsRequestEnum.recognize_face.value:
img = cv2.imdecode(
np.frombuffer(base64.b64decode(request_data["image"]), dtype=np.uint8),
cv2.IMREAD_COLOR,
)
# detect faces with lower confidence since we expect the face
# to be visible in uploaded images
face_box = self.__detect_face(img, 0.5)
if not face_box:
return {"message": "No face was detected.", "success": False}
face = img[face_box[1] : face_box[3], face_box[0] : face_box[2]]
res = self.recognizer.classify(face)
if not res:
return {"success": False, "message": "No face was recognized."}
sub_label, score = res
if score <= self.face_config.unknown_score:
sub_label = "unknown"
return {"success": True, "score": score, "face_name": sub_label}
elif topic == EmbeddingsRequestEnum.register_face.value:
rand_id = "".join(
random.choices(string.ascii_lowercase + string.digits, k=6)
@ -445,16 +363,16 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
with open(file, "wb") as output:
output.write(thumbnail.tobytes())
self.__clear_classifier()
self.recognizer.clear()
return {
"message": "Successfully registered face.",
"success": True,
}
elif topic == EmbeddingsRequestEnum.reprocess_face.value:
current_file: str = request_data["image_file"]
id = current_file[0 : current_file.index("-", current_file.index("-") + 1)]
face_score = current_file[current_file.rfind("-") : current_file.rfind(".")]
(id_time, id_rand, timestamp, _, _) = current_file.split("-")
img = None
id = f"{id_time}-{id_rand}"
if current_file:
img = cv2.imread(current_file)
@ -465,21 +383,95 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
"success": False,
}
res = self.__classify_face(img)
res = self.recognizer.classify(img)
if not res:
return
sub_label, score = res
if score <= self.face_config.unknown_score:
sub_label = "unknown"
if self.config.face_recognition.save_attempts:
# write face to library
folder = os.path.join(FACE_DIR, "train")
os.makedirs(folder, exist_ok=True)
new_file = os.path.join(
folder, f"{id}-{sub_label}-{score}-{face_score}.webp"
folder, f"{id}-{timestamp}-{sub_label}-{score}.webp"
)
shutil.move(current_file, new_file)
def expire_object(self, object_id: str):
if object_id in self.detected_faces:
self.detected_faces.pop(object_id)
if object_id in self.person_face_history:
self.person_face_history.pop(object_id)
def weighted_average(
self, results_list: list[tuple[str, float, int]], max_weight: int = 4000
):
"""
Calculates a robust weighted average, capping the area weight and giving more weight to higher scores.
Args:
results_list: A list of tuples, where each tuple contains (name, score, face_area).
max_weight: The maximum weight to apply based on face area.
Returns:
A tuple containing the prominent name and its weighted average score, or (None, 0.0) if the list is empty.
"""
if not results_list:
return None, 0.0
weighted_scores = {}
total_weights = {}
for name, score, face_area in results_list:
if name == "unknown":
continue
if name not in weighted_scores:
weighted_scores[name] = 0.0
total_weights[name] = 0.0
# Capped weight based on face area
weight = min(face_area, max_weight)
# Score-based weighting (higher scores get more weight)
weight *= (score - self.face_config.unknown_score) * 10
weighted_scores[name] += score * weight
total_weights[name] += weight
if not weighted_scores:
return None, 0.0
best_name = max(weighted_scores, key=weighted_scores.get)
weighted_average = weighted_scores[best_name] / total_weights[best_name]
return best_name, weighted_average
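For reference, a minimal standalone sketch of how the weighted average above behaves; the unknown_score of 0.5 and the history values are illustrative, not taken from this diff. Larger, higher-scoring sightings dominate the result:

def weighted_average_demo(history, unknown_score=0.5, max_weight=4000):
    # mirrors the capped, score-weighted averaging shown above
    weighted_scores, total_weights = {}, {}
    for name, score, face_area in history:
        if name == "unknown":
            continue
        weight = min(face_area, max_weight)      # cap the area contribution
        weight *= (score - unknown_score) * 10   # favor confident recognitions
        weighted_scores[name] = weighted_scores.get(name, 0.0) + score * weight
        total_weights[name] = total_weights.get(name, 0.0) + weight
    if not weighted_scores:
        return None, 0.0
    best = max(weighted_scores, key=weighted_scores.get)
    return best, weighted_scores[best] / total_weights[best]

# two small, low-confidence "bob" sightings vs one large, confident "alice"
history = [("bob", 0.55, 900), ("bob", 0.60, 1200), ("alice", 0.90, 6000)]
name, score = weighted_average_demo(history)
print(name, round(score, 2))  # alice 0.9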
def write_face_attempt(
self,
frame: np.ndarray,
event_id: str,
timestamp: float,
sub_label: str,
score: float,
) -> None:
if self.config.face_recognition.save_attempts:
# write face to library
folder = os.path.join(FACE_DIR, "train")
file = os.path.join(
folder, f"{event_id}-{timestamp}-{sub_label}-{score}.webp"
)
os.makedirs(folder, exist_ok=True)
cv2.imwrite(file, frame)
files = sorted(
filter(lambda f: (f.endswith(".webp")), os.listdir(folder)),
key=lambda f: os.path.getctime(os.path.join(folder, f)),
reverse=True,
)
# delete oldest face image if maximum is reached
if len(files) > self.config.face_recognition.save_attempts:
os.unlink(os.path.join(folder, files[-1]))
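A quick illustration of the rotation above: because the listing is sorted newest-first by creation time, files[-1] is the oldest attempt, and only one file is removed per call. The directory and filenames below are throwaway test data, assuming sub-second timestamp resolution:

import os
import tempfile
import time

def rotate_attempts(folder: str, keep: int) -> None:
    # same sort-and-unlink logic as write_face_attempt above
    files = sorted(
        (f for f in os.listdir(folder) if f.endswith(".webp")),
        key=lambda f: os.path.getctime(os.path.join(folder, f)),
        reverse=True,  # newest first, so the last entry is the oldest
    )
    if len(files) > keep:
        os.unlink(os.path.join(folder, files[-1]))

with tempfile.TemporaryDirectory() as folder:
    for i in range(4):
        open(os.path.join(folder, f"attempt-{i}.webp"), "w").close()
        time.sleep(0.05)
    rotate_attempts(folder, keep=3)
    print(sorted(os.listdir(folder)))  # attempt-0.webp (the oldest) is gone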

View File

@ -35,9 +35,14 @@ class LicensePlateRealTimeProcessor(LicensePlateProcessingMixin, RealTimeProcess
self.sub_label_publisher = sub_label_publisher
super().__init__(config, metrics)
def process_frame(self, obj_data: dict[str, any], frame: np.ndarray):
def process_frame(
self,
obj_data: dict[str, any],
frame: np.ndarray,
dedicated_lpr: bool | None = False,
):
"""Look for license plates in image."""
self.lpr_process(obj_data, frame)
self.lpr_process(obj_data, frame, dedicated_lpr)
def handle_request(self, topic, request_data) -> dict[str, any] | None:
return

View File

@ -6,18 +6,26 @@ from multiprocessing.sharedctypes import Synchronized
class DataProcessorMetrics:
image_embeddings_fps: Synchronized
text_embeddings_sps: Synchronized
image_embeddings_speed: Synchronized
text_embeddings_speed: Synchronized
face_rec_speed: Synchronized
face_rec_fps: Synchronized
alpr_speed: Synchronized
alpr_pps: Synchronized
yolov9_lpr_fps: Synchronized
yolov9_lpr_speed: Synchronized
yolov9_lpr_pps: Synchronized
def __init__(self):
self.image_embeddings_fps = mp.Value("d", 0.01)
self.text_embeddings_sps = mp.Value("d", 0.01)
self.face_rec_fps = mp.Value("d", 0.01)
self.alpr_pps = mp.Value("d", 0.01)
self.yolov9_lpr_fps = mp.Value("d", 0.01)
self.image_embeddings_speed = mp.Value("d", 0.01)
self.image_embeddings_eps = mp.Value("d", 0.0)
self.text_embeddings_speed = mp.Value("d", 0.01)
self.text_embeddings_eps = mp.Value("d", 0.0)
self.face_rec_speed = mp.Value("d", 0.01)
self.face_rec_fps = mp.Value("d", 0.0)
self.alpr_speed = mp.Value("d", 0.01)
self.alpr_pps = mp.Value("d", 0.0)
self.yolov9_lpr_speed = mp.Value("d", 0.01)
self.yolov9_lpr_pps = mp.Value("d", 0.0)
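All of the *_speed values above are maintained with the same 10-sample exponential moving average used by the processors (value = (value * 9 + duration) / 10). A small sketch of how it converges, with made-up durations:

def update_speed(value: float, duration: float) -> float:
    # smoothing used for the *_speed metrics throughout the data processors
    return (value * 9 + duration) / 10

speed = 0.01  # same seed as the mp.Value("d", 0.01) fields above
for _ in range(50):
    speed = update_speed(speed, 0.2)  # pretend every inference takes 200 ms
print(round(speed, 3))  # 0.199, close to the 0.2 steady state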
class DataProcessorModelRunner:

View File

@ -33,11 +33,12 @@ class InputDTypeEnum(str, Enum):
class ModelTypeEnum(str, Enum):
dfine = "dfine"
rfdetr = "rfdetr"
ssd = "ssd"
yolox = "yolox"
yolov9 = "yolov9"
yolonas = "yolonas"
dfine = "dfine"
yologeneric = "yolo-generic"

View File

@ -12,6 +12,7 @@ from frigate.detectors.detector_config import (
from frigate.util.model import (
get_ort_providers,
post_process_dfine,
post_process_rfdetr,
post_process_yolov9,
)
@ -73,7 +74,9 @@ class ONNXDetector(DetectionApi):
model_input_name = self.model.get_inputs()[0].name
tensor_output = self.model.run(None, {model_input_name: tensor_input})
if self.onnx_model_type == ModelTypeEnum.yolonas:
if self.onnx_model_type == ModelTypeEnum.rfdetr:
return post_process_rfdetr(tensor_output)
elif self.onnx_model_type == ModelTypeEnum.yolonas:
predictions = tensor_output[0]
detections = np.zeros((20, 6), np.float32)
@ -94,7 +97,10 @@ class ONNXDetector(DetectionApi):
x_max / self.w,
]
return detections
elif self.onnx_model_type == ModelTypeEnum.yolov9:
elif (
self.onnx_model_type == ModelTypeEnum.yolov9
or self.onnx_model_type == ModelTypeEnum.yologeneric
):
predictions: np.ndarray = tensor_output[0]
return post_process_yolov9(predictions, self.w, self.h)
else:

View File

@ -10,7 +10,11 @@ from typing_extensions import Literal
from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
from frigate.util.model import post_process_yolov9
from frigate.util.model import (
post_process_dfine,
post_process_rfdetr,
post_process_yolov9,
)
logger = logging.getLogger(__name__)
@ -25,9 +29,12 @@ class OvDetectorConfig(BaseDetectorConfig):
class OvDetector(DetectionApi):
type_key = DETECTOR_KEY
supported_models = [
ModelTypeEnum.dfine,
ModelTypeEnum.rfdetr,
ModelTypeEnum.ssd,
ModelTypeEnum.yolonas,
ModelTypeEnum.yolov9,
ModelTypeEnum.yologeneric,
ModelTypeEnum.yolox,
]
@ -163,12 +170,34 @@ class OvDetector(DetectionApi):
infer_request = self.interpreter.create_infer_request()
# TODO: see if we can use shared_memory=True
input_tensor = ov.Tensor(array=tensor_input)
if self.ov_model_type == ModelTypeEnum.dfine:
infer_request.set_tensor("images", input_tensor)
target_sizes_tensor = ov.Tensor(
np.array([[self.h, self.w]], dtype=np.int64)
)
infer_request.set_tensor("orig_target_sizes", target_sizes_tensor)
infer_request.infer()
tensor_output = (
infer_request.get_output_tensor(0).data,
infer_request.get_output_tensor(1).data,
infer_request.get_output_tensor(2).data,
)
return post_process_dfine(tensor_output, self.w, self.h)
infer_request.infer(input_tensor)
detections = np.zeros((20, 6), np.float32)
if self.model_invalid:
return detections
elif self.ov_model_type == ModelTypeEnum.rfdetr:
return post_process_rfdetr(
[
infer_request.get_output_tensor(0).data,
infer_request.get_output_tensor(1).data,
]
)
elif self.ov_model_type == ModelTypeEnum.ssd:
results = infer_request.get_output_tensor(0).data[0][0]
@ -203,7 +232,10 @@ class OvDetector(DetectionApi):
x_max / self.w,
]
return detections
elif self.ov_model_type == ModelTypeEnum.yolov9:
elif (
self.ov_model_type == ModelTypeEnum.yolov9
or self.ov_model_type == ModelTypeEnum.yologeneric
):
out_tensor = infer_request.get_output_tensor(0).data
return post_process_yolov9(out_tensor, self.w, self.h)
elif self.ov_model_type == ModelTypeEnum.yolox:

View File

@ -197,6 +197,14 @@ class EmbeddingsContext:
},
)
def recognize_face(self, image_data: bytes) -> dict[str, any]:
return self.requestor.send_data(
EmbeddingsRequestEnum.recognize_face.value,
{
"image": base64.b64encode(image_data).decode("ASCII"),
},
)
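A hedged, caller-side sketch of the new recognize_face helper; `context` stands in for an existing EmbeddingsContext and "face.jpg" is a placeholder path. The response shape matches the face processor's handler (success with score and face_name, or a failure message):

with open("face.jpg", "rb") as f:
    result = context.recognize_face(f.read())

if result.get("success"):
    print(f"recognized {result['face_name']} with score {result['score']}")
else:
    print(result.get("message"))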
def get_face_ids(self, name: str) -> list[str]:
sql_query = f"""
SELECT
@ -228,6 +236,10 @@ class EmbeddingsContext:
if len(os.listdir(folder)) == 0:
os.rmdir(folder)
self.requestor.send_data(
EmbeddingsRequestEnum.clear_face_classifier.value, None
)
def update_description(self, event_id: str, description: str) -> None:
self.requestor.send_data(
EmbeddingsRequestEnum.embed_description.value,
@ -238,3 +250,6 @@ class EmbeddingsContext:
return self.requestor.send_data(
EmbeddingsRequestEnum.reprocess_plate.value, {"event": event}
)
def reindex_embeddings(self) -> dict[str, any]:
return self.requestor.send_data(EmbeddingsRequestEnum.reindex.value, {})

View File

@ -3,6 +3,7 @@
import datetime
import logging
import os
import threading
import time
from numpy import ndarray
@ -20,7 +21,7 @@ from frigate.data_processing.types import DataProcessorMetrics
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.models import Event
from frigate.types import ModelStatusTypesEnum
from frigate.util.builtin import serialize
from frigate.util.builtin import EventsPerSecond, serialize
from frigate.util.path import get_event_thumbnail_bytes
from .onnx.jina_v1_embedding import JinaV1ImageEmbedding, JinaV1TextEmbedding
@ -74,6 +75,15 @@ class Embeddings:
self.metrics = metrics
self.requestor = InterProcessRequestor()
self.image_eps = EventsPerSecond()
self.image_eps.start()
self.text_eps = EventsPerSecond()
self.text_eps.start()
self.reindex_lock = threading.Lock()
self.reindex_thread = None
self.reindex_running = False
# Create tables if they don't exist
self.db.create_embeddings_tables()
@ -115,6 +125,10 @@ class Embeddings:
device="GPU" if config.semantic_search.model_size == "large" else "CPU",
)
def update_stats(self) -> None:
self.metrics.image_embeddings_eps.value = self.image_eps.eps()
self.metrics.text_embeddings_eps.value = self.text_eps.eps()
def get_model_definitions(self):
# Version-specific models
if self.config.semantic_search.model == SemanticSearchModelEnum.jinav2:
@ -170,9 +184,10 @@ class Embeddings:
)
duration = datetime.datetime.now().timestamp() - start
self.metrics.image_embeddings_fps.value = (
self.metrics.image_embeddings_fps.value * 9 + duration
self.metrics.image_embeddings_speed.value = (
self.metrics.image_embeddings_speed.value * 9 + duration
) / 10
self.image_eps.update()
return embedding
@ -194,6 +209,7 @@ class Embeddings:
for i in range(len(ids)):
items.append(ids[i])
items.append(serialize(embeddings[i]))
self.image_eps.update()
self.db.execute_sql(
"""
@ -204,8 +220,8 @@ class Embeddings:
)
duration = datetime.datetime.now().timestamp() - start
self.metrics.text_embeddings_sps.value = (
self.metrics.text_embeddings_sps.value * 9 + (duration / len(ids))
self.metrics.text_embeddings_speed.value = (
self.metrics.text_embeddings_speed.value * 9 + (duration / len(ids))
) / 10
return embeddings
@ -226,9 +242,10 @@ class Embeddings:
)
duration = datetime.datetime.now().timestamp() - start
self.metrics.text_embeddings_sps.value = (
self.metrics.text_embeddings_sps.value * 9 + duration
self.metrics.text_embeddings_speed.value = (
self.metrics.text_embeddings_speed.value * 9 + duration
) / 10
self.text_eps.update()
return embedding
@ -249,6 +266,7 @@ class Embeddings:
for i in range(len(ids)):
items.append(ids[i])
items.append(serialize(embeddings[i]))
self.text_eps.update()
self.db.execute_sql(
"""
@ -259,8 +277,8 @@ class Embeddings:
)
duration = datetime.datetime.now().timestamp() - start
self.metrics.text_embeddings_sps.value = (
self.metrics.text_embeddings_sps.value * 9 + (duration / len(ids))
self.metrics.text_embeddings_speed.value = (
self.metrics.text_embeddings_speed.value * 9 + (duration / len(ids))
) / 10
return embeddings
@ -368,3 +386,27 @@ class Embeddings:
totals["status"] = "completed"
self.requestor.send_data(UPDATE_EMBEDDINGS_REINDEX_PROGRESS, totals)
def start_reindex(self) -> bool:
"""Start reindexing in a separate thread if not already running."""
with self.reindex_lock:
if self.reindex_running:
logger.warning("Reindex embeddings is already running.")
return False
# Mark as running and start the thread
self.reindex_running = True
self.reindex_thread = threading.Thread(
target=self._reindex_wrapper, daemon=True
)
self.reindex_thread.start()
return True
def _reindex_wrapper(self) -> None:
"""Wrapper to run reindex and reset running flag when done."""
try:
self.reindex()
finally:
with self.reindex_lock:
self.reindex_running = False
self.reindex_thread = None
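The reindex guard above is a single-flight pattern: a lock-protected flag allows only one background run at a time, and the wrapper clears the flag when the work finishes. A generic, self-contained sketch (class and names are illustrative, not Frigate APIs):

import threading
import time

class SingleFlightJob:
    def __init__(self, work):
        self._work = work
        self._lock = threading.Lock()
        self._running = False

    def start(self) -> bool:
        with self._lock:
            if self._running:
                return False  # a previous run is still in progress
            self._running = True
        threading.Thread(target=self._run, daemon=True).start()
        return True

    def _run(self) -> None:
        try:
            self._work()
        finally:
            with self._lock:
                self._running = False

job = SingleFlightJob(lambda: time.sleep(0.1))
print(job.start())  # True -> started
print(job.start())  # False -> already running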

View File

@ -1,6 +1,7 @@
"""Maintain embeddings in SQLite-vec."""
import base64
import datetime
import logging
import os
import threading
@ -13,6 +14,7 @@ import numpy as np
from peewee import DoesNotExist
from playhouse.sqliteq import SqliteQueueDatabase
from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum
from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsResponder
from frigate.comms.event_metadata_updater import (
EventMetadataPublisher,
@ -26,6 +28,7 @@ from frigate.comms.recordings_updater import (
RecordingsDataTypeEnum,
)
from frigate.config import FrigateConfig
from frigate.config.camera.camera import CameraTypeEnum
from frigate.const import (
CLIPS_DIR,
UPDATE_EVENT_DESCRIPTION,
@ -97,6 +100,7 @@ class EmbeddingMaintainer(threading.Thread):
self.recordings_subscriber = RecordingsDataSubscriber(
RecordingsDataTypeEnum.recordings_available_through
)
self.detection_subscriber = DetectionSubscriber(DetectionTypeEnum.video)
self.embeddings_responder = EmbeddingsResponder()
self.frame_manager = SharedMemoryFrameManager()
@ -162,12 +166,15 @@ class EmbeddingMaintainer(threading.Thread):
self._process_requests()
self._process_updates()
self._process_recordings_updates()
self._process_dedicated_lpr()
self._expire_dedicated_lpr()
self._process_finalized()
self._process_event_metadata()
self.event_subscriber.stop()
self.event_end_subscriber.stop()
self.recordings_subscriber.stop()
self.detection_subscriber.stop()
self.event_metadata_publisher.stop()
self.event_metadata_subscriber.stop()
self.embeddings_responder.stop()
@ -199,12 +206,18 @@ class EmbeddingMaintainer(threading.Thread):
self.embeddings.embed_description("", data, upsert=False),
pack=False,
)
elif topic == EmbeddingsRequestEnum.reindex.value:
response = self.embeddings.start_reindex()
return "started" if response else "in_progress"
processors = [self.realtime_processors, self.post_processors]
for processor_list in processors:
for processor in processor_list:
resp = processor.handle_request(topic, data)
if resp is not None:
return resp
return None
except Exception as e:
logger.error(f"Unable to handle embeddings request {e}", exc_info=True)
@ -222,6 +235,9 @@ class EmbeddingMaintainer(threading.Thread):
if not camera or source_type != EventTypeEnum.tracked_object:
return
if self.config.semantic_search.enabled:
self.embeddings.update_stats()
camera_config = self.config.cameras[camera]
# no need to process updated objects if face recognition, lpr, genai are disabled
@ -317,6 +333,7 @@ class EmbeddingMaintainer(threading.Thread):
if (
recordings_available is not None
and event_id in self.detected_license_plates
and self.config.cameras[camera].type != "lpr"
):
processor.process_data(
{
@ -374,6 +391,26 @@ class EmbeddingMaintainer(threading.Thread):
if event_id in self.tracked_events:
del self.tracked_events[event_id]
def _expire_dedicated_lpr(self) -> None:
"""Remove plates not seen for longer than expiration timeout for dedicated lpr cameras."""
now = datetime.datetime.now().timestamp()
to_remove = []
for id, data in self.detected_license_plates.items():
last_seen = data.get("last_seen", 0)
if not last_seen:
continue
if now - last_seen > self.config.cameras[data["camera"]].lpr.expire_time:
to_remove.append(id)
for id in to_remove:
self.event_metadata_publisher.publish(
EventMetadataTypeEnum.manual_event_end,
(id, now),
)
self.detected_license_plates.pop(id)
def _process_recordings_updates(self) -> None:
"""Process recordings updates."""
while True:
@ -406,6 +443,46 @@ class EmbeddingMaintainer(threading.Thread):
event_id, RegenerateDescriptionEnum(source)
)
def _process_dedicated_lpr(self) -> None:
"""Process event updates"""
(topic, data) = self.detection_subscriber.check_for_update(timeout=0.01)
if topic is None:
return
camera, frame_name, _, _, motion_boxes, _ = data
if not camera or not self.config.lpr.enabled or len(motion_boxes) == 0:
return
camera_config = self.config.cameras[camera]
if (
camera_config.type != CameraTypeEnum.lpr
or "license_plate" in camera_config.objects.track
):
# we're not a dedicated lpr camera or we are one but we're using frigate+
return
try:
yuv_frame = self.frame_manager.get(
frame_name, camera_config.frame_shape_yuv
)
except FileNotFoundError:
yuv_frame = None  # fall through to the unavailable-frame check below
if yuv_frame is None:
logger.debug(
"Unable to process dedicated LPR update because frame is unavailable."
)
return
for processor in self.realtime_processors:
if isinstance(processor, LicensePlateRealTimeProcessor):
processor.process_frame(camera, yuv_frame, True)
self.frame_manager.close(frame_name)
def _create_thumbnail(self, yuv_frame, box, height=500) -> Optional[bytes]:
"""Return jpg thumbnail of a region of the frame."""
frame = cv2.cvtColor(yuv_frame, cv2.COLOR_YUV2BGR_I420)

View File

@ -69,6 +69,8 @@ class BaseEmbedding(ABC):
image = Image.open(BytesIO(response.content)).convert(output)
elif isinstance(image, bytes):
image = Image.open(BytesIO(image)).convert(output)
elif isinstance(image, np.ndarray):
image = Image.fromarray(image)
return image

View File

@ -0,0 +1,193 @@
"""Facenet Embeddings."""
import logging
import os
import numpy as np
from frigate.const import MODEL_CACHE_DIR
from frigate.util.downloader import ModelDownloader
from .base_embedding import BaseEmbedding
from .runner import ONNXModelRunner
try:
from tflite_runtime.interpreter import Interpreter
except ModuleNotFoundError:
from tensorflow.lite.python.interpreter import Interpreter
logger = logging.getLogger(__name__)
ARCFACE_INPUT_SIZE = 112
FACENET_INPUT_SIZE = 160
class FaceNetEmbedding(BaseEmbedding):
def __init__(
self,
device: str = "AUTO",
):
super().__init__(
model_name="facedet",
model_file="facenet.tflite",
download_urls={
"facenet.tflite": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/facenet.tflite",
},
)
self.device = device
self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
self.tokenizer = None
self.feature_extractor = None
self.runner = None
files_names = list(self.download_urls.keys())
if not all(
os.path.exists(os.path.join(self.download_path, n)) for n in files_names
):
logger.debug(f"starting model download for {self.model_name}")
self.downloader = ModelDownloader(
model_name=self.model_name,
download_path=self.download_path,
file_names=files_names,
download_func=self._download_model,
)
self.downloader.ensure_model_files()
else:
self.downloader = None
self._load_model_and_utils()
logger.debug(f"models are already downloaded for {self.model_name}")
def _load_model_and_utils(self):
if self.runner is None:
if self.downloader:
self.downloader.wait_for_download()
self.runner = Interpreter(
model_path=os.path.join(MODEL_CACHE_DIR, "facedet/facenet.tflite"),
num_threads=2,
)
self.runner.allocate_tensors()
self.tensor_input_details = self.runner.get_input_details()
self.tensor_output_details = self.runner.get_output_details()
def _preprocess_inputs(self, raw_inputs):
pil = self._process_image(raw_inputs[0])
# resize images whose dimensions do not match the input size, keeping aspect ratio
width, height = pil.size
if width != FACENET_INPUT_SIZE or height != FACENET_INPUT_SIZE:
if width > height:
new_height = int(((height / width) * FACENET_INPUT_SIZE) // 4 * 4)
pil = pil.resize((FACENET_INPUT_SIZE, new_height))
else:
new_width = int(((width / height) * FACENET_INPUT_SIZE) // 4 * 4)
pil = pil.resize((new_width, FACENET_INPUT_SIZE))
og = np.array(pil).astype(np.float32)
# pad onto a FACENET_INPUT_SIZE x FACENET_INPUT_SIZE canvas
og_h, og_w, channels = og.shape
frame = np.zeros(
(FACENET_INPUT_SIZE, FACENET_INPUT_SIZE, channels), dtype=np.float32
)
# compute center offset
x_center = (FACENET_INPUT_SIZE - og_w) // 2
y_center = (FACENET_INPUT_SIZE - og_h) // 2
# copy the image into the center of the padded frame
frame[y_center : y_center + og_h, x_center : x_center + og_w] = og
# run facenet normalization
frame = (frame / 127.5) - 1.0
frame = np.expand_dims(frame, axis=0)
return frame
def __call__(self, inputs):
self._load_model_and_utils()
processed = self._preprocess_inputs(inputs)
self.runner.set_tensor(self.tensor_input_details[0]["index"], processed)
self.runner.invoke()
return self.runner.get_tensor(self.tensor_output_details[0]["index"])
class ArcfaceEmbedding(BaseEmbedding):
def __init__(
self,
device: str = "AUTO",
):
super().__init__(
model_name="facedet",
model_file="arcface.onnx",
download_urls={
"arcface.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/arcface.onnx",
},
)
self.device = device
self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
self.tokenizer = None
self.feature_extractor = None
self.runner = None
files_names = list(self.download_urls.keys())
if not all(
os.path.exists(os.path.join(self.download_path, n)) for n in files_names
):
logger.debug(f"starting model download for {self.model_name}")
self.downloader = ModelDownloader(
model_name=self.model_name,
download_path=self.download_path,
file_names=files_names,
download_func=self._download_model,
)
self.downloader.ensure_model_files()
else:
self.downloader = None
self._load_model_and_utils()
logger.debug(f"models are already downloaded for {self.model_name}")
def _load_model_and_utils(self):
if self.runner is None:
if self.downloader:
self.downloader.wait_for_download()
self.runner = ONNXModelRunner(
os.path.join(self.download_path, self.model_file),
self.device,
)
def _preprocess_inputs(self, raw_inputs):
pil = self._process_image(raw_inputs[0])
# resize images whose dimensions do not match the input size, keeping aspect ratio
width, height = pil.size
if width != ARCFACE_INPUT_SIZE or height != ARCFACE_INPUT_SIZE:
if width > height:
new_height = int(((height / width) * ARCFACE_INPUT_SIZE) // 4 * 4)
pil = pil.resize((ARCFACE_INPUT_SIZE, new_height))
else:
new_width = int(((width / height) * ARCFACE_INPUT_SIZE) // 4 * 4)
pil = pil.resize((new_width, ARCFACE_INPUT_SIZE))
og = np.array(pil).astype(np.float32)
# pad onto an ARCFACE_INPUT_SIZE x ARCFACE_INPUT_SIZE canvas
og_h, og_w, channels = og.shape
frame = np.zeros(
(ARCFACE_INPUT_SIZE, ARCFACE_INPUT_SIZE, channels), dtype=np.float32
)
# compute center offset
x_center = (ARCFACE_INPUT_SIZE - og_w) // 2
y_center = (ARCFACE_INPUT_SIZE - og_h) // 2
# copy the image into the center of the padded frame
frame[y_center : y_center + og_h, x_center : x_center + og_w] = og
# run arcface normalization
frame = (frame / 127.5) - 1.0
frame = np.transpose(frame, (2, 0, 1))
frame = np.expand_dims(frame, axis=0)
return [{"data": frame}]

View File

@ -261,8 +261,8 @@ class LicensePlateDetector(BaseEmbedding):
def _preprocess_inputs(self, raw_inputs):
if isinstance(raw_inputs, list):
raise ValueError("License plate embedding does not support batch inputs.")
# Get image as numpy array
img = self._process_image(raw_inputs)
img = raw_inputs
height, width, channels = img.shape
# Resize maintaining aspect ratio

View File

@ -11,7 +11,7 @@ from frigate.config import FrigateConfig
from frigate.const import CLIPS_DIR
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.models import Event, Timeline
from frigate.util.path import delete_event_images
from frigate.util.path import delete_event_snapshot, delete_event_thumbnail
logger = logging.getLogger(__name__)
@ -98,7 +98,7 @@ class EventCleanup(threading.Thread):
# delete the media from disk
for expired in expired_events:
deleted = delete_event_images(expired)
deleted = delete_event_snapshot(expired)
if not deleted:
logger.warning(
@ -176,7 +176,7 @@ class EventCleanup(threading.Thread):
# so no need to delete mp4 files
for event in expired_events:
events_to_update.append(event.id)
deleted = delete_event_images(event)
deleted = delete_event_snapshot(event)
if not deleted:
logger.warning(
@ -340,6 +340,10 @@ class EventCleanup(threading.Thread):
.iterator()
)
events_to_delete = [e.id for e in events]
for e in events:
delete_event_thumbnail(e)
logger.debug(f"Found {len(events_to_delete)} events that can be expired")
if len(events_to_delete) > 0:
for i in range(0, len(events_to_delete), CHUNK_SIZE):

View File

@ -278,6 +278,13 @@ class EventProcessor(threading.Thread):
"top_score": event_data["score"],
},
}
if event_data.get("recognized_license_plate") is not None:
event[Event.data]["recognized_license_plate"] = event_data[
"recognized_license_plate"
]
event[Event.data]["recognized_license_plate_score"] = event_data[
"score"
]
Event.insert(event).execute()
elif event_type == EventStateEnum.end:
event = {

View File

@ -1,9 +1,9 @@
import cv2
import imutils
import numpy as np
from frigate.config import MotionConfig
from frigate.motion import MotionDetector
from frigate.util.image import grab_cv2_contours
class FrigateMotionDetector(MotionDetector):
@ -103,7 +103,7 @@ class FrigateMotionDetector(MotionDetector):
contours = cv2.findContours(
thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
)
contours = imutils.grab_contours(contours)
contours = grab_cv2_contours(contours)
# loop over the contours
for c in contours:

View File

@ -1,7 +1,6 @@
import logging
import cv2
import imutils
import numpy as np
from scipy.ndimage import gaussian_filter
@ -9,6 +8,7 @@ from frigate.camera import PTZMetrics
from frigate.comms.config_updater import ConfigSubscriber
from frigate.config import MotionConfig
from frigate.motion import MotionDetector
from frigate.util.image import grab_cv2_contours
logger = logging.getLogger(__name__)
@ -150,7 +150,7 @@ class ImprovedMotionDetector(MotionDetector):
contours = cv2.findContours(
thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
)
contours = imutils.grab_contours(contours)
contours = grab_cv2_contours(contours)
# loop over the contours
total_contour_area = 0

View File

@ -259,7 +259,7 @@ class BroadcastThread(threading.Thread):
ws.send(buf, binary=True)
except ValueError:
pass
except (BrokenPipeError, ConnectionResetError) as e:
except (BrokenPipeError, ConnectionResetError, OSError) as e:
logger.debug(f"Websocket unexpectedly closed {e}")
elif self.converter.process.poll() is not None:
break

View File

@ -45,7 +45,7 @@ class PlusApi:
self.key = (
Path(os.path.join("/run/secrets", PLUS_ENV_VAR)).read_text().strip()
)
# check for the addon options file
# check for the add-on options file
elif os.path.isfile("/data/options.json"):
with open("/data/options.json") as f:
raw_options = f.read()
@ -234,3 +234,11 @@ class PlusApi:
raise Exception(r.text)
return r.json()
def get_models(self) -> Any:
r = self._get("model/list")
if not r.ok:
raise Exception(r.text)
return r.json()

View File

@ -86,7 +86,11 @@ class OnvifController:
async def _init_onvif(self, camera_name: str) -> bool:
onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
await onvif.update_xaddrs()
try:
await onvif.update_xaddrs()
except Exception as e:
logger.error(f"Onvif connection failed for {camera_name}: {e}")
return False
# create init services
media: ONVIFService = await onvif.create_media_service()

View File

@ -594,7 +594,7 @@ class RecordingMaintainer(threading.Thread):
audio_detections,
)
)
elif topic == DetectionTypeEnum.api:
elif topic == DetectionTypeEnum.api or topic == DetectionTypeEnum.lpr:
continue
if frame_time < run_start - stale_frame_count_threshold:

View File

@ -181,6 +181,9 @@ class ReviewSegmentMaintainer(threading.Thread):
}
),
)
self.requestor.send_data(
f"{segment.camera}/review_status", segment.severity.value.upper()
)
def _publish_segment_update(
self,
@ -206,6 +209,9 @@ class ReviewSegmentMaintainer(threading.Thread):
}
),
)
self.requestor.send_data(
f"{segment.camera}/review_status", segment.severity.value.upper()
)
def _publish_segment_end(
self,
@ -225,6 +231,7 @@ class ReviewSegmentMaintainer(threading.Thread):
}
),
)
self.requestor.send_data(f"{segment.camera}/review_status", "NONE")
self.active_review_segments[segment.camera] = None
def end_segment(self, camera: str) -> None:
@ -253,7 +260,8 @@ class ReviewSegmentMaintainer(threading.Thread):
if len(active_objects) > 0:
has_activity = True
should_update = False
should_update_image = False
should_update_state = False
if frame_time > segment.last_update:
segment.last_update = frame_time
@ -284,7 +292,8 @@ class ReviewSegmentMaintainer(threading.Thread):
and camera_config.review.alerts.enabled
):
segment.severity = SeverityEnum.alert
should_update = True
should_update_state = True
should_update_image = True
# keep zones up to date
if len(object["current_zones"]) > 0:
@ -293,17 +302,24 @@ class ReviewSegmentMaintainer(threading.Thread):
segment.zones.append(zone)
if len(active_objects) > segment.frame_active_count:
should_update = True
should_update_state = True
should_update_image = True
if should_update:
if prev_data["data"]["sub_labels"] != list(segment.sub_labels.values()):
should_update_state = True
if should_update_state:
try:
yuv_frame = self.frame_manager.get(
frame_name, camera_config.frame_shape_yuv
)
if should_update_image:
yuv_frame = self.frame_manager.get(
frame_name, camera_config.frame_shape_yuv
)
if yuv_frame is None:
logger.debug(f"Failed to get frame {frame_name} from SHM")
return
if yuv_frame is None:
logger.debug(f"Failed to get frame {frame_name} from SHM")
return
else:
yuv_frame = None
self._publish_segment_update(
segment, camera_config, yuv_frame, active_objects, prev_data
@ -498,7 +514,7 @@ class ReviewSegmentMaintainer(threading.Thread):
_,
audio_detections,
) = data
elif topic == DetectionTypeEnum.api:
elif topic == DetectionTypeEnum.api or topic == DetectionTypeEnum.lpr:
(
camera,
frame_time,
@ -557,13 +573,21 @@ class ReviewSegmentMaintainer(threading.Thread):
or audio in camera_config.review.detections.labels
) and camera_config.review.detections.enabled:
current_segment.audio.add(audio)
elif topic == DetectionTypeEnum.api:
elif topic == DetectionTypeEnum.api or topic == DetectionTypeEnum.lpr:
if manual_info["state"] == ManualEventState.complete:
current_segment.detections[manual_info["event_id"]] = (
manual_info["label"]
)
if self.config.cameras[camera].review.alerts.enabled:
if (
topic == DetectionTypeEnum.api
and self.config.cameras[camera].review.alerts.enabled
):
current_segment.severity = SeverityEnum.alert
elif (
topic == DetectionTypeEnum.lpr
and self.config.cameras[camera].review.detections.enabled
):
current_segment.severity = SeverityEnum.detection
current_segment.last_update = manual_info["end_time"]
elif manual_info["state"] == ManualEventState.start:
self.indefinite_events[camera][manual_info["event_id"]] = (
@ -572,8 +596,16 @@ class ReviewSegmentMaintainer(threading.Thread):
current_segment.detections[manual_info["event_id"]] = (
manual_info["label"]
)
if self.config.cameras[camera].review.alerts.enabled:
if (
topic == DetectionTypeEnum.api
and self.config.cameras[camera].review.alerts.enabled
):
current_segment.severity = SeverityEnum.alert
elif (
topic == DetectionTypeEnum.lpr
and self.config.cameras[camera].review.detections.enabled
):
current_segment.severity = SeverityEnum.detection
# temporarily make it so this event can not end
current_segment.last_update = sys.maxsize
@ -661,6 +693,34 @@ class ReviewSegmentMaintainer(threading.Thread):
logger.warning(
f"Manual event API has been called for {camera}, but alerts are disabled. This manual event will not appear as an alert."
)
elif topic == DetectionTypeEnum.lpr:
if self.config.cameras[camera].review.detections.enabled:
self.active_review_segments[camera] = PendingReviewSegment(
camera,
frame_time,
SeverityEnum.detection,
{manual_info["event_id"]: manual_info["label"]},
{},
[],
set(),
)
if manual_info["state"] == ManualEventState.start:
self.indefinite_events[camera][manual_info["event_id"]] = (
manual_info["label"]
)
# temporarily make it so this event can not end
self.active_review_segments[
camera
].last_update = sys.maxsize
elif manual_info["state"] == ManualEventState.complete:
self.active_review_segments[
camera
].last_update = manual_info["end_time"]
else:
logger.warning(
f"Dedicated LPR camera API has been called for {camera}, but detections are disabled. LPR events will not appear as a detection."
)
self.record_config_subscriber.stop()
self.review_config_subscriber.stop()

View File

@ -293,27 +293,42 @@ def stats_snapshot(
stats["embeddings"].update(
{
"image_embedding_speed": round(
embeddings_metrics.image_embeddings_fps.value * 1000, 2
embeddings_metrics.image_embeddings_speed.value * 1000, 2
),
"image_embedding": round(
embeddings_metrics.image_embeddings_eps.value, 2
),
"text_embedding_speed": round(
embeddings_metrics.text_embeddings_sps.value * 1000, 2
embeddings_metrics.text_embeddings_speed.value * 1000, 2
),
"text_embedding": round(
embeddings_metrics.text_embeddings_eps.value, 2
),
}
)
if config.face_recognition.enabled:
stats["embeddings"]["face_recognition_speed"] = round(
embeddings_metrics.face_rec_fps.value * 1000, 2
embeddings_metrics.face_rec_speed.value * 1000, 2
)
stats["embeddings"]["face_recognition"] = round(
embeddings_metrics.face_rec_fps.value, 2
)
if config.lpr.enabled:
stats["embeddings"]["plate_recognition_speed"] = round(
embeddings_metrics.alpr_pps.value * 1000, 2
embeddings_metrics.alpr_speed.value * 1000, 2
)
stats["embeddings"]["plate_recognition"] = round(
embeddings_metrics.alpr_pps.value, 2
)
if "license_plate" not in config.objects.all_objects:
if embeddings_metrics.yolov9_lpr_pps.value > 0.0:
stats["embeddings"]["yolov9_plate_detection_speed"] = round(
embeddings_metrics.yolov9_lpr_fps.value * 1000, 2
embeddings_metrics.yolov9_lpr_speed.value * 1000, 2
)
stats["embeddings"]["yolov9_plate_detection"] = round(
embeddings_metrics.yolov9_lpr_pps.value, 2
)
get_processing_stats(config, stats, hwaccel_errors)

View File

@ -246,6 +246,7 @@ class NorfairTracker(ObjectTracker):
"ptz"
if self.camera_config.onvif.autotracking.enabled_in_config
and object_type in self.camera_config.onvif.autotracking.track
and object_type in self.ptz_object_type_configs.keys()
else "static"
)
if object_type in self.trackers:

View File

@ -1,3 +1,4 @@
import base64
import datetime
import json
import logging
@ -7,6 +8,7 @@ from collections import defaultdict
from enum import Enum
from multiprocessing.synchronize import Event as MpEvent
import cv2
import numpy as np
from peewee import DoesNotExist
@ -172,6 +174,16 @@ class TrackedObjectProcessor(threading.Thread):
retain=True,
)
if obj.obj_data.get("sub_label"):
sub_label = obj.obj_data["sub_label"][0]
if sub_label in self.config.model.all_attribute_logos:
self.dispatcher.publish(
f"{camera}/{sub_label}/snapshot",
jpg_bytes,
retain=True,
)
def camera_activity(camera, activity):
last_activity = self.camera_activity.get(camera)
@ -384,6 +396,19 @@ class TrackedObjectProcessor(threading.Thread):
return True
def save_lpr_snapshot(self, payload: tuple) -> None:
# save the snapshot image
(frame, event_id, camera) = payload
img = cv2.imdecode(
np.frombuffer(base64.b64decode(frame), dtype=np.uint8),
cv2.IMREAD_COLOR,
)
self.camera_states[camera].save_manual_event_image(
img, event_id, "license_plate", {}
)
def create_manual_event(self, payload: tuple) -> None:
(
frame_time,
@ -399,7 +424,9 @@ class TrackedObjectProcessor(threading.Thread):
) = payload
# save the snapshot image
self.camera_states[camera_name].save_manual_event_image(event_id, label, draw)
self.camera_states[camera_name].save_manual_event_image(
None, event_id, label, draw
)
end_time = frame_time + duration if duration is not None else None
# send event to event maintainer
@ -446,6 +473,59 @@ class TrackedObjectProcessor(threading.Thread):
DetectionTypeEnum.api.value,
)
def create_lpr_event(self, payload: tuple) -> None:
(
frame_time,
camera_name,
label,
event_id,
include_recording,
score,
sub_label,
plate,
) = payload
# send event to event maintainer
self.event_sender.publish(
(
EventTypeEnum.api,
EventStateEnum.start,
camera_name,
"",
{
"id": event_id,
"label": label,
"sub_label": sub_label,
"score": score,
"camera": camera_name,
"start_time": frame_time
- self.config.cameras[camera_name].record.event_pre_capture,
"end_time": None,
"has_clip": self.config.cameras[camera_name].record.enabled
and include_recording,
"has_snapshot": True,
"type": "api",
"recognized_license_plate": plate,
"recognized_license_plate_score": score,
},
)
)
self.ongoing_manual_events[event_id] = camera_name
self.detection_publisher.publish(
(
camera_name,
frame_time,
{
"state": ManualEventState.start,
"label": f"{label}: {sub_label}" if sub_label else label,
"event_id": event_id,
"end_time": None,
},
),
DetectionTypeEnum.lpr.value,
)
def end_manual_event(self, payload: tuple) -> None:
(event_id, end_time) = payload
@ -550,6 +630,10 @@ class TrackedObjectProcessor(threading.Thread):
self.set_recognized_license_plate(
event_id, recognized_license_plate, score
)
elif topic.endswith(EventMetadataTypeEnum.lpr_event_create.value):
self.create_lpr_event(payload)
elif topic.endswith(EventMetadataTypeEnum.save_lpr_snapshot.value):
self.save_lpr_snapshot(payload)
elif topic.endswith(EventMetadataTypeEnum.manual_event_create.value):
self.create_manual_event(payload)
elif topic.endswith(EventMetadataTypeEnum.manual_event_end.value):

View File

@ -442,7 +442,7 @@ class TrackedObject:
if bounding_box:
thickness = 2
color = self.colormap[self.obj_data["label"]]
color = self.colormap.get(self.obj_data["label"], (255, 255, 255))
# draw the bounding boxes on the frame
box = self.thumbnail_data["box"]

View File

@ -265,6 +265,19 @@ def draw_box_with_label(
)
def grab_cv2_contours(cnts):
# if the length of the contours tuple returned by cv2.findContours
# is '2' then we are using either OpenCV v2.4, v4-beta, or
# v4-official
if len(cnts) == 2:
return cnts[0]
# if the length of the contours tuple is '3' then we are using
# either OpenCV v3, v4-pre, or v4-alpha
elif len(cnts) == 3:
return cnts[1]
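grab_cv2_contours normalizes the cv2.findContours return value across OpenCV versions (the contour list is element 0 when two values are returned, element 1 when three are). A small usage sketch on a synthetic mask:

import cv2
import numpy as np

mask = np.zeros((20, 20), dtype=np.uint8)
mask[5:15, 5:15] = 255  # one white square

result = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = grab_cv2_contours(result)
print(len(contours), cv2.contourArea(contours[0]))  # 1 81.0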
def is_label_printable(label) -> bool:
"""Check if label is printable."""
return not bool(set(label) - set(printable))

View File

@ -13,7 +13,11 @@ logger = logging.getLogger(__name__)
### Post Processing
def post_process_dfine(tensor_output: np.ndarray, width, height) -> np.ndarray:
def post_process_dfine(
tensor_output: np.ndarray, width: int, height: int
) -> np.ndarray:
class_ids = tensor_output[0][tensor_output[2] > 0.4]
boxes = tensor_output[1][tensor_output[2] > 0.4]
scores = tensor_output[2][tensor_output[2] > 0.4]
@ -41,6 +45,60 @@ def post_process_dfine(tensor_output: np.ndarray, width, height) -> np.ndarray:
return detections
def post_process_rfdetr(tensor_output: list[np.ndarray, np.ndarray]) -> np.ndarray:
boxes = tensor_output[0]
raw_scores = tensor_output[1]
# apply soft max to scores
exp = np.exp(raw_scores - np.max(raw_scores, axis=-1, keepdims=True))
all_scores = exp / np.sum(exp, axis=-1, keepdims=True)
# get highest scoring class from every detection
scores = np.max(all_scores[0, :, 1:], axis=-1)
labels = np.argmax(all_scores[0, :, 1:], axis=-1)
idxs = scores > 0.4
filtered_boxes = boxes[0, idxs]
filtered_scores = scores[idxs]
filtered_labels = labels[idxs]
# convert boxes from [x_center, y_center, width, height]
x_center, y_center, w, h = (
filtered_boxes[:, 0],
filtered_boxes[:, 1],
filtered_boxes[:, 2],
filtered_boxes[:, 3],
)
x_min = x_center - w / 2
y_min = y_center - h / 2
x_max = x_center + w / 2
y_max = y_center + h / 2
filtered_boxes = np.stack([x_min, y_min, x_max, y_max], axis=-1)
# apply nms
indices = cv2.dnn.NMSBoxes(
filtered_boxes, filtered_scores, score_threshold=0.4, nms_threshold=0.4
)
detections = np.zeros((20, 6), np.float32)
for i, (bbox, confidence, class_id) in enumerate(
zip(filtered_boxes[indices], filtered_scores[indices], filtered_labels[indices])
):
if i == 20:
break
detections[i] = [
class_id,
confidence,
bbox[1],
bbox[0],
bbox[3],
bbox[2],
]
return detections
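A worked example of the box conversion used above: RF-DETR style [x_center, y_center, w, h] boxes become [x_min, y_min, x_max, y_max] before NMS. Values here are illustrative:

import numpy as np

boxes = np.array([[0.5, 0.5, 0.2, 0.4]])  # one centered box
x_c, y_c, w, h = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
xyxy = np.stack([x_c - w / 2, y_c - h / 2, x_c + w / 2, y_c + h / 2], axis=-1)
print(xyxy)  # [[0.4 0.3 0.6 0.7]]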
def post_process_yolov9(predictions: np.ndarray, width, height) -> np.ndarray:
predictions = np.squeeze(predictions).T
scores = np.max(predictions[:, 4:], axis=1)

View File

@ -4,6 +4,9 @@ import base64
import os
from pathlib import Path
import cv2
from numpy import ndarray
from frigate.const import CLIPS_DIR, THUMB_DIR
from frigate.models import Event
@ -21,6 +24,11 @@ def get_event_thumbnail_bytes(event: Event) -> bytes | None:
return None
def get_event_snapshot(event: Event) -> ndarray:
media_name = f"{event.camera}-{event.id}"
return cv2.imread(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
### Deletion

View File

@ -15,6 +15,7 @@ from frigate.camera import CameraMetrics, PTZMetrics
from frigate.comms.config_updater import ConfigSubscriber
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import CameraConfig, DetectConfig, ModelConfig
from frigate.config.camera.camera import CameraTypeEnum
from frigate.const import (
CACHE_DIR,
CACHE_SEGMENT_FORMAT,
@ -113,8 +114,10 @@ def capture_frames(
def get_enabled_state():
"""Fetch the latest enabled state from ZMQ."""
_, config_data = config_subscriber.check_for_update()
if config_data:
return config_data.enabled
config.enabled = config_data.enabled
return config.enabled
while not stop_event.is_set():
@ -517,6 +520,7 @@ def track_camera(
frame_queue,
frame_shape,
model_config,
config,
config.detect,
frame_manager,
motion_detector,
@ -583,6 +587,7 @@ def process_frames(
frame_queue: mp.Queue,
frame_shape,
model_config: ModelConfig,
camera_config: CameraConfig,
detect_config: DetectConfig,
frame_manager: FrameManager,
motion_detector: MotionDetector,
@ -610,6 +615,29 @@ def process_frames(
region_min_size = get_min_region_size(model_config)
attributes_map = model_config.attributes_map
all_attributes = model_config.all_attributes
# remove license_plate from attributes if this camera is a dedicated LPR cam
if camera_config.type == CameraTypeEnum.lpr:
modified_attributes_map = model_config.attributes_map.copy()
if (
"car" in modified_attributes_map
and "license_plate" in modified_attributes_map["car"]
):
modified_attributes_map["car"] = [
attr
for attr in modified_attributes_map["car"]
if attr != "license_plate"
]
attributes_map = modified_attributes_map
all_attributes = [
attr for attr in model_config.all_attributes if attr != "license_plate"
]
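A minimal sketch of the attribute filtering above, with a hypothetical attributes map; on a dedicated LPR camera the license_plate attribute is stripped so the object detector's plates do not compete with the dedicated pipeline:

# Illustrative map; Frigate's real one comes from the model config.
attributes_map = {"car": ["license_plate", "ups", "fedex"], "person": ["face"]}

filtered = {
    label: [a for a in attrs if a != "license_plate"]
    for label, attrs in attributes_map.items()
}
all_attributes = [a for attrs in filtered.values() for a in attrs]
print(filtered["car"], all_attributes)  # ['ups', 'fedex'] ['ups', 'fedex', 'face']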
while not stop_event.is_set():
_, updated_enabled_config = enabled_config_subscriber.check_for_update()
@ -803,9 +831,7 @@ def process_frames(
# if detection was run on this frame, consolidate
if len(regions) > 0:
tracked_detections = [
d
for d in consolidated_detections
if d[0] not in model_config.all_attributes
d for d in consolidated_detections if d[0] not in all_attributes
]
# now that we have refined our detections, we need to track objects
object_tracker.match_and_update(
@ -817,7 +843,7 @@ def process_frames(
# group the attribute detections based on what label they apply to
attribute_detections: dict[str, list[TrackedObjectAttribute]] = {}
for label, attribute_labels in model_config.attributes_map.items():
for label, attribute_labels in attributes_map.items():
attribute_detections[label] = [
TrackedObjectAttribute(d)
for d in consolidated_detections
@ -834,8 +860,7 @@ def process_frames(
for attributes in attribute_detections.values():
for attribute in attributes:
filtered_objects = filter(
lambda o: attribute.label
in model_config.attributes_map.get(o["label"], []),
lambda o: attribute.label in attributes_map.get(o["label"], []),
all_objects,
)
selected_object_id = attribute.find_best_object(filtered_objects)
@ -883,7 +908,7 @@ def process_frames(
for obj in object_tracker.tracked_objects.values():
if obj["frame_time"] == frame_time:
thickness = 2
color = model_config.colormap[obj["label"]]
color = model_config.colormap.get(obj["label"], (255, 255, 255))
else:
thickness = 1
color = (255, 0, 0)

Some files were not shown because too many files have changed in this diff