Merge branch 'dev' into dev

This commit is contained in:
GuoQing Liu 2025-08-26 15:36:15 +08:00 committed by GitHub
commit de773127da
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
335 changed files with 8200 additions and 992 deletions

View File

@ -6,7 +6,7 @@ body:
value: | value: |
Use this form to submit a reproducible bug in Frigate or Frigate's UI. Use this form to submit a reproducible bug in Frigate or Frigate's UI.
Before submitting your bug report, please [search the discussions][discussions], look at recent open and closed [pull requests][prs], read the [official Frigate documentation][docs], and read the [Frigate FAQ][faq] pinned at the Discussion page to see if your bug has already been fixed by the developers or reported by the community. Before submitting your bug report, please ask the AI with the "Ask AI" button on the [official documentation site][ai] about your issue, [search the discussions][discussions], look at recent open and closed [pull requests][prs], read the [official Frigate documentation][docs], and read the [Frigate FAQ][faq] pinned at the Discussion page to see if your bug has already been fixed by the developers or reported by the community.
**If you are unsure if your issue is actually a bug or not, please submit a support request first.** **If you are unsure if your issue is actually a bug or not, please submit a support request first.**
@ -14,6 +14,7 @@ body:
[prs]: https://www.github.com/blakeblackshear/frigate/pulls [prs]: https://www.github.com/blakeblackshear/frigate/pulls
[docs]: https://docs.frigate.video [docs]: https://docs.frigate.video
[faq]: https://github.com/blakeblackshear/frigate/discussions/12724 [faq]: https://github.com/blakeblackshear/frigate/discussions/12724
[ai]: https://docs.frigate.video
- type: checkboxes - type: checkboxes
attributes: attributes:
label: Checklist label: Checklist
@ -26,6 +27,8 @@ body:
- label: I have tried a different browser to see if it is related to my browser. - label: I have tried a different browser to see if it is related to my browser.
required: true required: true
- label: I have tried reproducing the issue in [incognito mode](https://www.computerworld.com/article/1719851/how-to-go-incognito-in-chrome-firefox-safari-and-edge.html) to rule out problems with any third party extensions or plugins I have installed. - label: I have tried reproducing the issue in [incognito mode](https://www.computerworld.com/article/1719851/how-to-go-incognito-in-chrome-firefox-safari-and-edge.html) to rule out problems with any third party extensions or plugins I have installed.
- label: I have asked the AI at https://docs.frigate.video about my issue.
required: true
- type: textarea - type: textarea
id: description id: description
attributes: attributes:

View File

@ -23,7 +23,7 @@ jobs:
name: AMD64 Build name: AMD64 Build
steps: steps:
- name: Check out code - name: Check out code
uses: actions/checkout@v4 uses: actions/checkout@v5
with: with:
persist-credentials: false persist-credentials: false
- name: Set up QEMU and Buildx - name: Set up QEMU and Buildx
@ -47,7 +47,7 @@ jobs:
name: ARM Build name: ARM Build
steps: steps:
- name: Check out code - name: Check out code
uses: actions/checkout@v4 uses: actions/checkout@v5
with: with:
persist-credentials: false persist-credentials: false
- name: Set up QEMU and Buildx - name: Set up QEMU and Buildx
@ -77,42 +77,12 @@ jobs:
rpi.tags=${{ steps.setup.outputs.image-name }}-rpi rpi.tags=${{ steps.setup.outputs.image-name }}-rpi
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64 *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max
jetson_jp5_build:
if: false
runs-on: ubuntu-22.04
name: Jetson Jetpack 5
steps:
- name: Check out code
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Set up QEMU and Buildx
id: setup
uses: ./.github/actions/setup
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push TensorRT (Jetson, Jetpack 5)
env:
ARCH: arm64
BASE_IMAGE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
SLIM_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
TRT_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
uses: docker/bake-action@v6
with:
source: .
push: true
targets: tensorrt
files: docker/tensorrt/trt.hcl
set: |
tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt-jp5
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5,mode=max
jetson_jp6_build: jetson_jp6_build:
runs-on: ubuntu-22.04-arm runs-on: ubuntu-22.04-arm
name: Jetson Jetpack 6 name: Jetson Jetpack 6
steps: steps:
- name: Check out code - name: Check out code
uses: actions/checkout@v4 uses: actions/checkout@v5
with: with:
persist-credentials: false persist-credentials: false
- name: Set up QEMU and Buildx - name: Set up QEMU and Buildx
@ -143,7 +113,7 @@ jobs:
- amd64_build - amd64_build
steps: steps:
- name: Check out code - name: Check out code
uses: actions/checkout@v4 uses: actions/checkout@v5
with: with:
persist-credentials: false persist-credentials: false
- name: Set up QEMU and Buildx - name: Set up QEMU and Buildx
@ -185,7 +155,7 @@ jobs:
- arm64_build - arm64_build
steps: steps:
- name: Check out code - name: Check out code
uses: actions/checkout@v4 uses: actions/checkout@v5
with: with:
persist-credentials: false persist-credentials: false
- name: Set up QEMU and Buildx - name: Set up QEMU and Buildx

View File

@ -19,7 +19,7 @@ jobs:
env: env:
DOCKER_BUILDKIT: "1" DOCKER_BUILDKIT: "1"
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
with: with:
persist-credentials: false persist-credentials: false
- uses: actions/setup-node@master - uses: actions/setup-node@master
@ -40,7 +40,7 @@ jobs:
name: Web - Lint name: Web - Lint
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
with: with:
persist-credentials: false persist-credentials: false
- uses: actions/setup-node@master - uses: actions/setup-node@master
@ -56,7 +56,7 @@ jobs:
name: Web - Test name: Web - Test
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
with: with:
persist-credentials: false persist-credentials: false
- uses: actions/setup-node@master - uses: actions/setup-node@master
@ -76,7 +76,7 @@ jobs:
name: Python Checks name: Python Checks
steps: steps:
- name: Check out the repository - name: Check out the repository
uses: actions/checkout@v4 uses: actions/checkout@v5
with: with:
persist-credentials: false persist-credentials: false
- name: Set up Python ${{ env.DEFAULT_PYTHON }} - name: Set up Python ${{ env.DEFAULT_PYTHON }}
@ -99,7 +99,7 @@ jobs:
name: Python Tests name: Python Tests
steps: steps:
- name: Check out code - name: Check out code
uses: actions/checkout@v4 uses: actions/checkout@v5
with: with:
persist-credentials: false persist-credentials: false
- name: Set up QEMU - name: Set up QEMU

View File

@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
with: with:
persist-credentials: false persist-credentials: false
- id: lowercaseRepo - id: lowercaseRepo

View File

@ -212,6 +212,7 @@ COPY docker/main/rootfs/ /
# Frigate deps (ffmpeg, python, nginx, go2rtc, s6-overlay, etc) # Frigate deps (ffmpeg, python, nginx, go2rtc, s6-overlay, etc)
FROM slim-base AS deps FROM slim-base AS deps
ARG TARGETARCH ARG TARGETARCH
ARG BASE_IMAGE
ARG DEBIAN_FRONTEND ARG DEBIAN_FRONTEND
# http://stackoverflow.com/questions/48162574/ddg#49462622 # http://stackoverflow.com/questions/48162574/ddg#49462622
@ -255,6 +256,10 @@ RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \ RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
pip3 install -U /deps/wheels/*.whl pip3 install -U /deps/wheels/*.whl
# Install MemryX runtime (requires libgomp (OpenMP) in the final docker image)
RUN --mount=type=bind,source=docker/main/install_memryx.sh,target=/deps/install_memryx.sh \
bash -c "bash /deps/install_memryx.sh"
COPY --from=deps-rootfs / / COPY --from=deps-rootfs / /
RUN ldconfig RUN ldconfig

View File

@ -19,7 +19,8 @@ apt-get -qq install --no-install-recommends -y \
nethogs \ nethogs \
libgl1 \ libgl1 \
libglib2.0-0 \ libglib2.0-0 \
libusb-1.0.0 libusb-1.0.0 \
libgomp1 # memryx detector
update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1 update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1
@ -34,9 +35,13 @@ rm /tmp/libedgetpu1-max.deb
# install mesa-teflon-delegate from bookworm-backports # install mesa-teflon-delegate from bookworm-backports
# Only available for arm64 at the moment # Only available for arm64 at the moment
if [[ "${TARGETARCH}" == "arm64" ]]; then if [[ "${TARGETARCH}" == "arm64" ]]; then
echo "deb http://deb.debian.org/debian bookworm-backports main" | tee /etc/apt/sources.list.d/bookworm-backports.list if [[ "${BASE_IMAGE}" == *"nvcr.io/nvidia/tensorrt"* ]]; then
apt-get -qq update echo "Info: Skipping apt-get commands because BASE_IMAGE includes 'nvcr.io/nvidia/tensorrt' for arm64."
apt-get -qq install --no-install-recommends --no-install-suggests -y mesa-teflon-delegate/bookworm-backports else
echo "deb http://deb.debian.org/debian bookworm-backports main" | tee /etc/apt/sources.list.d/bookworm-backbacks.list
apt-get -qq update
apt-get -qq install --no-install-recommends --no-install-suggests -y mesa-teflon-delegate/bookworm-backports
fi
fi fi
# ffmpeg -> amd64 # ffmpeg -> amd64

View File

@ -0,0 +1,31 @@
#!/bin/bash
set -e
# Download the MxAccl for Frigate github release
wget https://github.com/memryx/mx_accl_frigate/archive/refs/heads/main.zip -O /tmp/mxaccl.zip
unzip /tmp/mxaccl.zip -d /tmp
mv /tmp/mx_accl_frigate-main /opt/mx_accl_frigate
rm /tmp/mxaccl.zip
# Install Python dependencies
pip3 install -r /opt/mx_accl_frigate/freeze
# Link the Python package dynamically
SITE_PACKAGES=$(python3 -c "import site; print(site.getsitepackages()[0])")
ln -s /opt/mx_accl_frigate/memryx "$SITE_PACKAGES/memryx"
# Copy architecture-specific shared libraries
ARCH=$(uname -m)
if [[ "$ARCH" == "x86_64" ]]; then
cp /opt/mx_accl_frigate/memryx/x86/libmemx.so* /usr/lib/x86_64-linux-gnu/
cp /opt/mx_accl_frigate/memryx/x86/libmx_accl.so* /usr/lib/x86_64-linux-gnu/
elif [[ "$ARCH" == "aarch64" ]]; then
cp /opt/mx_accl_frigate/memryx/arm/libmemx.so* /usr/lib/aarch64-linux-gnu/
cp /opt/mx_accl_frigate/memryx/arm/libmx_accl.so* /usr/lib/aarch64-linux-gnu/
else
echo "Unsupported architecture: $ARCH"
exit 1
fi
# Refresh linker cache
ldconfig

View File

@ -1,22 +1,23 @@
aiofiles == 24.1.* aiofiles == 24.1.*
click == 8.1.* click == 8.1.*
# FastAPI # FastAPI
aiohttp == 3.11.3 aiohttp == 3.12.*
starlette == 0.41.2 starlette == 0.47.*
starlette-context == 0.3.6 starlette-context == 0.4.*
fastapi == 0.115.* fastapi[standard-no-fastapi-cloud-cli] == 0.116.*
uvicorn == 0.30.* uvicorn == 0.35.*
slowapi == 0.1.* slowapi == 0.1.*
joserfc == 1.0.* joserfc == 1.2.*
pathvalidate == 3.2.* cryptography == 44.0.*
pathvalidate == 3.3.*
markupsafe == 3.0.* markupsafe == 3.0.*
python-multipart == 0.0.12 python-multipart == 0.0.20
# Classification Model Training # Classification Model Training
tensorflow == 2.19.* ; platform_machine == 'aarch64' tensorflow == 2.19.* ; platform_machine == 'aarch64'
tensorflow-cpu == 2.19.* ; platform_machine == 'x86_64' tensorflow-cpu == 2.19.* ; platform_machine == 'x86_64'
# General # General
mypy == 1.6.1 mypy == 1.6.1
onvif-zeep-async == 3.1.* onvif-zeep-async == 4.0.*
paho-mqtt == 2.1.* paho-mqtt == 2.1.*
pandas == 2.2.* pandas == 2.2.*
peewee == 3.17.* peewee == 3.17.*
@ -30,7 +31,7 @@ ruamel.yaml == 0.18.*
tzlocal == 5.2 tzlocal == 5.2
requests == 2.32.* requests == 2.32.*
types-requests == 2.32.* types-requests == 2.32.*
norfair == 2.2.* norfair == 2.3.*
setproctitle == 1.3.* setproctitle == 1.3.*
ws4py == 0.5.* ws4py == 0.5.*
unidecode == 1.3.* unidecode == 1.3.*

View File

@ -10,7 +10,7 @@ echo "[INFO] Starting certsync..."
lefile="/etc/letsencrypt/live/frigate/fullchain.pem" lefile="/etc/letsencrypt/live/frigate/fullchain.pem"
tls_enabled=`python3 /usr/local/nginx/get_tls_settings.py | jq -r .enabled` tls_enabled=`python3 /usr/local/nginx/get_listen_settings.py | jq -r .tls.enabled`
while true while true
do do

View File

@ -85,7 +85,7 @@ python3 /usr/local/nginx/get_base_path.py | \
-out /usr/local/nginx/conf/base_path.conf -out /usr/local/nginx/conf/base_path.conf
# build templates for optional TLS support # build templates for optional TLS support
python3 /usr/local/nginx/get_tls_settings.py | \ python3 /usr/local/nginx/get_listen_settings.py | \
tempio -template /usr/local/nginx/templates/listen.gotmpl \ tempio -template /usr/local/nginx/templates/listen.gotmpl \
-out /usr/local/nginx/conf/listen.conf -out /usr/local/nginx/conf/listen.conf

View File

@ -26,6 +26,10 @@ try:
except FileNotFoundError: except FileNotFoundError:
config: dict[str, Any] = {} config: dict[str, Any] = {}
tls_config: dict[str, Any] = config.get("tls", {"enabled": True}) tls_config: dict[str, any] = config.get("tls", {"enabled": True})
networking_config = config.get("networking", {})
ipv6_config = networking_config.get("ipv6", {"enabled": False})
print(json.dumps(tls_config)) output = {"tls": tls_config, "ipv6": ipv6_config}
print(json.dumps(output))

View File

@ -1,33 +1,45 @@
# intended for internal traffic, not protected by auth
# Internal (IPv4 always; IPv6 optional)
listen 5000; listen 5000;
{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:5000;{{ end }}{{ end }}
{{ if not .enabled }}
# intended for external traffic, protected by auth # intended for external traffic, protected by auth
listen 8971; {{ if .tls }}
{{ if .tls.enabled }}
# external HTTPS (IPv4 always; IPv6 optional)
listen 8971 ssl;
{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971 ssl;{{ end }}{{ end }}
ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem;
# generated 2024-06-01, Mozilla Guideline v5.7, nginx 1.25.3, OpenSSL 1.1.1w, modern configuration, no OCSP
# https://ssl-config.mozilla.org/#server=nginx&version=1.25.3&config=modern&openssl=1.1.1w&ocsp=false&guideline=5.7
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
ssl_session_tickets off;
# modern configuration
ssl_protocols TLSv1.3;
ssl_prefer_server_ciphers off;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
# ACME challenge location
location /.well-known/acme-challenge/ {
default_type "text/plain";
root /etc/letsencrypt/www;
}
{{ else }}
# external HTTP (IPv4 always; IPv6 optional)
listen 8971;
{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }}
{{ end }}
{{ else }} {{ else }}
# intended for external traffic, protected by auth # (No tls section) default to HTTP (IPv4 always; IPv6 optional)
listen 8971 ssl; listen 8971;
{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }}
ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem;
# generated 2024-06-01, Mozilla Guideline v5.7, nginx 1.25.3, OpenSSL 1.1.1w, modern configuration, no OCSP
# https://ssl-config.mozilla.org/#server=nginx&version=1.25.3&config=modern&openssl=1.1.1w&ocsp=false&guideline=5.7
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
ssl_session_tickets off;
# modern configuration
ssl_protocols TLSv1.3;
ssl_prefer_server_ciphers off;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
# ACME challenge location
location /.well-known/acme-challenge/ {
default_type "text/plain";
root /etc/letsencrypt/www;
}
{{ end }} {{ end }}

View File

@ -0,0 +1,47 @@
#!/bin/bash
set -e # Exit immediately if any command fails
set -o pipefail
echo "Starting MemryX driver and runtime installation..."
# Detect architecture
arch=$(uname -m)
# Purge existing packages and repo
echo "Removing old MemryX installations..."
# Remove any holds on MemryX packages (if they exist)
sudo apt-mark unhold memx-* mxa-manager || true
sudo apt purge -y memx-* mxa-manager || true
sudo rm -f /etc/apt/sources.list.d/memryx.list /etc/apt/trusted.gpg.d/memryx.asc
# Install kernel headers
echo "Installing kernel headers for: $(uname -r)"
sudo apt update
sudo apt install -y dkms linux-headers-$(uname -r)
# Add MemryX key and repo
echo "Adding MemryX GPG key and repository..."
wget -qO- https://developer.memryx.com/deb/memryx.asc | sudo tee /etc/apt/trusted.gpg.d/memryx.asc >/dev/null
echo 'deb https://developer.memryx.com/deb stable main' | sudo tee /etc/apt/sources.list.d/memryx.list >/dev/null
# Update and install memx-drivers
echo "Installing memx-drivers..."
sudo apt update
sudo apt install -y memx-drivers
# ARM-specific board setup
if [[ "$arch" == "aarch64" || "$arch" == "arm64" ]]; then
echo "Running ARM board setup..."
sudo mx_arm_setup
fi
echo -e "\n\n\033[1;31mYOU MUST RESTART YOUR COMPUTER NOW\033[0m\n\n"
# Install other runtime packages
packages=("memx-accl" "mxa-manager")
for pkg in "${packages[@]}"; do
echo "Installing $pkg..."
sudo apt install -y "$pkg"
done
echo "MemryX installation complete!"

View File

@ -11,7 +11,8 @@ COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt
RUN sed -i "/https:\/\//d" /requirements-wheels.txt RUN sed -i "/https:\/\//d" /requirements-wheels.txt
RUN sed -i "/onnxruntime/d" /requirements-wheels.txt RUN sed -i "/onnxruntime/d" /requirements-wheels.txt
RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt RUN sed -i '/\[.*\]/d' /requirements-wheels.txt \
&& pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt
RUN rm -rf /rk-wheels/opencv_python-* RUN rm -rf /rk-wheels/opencv_python-*
RUN rm -rf /rk-wheels/torch-* RUN rm -rf /rk-wheels/torch-*

View File

@ -12,7 +12,10 @@ ARG PIP_BREAK_SYSTEM_PACKAGES
# Install TensorRT wheels # Install TensorRT wheels
COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
RUN pip3 wheel --wheel-dir=/trt-wheels -c /requirements-wheels.txt -r /requirements-tensorrt.txt
# remove dependencies from the requirements that have type constraints
RUN sed -i '/\[.*\]/d' /requirements-wheels.txt \
&& pip3 wheel --wheel-dir=/trt-wheels -c /requirements-wheels.txt -r /requirements-tensorrt.txt
FROM deps AS frigate-tensorrt FROM deps AS frigate-tensorrt
ARG PIP_BREAK_SYSTEM_PACKAGES ARG PIP_BREAK_SYSTEM_PACKAGES

View File

@ -50,7 +50,7 @@ cameras:
### Configuring Minimum Volume ### Configuring Minimum Volume
The audio detector uses volume levels in the same way that motion in a camera feed is used for object detection. This means that frigate will not run audio detection unless the audio volume is above the configured level in order to reduce resource usage. Audio levels can vary widely between camera models so it is important to run tests to see what volume levels are. MQTT explorer can be used on the audio topic to see what volume level is being detected. The audio detector uses volume levels in the same way that motion in a camera feed is used for object detection. This means that frigate will not run audio detection unless the audio volume is above the configured level in order to reduce resource usage. Audio levels can vary widely between camera models so it is important to run tests to see what volume levels are. The Debug view in the Frigate UI has an Audio tab for cameras that have the `audio` role assigned where a graph and the current levels are is displayed. The `min_volume` parameter should be set to the minimum the `RMS` level required to run audio detection.
:::tip :::tip

View File

@ -59,6 +59,7 @@ The default session length for user authentication in Frigate is 24 hours. This
While the default provides a balance of security and convenience, you can customize this duration to suit your specific security requirements and user experience preferences. The session length is configured in seconds. While the default provides a balance of security and convenience, you can customize this duration to suit your specific security requirements and user experience preferences. The session length is configured in seconds.
The default value of `86400` will expire the authentication session after 24 hours. Some other examples: The default value of `86400` will expire the authentication session after 24 hours. Some other examples:
- `0`: Setting the session length to 0 will require a user to log in every time they access the application or after a very short, immediate timeout. - `0`: Setting the session length to 0 will require a user to log in every time they access the application or after a very short, immediate timeout.
- `604800`: Setting the session length to 604800 will require a user to log in if the token is not refreshed for 7 days. - `604800`: Setting the session length to 604800 will require a user to log in if the token is not refreshed for 7 days.
@ -133,6 +134,31 @@ proxy:
default_role: viewer default_role: viewer
``` ```
## Role mapping
In some environments, upstream identity providers (OIDC, SAML, LDAP, etc.) do not pass a Frigate-compatible role directly, but instead pass one or more group claims. To handle this, Frigate supports a `role_map` that translates upstream group names into Frigates internal roles (`admin` or `viewer`).
```yaml
proxy:
...
header_map:
user: x-forwarded-user
role: x-forwarded-groups
role_map:
admin:
- sysadmins
- access-level-security
viewer:
- camera-viewer
```
In this example:
- If the proxy passes a role header containing `sysadmins` or `access-level-security`, the user is assigned the `admin` role.
- If the proxy passes a role header containing `camera-viewer`, the user is assigned the `viewer` role.
- If no mapping matches, Frigate falls back to `default_role` if configured.
- If `role_map` is not defined, Frigate assumes the role header directly contains `admin` or `viewer`.
#### Port Considerations #### Port Considerations
**Authenticated Port (8971)** **Authenticated Port (8971)**

View File

@ -24,7 +24,7 @@ Frigate needs to first detect a `person` before it can detect and recognize a fa
Frigate has support for two face recognition model types: Frigate has support for two face recognition model types:
- **small**: Frigate will run a FaceNet embedding model to recognize faces, which runs locally on the CPU. This model is optimized for efficiency and is not as accurate. - **small**: Frigate will run a FaceNet embedding model to recognize faces, which runs locally on the CPU. This model is optimized for efficiency and is not as accurate.
- **large**: Frigate will run a large ArcFace embedding model that is optimized for accuracy. It is only recommended to be run when an integrated or dedicated GPU is available. - **large**: Frigate will run a large ArcFace embedding model that is optimized for accuracy. It is only recommended to be run when an integrated or dedicated GPU / NPU is available.
In both cases, a lightweight face landmark detection model is also used to align faces before running recognition. In both cases, a lightweight face landmark detection model is also used to align faces before running recognition.
@ -34,7 +34,7 @@ All of these features run locally on your system.
The `small` model is optimized for efficiency and runs on the CPU, most CPUs should run the model efficiently. The `small` model is optimized for efficiency and runs on the CPU, most CPUs should run the model efficiently.
The `large` model is optimized for accuracy, an integrated or discrete GPU is required. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation. The `large` model is optimized for accuracy, an integrated or discrete GPU / NPU is required. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.
## Configuration ## Configuration
@ -73,6 +73,9 @@ Fine-tune face recognition with these optional parameters at the global level of
- Default: `100`. - Default: `100`.
- `blur_confidence_filter`: Enables a filter that calculates how blurry the face is and adjusts the confidence based on this. - `blur_confidence_filter`: Enables a filter that calculates how blurry the face is and adjusts the confidence based on this.
- Default: `True`. - Default: `True`.
- `device`: Target a specific device to run the face recognition model on (multi-GPU installation).
- Default: `None`.
- Note: This setting is only applicable when using the `large` model. See [onnxruntime's provider options](https://onnxruntime.ai/docs/execution-providers/)
## Usage ## Usage

View File

@ -5,11 +5,11 @@ title: Enrichments
# Enrichments # Enrichments
Some of Frigate's enrichments can use a discrete GPU for accelerated processing. Some of Frigate's enrichments can use a discrete GPU / NPU for accelerated processing.
## Requirements ## Requirements
Object detection and enrichments (like Semantic Search, Face Recognition, and License Plate Recognition) are independent features. To use a GPU for object detection, see the [Object Detectors](/configuration/object_detectors.md) documentation. If you want to use your GPU for any supported enrichments, you must choose the appropriate Frigate Docker image for your GPU and configure the enrichment according to its specific documentation. Object detection and enrichments (like Semantic Search, Face Recognition, and License Plate Recognition) are independent features. To use a GPU / NPU for object detection, see the [Object Detectors](/configuration/object_detectors.md) documentation. If you want to use your GPU for any supported enrichments, you must choose the appropriate Frigate Docker image for your GPU / NPU and configure the enrichment according to its specific documentation.
- **AMD** - **AMD**
@ -23,6 +23,9 @@ Object detection and enrichments (like Semantic Search, Face Recognition, and Li
- Nvidia GPUs will automatically be detected and used for enrichments in the `-tensorrt` Frigate image. - Nvidia GPUs will automatically be detected and used for enrichments in the `-tensorrt` Frigate image.
- Jetson devices will automatically be detected and used for enrichments in the `-tensorrt-jp6` Frigate image. - Jetson devices will automatically be detected and used for enrichments in the `-tensorrt-jp6` Frigate image.
- **RockChip**
- RockChip NPU will automatically be detected and used for semantic search v1 and face recognition in the `-rk` Frigate image.
Utilizing a GPU for enrichments does not require you to use the same GPU for object detection. For example, you can run the `tensorrt` Docker image for enrichments and still use other dedicated hardware like a Coral or Hailo for object detection. However, one combination that is not supported is TensorRT for object detection and OpenVINO for enrichments. Utilizing a GPU for enrichments does not require you to use the same GPU for object detection. For example, you can run the `tensorrt` Docker image for enrichments and still use other dedicated hardware like a Coral or Hailo for object detection. However, one combination that is not supported is TensorRT for object detection and OpenVINO for enrichments.
:::note :::note

View File

@ -67,9 +67,9 @@ Fine-tune the LPR feature using these optional parameters at the global level of
- **`min_area`**: Defines the minimum area (in pixels) a license plate must be before recognition runs. - **`min_area`**: Defines the minimum area (in pixels) a license plate must be before recognition runs.
- Default: `1000` pixels. Note: this is intentionally set very low as it is an _area_ measurement (length x width). For reference, 1000 pixels represents a ~32x32 pixel square in your camera image. - Default: `1000` pixels. Note: this is intentionally set very low as it is an _area_ measurement (length x width). For reference, 1000 pixels represents a ~32x32 pixel square in your camera image.
- Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant plates. - Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant plates.
- **`device`**: Device to use to run license plate recognition models. - **`device`**: Device to use to run license plate detection *and* recognition models.
- Default: `CPU` - Default: `CPU`
- This can be `CPU` or `GPU`. For users without a model that detects license plates natively, using a GPU may increase performance of the models, especially the YOLOv9 license plate detector model. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation. - This can be `CPU` or one of [onnxruntime's provider options](https://onnxruntime.ai/docs/execution-providers/). For users without a model that detects license plates natively, using a GPU may increase performance of the models, especially the YOLOv9 license plate detector model. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.
- **`model_size`**: The size of the model used to detect text on plates. - **`model_size`**: The size of the model used to detect text on plates.
- Default: `small` - Default: `small`
- This can be `small` or `large`. The `large` model uses an enhanced text detector and is more accurate at finding text on plates but slower than the `small` model. For most users, the small model is recommended. For users in countries with multiple lines of text on plates, the large model is recommended. Note that using the large model does not improve _text recognition_, but it may improve _text detection_. - This can be `small` or `large`. The `large` model uses an enhanced text detector and is more accurate at finding text on plates but slower than the `small` model. For most users, the small model is recommended. For users in countries with multiple lines of text on plates, the large model is recommended. Note that using the large model does not improve _text recognition_, but it may improve _text detection_.

View File

@ -13,12 +13,17 @@ Frigate supports multiple different detectors that work on different types of ha
- [Coral EdgeTPU](#edge-tpu-detector): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices. - [Coral EdgeTPU](#edge-tpu-detector): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices.
- [Hailo](#hailo-8): The Hailo8 and Hailo8L AI Acceleration module is available in m.2 format with a HAT for RPi devices, offering a wide range of compatibility with devices. - [Hailo](#hailo-8): The Hailo8 and Hailo8L AI Acceleration module is available in m.2 format with a HAT for RPi devices, offering a wide range of compatibility with devices.
- [MemryX](#memryx-mx3): The MX3 Acceleration module is available in m.2 format, offering broad compatibility across various platforms.
**AMD** **AMD**
- [ROCm](#amdrocm-gpu-detector): ROCm can run on AMD Discrete GPUs to provide efficient object detection. - [ROCm](#amdrocm-gpu-detector): ROCm can run on AMD Discrete GPUs to provide efficient object detection.
- [ONNX](#onnx): ROCm will automatically be detected and used as a detector in the `-rocm` Frigate image when a supported ONNX model is configured. - [ONNX](#onnx): ROCm will automatically be detected and used as a detector in the `-rocm` Frigate image when a supported ONNX model is configured.
**Apple Silicon**
- [Apple Silicon](#apple-silicon-detector): Apple Silicon can run on M1 and newer Apple Silicon devices.
**Intel** **Intel**
- [OpenVino](#openvino-detector): OpenVino can run on Intel Arc GPUs, Intel integrated GPUs, and Intel CPUs to provide efficient object detection. - [OpenVino](#openvino-detector): OpenVino can run on Intel Arc GPUs, Intel integrated GPUs, and Intel CPUs to provide efficient object detection.
@ -52,7 +57,7 @@ This does not affect using hardware for accelerating other tasks such as [semant
# Officially Supported Detectors # Officially Supported Detectors
Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `hailo8l`, `onnx`, `openvino`, `rknn`, and `tensorrt`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras. Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `hailo8l`, `memryx`, `onnx`, `openvino`, `rknn`, and `tensorrt`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras.
## Edge TPU Detector ## Edge TPU Detector
@ -240,6 +245,8 @@ Hailo8 supports all models in the Hailo Model Zoo that include HailoRT post-proc
--- ---
## OpenVINO Detector ## OpenVINO Detector
The OpenVINO detector type runs an OpenVINO IR model on AMD and Intel CPUs, Intel GPUs and Intel VPU hardware. To configure an OpenVINO detector, set the `"type"` attribute to `"openvino"`. The OpenVINO detector type runs an OpenVINO IR model on AMD and Intel CPUs, Intel GPUs and Intel VPU hardware. To configure an OpenVINO detector, set the `"type"` attribute to `"openvino"`.
@ -264,7 +271,7 @@ detectors:
::: :::
### Supported Models ### OpenVINO Supported Models
#### SSDLite MobileNet v2 #### SSDLite MobileNet v2
@ -402,6 +409,59 @@ model:
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects. Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
## Apple Silicon detector
The NPU in Apple Silicon can't be accessed from within a container, so the [Apple Silicon detector client](https://github.com/frigate-nvr/apple-silicon-detector) must first be set up. It is recommended to use the Frigate docker image with the `-standard-arm64` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-standard-arm64`.
### Setup
1. Set up the [Apple Silicon detector client](https://github.com/frigate-nvr/apple-silicon-detector) and run the client
2. Configure the detector in Frigate and start up Frigate
### Configuration
Using the detector config below will connect to the client:
```yaml
detectors:
apple-silicon:
type: zmq
endpoint: tcp://host.docker.internal:5555
```
### Apple Silicon Supported Models
There is no default model provided; the following formats are supported:
#### YOLO (v3, v4, v7, v9)
YOLOv3, YOLOv4, YOLOv7, and [YOLOv9](https://github.com/WongKinYiu/yolov9) models are supported, but not included by default.
:::tip
The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv9 models, but may support other YOLO model architectures as well. See [the models section](#downloading-yolo-models) for more information on downloading YOLO models for use in Frigate.
:::
After placing the downloaded onnx model in your config folder, you can use the following configuration:
```yaml
detectors:
  apple-silicon:
    type: zmq
    endpoint: tcp://host.docker.internal:5555
model:
model_type: yolo-generic
width: 320 # <--- should match the imgsize set during model export
height: 320 # <--- should match the imgsize set during model export
input_tensor: nchw
input_dtype: float
path: /config/model_cache/yolo.onnx
labelmap_path: /labelmap/coco-80.txt
```
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
## AMD/ROCm GPU detector ## AMD/ROCm GPU detector
### Setup ### Setup
@ -483,7 +543,7 @@ We unset the `HSA_OVERRIDE_GFX_VERSION` to prevent an existing override from mes
$ docker exec -it frigate /bin/bash -c '(unset HSA_OVERRIDE_GFX_VERSION && /opt/rocm/bin/rocminfo |grep gfx)' $ docker exec -it frigate /bin/bash -c '(unset HSA_OVERRIDE_GFX_VERSION && /opt/rocm/bin/rocminfo |grep gfx)'
``` ```
### Supported Models ### ROCm Supported Models
See [ONNX supported models](#supported-models) for supported models, there are some caveats: See [ONNX supported models](#supported-models) for supported models, there are some caveats:
@ -526,7 +586,7 @@ detectors:
::: :::
### Supported Models ### ONNX Supported Models
There is no default model provided, the following formats are supported: There is no default model provided, the following formats are supported:
@ -699,6 +759,196 @@ To verify that the integration is working correctly, start Frigate and observe t
# Community Supported Detectors # Community Supported Detectors
## MemryX MX3
This detector is available for use with the MemryX MX3 accelerator M.2 module. Frigate supports the MX3 on compatible hardware platforms, providing efficient and high-performance object detection.
See the [installation docs](../frigate/installation.md#memryx-mx3) for information on configuring the MemryX hardware.
To configure a MemryX detector, simply set the `type` attribute to `memryx` and follow the configuration guide below.
### Configuration
To configure the MemryX detector, use the following example configuration:
#### Single PCIe MemryX MX3
```yaml
detectors:
memx0:
type: memryx
device: PCIe:0
```
#### Multiple PCIe MemryX MX3 Modules
```yaml
detectors:
memx0:
type: memryx
device: PCIe:0
memx1:
type: memryx
device: PCIe:1
memx2:
type: memryx
device: PCIe:2
```
### Supported Models
MemryX `.dfp` models are automatically downloaded at runtime, if enabled, to the container at `/memryx_models/model_folder/`.
#### YOLO-NAS
The [YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) model included in this detector is downloaded from the [Models Section](#downloading-yolo-nas-model) and compiled to DFP with [mx_nc](https://developer.memryx.com/tools/neural_compiler.html#usage).
**Note:** The default model for the MemryX detector is YOLO-NAS 320x320.
The input size for **YOLO-NAS** can be set to either **320x320** (default) or **640x640**.
- The default size of **320x320** is optimized for lower CPU usage and faster inference times.
##### Configuration
Below is the recommended configuration for using the **YOLO-NAS** (small) model with the MemryX detector:
```yaml
detectors:
memx0:
type: memryx
device: PCIe:0
model:
model_type: yolonas
width: 320 # (Can be set to 640 for higher resolution)
height: 320 # (Can be set to 640 for higher resolution)
input_tensor: nchw
input_dtype: float
labelmap_path: /labelmap/coco-80.txt
# Optional: The model is normally fetched through the runtime, so 'path' can be omitted unless you want to use a custom or local model.
# path: /config/yolonas.zip
# The .zip file must contain:
# ├── yolonas.dfp (a file ending with .dfp)
# └── yolonas_post.onnx (optional; only if the model includes a cropped post-processing network)
```
#### YOLOv9
The YOLOv9s model included in this detector is downloaded from [the original GitHub](https://github.com/WongKinYiu/yolov9) like in the [Models Section](#yolov9-1) and compiled to DFP with [mx_nc](https://developer.memryx.com/tools/neural_compiler.html#usage).
##### Configuration
Below is the recommended configuration for using the **YOLOv9** (small) model with the MemryX detector:
```yaml
detectors:
memx0:
type: memryx
device: PCIe:0
model:
model_type: yolo-generic
width: 320 # (Can be set to 640 for higher resolution)
height: 320 # (Can be set to 640 for higher resolution)
input_tensor: nchw
input_dtype: float
labelmap_path: /labelmap/coco-80.txt
# Optional: The model is normally fetched through the runtime, so 'path' can be omitted unless you want to use a custom or local model.
# path: /config/yolov9.zip
# The .zip file must contain:
# ├── yolov9.dfp (a file ending with .dfp)
# └── yolov9_post.onnx (optional; only if the model includes a cropped post-processing network)
```
#### YOLOX
The model is sourced from the [OpenCV Model Zoo](https://github.com/opencv/opencv_zoo) and precompiled to DFP.
##### Configuration
Below is the recommended configuration for using the **YOLOX** (small) model with the MemryX detector:
```yaml
detectors:
memx0:
type: memryx
device: PCIe:0
model:
model_type: yolox
width: 640
height: 640
input_tensor: nchw
input_dtype: float_denorm
labelmap_path: /labelmap/coco-80.txt
# Optional: The model is normally fetched through the runtime, so 'path' can be omitted unless you want to use a custom or local model.
# path: /config/yolox.zip
# The .zip file must contain:
# ├── yolox.dfp (a file ending with .dfp)
```
#### SSDLite MobileNet v2
The model is sourced from the [OpenMMLab Model Zoo](https://mmdeploy-oss.openmmlab.com/model/mmdet-det/ssdlite-e8679f.onnx) and has been converted to DFP.
##### Configuration
Below is the recommended configuration for using the **SSDLite MobileNet v2** model with the MemryX detector:
```yaml
detectors:
memx0:
type: memryx
device: PCIe:0
model:
model_type: ssd
width: 320
height: 320
input_tensor: nchw
input_dtype: float
labelmap_path: /labelmap/coco-80.txt
# Optional: The model is normally fetched through the runtime, so 'path' can be omitted unless you want to use a custom or local model.
# path: /config/ssdlite_mobilenet.zip
# The .zip file must contain:
# ├── ssdlite_mobilenet.dfp (a file ending with .dfp)
# └── ssdlite_mobilenet_post.onnx (optional; only if the model includes a cropped post-processing network)
```
#### Using a Custom Model
To use your own model:
1. Package your compiled model into a `.zip` file.
2. The `.zip` must contain the compiled `.dfp` file.
3. Depending on the model, the compiler may also generate a cropped post-processing network. If present, it will be named with the suffix `_post.onnx`.
4. Bind-mount the `.zip` file into the container and specify its path using `model.path` in your config.
5. Update the `labelmap_path` to match your custom model's labels.
For detailed instructions on compiling models, refer to the [MemryX Compiler](https://developer.memryx.com/tools/neural_compiler.html#usage) docs and [Tutorials](https://developer.memryx.com/tutorials/tutorials.html).
```yaml
# The detector automatically selects the default model if nothing is provided in the config.
#
# Optionally, you can specify a local model path as a .zip file to override the default.
# If a local path is provided and the file exists, it will be used instead of downloading.
#
# Example:
# path: /config/yolonas.zip
#
# The .zip file must contain:
# ├── yolonas.dfp (a file ending with .dfp)
# └── yolonas_post.onnx (optional; only if the model includes a cropped post-processing network)
```
---
## NVidia TensorRT Detector ## NVidia TensorRT Detector
Nvidia Jetson devices may be used for object detection using the TensorRT libraries. Due to the size of the additional libraries, this detector is only provided in images with the `-tensorrt-jp6` tag suffix, e.g. `ghcr.io/blakeblackshear/frigate:stable-tensorrt-jp6`. This detector is designed to work with Yolo models for object detection. Nvidia Jetson devices may be used for object detection using the TensorRT libraries. Due to the size of the additional libraries, this detector is only provided in images with the `-tensorrt-jp6` tag suffix, e.g. `ghcr.io/blakeblackshear/frigate:stable-tensorrt-jp6`. This detector is designed to work with Yolo models for object detection.
@ -824,7 +1074,7 @@ $ cat /sys/kernel/debug/rknpu/load
::: :::
### Supported Models ### RockChip Supported Models
This `config.yml` shows all relevant options to configure the detector and explains them. All values shown are the default values (except for two). Lines that are required at least to use the detector are labeled as required, all other lines are optional. This `config.yml` shows all relevant options to configure the detector and explains them. All values shown are the default values (except for two). Lines that are required at least to use the detector are labeled as required, all other lines are optional.

View File

@ -73,6 +73,12 @@ tls:
# Optional: Enable TLS for port 8971 (default: shown below) # Optional: Enable TLS for port 8971 (default: shown below)
enabled: True enabled: True
# Optional: IPv6 configuration
networking:
# Optional: Enable IPv6 on port 5000, and on port 8971 if TLS is configured (default: shown below)
ipv6:
enabled: False
# Optional: Proxy configuration # Optional: Proxy configuration
proxy: proxy:
# Optional: Mapping for headers from upstream proxies. Only used if Frigate's auth # Optional: Mapping for headers from upstream proxies. Only used if Frigate's auth
@ -82,7 +88,13 @@ proxy:
# See the docs for more info. # See the docs for more info.
header_map: header_map:
user: x-forwarded-user user: x-forwarded-user
role: x-forwarded-role role: x-forwarded-groups
role_map:
admin:
- sysadmins
- access-level-security
viewer:
- camera-viewer
# Optional: Url for logging out a user. This sets the location of the logout url in # Optional: Url for logging out a user. This sets the location of the logout url in
# the UI. # the UI.
logout_url: /api/logout logout_url: /api/logout
@ -586,6 +598,9 @@ semantic_search:
# Optional: Set the model size used for embeddings. (default: shown below) # Optional: Set the model size used for embeddings. (default: shown below)
# NOTE: small model runs on CPU and large model runs on GPU # NOTE: small model runs on CPU and large model runs on GPU
model_size: "small" model_size: "small"
# Optional: Target a specific device to run the model (default: shown below)
# NOTE: See https://onnxruntime.ai/docs/execution-providers/ for more information
device: None
# Optional: Configuration for face recognition capability # Optional: Configuration for face recognition capability
# NOTE: enabled, min_area can be overridden at the camera level # NOTE: enabled, min_area can be overridden at the camera level
@ -609,6 +624,9 @@ face_recognition:
blur_confidence_filter: True blur_confidence_filter: True
# Optional: Set the model size used face recognition. (default: shown below) # Optional: Set the model size used face recognition. (default: shown below)
model_size: small model_size: small
# Optional: Target a specific device to run the model (default: shown below)
# NOTE: See https://onnxruntime.ai/docs/execution-providers/ for more information
device: None
# Optional: Configuration for license plate recognition capability # Optional: Configuration for license plate recognition capability
# NOTE: enabled, min_area, and enhancement can be overridden at the camera level # NOTE: enabled, min_area, and enhancement can be overridden at the camera level
@ -616,6 +634,7 @@ lpr:
# Optional: Enable license plate recognition (default: shown below) # Optional: Enable license plate recognition (default: shown below)
enabled: False enabled: False
# Optional: The device to run the models on (default: shown below) # Optional: The device to run the models on (default: shown below)
# NOTE: See https://onnxruntime.ai/docs/execution-providers/ for more information
device: CPU device: CPU
# Optional: Set the model size used for text detection. (default: shown below) # Optional: Set the model size used for text detection. (default: shown below)
model_size: small model_size: small
@ -652,6 +671,8 @@ genai:
base_url: http://localhost:11434 base_url: http://localhost:11434
# Required if gemini or openai # Required if gemini or openai
api_key: "{FRIGATE_GENAI_API_KEY}" api_key: "{FRIGATE_GENAI_API_KEY}"
# Required if enabled: The model to use with the provider.
model: gemini-1.5-flash
# Optional additional args to pass to the GenAI Provider (default: None) # Optional additional args to pass to the GenAI Provider (default: None)
provider_options: provider_options:
keep_alive: -1 keep_alive: -1

View File

@ -78,17 +78,21 @@ Switching between V1 and V2 requires reindexing your embeddings. The embeddings
### GPU Acceleration ### GPU Acceleration
The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU hardware, when available. This depends on the Docker build that is used. The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU / NPU hardware, when available. This depends on the Docker build that is used. You can also target a specific device in a multi-GPU installation.
```yaml ```yaml
semantic_search: semantic_search:
enabled: True enabled: True
model_size: large model_size: large
# Optional, if using the 'large' model in a multi-GPU installation
device: 0
``` ```
:::info :::info
If the correct build is used for your GPU and the `large` model is configured, then the GPU will be detected and used automatically. If the correct build is used for your GPU / NPU and the `large` model is configured, then the GPU / NPU will be detected and used automatically.
Specify the `device` option to target a specific GPU in a multi-GPU system (see [onnxruntime's provider options](https://onnxruntime.ai/docs/execution-providers/)).
If you do not specify a device, the first available GPU will be used.
See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.

View File

@ -58,22 +58,33 @@ Frigate supports multiple different detectors that work on different types of ha
- [Google Coral EdgeTPU](#google-coral-tpu): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices. - [Google Coral EdgeTPU](#google-coral-tpu): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices.
- [Supports primarily ssdlite and mobilenet model architectures](../../configuration/object_detectors#edge-tpu-detector) - [Supports primarily ssdlite and mobilenet model architectures](../../configuration/object_detectors#edge-tpu-detector)
- [MemryX](#memryx-mx3): The MX3 M.2 accelerator module is available in m.2 format allowing for a wide range of compatibility with devices.
- [Supports many model architectures](../../configuration/object_detectors#memryx-mx3)
- Runs best with tiny, small, or medium-size models
**AMD** **AMD**
- [ROCm](#rocm---amd-gpu): ROCm can run on AMD Discrete GPUs to provide efficient object detection - [ROCm](#rocm---amd-gpu): ROCm can run on AMD Discrete GPUs to provide efficient object detection
- [Supports limited model architectures](../../configuration/object_detectors#supported-models-1) - [Supports limited model architectures](../../configuration/object_detectors#rocm-supported-models)
- Runs best on discrete AMD GPUs - Runs best on discrete AMD GPUs
**Apple Silicon**
- [Apple Silicon](#apple-silicon): Apple Silicon is usable on all M1 and newer Apple Silicon devices to provide efficient and fast object detection
- [Supports YOLO model architectures](../../configuration/object_detectors#apple-silicon-supported-models)
- Runs well with any size models including large
- Runs via ZMQ proxy which adds some latency, only recommended for local connection
**Intel** **Intel**
- [OpenVino](#openvino---intel): OpenVino can run on Intel Arc GPUs, Intel integrated GPUs, and Intel CPUs to provide efficient object detection. - [OpenVino](#openvino---intel): OpenVino can run on Intel Arc GPUs, Intel integrated GPUs, and Intel CPUs to provide efficient object detection.
- [Supports majority of model architectures](../../configuration/object_detectors#supported-models) - [Supports majority of model architectures](../../configuration/object_detectors#openvino-supported-models)
- Runs best with tiny, small, or medium models - Runs best with tiny, small, or medium models
**Nvidia** **Nvidia**
- [TensortRT](#tensorrt---nvidia-gpu): TensorRT can run on Nvidia GPUs and Jetson devices. - [TensortRT](#tensorrt---nvidia-gpu): TensorRT can run on Nvidia GPUs and Jetson devices.
- [Supports majority of model architectures via ONNX](../../configuration/object_detectors#supported-models-2) - [Supports majority of model architectures via ONNX](../../configuration/object_detectors#onnx-supported-models)
- Runs well with any size models including large - Runs well with any size models including large
**Rockchip** **Rockchip**
@ -173,17 +184,56 @@ Inference speeds will vary greatly depending on the GPU and the model used.
| RTX A4000 | | 320: ~ 15 ms | | | RTX A4000 | | 320: ~ 15 ms | |
| Tesla P40 | | 320: ~ 105 ms | | | Tesla P40 | | 320: ~ 105 ms | |
### Apple Silicon
With the [Apple Silicon](../configuration/object_detectors.md#apple-silicon-detector) detector Frigate can take advantage of the NPU in M1 and newer Apple Silicon.
:::warning
The Apple Silicon NPU cannot be accessed from within a container, so a ZMQ proxy is utilized to communicate with [the Apple Silicon Frigate detector](https://github.com/frigate-nvr/apple-silicon-detector), which runs on the host. This should add minimal latency when run on the same device.
:::
| Name | YOLOv9 Inference Time |
| --------- | ---------------------- |
| M3 Pro | t-320: 6 ms s-320: 8ms |
| M1 | s-320: 9ms |
### ROCm - AMD GPU ### ROCm - AMD GPU
With the [rocm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many discrete AMD GPUs. With the [ROCm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many discrete AMD GPUs.
| Name | YOLOv9 Inference Time | YOLO-NAS Inference Time | | Name | YOLOv9 Inference Time | YOLO-NAS Inference Time |
| --------- | --------------------- | ------------------------- | | --------- | --------------------- | ------------------------- |
| AMD 780M | ~ 14 ms | 320: ~ 25 ms 640: ~ 50 ms | | AMD 780M | ~ 14 ms | 320: ~ 25 ms 640: ~ 50 ms |
| AMD 8700G | | 320: ~ 20 ms 640: ~ 40 ms |
## Community Supported Detectors ## Community Supported Detectors
### MemryX MX3
Frigate supports the MemryX MX3 M.2 AI Acceleration Module on compatible hardware platforms, including both x86 (Intel/AMD) and ARM-based SBCs such as Raspberry Pi 5.
A single MemryX MX3 module is capable of handling multiple camera streams using the default models, making it sufficient for most users. For larger deployments with more cameras or bigger models, multiple MX3 modules can be used. Frigate supports multi-detector configurations, allowing you to connect multiple MX3 modules to scale inference capacity.
Detailed information is available [in the detector docs](/configuration/object_detectors#memryx-mx3).
**Default Model Configuration:**
- Default model is **YOLO-NAS-Small**.
The MX3 is a pipelined architecture, where the maximum frames per second supported (and thus supported number of cameras) cannot be calculated as `1/latency` (1/"Inference Time") and is measured separately. When estimating how many camera streams you may support with your configuration, use the **MX3 Total FPS** column as an approximation of the detector's limit, not the Inference Time.
| Model | Input Size | MX3 Inference Time | MX3 Total FPS |
|----------------------|------------|--------------------|---------------|
| YOLO-NAS-Small | 320 | ~ 9 ms | ~ 378 |
| YOLO-NAS-Small | 640 | ~ 21 ms | ~ 138 |
| YOLOv9s | 320 | ~ 16 ms | ~ 382 |
| YOLOv9s | 640 | ~ 41 ms | ~ 110 |
| YOLOX-Small | 640 | ~ 16 ms | ~ 263 |
| SSDlite MobileNet v2 | 320 | ~ 5 ms | ~ 1056 |
Inference speeds may vary depending on the host platform. The above data was measured on an **Intel 13700 CPU**. Platforms like Raspberry Pi, Orange Pi, and other ARM-based SBCs have different levels of processing capability, which may limit total FPS.
### Nvidia Jetson ### Nvidia Jetson
Frigate supports all Jetson boards, from the inexpensive Jetson Nano to the powerful Jetson Orin AGX. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration_video#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector). Frigate supports all Jetson boards, from the inexpensive Jetson Nano to the powerful Jetson Orin AGX. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration_video#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector).

View File

@ -132,6 +132,77 @@ If you are using `docker run`, add this option to your command `--device /dev/ha
Finally, configure [hardware object detection](/configuration/object_detectors#hailo-8l) to complete the setup. Finally, configure [hardware object detection](/configuration/object_detectors#hailo-8l) to complete the setup.
### MemryX MX3
The MemryX MX3 Accelerator is available in the M.2 2280 form factor (like an NVMe SSD), and supports a variety of configurations:
- x86 (Intel/AMD) PCs
- Raspberry Pi 5
- Orange Pi 5 Plus/Max
- Multi-M.2 PCIe carrier cards
#### Installation
To get started with MX3 hardware setup for your system, refer to the [Hardware Setup Guide](https://developer.memryx.com/get_started/hardware_setup.html).
Then follow these steps for installing the correct driver/runtime configuration:
1. Copy or download [this script](https://github.com/blakeblackshear/frigate/blob/dev/docker/memryx/user_installation.sh).
2. Ensure it has execution permissions with `sudo chmod +x user_installation.sh`
3. Run the script with `./user_installation.sh`
4. **Restart your computer** to complete driver installation.
#### Setup
To set up Frigate, follow the default installation instructions, for example: `ghcr.io/blakeblackshear/frigate:stable`
Next, grant Docker permissions to access your hardware by adding the following lines to your `docker-compose.yml` file:
```yaml
devices:
- /dev/memx0
```
During configuration, you must run Docker in privileged mode and ensure the container can access the max-manager.
In your `docker-compose.yml`, also add:
```yaml
privileged: true
volumes:
  - /run/mxa_manager:/run/mxa_manager
```
If you can't use Docker Compose, you can run the container with something similar to this:
```bash
docker run -d \
--name frigate-memx \
--restart=unless-stopped \
--mount type=tmpfs,target=/tmp/cache,tmpfs-size=1000000000 \
--shm-size=256m \
-v /path/to/your/storage:/media/frigate \
-v /path/to/your/config:/config \
-v /etc/localtime:/etc/localtime:ro \
-v /run/mxa_manager:/run/mxa_manager \
-e FRIGATE_RTSP_PASSWORD='password' \
--privileged=true \
-p 8971:8971 \
-p 8554:8554 \
-p 5000:5000 \
-p 8555:8555/tcp \
-p 8555:8555/udp \
--device /dev/memx0 \
ghcr.io/blakeblackshear/frigate:stable
```
#### Configuration
Finally, configure [hardware object detection](/configuration/object_detectors#memryx-mx3) to complete the setup.
### Rockchip platform ### Rockchip platform
Make sure that you use a linux distribution that comes with the rockchip BSP kernel 5.10 or 6.1 and necessary drivers (especially rkvdec2 and rknpu). To check, enter the following commands: Make sure that you use a linux distribution that comes with the rockchip BSP kernel 5.10 or 6.1 and necessary drivers (especially rkvdec2 and rknpu). To check, enter the following commands:

View File

@ -238,6 +238,14 @@ Topic with current state of notifications. Published values are `ON` and `OFF`.
## Frigate Camera Topics ## Frigate Camera Topics
### `frigate/<camera_name>/<role>/status`
Publishes the current health status of each role that is enabled (`audio`, `detect`, `record`). Possible values are:
- `online`: Stream is running and being processed
- `offline`: Stream is offline and is being restarted
- `disabled`: Camera is currently disabled
### `frigate/<camera_name>/<object_name>` ### `frigate/<camera_name>/<object_name>`
Publishes the count of objects for the camera for use as a sensor in Home Assistant. Publishes the count of objects for the camera for use as a sensor in Home Assistant.

View File

@ -217,15 +217,23 @@ def require_role(required_roles: List[str]):
if not roles: if not roles:
raise HTTPException(status_code=403, detail="Role not provided") raise HTTPException(status_code=403, detail="Role not provided")
# Check if any role matches required_roles # enforce VALID_ROLES
if not any(role in required_roles for role in roles): valid_roles = [r for r in roles if r in VALID_ROLES]
if not valid_roles:
raise HTTPException( raise HTTPException(
status_code=403, status_code=403,
detail=f"Role {', '.join(roles)} not authorized. Required: {', '.join(required_roles)}", detail=f"No valid roles found in {roles}. Required: {', '.join(required_roles)}",
) )
# Return the first matching role if not any(role in required_roles for role in valid_roles):
return next((role for role in roles if role in required_roles), roles[0]) raise HTTPException(
status_code=403,
detail=f"Role {', '.join(valid_roles)} not authorized. Required: {', '.join(required_roles)}",
)
return next(
(role for role in valid_roles if role in required_roles), valid_roles[0]
)
return role_checker return role_checker
@ -266,22 +274,38 @@ def auth(request: Request):
else "anonymous" else "anonymous"
) )
# start with default_role
role = proxy_config.default_role
# first try: explicit role header
role_header = proxy_config.header_map.role role_header = proxy_config.header_map.role
role = ( if role_header:
request.headers.get(role_header, default=proxy_config.default_role) raw_value = request.headers.get(role_header, "")
if role_header if proxy_config.header_map.role_map and raw_value:
else proxy_config.default_role # treat as group claim
) groups = [
g.strip()
# if comma-separated with "admin", use "admin", for g in raw_value.replace(" ", ",").split(",")
# if comma-separated with "viewer", use "viewer", if g.strip()
# else use default role ]
for (
roles = [r.strip() for r in role.split(proxy_config.separator)] if role else [] candidate_role,
success_response.headers["remote-role"] = next( required_groups,
(r for r in VALID_ROLES if r in roles), proxy_config.default_role ) in proxy_config.header_map.role_map.items():
) if any(group in groups for group in required_groups):
role = candidate_role
break
elif raw_value:
normalized_role = raw_value.strip().lower()
if normalized_role in VALID_ROLES:
role = normalized_role
else:
logger.warning(
f"Provided proxy role header contains invalid value '{raw_value}'. Using default role '{proxy_config.default_role}'."
)
role = proxy_config.default_role
success_response.headers["remote-role"] = role
return success_response return success_response
# now apply authentication # now apply authentication

View File

@ -1,8 +1,10 @@
import logging import logging
import re
from typing import Optional from typing import Optional
from fastapi import FastAPI, Request from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse from fastapi.responses import JSONResponse
from joserfc.jwk import OctKey
from playhouse.sqliteq import SqliteQueueDatabase from playhouse.sqliteq import SqliteQueueDatabase
from slowapi import _rate_limit_exceeded_handler from slowapi import _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded from slowapi.errors import RateLimitExceeded
@ -130,6 +132,26 @@ def create_fastapi_app(
app.stats_emitter = stats_emitter app.stats_emitter = stats_emitter
app.event_metadata_updater = event_metadata_updater app.event_metadata_updater = event_metadata_updater
app.config_publisher = config_publisher app.config_publisher = config_publisher
app.jwt_token = get_jwt_secret() if frigate_config.auth.enabled else None
if frigate_config.auth.enabled:
secret = get_jwt_secret()
key_bytes = None
if isinstance(secret, str):
# If the secret looks like hex (e.g., generated by secrets.token_hex), use raw bytes
if len(secret) % 2 == 0 and re.fullmatch(r"[0-9a-fA-F]+", secret or ""):
try:
key_bytes = bytes.fromhex(secret)
except ValueError:
key_bytes = secret.encode("utf-8")
else:
key_bytes = secret.encode("utf-8")
elif isinstance(secret, (bytes, bytearray)):
key_bytes = bytes(secret)
else:
key_bytes = str(secret).encode("utf-8")
app.jwt_token = OctKey.import_key(key_bytes)
else:
app.jwt_token = None
return app return app

View File

@ -1,10 +1,21 @@
"""Manage camera activity and updating listeners.""" """Manage camera activity and updating listeners."""
import datetime
import json
import logging
import random
import string
from collections import Counter from collections import Counter
from typing import Any, Callable from typing import Any, Callable
from frigate.comms.event_metadata_updater import (
EventMetadataPublisher,
EventMetadataTypeEnum,
)
from frigate.config import CameraConfig, FrigateConfig from frigate.config import CameraConfig, FrigateConfig
logger = logging.getLogger(__name__)
class CameraActivityManager: class CameraActivityManager:
def __init__( def __init__(
@ -139,3 +150,106 @@ class CameraActivityManager:
if any_changed: if any_changed:
self.publish(f"{camera}/all", sum(list(all_objects.values()))) self.publish(f"{camera}/all", sum(list(all_objects.values())))
self.publish(f"{camera}/all/active", sum(list(active_objects.values()))) self.publish(f"{camera}/all/active", sum(list(active_objects.values())))
class AudioActivityManager:
def __init__(
self, config: FrigateConfig, publish: Callable[[str, Any], None]
) -> None:
self.config = config
self.publish = publish
self.current_audio_detections: dict[str, dict[str, dict[str, Any]]] = {}
self.event_metadata_publisher = EventMetadataPublisher()
for camera_config in config.cameras.values():
if not camera_config.audio.enabled_in_config:
continue
self.__init_camera(camera_config)
def __init_camera(self, camera_config: CameraConfig) -> None:
self.current_audio_detections[camera_config.name] = {}
def update_activity(self, new_activity: dict[str, dict[str, Any]]) -> None:
now = datetime.datetime.now().timestamp()
for camera in new_activity.keys():
# handle cameras that were added dynamically
if camera not in self.current_audio_detections:
self.__init_camera(self.config.cameras[camera])
new_detections = new_activity[camera].get("detections", [])
if self.compare_audio_activity(camera, new_detections, now):
logger.debug(f"Audio detections for {camera}: {new_activity}")
self.publish(
"audio_detections",
json.dumps(self.current_audio_detections),
)
def compare_audio_activity(
self, camera: str, new_detections: list[tuple[str, float]], now: float
) -> None:
max_not_heard = self.config.cameras[camera].audio.max_not_heard
current = self.current_audio_detections[camera]
any_changed = False
for label, score in new_detections:
any_changed = True
if label in current:
current[label]["last_detection"] = now
current[label]["score"] = score
else:
rand_id = "".join(
random.choices(string.ascii_lowercase + string.digits, k=6)
)
event_id = f"{now}-{rand_id}"
self.publish(f"{camera}/audio/{label}", "ON")
self.event_metadata_publisher.publish(
(
now,
camera,
label,
event_id,
True,
score,
None,
None,
"audio",
{},
),
EventMetadataTypeEnum.manual_event_create.value,
)
current[label] = {
"id": event_id,
"score": score,
"last_detection": now,
}
# expire detections
for label in list(current.keys()):
if now - current[label]["last_detection"] > max_not_heard:
any_changed = True
self.publish(f"{camera}/audio/{label}", "OFF")
self.event_metadata_publisher.publish(
(current[label]["id"], now),
EventMetadataTypeEnum.manual_event_end.value,
)
del current[label]
return any_changed
def expire_all(self, camera: str) -> None:
now = datetime.datetime.now().timestamp()
current = self.current_audio_detections.get(camera, {})
for label in list(current.keys()):
self.publish(f"{camera}/audio/{label}", "OFF")
self.event_metadata_publisher.publish(
(current[label]["id"], now),
EventMetadataTypeEnum.manual_event_end.value,
)
del current[label]

View File

@ -2,8 +2,6 @@
import logging import logging
import multiprocessing as mp import multiprocessing as mp
import os
import shutil
import threading import threading
from multiprocessing import Queue from multiprocessing import Queue
from multiprocessing.managers import DictProxy, SyncManager from multiprocessing.managers import DictProxy, SyncManager
@ -16,11 +14,11 @@ from frigate.config.camera.updater import (
CameraConfigUpdateEnum, CameraConfigUpdateEnum,
CameraConfigUpdateSubscriber, CameraConfigUpdateSubscriber,
) )
from frigate.const import SHM_FRAMES_VAR
from frigate.models import Regions from frigate.models import Regions
from frigate.util.builtin import empty_and_close_queue from frigate.util.builtin import empty_and_close_queue
from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory
from frigate.util.object import get_camera_regions_grid from frigate.util.object import get_camera_regions_grid
from frigate.util.services import calculate_shm_requirements
from frigate.video import CameraCapture, CameraTracker from frigate.video import CameraCapture, CameraTracker
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -74,53 +72,25 @@ class CameraMaintainer(threading.Thread):
) )
def __calculate_shm_frame_count(self) -> int: def __calculate_shm_frame_count(self) -> int:
total_shm = round(shutil.disk_usage("/dev/shm").total / pow(2, 20), 1) shm_stats = calculate_shm_requirements(self.config)
# required for log files + nginx cache if not shm_stats:
min_req_shm = 40 + 10 # /dev/shm not available
if self.config.birdseye.restream:
min_req_shm += 8
available_shm = total_shm - min_req_shm
cam_total_frame_size = 0.0
for camera in self.config.cameras.values():
if (
camera.enabled_in_config
and camera.detect.width
and camera.detect.height
):
cam_total_frame_size += round(
(camera.detect.width * camera.detect.height * 1.5 + 270480)
/ 1048576,
1,
)
# leave room for 2 cameras that are added dynamically, if a user wants to add more cameras they may need to increase the SHM size and restart after adding them.
cam_total_frame_size += 2 * round(
(1280 * 720 * 1.5 + 270480) / 1048576,
1,
)
if cam_total_frame_size == 0.0:
return 0 return 0
shm_frame_count = min(
int(os.environ.get(SHM_FRAMES_VAR, "50")),
int(available_shm / (cam_total_frame_size)),
)
logger.debug( logger.debug(
f"Calculated total camera size {available_shm} / {cam_total_frame_size} :: {shm_frame_count} frames for each camera in SHM" f"Calculated total camera size {shm_stats['available']} / "
f"{shm_stats['camera_frame_size']} :: {shm_stats['shm_frame_count']} "
f"frames for each camera in SHM"
) )
if shm_frame_count < 20: if shm_stats["shm_frame_count"] < 20:
logger.warning( logger.warning(
f"The current SHM size of {total_shm}MB is too small, recommend increasing it to at least {round(min_req_shm + cam_total_frame_size * 20)}MB." f"The current SHM size of {shm_stats['total']}MB is too small, "
f"recommend increasing it to at least {shm_stats['min_shm']}MB."
) )
return shm_frame_count return shm_stats["shm_frame_count"]
def __start_camera_processor( def __start_camera_processor(
self, name: str, config: CameraConfig, runtime: bool = False self, name: str, config: CameraConfig, runtime: bool = False

View File

@ -54,7 +54,7 @@ class CameraState:
self.ptz_autotracker_thread = ptz_autotracker_thread self.ptz_autotracker_thread = ptz_autotracker_thread
self.prev_enabled = self.camera_config.enabled self.prev_enabled = self.camera_config.enabled
def get_current_frame(self, draw_options: dict[str, Any] = {}): def get_current_frame(self, draw_options: dict[str, Any] = {}) -> np.ndarray:
with self.current_frame_lock: with self.current_frame_lock:
frame_copy = np.copy(self._current_frame) frame_copy = np.copy(self._current_frame)
frame_time = self.current_frame_time frame_time = self.current_frame_time
@ -272,7 +272,7 @@ class CameraState:
def finished(self, obj_id): def finished(self, obj_id):
del self.tracked_objects[obj_id] del self.tracked_objects[obj_id]
def on(self, event_type: str, callback: Callable[[dict], None]): def on(self, event_type: str, callback: Callable):
self.callbacks[event_type].append(callback) self.callbacks[event_type].append(callback)
def update( def update(

View File

@ -6,7 +6,7 @@ import logging
from typing import Any, Callable, Optional, cast from typing import Any, Callable, Optional, cast
from frigate.camera import PTZMetrics from frigate.camera import PTZMetrics
from frigate.camera.activity_manager import CameraActivityManager from frigate.camera.activity_manager import AudioActivityManager, CameraActivityManager
from frigate.comms.base_communicator import Communicator from frigate.comms.base_communicator import Communicator
from frigate.comms.webpush import WebPushClient from frigate.comms.webpush import WebPushClient
from frigate.config import BirdseyeModeEnum, FrigateConfig from frigate.config import BirdseyeModeEnum, FrigateConfig
@ -17,10 +17,12 @@ from frigate.config.camera.updater import (
) )
from frigate.const import ( from frigate.const import (
CLEAR_ONGOING_REVIEW_SEGMENTS, CLEAR_ONGOING_REVIEW_SEGMENTS,
EXPIRE_AUDIO_ACTIVITY,
INSERT_MANY_RECORDINGS, INSERT_MANY_RECORDINGS,
INSERT_PREVIEW, INSERT_PREVIEW,
NOTIFICATION_TEST, NOTIFICATION_TEST,
REQUEST_REGION_GRID, REQUEST_REGION_GRID,
UPDATE_AUDIO_ACTIVITY,
UPDATE_BIRDSEYE_LAYOUT, UPDATE_BIRDSEYE_LAYOUT,
UPDATE_CAMERA_ACTIVITY, UPDATE_CAMERA_ACTIVITY,
UPDATE_EMBEDDINGS_REINDEX_PROGRESS, UPDATE_EMBEDDINGS_REINDEX_PROGRESS,
@ -55,6 +57,7 @@ class Dispatcher:
self.ptz_metrics = ptz_metrics self.ptz_metrics = ptz_metrics
self.comms = communicators self.comms = communicators
self.camera_activity = CameraActivityManager(config, self.publish) self.camera_activity = CameraActivityManager(config, self.publish)
self.audio_activity = AudioActivityManager(config, self.publish)
self.model_state: dict[str, ModelStatusTypesEnum] = {} self.model_state: dict[str, ModelStatusTypesEnum] = {}
self.embeddings_reindex: dict[str, Any] = {} self.embeddings_reindex: dict[str, Any] = {}
self.birdseye_layout: dict[str, Any] = {} self.birdseye_layout: dict[str, Any] = {}
@ -135,6 +138,12 @@ class Dispatcher:
def handle_update_camera_activity() -> None: def handle_update_camera_activity() -> None:
self.camera_activity.update_activity(payload) self.camera_activity.update_activity(payload)
def handle_update_audio_activity() -> None:
self.audio_activity.update_activity(payload)
def handle_expire_audio_activity() -> None:
self.audio_activity.expire_all(payload)
def handle_update_event_description() -> None: def handle_update_event_description() -> None:
event: Event = Event.get(Event.id == payload["id"]) event: Event = Event.get(Event.id == payload["id"])
cast(dict, event.data)["description"] = payload["description"] cast(dict, event.data)["description"] = payload["description"]
@ -192,6 +201,7 @@ class Dispatcher:
def handle_on_connect() -> None: def handle_on_connect() -> None:
camera_status = self.camera_activity.last_camera_activity.copy() camera_status = self.camera_activity.last_camera_activity.copy()
audio_detections = self.audio_activity.current_audio_detections.copy()
cameras_with_status = camera_status.keys() cameras_with_status = camera_status.keys()
for camera in self.config.cameras.keys(): for camera in self.config.cameras.keys():
@ -234,6 +244,7 @@ class Dispatcher:
json.dumps(self.embeddings_reindex.copy()), json.dumps(self.embeddings_reindex.copy()),
) )
self.publish("birdseye_layout", json.dumps(self.birdseye_layout.copy())) self.publish("birdseye_layout", json.dumps(self.birdseye_layout.copy()))
self.publish("audio_detections", json.dumps(audio_detections))
def handle_notification_test() -> None: def handle_notification_test() -> None:
self.publish("notification_test", "Test notification") self.publish("notification_test", "Test notification")
@ -246,6 +257,8 @@ class Dispatcher:
UPSERT_REVIEW_SEGMENT: handle_upsert_review_segment, UPSERT_REVIEW_SEGMENT: handle_upsert_review_segment,
CLEAR_ONGOING_REVIEW_SEGMENTS: handle_clear_ongoing_review_segments, CLEAR_ONGOING_REVIEW_SEGMENTS: handle_clear_ongoing_review_segments,
UPDATE_CAMERA_ACTIVITY: handle_update_camera_activity, UPDATE_CAMERA_ACTIVITY: handle_update_camera_activity,
UPDATE_AUDIO_ACTIVITY: handle_update_audio_activity,
EXPIRE_AUDIO_ACTIVITY: handle_expire_audio_activity,
UPDATE_EVENT_DESCRIPTION: handle_update_event_description, UPDATE_EVENT_DESCRIPTION: handle_update_event_description,
UPDATE_REVIEW_DESCRIPTION: handle_update_review_description, UPDATE_REVIEW_DESCRIPTION: handle_update_review_description,
UPDATE_MODEL_STATE: handle_update_model_state, UPDATE_MODEL_STATE: handle_update_model_state,

View File

@ -8,7 +8,7 @@ from .zmq_proxy import Publisher, Subscriber
class EventUpdatePublisher( class EventUpdatePublisher(
Publisher[tuple[EventTypeEnum, EventStateEnum, str, str, dict[str, Any]]] Publisher[tuple[EventTypeEnum, EventStateEnum, str | None, str, dict[str, Any]]]
): ):
"""Publishes events (objects, audio, manual).""" """Publishes events (objects, audio, manual)."""
@ -19,7 +19,7 @@ class EventUpdatePublisher(
def publish( def publish(
self, self,
payload: tuple[EventTypeEnum, EventStateEnum, str, str, dict[str, Any]], payload: tuple[EventTypeEnum, EventStateEnum, str | None, str, dict[str, Any]],
sub_topic: str = "", sub_topic: str = "",
) -> None: ) -> None:
super().publish(payload, sub_topic) super().publish(payload, sub_topic)

View File

@ -2,7 +2,7 @@
import json import json
import threading import threading
from typing import Any, Generic, Optional, TypeVar from typing import Generic, TypeVar
import zmq import zmq
@ -70,7 +70,7 @@ class Publisher(Generic[T]):
self.context.destroy() self.context.destroy()
class Subscriber: class Subscriber(Generic[T]):
"""Receives messages.""" """Receives messages."""
topic_base: str = "" topic_base: str = ""
@ -82,9 +82,7 @@ class Subscriber:
self.socket.setsockopt_string(zmq.SUBSCRIBE, self.topic) self.socket.setsockopt_string(zmq.SUBSCRIBE, self.topic)
self.socket.connect(SOCKET_SUB) self.socket.connect(SOCKET_SUB)
def check_for_update( def check_for_update(self, timeout: float | None = FAST_QUEUE_TIMEOUT) -> T | None:
self, timeout: float | None = FAST_QUEUE_TIMEOUT
) -> tuple[str, Any] | tuple[None, None] | None:
"""Returns message or None if no update.""" """Returns message or None if no update."""
try: try:
has_update, _, _ = zmq.select([self.socket], [], [], timeout) has_update, _, _ = zmq.select([self.socket], [], [], timeout)
@ -101,7 +99,5 @@ class Subscriber:
self.socket.close() self.socket.close()
self.context.destroy() self.context.destroy()
def _return_object( def _return_object(self, topic: str, payload: T | None) -> T | None:
self, topic: str, payload: Optional[tuple[str, Any]]
) -> tuple[str, Any] | tuple[None, None] | None:
return payload return payload

View File

@ -130,6 +130,11 @@ class SemanticSearchConfig(FrigateBaseModel):
model_size: str = Field( model_size: str = Field(
default="small", title="The size of the embeddings model used." default="small", title="The size of the embeddings model used."
) )
device: Optional[str] = Field(
default=None,
title="The device key to use for semantic search.",
description="This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information",
)
class TriggerConfig(FrigateBaseModel): class TriggerConfig(FrigateBaseModel):
@ -196,6 +201,11 @@ class FaceRecognitionConfig(FrigateBaseModel):
blur_confidence_filter: bool = Field( blur_confidence_filter: bool = Field(
default=True, title="Apply blur quality filter to face confidence." default=True, title="Apply blur quality filter to face confidence."
) )
device: Optional[str] = Field(
default=None,
title="The device key to use for face recognition.",
description="This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information",
)
class CameraFaceRecognitionConfig(FrigateBaseModel): class CameraFaceRecognitionConfig(FrigateBaseModel):
@ -209,10 +219,6 @@ class CameraFaceRecognitionConfig(FrigateBaseModel):
class LicensePlateRecognitionConfig(FrigateBaseModel): class LicensePlateRecognitionConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable license plate recognition.") enabled: bool = Field(default=False, title="Enable license plate recognition.")
device: Optional[EnrichmentsDeviceEnum] = Field(
default=EnrichmentsDeviceEnum.CPU,
title="The device used for license plate recognition.",
)
model_size: str = Field( model_size: str = Field(
default="small", title="The size of the embeddings model used." default="small", title="The size of the embeddings model used."
) )
@ -258,6 +264,11 @@ class LicensePlateRecognitionConfig(FrigateBaseModel):
default=False, default=False,
title="Save plates captured for LPR for debugging purposes.", title="Save plates captured for LPR for debugging purposes.",
) )
device: Optional[str] = Field(
default=None,
title="The device key to use for LPR.",
description="This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information",
)
class CameraLicensePlateRecognitionConfig(FrigateBaseModel): class CameraLicensePlateRecognitionConfig(FrigateBaseModel):

View File

@ -64,6 +64,7 @@ from .database import DatabaseConfig
from .env import EnvVars from .env import EnvVars
from .logger import LoggerConfig from .logger import LoggerConfig
from .mqtt import MqttConfig from .mqtt import MqttConfig
from .network import NetworkingConfig
from .proxy import ProxyConfig from .proxy import ProxyConfig
from .telemetry import TelemetryConfig from .telemetry import TelemetryConfig
from .tls import TlsConfig from .tls import TlsConfig
@ -334,6 +335,9 @@ class FrigateConfig(FrigateBaseModel):
notifications: NotificationConfig = Field( notifications: NotificationConfig = Field(
default_factory=NotificationConfig, title="Global notification configuration." default_factory=NotificationConfig, title="Global notification configuration."
) )
networking: NetworkingConfig = Field(
default_factory=NetworkingConfig, title="Networking configuration"
)
proxy: ProxyConfig = Field( proxy: ProxyConfig = Field(
default_factory=ProxyConfig, title="Proxy configuration." default_factory=ProxyConfig, title="Proxy configuration."
) )

13
frigate/config/network.py Normal file
View File

@ -0,0 +1,13 @@
from pydantic import Field
from .base import FrigateBaseModel
__all__ = ["IPv6Config", "NetworkingConfig"]
class IPv6Config(FrigateBaseModel):
    # When true, Frigate's web listeners (port 5000 and/or 8971) also bind IPv6.
    enabled: bool = Field(default=False, title="Enable IPv6 for port 5000 and/or 8971")
class NetworkingConfig(FrigateBaseModel):
    # Container for network-level options; currently only IPv6 toggling.
    ipv6: IPv6Config = Field(default_factory=IPv6Config, title="Network configuration")

View File

@ -16,6 +16,10 @@ class HeaderMappingConfig(FrigateBaseModel):
default=None, default=None,
title="Header name from upstream proxy to identify user role.", title="Header name from upstream proxy to identify user role.",
) )
role_map: Optional[dict[str, list[str]]] = Field(
default_factory=dict,
title=("Mapping of Frigate roles to upstream group values. "),
)
class ProxyConfig(FrigateBaseModel): class ProxyConfig(FrigateBaseModel):

View File

@ -74,6 +74,7 @@ FFMPEG_HWACCEL_NVIDIA = "preset-nvidia"
FFMPEG_HWACCEL_VAAPI = "preset-vaapi" FFMPEG_HWACCEL_VAAPI = "preset-vaapi"
FFMPEG_HWACCEL_VULKAN = "preset-vulkan" FFMPEG_HWACCEL_VULKAN = "preset-vulkan"
FFMPEG_HWACCEL_RKMPP = "preset-rkmpp" FFMPEG_HWACCEL_RKMPP = "preset-rkmpp"
FFMPEG_HWACCEL_AMF = "preset-amd-amf"
FFMPEG_HVC1_ARGS = ["-tag:v", "hvc1"] FFMPEG_HVC1_ARGS = ["-tag:v", "hvc1"]
# Regex constants # Regex constants
@ -110,6 +111,8 @@ REQUEST_REGION_GRID = "request_region_grid"
UPSERT_REVIEW_SEGMENT = "upsert_review_segment" UPSERT_REVIEW_SEGMENT = "upsert_review_segment"
CLEAR_ONGOING_REVIEW_SEGMENTS = "clear_ongoing_review_segments" CLEAR_ONGOING_REVIEW_SEGMENTS = "clear_ongoing_review_segments"
UPDATE_CAMERA_ACTIVITY = "update_camera_activity" UPDATE_CAMERA_ACTIVITY = "update_camera_activity"
UPDATE_AUDIO_ACTIVITY = "update_audio_activity"
EXPIRE_AUDIO_ACTIVITY = "expire_audio_activity"
UPDATE_EVENT_DESCRIPTION = "update_event_description" UPDATE_EVENT_DESCRIPTION = "update_event_description"
UPDATE_REVIEW_DESCRIPTION = "update_review_description" UPDATE_REVIEW_DESCRIPTION = "update_review_description"
UPDATE_MODEL_STATE = "update_model_state" UPDATE_MODEL_STATE = "update_model_state"

View File

@ -269,7 +269,7 @@ class ArcFaceRecognizer(FaceRecognizer):
def __init__(self, config: FrigateConfig): def __init__(self, config: FrigateConfig):
super().__init__(config) super().__init__(config)
self.mean_embs: dict[int, np.ndarray] = {} self.mean_embs: dict[int, np.ndarray] = {}
self.face_embedder: ArcfaceEmbedding = ArcfaceEmbedding() self.face_embedder: ArcfaceEmbedding = ArcfaceEmbedding(config.face_recognition)
self.model_builder_queue: queue.Queue | None = None self.model_builder_queue: queue.Queue | None = None
def clear(self) -> None: def clear(self) -> None:

View File

@ -171,7 +171,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
# don't run for non person objects # don't run for non person objects
if obj_data.get("label") != "person": if obj_data.get("label") != "person":
logger.debug("Not a processing face for non person object.") logger.debug("Not processing face for a non person object.")
return return
# don't overwrite sub label for objects that have a sub label # don't overwrite sub label for objects that have a sub label

View File

@ -0,0 +1,731 @@
import glob
import logging
import os
import shutil
import time
import urllib.request
import zipfile
from queue import Queue
import cv2
import numpy as np
from pydantic import BaseModel, Field
from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import (
BaseDetectorConfig,
ModelTypeEnum,
)
from frigate.util.model import post_process_yolo
logger = logging.getLogger(__name__)

# Identifier used in the Frigate detector config (`detectors.<name>.type`)
# to select this plugin.
DETECTOR_KEY = "memryx"
# Configuration class for model settings
class ModelConfig(BaseModel):
    # NOTE(review): both fields are annotated `str` but default to None —
    # presumably `Optional[str]` was intended; pydantic does not validate
    # defaults unless asked to, so this passes silently. Confirm intent.
    path: str = Field(default=None, title="Model Path")  # Path to the DFP file
    labelmap_path: str = Field(default=None, title="Path to Label Map")
class MemryXDetectorConfig(BaseDetectorConfig):
    # Discriminator value that routes config with type "memryx" to this detector.
    type: Literal[DETECTOR_KEY]
    # Accelerator selector; "PCIe" or "PCIe:<index>" per the parsing in
    # MemryXDetector.__init__.
    device: str = Field(default="PCIe", title="Device Path")
class MemryXDetector(DetectionApi):
    # Registry key Frigate uses to select this detector implementation.
    type_key = DETECTOR_KEY  # Set the type key

    # Model architectures this detector supports on the MemryX accelerator.
    supported_models = [
        ModelTypeEnum.ssd,
        ModelTypeEnum.yolonas,
        ModelTypeEnum.yologeneric,  # Treated as yolov9 in MemryX implementation
        ModelTypeEnum.yolox,
    ]
def __init__(self, detector_config):
    """Initialize the MemryX detector with the provided configuration.

    Resolves which model archive to use (custom zip or a downloadable
    MemryX reference model), prepares the model files on disk, and starts
    the asynchronous accelerator pipeline.

    Args:
        detector_config: A ``MemryXDetectorConfig`` carrying the device
            selector and the nested model settings (path, width, height,
            model_type).

    Raises:
        ImportError: if the MemryX SDK is not installed.
        Exception: re-raised from ``AsyncAccl`` if accelerator setup fails.
    """
    try:
        # The MemryX SDK is an optional runtime dependency; surface a clear
        # message instead of a bare ModuleNotFoundError.
        from memryx import AsyncAccl
    except ModuleNotFoundError:
        # Fixed: the original had an unreachable `return` after this raise.
        raise ImportError(
            "MemryX SDK is not installed. Install it and set up MIX environment."
        )

    model_cfg = getattr(detector_config, "model", None)

    # Check if model_type was explicitly set by the user; otherwise default
    # to yolonas for MemryX.
    if "model_type" in getattr(model_cfg, "__fields_set__", set()):
        detector_config.model.model_type = model_cfg.model_type
    else:
        logger.info(
            "model_type not set in config — defaulting to yolonas for MemryX."
        )
        detector_config.model.model_type = ModelTypeEnum.yolonas

    # Bounded queues decouple Frigate's detect calls from the async accelerator.
    self.capture_queue = Queue(maxsize=10)
    self.output_queue = Queue(maxsize=10)
    self.capture_id_queue = Queue(maxsize=10)
    self.logger = logger

    self.memx_model_path = detector_config.model.path  # Path to .dfp file
    self.memx_post_model = None  # Path to .post file
    self.expected_post_model = None
    self.memx_device_path = detector_config.device  # Device path

    # Parse the device string. Accept both "PCIe" (the documented default,
    # meaning device 0) and "PCIe:<index>". The original unconditionally did
    # int(device_str.split(":")[1]), which raised IndexError for "PCIe".
    device_str = self.memx_device_path
    self.device_id = []
    if ":" in device_str:
        self.device_id.append(int(device_str.split(":", 1)[1]))
    else:
        self.device_id.append(0)

    self.memx_model_height = detector_config.model.height
    self.memx_model_width = detector_config.model.width
    self.memx_model_type = detector_config.model.model_type
    self.cache_dir = "/memryx_models"

    # Select download URL / folder / expected file names per model type and
    # input resolution (unknown resolutions fall back to the 320x320 variant).
    if self.memx_model_type == ModelTypeEnum.yologeneric:
        model_mapping = {
            (640, 640): (
                "https://developer.memryx.com/example_files/2p0_frigate/yolov9_640.zip",
                "yolov9_640",
            ),
            (320, 320): (
                "https://developer.memryx.com/example_files/2p0_frigate/yolov9_320.zip",
                "yolov9_320",
            ),
        }
        self.model_url, self.model_folder = model_mapping.get(
            (self.memx_model_height, self.memx_model_width),
            (
                "https://developer.memryx.com/example_files/2p0_frigate/yolov9_320.zip",
                "yolov9_320",
            ),
        )
        self.expected_dfp_model = "YOLO_v9_small_onnx.dfp"
    elif self.memx_model_type == ModelTypeEnum.yolonas:
        model_mapping = {
            (640, 640): (
                "https://developer.memryx.com/example_files/2p0_frigate/yolonas_640.zip",
                "yolonas_640",
            ),
            (320, 320): (
                "https://developer.memryx.com/example_files/2p0_frigate/yolonas_320.zip",
                "yolonas_320",
            ),
        }
        self.model_url, self.model_folder = model_mapping.get(
            (self.memx_model_height, self.memx_model_width),
            (
                "https://developer.memryx.com/example_files/2p0_frigate/yolonas_320.zip",
                "yolonas_320",
            ),
        )
        self.expected_dfp_model = "yolo_nas_s.dfp"
        self.expected_post_model = "yolo_nas_s_post.onnx"
    elif self.memx_model_type == ModelTypeEnum.yolox:
        self.model_folder = "yolox"
        self.model_url = (
            "https://developer.memryx.com/example_files/2p0_frigate/yolox.zip"
        )
        self.expected_dfp_model = "YOLOX_640_640_3_onnx.dfp"
        self.set_strides_grids()
    elif self.memx_model_type == ModelTypeEnum.ssd:
        self.model_folder = "ssd"
        self.model_url = (
            "https://developer.memryx.com/example_files/2p0_frigate/ssd.zip"
        )
        self.expected_dfp_model = "SSDlite_MobileNet_v2_320_320_3_onnx.dfp"
        self.expected_post_model = "SSDlite_MobileNet_v2_320_320_3_onnx_post.onnx"

    # Resolve/extract/download the actual model files on disk.
    self.check_and_prepare_model()

    logger.info(
        f"Initializing MemryX with model: {self.memx_model_path} on device {self.memx_device_path}"
    )

    try:
        # Load MemryX Model
        logger.info(f"dfp path: {self.memx_model_path}")

        # Load MemryX Model with a device target
        self.accl = AsyncAccl(
            self.memx_model_path,
            device_ids=self.device_id,  # AsyncAccl device ids
            local_mode=True,
        )

        # Models that use cropped post-processing sections (YOLO-NAS and SSD)
        # --> These will be moved to pure numpy in the future to improve performance on low-end CPUs
        if self.memx_post_model:
            self.accl.set_postprocessing_model(self.memx_post_model, model_idx=0)

        self.accl.connect_input(self.process_input)
        self.accl.connect_output(self.process_output)

        logger.info(
            f"Loaded MemryX model from {self.memx_model_path} and {self.memx_post_model}"
        )
    except Exception as e:
        logger.error(f"Failed to initialize MemryX model: {e}")
        raise
def load_yolo_constants(self):
    """Load the cached numpy constants used by YOLOv9 post-processing.

    Reads three pre-extracted ``.npy`` files from the model cache folder and
    stores them as ``const_A``/``const_B``/``const_C`` on the instance.
    """
    base = f"{self.cache_dir}/{self.model_folder}"
    self.const_A, self.const_B, self.const_C = (
        np.load(f"{base}/{filename}")
        for filename in (
            "_model_22_Constant_9_output_0.npy",
            "_model_22_Constant_10_output_0.npy",
            "_model_22_Constant_12_output_0.npy",
        )
    )
def check_and_prepare_model(self):
    """Resolve the .dfp (and optional *_post.onnx) model files to load.

    Resolution order:
      1. A user-supplied ``.zip`` archive (``self.memx_model_path``),
         extracted into a dedicated ``custom_models`` area.
      2. Previously extracted files cached under ``self.cache_dir``.
      3. Download from ``self.model_url`` and extract into the cache.

    On return, ``self.memx_model_path`` points at a .dfp file and
    ``self.memx_post_model`` at a post-processing model (or None).

    Raises:
        ValueError: if a custom model path is not a .zip archive.
        FileNotFoundError: if the custom zip, or required files inside it,
            cannot be found.
    """
    if not os.path.exists(self.cache_dir):
        os.makedirs(self.cache_dir, exist_ok=True)

    # ---------- CASE 1: user provided a custom model path ----------
    if self.memx_model_path:
        if not self.memx_model_path.endswith(".zip"):
            raise ValueError(
                f"Invalid model path: {self.memx_model_path}. "
                "Only .zip files are supported. Please provide a .zip model archive."
            )

        if not os.path.exists(self.memx_model_path):
            raise FileNotFoundError(
                f"Custom model zip not found: {self.memx_model_path}"
            )

        logger.info(f"User provided zip model: {self.memx_model_path}")

        # Extract custom zip into a separate area so it never clashes with MemryX cache
        custom_dir = os.path.join(
            self.cache_dir, "custom_models", self.model_folder
        )
        # Re-extract from scratch every time so stale files never linger.
        if os.path.isdir(custom_dir):
            shutil.rmtree(custom_dir)
        os.makedirs(custom_dir, exist_ok=True)

        with zipfile.ZipFile(self.memx_model_path, "r") as zip_ref:
            zip_ref.extractall(custom_dir)

        logger.info(f"Custom model extracted to {custom_dir}.")

        # Find .dfp and optional *_post.onnx recursively
        dfp_candidates = glob.glob(
            os.path.join(custom_dir, "**", "*.dfp"), recursive=True
        )
        post_candidates = glob.glob(
            os.path.join(custom_dir, "**", "*_post.onnx"), recursive=True
        )

        if not dfp_candidates:
            raise FileNotFoundError(
                "No .dfp file found in custom model zip after extraction."
            )
        # NOTE: first match wins if the archive contains several .dfp files.
        self.memx_model_path = dfp_candidates[0]

        # Handle post model requirements by model type
        if self.memx_model_type in [
            ModelTypeEnum.yologeneric,
            ModelTypeEnum.yolonas,
            ModelTypeEnum.ssd,
        ]:
            # These model types require a post-processing ONNX model.
            if not post_candidates:
                raise FileNotFoundError(
                    f"No *_post.onnx file found in custom model zip for {self.memx_model_type.name}."
                )
            self.memx_post_model = post_candidates[0]
        elif self.memx_model_type == ModelTypeEnum.yolox:
            # Explicitly ignore any post model even if present
            self.memx_post_model = None
        else:
            # Future model types can optionally use post if present
            self.memx_post_model = post_candidates[0] if post_candidates else None

        logger.info(f"Using custom model: {self.memx_model_path}")
        return

    # ---------- CASE 2: no custom model path -> use MemryX cached models ----------
    model_subdir = os.path.join(self.cache_dir, self.model_folder)
    dfp_path = os.path.join(model_subdir, self.expected_dfp_model)
    post_path = (
        os.path.join(model_subdir, self.expected_post_model)
        if self.expected_post_model
        else None
    )

    dfp_exists = os.path.exists(dfp_path)
    # Model types without a post model treat the post check as satisfied.
    post_exists = os.path.exists(post_path) if post_path else True

    if dfp_exists and post_exists:
        logger.info("Using cached models.")
        self.memx_model_path = dfp_path
        self.memx_post_model = post_path
        if self.memx_model_type == ModelTypeEnum.yologeneric:
            # yolov9 additionally needs its post-processing constants loaded.
            self.load_yolo_constants()
        return

    # ---------- CASE 3: download MemryX model (no cache) ----------
    logger.info(
        f"Model files not found locally. Downloading from {self.model_url}..."
    )
    zip_path = os.path.join(self.cache_dir, f"{self.model_folder}.zip")
    try:
        if not os.path.exists(zip_path):
            urllib.request.urlretrieve(self.model_url, zip_path)
            logger.info(f"Model ZIP downloaded to {zip_path}. Extracting...")

        if not os.path.exists(model_subdir):
            with zipfile.ZipFile(zip_path, "r") as zip_ref:
                zip_ref.extractall(self.cache_dir)
            logger.info(f"Model extracted to {self.cache_dir}.")

        # Re-assign model paths after extraction
        self.memx_model_path = os.path.join(model_subdir, self.expected_dfp_model)
        self.memx_post_model = (
            os.path.join(model_subdir, self.expected_post_model)
            if self.expected_post_model
            else None
        )
        if self.memx_model_type == ModelTypeEnum.yologeneric:
            self.load_yolo_constants()
    finally:
        # The zip is only an intermediate artifact; remove it whether or not
        # extraction succeeded.
        if os.path.exists(zip_path):
            try:
                os.remove(zip_path)
                logger.info("Cleaned up ZIP file after extraction.")
            except Exception as e:
                logger.warning(f"Failed to remove downloaded zip {zip_path}: {e}")
    def send_input(self, connection_id, tensor_input: np.ndarray):
        """Pre-process (if needed) and send frame to MemryX input queue.

        Args:
            connection_id: opaque id queued alongside the frame so results can
                be matched back to the caller in receive_output().
            tensor_input: frame tensor; layout expectations depend on the
                configured model type (assumes NHWC (1, 320, 320, 3) for
                YOLO-NAS and CHW with a batch dim for YOLOX — TODO confirm
                against the caller).

        Raises:
            ValueError: if no tensor is provided.
        """
        if tensor_input is None:
            raise ValueError("[send_input] No image data provided for inference")
        if self.memx_model_type == ModelTypeEnum.yolonas:
            # YOLO-NAS path: convert NHWC -> NCHW and scale pixels to [0, 1].
            if tensor_input.ndim == 4 and tensor_input.shape[1:] == (320, 320, 3):
                logger.debug("Transposing tensor from NHWC to NCHW for YOLO-NAS")
                tensor_input = np.transpose(
                    tensor_input, (0, 3, 1, 2)
                )  # (1, H, W, C) → (1, C, H, W)
            tensor_input = tensor_input.astype(np.float32)
            tensor_input /= 255
        if self.memx_model_type == ModelTypeEnum.yolox:
            # YOLOX path: letterbox to 640x640 on a gray (114) canvas, then
            # apply the "focus" transform (space-to-depth) the network expects.
            # Remove batch dim → (3, 640, 640)
            tensor_input = tensor_input.squeeze(0)
            # Convert CHW to HWC for OpenCV
            tensor_input = np.transpose(tensor_input, (1, 2, 0))  # (640, 640, 3)
            padded_img = np.ones((640, 640, 3), dtype=np.uint8) * 114
            # Uniform scale preserving aspect ratio; unused area keeps the pad color.
            scale = min(
                640 / float(tensor_input.shape[0]), 640 / float(tensor_input.shape[1])
            )
            sx, sy = (
                int(tensor_input.shape[1] * scale),
                int(tensor_input.shape[0] * scale),
            )
            resized_img = cv2.resize(
                tensor_input, (sx, sy), interpolation=cv2.INTER_LINEAR
            )
            padded_img[:sy, :sx] = resized_img.astype(np.uint8)
            # Step 4: Slice the padded image into 4 quadrants and concatenate them into 12 channels
            x0 = padded_img[0::2, 0::2, :]  # Top-left
            x1 = padded_img[1::2, 0::2, :]  # Bottom-left
            x2 = padded_img[0::2, 1::2, :]  # Top-right
            x3 = padded_img[1::2, 1::2, :]  # Bottom-right
            # Step 5: Concatenate along the channel dimension (axis 2)
            concatenated_img = np.concatenate([x0, x1, x2, x3], axis=2)
            tensor_input = concatenated_img.astype(np.float32)
            # Convert to CHW format (12, 320, 320)
            tensor_input = np.transpose(tensor_input, (2, 0, 1))
            # Add batch dimension → (1, 12, 320, 320)
            tensor_input = np.expand_dims(tensor_input, axis=0)
        # Send frame to MemryX for processing; the id queue stays in lockstep
        # with the frame queue so receive_output() can pair them up.
        self.capture_queue.put(tensor_input)
        self.capture_id_queue.put(connection_id)
def process_input(self):
"""Input callback function: wait for frames in the input queue, preprocess, and send to MX3 (return)"""
while True:
try:
# Wait for a frame from the queue (blocking call)
frame = self.capture_queue.get(
block=True
) # Blocks until data is available
return frame
except Exception as e:
logger.info(f"[process_input] Error processing input: {e}")
time.sleep(0.1) # Prevent busy waiting in case of error
def receive_output(self):
"""Retrieve processed results from MemryX output queue + a copy of the original frame"""
connection_id = (
self.capture_id_queue.get()
) # Get the corresponding connection ID
detections = self.output_queue.get() # Get detections from MemryX
return connection_id, detections
def post_process_yolonas(self, output):
predictions = output[0]
detections = np.zeros((20, 6), np.float32)
for i, prediction in enumerate(predictions):
if i == 20:
break
(_, x_min, y_min, x_max, y_max, confidence, class_id) = prediction
if class_id < 0:
break
detections[i] = [
class_id,
confidence,
y_min / self.memx_model_height,
x_min / self.memx_model_width,
y_max / self.memx_model_height,
x_max / self.memx_model_width,
]
# Return the list of final detections
self.output_queue.put(detections)
def process_yolo(self, class_id, conf, pos):
"""
Takes in class ID, confidence score, and array of [x, y, w, h] that describes detection position,
returns an array that's easily passable back to Frigate.
"""
return [
class_id, # class ID
conf, # confidence score
(pos[1] - (pos[3] / 2)) / self.memx_model_height, # y_min
(pos[0] - (pos[2] / 2)) / self.memx_model_width, # x_min
(pos[1] + (pos[3] / 2)) / self.memx_model_height, # y_max
(pos[0] + (pos[2] / 2)) / self.memx_model_width, # x_max
]
def set_strides_grids(self):
grids = []
expanded_strides = []
strides = [8, 16, 32]
hsize_list = [self.memx_model_height // stride for stride in strides]
wsize_list = [self.memx_model_width // stride for stride in strides]
for hsize, wsize, stride in zip(hsize_list, wsize_list, strides):
xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
grids.append(grid)
shape = grid.shape[:2]
expanded_strides.append(np.full((*shape, 1), stride))
self.grids = np.concatenate(grids, 1)
self.expanded_strides = np.concatenate(expanded_strides, 1)
def sigmoid(self, x: np.ndarray) -> np.ndarray:
return 1 / (1 + np.exp(-x))
def onnx_concat(self, inputs: list, axis: int) -> np.ndarray:
# Ensure all inputs are numpy arrays
if not all(isinstance(x, np.ndarray) for x in inputs):
raise TypeError("All inputs must be numpy arrays.")
# Ensure shapes match on non-concat axes
ref_shape = list(inputs[0].shape)
for i, tensor in enumerate(inputs[1:], start=1):
for ax in range(len(ref_shape)):
if ax == axis:
continue
if tensor.shape[ax] != ref_shape[ax]:
raise ValueError(
f"Shape mismatch at axis {ax} between input[0] and input[{i}]"
)
return np.concatenate(inputs, axis=axis)
def onnx_reshape(self, data: np.ndarray, shape: np.ndarray) -> np.ndarray:
# Ensure shape is a 1D array of integers
target_shape = shape.astype(int).tolist()
# Use NumPy reshape with dynamic handling of -1
reshaped = np.reshape(data, target_shape)
return reshaped
    def post_process_yolox(self, output):
        """Replay the YOLOX ONNX post-processing graph on the raw MX3 outputs.

        The nine tensors (named after their ONNX node ids) form three
        detection heads; each head contributes (box regression, objectness,
        class scores). The heads are decoded against the precomputed
        self.grids / self.expanded_strides (see set_strides_grids), filtered
        at a combined confidence of 0.3, and the top-20 detections are pushed
        onto the output queue as a (20, 6) array via process_yolo().
        """
        # Unpack the raw head tensors in the order the accelerator emits them.
        output_785 = output[0]  # 785
        output_794 = output[1]  # 794
        output_795 = output[2]  # 795
        output_811 = output[3]  # 811
        output_820 = output[4]  # 820
        output_821 = output[5]  # 821
        output_837 = output[6]  # 837
        output_846 = output[7]  # 846
        output_847 = output[8]  # 847
        # Objectness and class-score tensors go through sigmoid; the box
        # regression tensors (794/820/846) stay raw.
        output_795 = self.sigmoid(output_795)
        output_785 = self.sigmoid(output_785)
        output_821 = self.sigmoid(output_821)
        output_811 = self.sigmoid(output_811)
        output_847 = self.sigmoid(output_847)
        output_837 = self.sigmoid(output_837)
        # Per head: concat (boxes, objectness, classes) into 85 channels.
        concat_1 = self.onnx_concat([output_794, output_795, output_785], axis=1)
        concat_2 = self.onnx_concat([output_820, output_821, output_811], axis=1)
        concat_3 = self.onnx_concat([output_846, output_847, output_837], axis=1)
        shape = np.array([1, 85, -1], dtype=np.int64)
        reshape_1 = self.onnx_reshape(concat_1, shape)
        reshape_2 = self.onnx_reshape(concat_2, shape)
        reshape_3 = self.onnx_reshape(concat_3, shape)
        # Join all heads along the anchor axis, then move anchors to dim 1.
        concat_out = self.onnx_concat([reshape_1, reshape_2, reshape_3], axis=2)
        output = concat_out.transpose(0, 2, 1)  # 1, 840, 85
        self.num_classes = output.shape[2] - 5
        # [x, y, h, w, box_score, class_no_1, ..., class_no_80],
        results = output
        # Decode centers and sizes from grid-relative units to input pixels.
        results[..., :2] = (results[..., :2] + self.grids) * self.expanded_strides
        results[..., 2:4] = np.exp(results[..., 2:4]) * self.expanded_strides
        image_pred = results[0, ...]
        # Best class score and index per anchor.
        class_conf = np.max(
            image_pred[:, 5 : 5 + self.num_classes], axis=1, keepdims=True
        )
        class_pred = np.argmax(image_pred[:, 5 : 5 + self.num_classes], axis=1)
        class_pred = np.expand_dims(class_pred, axis=1)
        # Keep anchors whose objectness * class score clears 0.3.
        conf_mask = (image_pred[:, 4] * class_conf.squeeze() >= 0.3).squeeze()
        # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
        detections = np.concatenate((image_pred[:, :5], class_conf, class_pred), axis=1)
        detections = detections[conf_mask]
        # Sort by class confidence (index 5) and keep top 20 detections
        ordered = detections[detections[:, 5].argsort()[::-1]][:20]
        # Prepare a final detections array of shape (20, 6)
        final_detections = np.zeros((20, 6), np.float32)
        for i, object_detected in enumerate(ordered):
            final_detections[i] = self.process_yolo(
                object_detected[6], object_detected[5], object_detected[:4]
            )
        self.output_queue.put(final_detections)
def post_process_ssdlite(self, outputs):
dets = outputs[0].squeeze(0) # Shape: (1, num_dets, 5)
labels = outputs[1].squeeze(0)
detections = []
for i in range(dets.shape[0]):
x_min, y_min, x_max, y_max, confidence = dets[i]
class_id = int(labels[i]) # Convert label to integer
if confidence < 0.45:
continue # Skip detections below threshold
# Convert coordinates to integers
x_min, y_min, x_max, y_max = map(int, [x_min, y_min, x_max, y_max])
# Append valid detections [class_id, confidence, x, y, width, height]
detections.append([class_id, confidence, x_min, y_min, x_max, y_max])
final_detections = np.zeros((20, 6), np.float32)
if len(detections) == 0:
# logger.info("No detections found.")
self.output_queue.put(final_detections)
return
# Convert to NumPy array
detections = np.array(detections, dtype=np.float32)
# Apply Non-Maximum Suppression (NMS)
bboxes = detections[:, 2:6].tolist() # (x_min, y_min, width, height)
scores = detections[:, 1].tolist() # Confidence scores
indices = cv2.dnn.NMSBoxes(bboxes, scores, 0.45, 0.5)
if len(indices) > 0:
indices = indices.flatten()[:20] # Keep only the top 20 detections
selected_detections = detections[indices]
# Normalize coordinates AFTER NMS
for i, det in enumerate(selected_detections):
class_id, confidence, x_min, y_min, x_max, y_max = det
# Normalize coordinates
x_min /= self.memx_model_width
y_min /= self.memx_model_height
x_max /= self.memx_model_width
y_max /= self.memx_model_height
final_detections[i] = [class_id, confidence, y_min, x_min, y_max, x_max]
self.output_queue.put(final_detections)
def onnx_reshape_with_allowzero(
self, data: np.ndarray, shape: np.ndarray, allowzero: int = 0
) -> np.ndarray:
shape = shape.astype(int)
input_shape = data.shape
output_shape = []
for i, dim in enumerate(shape):
if dim == 0 and allowzero == 0:
output_shape.append(input_shape[i]) # Copy dimension from input
else:
output_shape.append(dim)
# Now let NumPy infer any -1 if needed
reshaped = np.reshape(data, output_shape)
return reshaped
    def process_output(self, *outputs):
        """Output callback function -- receives frames from the MX3 and triggers post-processing.

        For yologeneric models without an on-chip post model, this replays the
        exported ONNX post-processing graph in numpy (DFL box decode via
        softmax + expectation, then anchor decode against self.const_A/B/C)
        before handing off to post_process_yolo(). Other model types dispatch
        to their dedicated post-processing method, each of which pushes a
        (20, 6) detection array onto the output queue.
        """
        if self.memx_model_type == ModelTypeEnum.yologeneric:
            if not self.memx_post_model:
                # Pair up the six conv outputs into the three detection heads.
                conv_out1 = outputs[0]
                conv_out2 = outputs[1]
                conv_out3 = outputs[2]
                conv_out4 = outputs[3]
                conv_out5 = outputs[4]
                conv_out6 = outputs[5]
                concat_1 = self.onnx_concat([conv_out1, conv_out2], axis=1)
                concat_2 = self.onnx_concat([conv_out3, conv_out4], axis=1)
                concat_3 = self.onnx_concat([conv_out5, conv_out6], axis=1)
                # 144 channels = 4 box sides * 16 DFL bins + 80 class scores.
                shape = np.array([1, 144, -1], dtype=np.int64)
                reshaped_1 = self.onnx_reshape_with_allowzero(
                    concat_1, shape, allowzero=0
                )
                reshaped_2 = self.onnx_reshape_with_allowzero(
                    concat_2, shape, allowzero=0
                )
                reshaped_3 = self.onnx_reshape_with_allowzero(
                    concat_3, shape, allowzero=0
                )
                concat_4 = self.onnx_concat([reshaped_1, reshaped_2, reshaped_3], 2)
                axis = 1
                split_sizes = [64, 80]
                # Calculate indices at which to split
                indices = np.cumsum(split_sizes)[
                    :-1
                ]  # [64] — split before the second chunk
                # Perform split along axis 1
                split_0, split_1 = np.split(concat_4, indices, axis=axis)
                # Anchor count follows input size: 320 -> 2100, else 8400.
                num_boxes = 2100 if self.memx_model_height == 320 else 8400
                shape1 = np.array([1, 4, 16, num_boxes])
                reshape_4 = self.onnx_reshape_with_allowzero(
                    split_0, shape1, allowzero=0
                )
                transpose_1 = reshape_4.transpose(0, 2, 1, 3)
                axis = 1  # As per ONNX softmax node
                # Subtract max for numerical stability
                x_max = np.max(transpose_1, axis=axis, keepdims=True)
                x_exp = np.exp(transpose_1 - x_max)
                x_sum = np.sum(x_exp, axis=axis, keepdims=True)
                softmax_output = x_exp / x_sum
                # Weight W from the ONNX initializer (1, 16, 1, 1) with values 0 to 15
                W = np.arange(16, dtype=np.float32).reshape(
                    1, 16, 1, 1
                )  # (1, 16, 1, 1)
                # Apply 1x1 convolution: this is a weighted sum over channels
                conv_output = np.sum(
                    softmax_output * W, axis=1, keepdims=True
                )  # shape: (1, 1, 4, 8400)
                shape2 = np.array([1, 4, num_boxes])
                reshape_5 = self.onnx_reshape_with_allowzero(
                    conv_output, shape2, allowzero=0
                )
                # ONNX Slice — get first 2 channels: [0:2] along axis 1
                slice_output1 = reshape_5[:, 0:2, :]  # Result: (1, 2, 8400)
                # Slice channels 2 to 4 → axis = 1
                slice_output2 = reshape_5[:, 2:4, :]
                # Perform Subtraction
                sub_output = self.const_A - slice_output1  # Equivalent to ONNX Sub
                # Perform the ONNX-style Add
                add_output = self.const_B + slice_output2
                sub1 = add_output - sub_output
                add1 = sub_output + add_output
                div_output = add1 / 2.0
                concat_5 = self.onnx_concat([div_output, sub1], axis=1)
                # Expand B to (1, 1, 8400) so it can broadcast across axis=1 (4 channels)
                const_C_expanded = self.const_C[:, np.newaxis, :]  # Shape: (1, 1, 8400)
                # Perform ONNX-style element-wise multiplication
                mul_output = concat_5 * const_C_expanded  # Result: (1, 4, 8400)
                sigmoid_output = self.sigmoid(split_1)
                # Reuse `outputs` to hold the decoded (boxes + class scores) tensor.
                outputs = self.onnx_concat([mul_output, sigmoid_output], axis=1)
            final_detections = post_process_yolo(
                outputs, self.memx_model_width, self.memx_model_height
            )
            self.output_queue.put(final_detections)
        elif self.memx_model_type == ModelTypeEnum.yolonas:
            return self.post_process_yolonas(outputs)
        elif self.memx_model_type == ModelTypeEnum.yolox:
            return self.post_process_yolox(outputs)
        elif self.memx_model_type == ModelTypeEnum.ssd:
            return self.post_process_ssdlite(outputs)
        else:
            raise Exception(
                f"{self.memx_model_type} is currently not supported for memryx. See the docs for more info on supported models."
            )
    def detect_raw(self, tensor_input: np.ndarray):
        """Removed synchronous detect_raw() function so that we only use async"""
        # This detector only supports the asynchronous send_input() /
        # receive_output() flow; the synchronous entry point is a stub that
        # returns 0 so callers probing it get a no-op instead of an exception.
        return 0

View File

@ -12,6 +12,7 @@ from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
from frigate.util.model import post_process_yolo from frigate.util.model import post_process_yolo
from frigate.util.rknn_converter import auto_convert_model
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -94,7 +95,31 @@ class Rknn(DetectionApi):
# user provided models should be a path and contain a "/" # user provided models should be a path and contain a "/"
if "/" in model_path: if "/" in model_path:
model_props["preset"] = False model_props["preset"] = False
model_props["path"] = model_path
# Check if this is an ONNX model or model without extension that needs conversion
if model_path.endswith(".onnx") or not os.path.splitext(model_path)[1]:
# Try to auto-convert to RKNN format
logger.info(
f"Attempting to auto-convert {model_path} to RKNN format..."
)
# Determine model type from config
model_type = self.detector_config.model.model_type
# Auto-convert the model
converted_path = auto_convert_model(model_path, model_type.value)
if converted_path:
model_props["path"] = converted_path
logger.info(f"Successfully converted model to: {converted_path}")
else:
# Fall back to original path if conversion fails
logger.warning(
f"Failed to convert {model_path} to RKNN format, using original path"
)
model_props["path"] = model_path
else:
model_props["path"] = model_path
else: else:
model_props["preset"] = True model_props["preset"] = True

View File

@ -0,0 +1,151 @@
import json
import logging
from typing import Any, List
import numpy as np
import zmq
from pydantic import Field
from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
logger = logging.getLogger(__name__)
DETECTOR_KEY = "zmq"
class ZmqDetectorConfig(BaseDetectorConfig):
    """Configuration for the ZMQ REQ/REP detector plugin."""

    type: Literal[DETECTOR_KEY]
    # IPC endpoint the REQ socket connects to; the detection service must bind here.
    endpoint: str = Field(
        default="ipc:///tmp/cache/zmq_detector", title="ZMQ IPC endpoint"
    )
    # Send/receive timeout; on expiry the detector returns zeros and resets its socket.
    request_timeout_ms: int = Field(
        default=200, title="ZMQ request timeout in milliseconds"
    )
    # How long close() may wait to flush pending messages (0 = drop immediately).
    linger_ms: int = Field(default=0, title="ZMQ socket linger in milliseconds")
class ZmqIpcDetector(DetectionApi):
    """
    ZMQ-based detector plugin using a REQ/REP socket over an IPC endpoint.

    Protocol:
    - Request is sent as a multipart message:
        [ header_json_bytes, tensor_bytes ]
      where header is a JSON object containing:
        {
            "shape": List[int],
            "dtype": str,  # numpy dtype string, e.g. "uint8", "float32"
        }
      tensor_bytes are the raw bytes of the numpy array in C-order.
    - Response is expected to be either:
        a) Multipart [ header_json_bytes, tensor_bytes ] with header specifying
           shape [20,6] and dtype "float32"; or
        b) Single frame tensor_bytes of length 20*6*4 bytes (float32).

    On any error or timeout, this detector returns a zero array of shape (20, 6).
    """

    type_key = DETECTOR_KEY

    def __init__(self, detector_config: ZmqDetectorConfig):
        super().__init__(detector_config)
        self._context = zmq.Context()
        self._endpoint = detector_config.endpoint
        self._request_timeout_ms = detector_config.request_timeout_ms
        self._linger_ms = detector_config.linger_ms
        self._socket = None
        self._create_socket()

        # Preallocate zero result for error paths
        self._zero_result = np.zeros((20, 6), np.float32)

    def _create_socket(self) -> None:
        """(Re)create the REQ socket; a REQ socket that missed a reply is unusable."""
        if self._socket is not None:
            try:
                self._socket.close(linger=self._linger_ms)
            except Exception:
                pass

        self._socket = self._context.socket(zmq.REQ)
        # Apply timeouts and linger so calls don't block indefinitely
        self._socket.setsockopt(zmq.RCVTIMEO, self._request_timeout_ms)
        self._socket.setsockopt(zmq.SNDTIMEO, self._request_timeout_ms)
        self._socket.setsockopt(zmq.LINGER, self._linger_ms)
        logger.debug(f"ZMQ detector connecting to {self._endpoint}")
        self._socket.connect(self._endpoint)

    def _build_header(self, tensor_input: np.ndarray) -> bytes:
        """Serialize the tensor's shape and dtype as the JSON request header."""
        header: dict[str, Any] = {
            "shape": list(tensor_input.shape),
            "dtype": str(tensor_input.dtype.name),
        }
        return json.dumps(header).encode("utf-8")

    def _decode_response(self, frames: List[bytes]) -> np.ndarray:
        """Decode a reply (headered multipart or raw single frame) into an array.

        Returns the zero result on any malformed or empty reply.
        """
        try:
            if len(frames) == 1:
                # Single-frame raw float32 (20x6)
                buf = frames[0]
                if len(buf) != 20 * 6 * 4:
                    logger.warning(
                        f"ZMQ detector received unexpected payload size: {len(buf)}"
                    )
                    return self._zero_result
                return np.frombuffer(buf, dtype=np.float32).reshape((20, 6))

            if len(frames) >= 2:
                header = json.loads(frames[0].decode("utf-8"))
                shape = tuple(header.get("shape", []))
                dtype = np.dtype(header.get("dtype", "float32"))
                return np.frombuffer(frames[1], dtype=dtype).reshape(shape)

            logger.warning("ZMQ detector received empty reply")
            return self._zero_result
        except Exception as exc:  # noqa: BLE001
            logger.error(f"ZMQ detector failed to decode response: {exc}")
            return self._zero_result

    def detect_raw(self, tensor_input: np.ndarray) -> np.ndarray:
        """Send one frame for detection and return a (20, 6) float32 array.

        Falls back to the zero result on timeout, socket error, or a reply
        whose shape does not match the detection contract, resetting the REQ
        socket where required.
        """
        try:
            header_bytes = self._build_header(tensor_input)
            payload_bytes = memoryview(tensor_input.tobytes(order="C"))

            # Send request
            self._socket.send_multipart([header_bytes, payload_bytes])

            # Receive reply
            reply_frames = self._socket.recv_multipart()
            detections = self._decode_response(reply_frames)

            # Enforce the contract: a headered reply may carry an arbitrary
            # shape, so validate before handing results to the caller.
            if detections.shape != (20, 6):
                logger.warning(
                    f"ZMQ detector received unexpected detection shape: {detections.shape}"
                )
                return self._zero_result
            if detections.dtype != np.float32:
                detections = detections.astype(np.float32)
            return detections
        except zmq.Again:
            # Timeout
            logger.debug("ZMQ detector request timed out; resetting socket")
            try:
                self._create_socket()
            except Exception:
                pass
            return self._zero_result
        except zmq.ZMQError as exc:
            logger.error(f"ZMQ detector ZMQError: {exc}; resetting socket")
            try:
                self._create_socket()
            except Exception:
                pass
            return self._zero_result
        except Exception as exc:  # noqa: BLE001
            logger.error(f"ZMQ detector unexpected error: {exc}")
            return self._zero_result

    def __del__(self) -> None:  # pragma: no cover - best-effort cleanup
        # Use our own _linger_ms rather than self.detector_config: this class
        # never stores detector_config, and attribute lookups through the base
        # class can fail during interpreter shutdown.
        try:
            if getattr(self, "_socket", None) is not None:
                self._socket.close(linger=self._linger_ms)
        except Exception:
            pass

View File

@ -112,9 +112,8 @@ class Embeddings:
self.embedding = JinaV2Embedding( self.embedding = JinaV2Embedding(
model_size=self.config.semantic_search.model_size, model_size=self.config.semantic_search.model_size,
requestor=self.requestor, requestor=self.requestor,
device="GPU" device=config.semantic_search.device
if self.config.semantic_search.model_size == "large" or ("GPU" if config.semantic_search.model_size == "large" else "CPU"),
else "CPU",
) )
self.text_embedding = lambda input_data: self.embedding( self.text_embedding = lambda input_data: self.embedding(
input_data, embedding_type="text" input_data, embedding_type="text"
@ -131,7 +130,8 @@ class Embeddings:
self.vision_embedding = JinaV1ImageEmbedding( self.vision_embedding = JinaV1ImageEmbedding(
model_size=config.semantic_search.model_size, model_size=config.semantic_search.model_size,
requestor=self.requestor, requestor=self.requestor,
device="GPU" if config.semantic_search.model_size == "large" else "CPU", device=config.semantic_search.device
or ("GPU" if config.semantic_search.model_size == "large" else "CPU"),
) )
def update_stats(self) -> None: def update_stats(self) -> None:

View File

@ -9,6 +9,7 @@ from frigate.const import MODEL_CACHE_DIR
from frigate.log import redirect_output_to_logger from frigate.log import redirect_output_to_logger
from frigate.util.downloader import ModelDownloader from frigate.util.downloader import ModelDownloader
from ...config import FaceRecognitionConfig
from .base_embedding import BaseEmbedding from .base_embedding import BaseEmbedding
from .runner import ONNXModelRunner from .runner import ONNXModelRunner
@ -111,7 +112,7 @@ class FaceNetEmbedding(BaseEmbedding):
class ArcfaceEmbedding(BaseEmbedding): class ArcfaceEmbedding(BaseEmbedding):
def __init__(self): def __init__(self, config: FaceRecognitionConfig):
super().__init__( super().__init__(
model_name="facedet", model_name="facedet",
model_file="arcface.onnx", model_file="arcface.onnx",
@ -119,6 +120,7 @@ class ArcfaceEmbedding(BaseEmbedding):
"arcface.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/arcface.onnx", "arcface.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/arcface.onnx",
}, },
) )
self.config = config
self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name) self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
self.tokenizer = None self.tokenizer = None
self.feature_extractor = None self.feature_extractor = None
@ -148,7 +150,7 @@ class ArcfaceEmbedding(BaseEmbedding):
self.runner = ONNXModelRunner( self.runner = ONNXModelRunner(
os.path.join(self.download_path, self.model_file), os.path.join(self.download_path, self.model_file),
"GPU", device=self.config.device or "GPU",
) )
def _preprocess_inputs(self, raw_inputs): def _preprocess_inputs(self, raw_inputs):

View File

@ -128,7 +128,6 @@ class JinaV1TextEmbedding(BaseEmbedding):
self.runner = ONNXModelRunner( self.runner = ONNXModelRunner(
os.path.join(self.download_path, self.model_file), os.path.join(self.download_path, self.model_file),
self.device, self.device,
self.model_size,
) )
def _preprocess_inputs(self, raw_inputs): def _preprocess_inputs(self, raw_inputs):
@ -207,7 +206,6 @@ class JinaV1ImageEmbedding(BaseEmbedding):
self.runner = ONNXModelRunner( self.runner = ONNXModelRunner(
os.path.join(self.download_path, self.model_file), os.path.join(self.download_path, self.model_file),
self.device, self.device,
self.model_size,
) )
def _preprocess_inputs(self, raw_inputs): def _preprocess_inputs(self, raw_inputs):

View File

@ -128,7 +128,6 @@ class JinaV2Embedding(BaseEmbedding):
self.runner = ONNXModelRunner( self.runner = ONNXModelRunner(
os.path.join(self.download_path, self.model_file), os.path.join(self.download_path, self.model_file),
self.device, self.device,
self.model_size,
) )
def _preprocess_image(self, image_data: bytes | Image.Image) -> np.ndarray: def _preprocess_image(self, image_data: bytes | Image.Image) -> np.ndarray:

View File

@ -4,10 +4,12 @@ import logging
import os.path import os.path
from typing import Any from typing import Any
import numpy as np
import onnxruntime as ort import onnxruntime as ort
from frigate.const import MODEL_CACHE_DIR from frigate.const import MODEL_CACHE_DIR
from frigate.util.model import get_ort_providers from frigate.util.model import get_ort_providers
from frigate.util.rknn_converter import auto_convert_model, is_rknn_compatible
try: try:
import openvino as ov import openvino as ov
@ -25,7 +27,33 @@ class ONNXModelRunner:
self.model_path = model_path self.model_path = model_path
self.ort: ort.InferenceSession = None self.ort: ort.InferenceSession = None
self.ov: ov.Core = None self.ov: ov.Core = None
providers, options = get_ort_providers(device == "CPU", device, requires_fp16) self.rknn = None
self.type = "ort"
try:
if device != "CPU" and is_rknn_compatible(model_path):
# Try to auto-convert to RKNN format
rknn_path = auto_convert_model(model_path)
if rknn_path:
try:
self.rknn = RKNNModelRunner(rknn_path, device)
self.type = "rknn"
logger.info(f"Using RKNN model: {rknn_path}")
return
except Exception as e:
logger.debug(
f"Failed to load RKNN model, falling back to ONNX: {e}"
)
self.rknn = None
except ImportError:
pass
# Fall back to standard ONNX providers
providers, options = get_ort_providers(
device == "CPU",
device,
requires_fp16,
)
self.interpreter = None self.interpreter = None
if "OpenVINOExecutionProvider" in providers: if "OpenVINOExecutionProvider" in providers:
@ -55,7 +83,9 @@ class ONNXModelRunner:
) )
def get_input_names(self) -> list[str]: def get_input_names(self) -> list[str]:
if self.type == "ov": if self.type == "rknn":
return self.rknn.get_input_names()
elif self.type == "ov":
input_names = [] input_names = []
for input in self.interpreter.inputs: for input in self.interpreter.inputs:
@ -67,7 +97,9 @@ class ONNXModelRunner:
def get_input_width(self): def get_input_width(self):
"""Get the input width of the model regardless of backend.""" """Get the input width of the model regardless of backend."""
if self.type == "ort": if self.type == "rknn":
return self.rknn.get_input_width()
elif self.type == "ort":
return self.ort.get_inputs()[0].shape[3] return self.ort.get_inputs()[0].shape[3]
elif self.type == "ov": elif self.type == "ov":
input_info = self.interpreter.inputs input_info = self.interpreter.inputs
@ -90,8 +122,10 @@ class ONNXModelRunner:
return -1 return -1
return -1 return -1
def run(self, input: dict[str, Any]) -> Any: def run(self, input: dict[str, Any]) -> Any | None:
if self.type == "ov": if self.type == "rknn":
return self.rknn.run(input)
elif self.type == "ov":
infer_request = self.interpreter.create_infer_request() infer_request = self.interpreter.create_infer_request()
try: try:
@ -107,3 +141,103 @@ class ONNXModelRunner:
return outputs return outputs
elif self.type == "ort": elif self.type == "ort":
return self.ort.run(None, input) return self.ort.run(None, input)
class RKNNModelRunner:
    """Run RKNN models for embeddings.

    Thin wrapper around rknnlite's RKNNLite runtime that mirrors the
    ONNXModelRunner interface (get_input_names / get_input_width / run).
    Input names and sizes are inferred from the model filename, since RKNN
    models do not expose ONNX-style input metadata here.
    """

    def __init__(self, model_path: str, device: str = "AUTO", model_type: str = None):
        # device is stored for interface parity; this loader does not use it.
        self.model_path = model_path
        self.device = device
        self.model_type = model_type
        self.rknn = None
        self._load_model()

    def _load_model(self):
        """Load the RKNN model and initialize the runtime.

        Raises:
            ImportError: if rknnlite is not installed.
            RuntimeError: if loading the model or initializing the runtime fails.
        """
        try:
            # Imported lazily so this module can load on hosts without RKNN hardware.
            from rknnlite.api import RKNNLite

            self.rknn = RKNNLite(verbose=False)

            # rknnlite reports failure via non-zero return codes, not exceptions.
            if self.rknn.load_rknn(self.model_path) != 0:
                logger.error(f"Failed to load RKNN model: {self.model_path}")
                raise RuntimeError("Failed to load RKNN model")

            if self.rknn.init_runtime() != 0:
                logger.error("Failed to initialize RKNN runtime")
                raise RuntimeError("Failed to initialize RKNN runtime")

            logger.info(f"Successfully loaded RKNN model: {self.model_path}")
        except ImportError:
            logger.error("RKNN Lite not available")
            raise ImportError("RKNN Lite not available")
        except Exception as e:
            logger.error(f"Error loading RKNN model: {e}")
            raise

    def get_input_names(self) -> list[str]:
        """Get input names for the model.

        Inferred from the model filename ("vision" -> CLIP-style pixel input,
        "arcface" -> ArcFace's "data" input) with a generic fallback.
        """
        # For CLIP models, we need to determine the model type from the path
        model_name = os.path.basename(self.model_path).lower()

        if "vision" in model_name:
            return ["pixel_values"]
        elif "arcface" in model_name:
            return ["data"]
        else:
            # Default fallback - try to infer from model type
            if self.model_type and "jina-clip" in self.model_type:
                if "vision" in self.model_type:
                    return ["pixel_values"]

            # Generic fallback
            return ["input"]

    def get_input_width(self) -> int:
        """Get the input width of the model.

        Returns a filename-derived constant, or -1 when unknown.
        """
        # For CLIP vision models, this is typically 224
        model_name = os.path.basename(self.model_path).lower()
        if "vision" in model_name:
            return 224  # CLIP V1 uses 224x224
        elif "arcface" in model_name:
            return 112
        return -1

    def run(self, inputs: dict[str, Any]) -> Any:
        """Run inference with the RKNN model.

        Args:
            inputs: mapping of input name -> numpy array, keyed by the names
                reported by get_input_names(). Inputs are passed positionally
                to rknnlite in that order.

        Raises:
            RuntimeError: if the model has not been loaded.
        """
        if not self.rknn:
            raise RuntimeError("RKNN model not loaded")

        try:
            input_names = self.get_input_names()
            rknn_inputs = []

            for name in input_names:
                if name in inputs:
                    if name == "pixel_values":
                        # RKNN expects NHWC format, but ONNX typically provides NCHW
                        # Transpose from [batch, channels, height, width] to [batch, height, width, channels]
                        pixel_data = inputs[name]
                        if len(pixel_data.shape) == 4 and pixel_data.shape[1] == 3:
                            # Transpose from NCHW to NHWC
                            pixel_data = np.transpose(pixel_data, (0, 2, 3, 1))
                        rknn_inputs.append(pixel_data)
                    else:
                        rknn_inputs.append(inputs[name])

            outputs = self.rknn.inference(inputs=rknn_inputs)
            return outputs
        except Exception as e:
            logger.error(f"Error during RKNN inference: {e}")
            raise

    def __del__(self):
        """Cleanup when the runner is destroyed."""
        # Best-effort release of the RKNN runtime; never raise from a finalizer.
        if self.rknn:
            try:
                self.rknn.release()
            except Exception:
                pass

View File

@ -2,21 +2,15 @@
import datetime import datetime
import logging import logging
import random
import string
import threading import threading
import time import time
from multiprocessing.managers import DictProxy from multiprocessing.managers import DictProxy
from multiprocessing.synchronize import Event as MpEvent from multiprocessing.synchronize import Event as MpEvent
from typing import Any, Tuple from typing import Tuple
import numpy as np import numpy as np
from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum
from frigate.comms.event_metadata_updater import (
EventMetadataPublisher,
EventMetadataTypeEnum,
)
from frigate.comms.inter_process import InterProcessRequestor from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import CameraConfig, CameraInput, FfmpegConfig, FrigateConfig from frigate.config import CameraConfig, CameraInput, FfmpegConfig, FrigateConfig
from frigate.config.camera.updater import ( from frigate.config.camera.updater import (
@ -29,7 +23,9 @@ from frigate.const import (
AUDIO_MAX_BIT_RANGE, AUDIO_MAX_BIT_RANGE,
AUDIO_MIN_CONFIDENCE, AUDIO_MIN_CONFIDENCE,
AUDIO_SAMPLE_RATE, AUDIO_SAMPLE_RATE,
EXPIRE_AUDIO_ACTIVITY,
PROCESS_PRIORITY_HIGH, PROCESS_PRIORITY_HIGH,
UPDATE_AUDIO_ACTIVITY,
) )
from frigate.data_processing.common.audio_transcription.model import ( from frigate.data_processing.common.audio_transcription.model import (
AudioTranscriptionModelRunner, AudioTranscriptionModelRunner,
@ -159,7 +155,6 @@ class AudioEventMaintainer(threading.Thread):
self.config = config self.config = config
self.camera_config = camera self.camera_config = camera
self.camera_metrics = camera_metrics self.camera_metrics = camera_metrics
self.detections: dict[dict[str, Any]] = {}
self.stop_event = stop_event self.stop_event = stop_event
self.detector = AudioTfl(stop_event, self.camera_config.audio.num_threads) self.detector = AudioTfl(stop_event, self.camera_config.audio.num_threads)
self.shape = (int(round(AUDIO_DURATION * AUDIO_SAMPLE_RATE)),) self.shape = (int(round(AUDIO_DURATION * AUDIO_SAMPLE_RATE)),)
@ -184,7 +179,6 @@ class AudioEventMaintainer(threading.Thread):
], ],
) )
self.detection_publisher = DetectionPublisher(DetectionTypeEnum.audio.value) self.detection_publisher = DetectionPublisher(DetectionTypeEnum.audio.value)
self.event_metadata_publisher = EventMetadataPublisher()
if self.camera_config.audio_transcription.enabled_in_config: if self.camera_config.audio_transcription.enabled_in_config:
# init the transcription processor for this camera # init the transcription processor for this camera
@ -216,12 +210,13 @@ class AudioEventMaintainer(threading.Thread):
self.camera_metrics[self.camera_config.name].audio_rms.value = rms self.camera_metrics[self.camera_config.name].audio_rms.value = rms
self.camera_metrics[self.camera_config.name].audio_dBFS.value = dBFS self.camera_metrics[self.camera_config.name].audio_dBFS.value = dBFS
audio_detections: list[Tuple[str, float]] = []
# only run audio detection when volume is above min_volume # only run audio detection when volume is above min_volume
if rms >= self.camera_config.audio.min_volume: if rms >= self.camera_config.audio.min_volume:
# create waveform relative to max range and look for detections # create waveform relative to max range and look for detections
waveform = (audio / AUDIO_MAX_BIT_RANGE).astype(np.float32) waveform = (audio / AUDIO_MAX_BIT_RANGE).astype(np.float32)
model_detections = self.detector.detect(waveform) model_detections = self.detector.detect(waveform)
audio_detections = []
for label, score, _ in model_detections: for label, score, _ in model_detections:
self.logger.debug( self.logger.debug(
@ -234,8 +229,7 @@ class AudioEventMaintainer(threading.Thread):
if score > dict( if score > dict(
(self.camera_config.audio.filters or {}).get(label, {}) (self.camera_config.audio.filters or {}).get(label, {})
).get("threshold", 0.8): ).get("threshold", 0.8):
self.handle_detection(label, score) audio_detections.append((label, score))
audio_detections.append(label)
# send audio detection data # send audio detection data
self.detection_publisher.publish( self.detection_publisher.publish(
@ -243,10 +237,16 @@ class AudioEventMaintainer(threading.Thread):
self.camera_config.name, self.camera_config.name,
datetime.datetime.now().timestamp(), datetime.datetime.now().timestamp(),
dBFS, dBFS,
audio_detections, [label for label, _ in audio_detections],
) )
) )
# send audio activity update
self.requestor.send_data(
UPDATE_AUDIO_ACTIVITY,
{self.camera_config.name: {"detections": audio_detections}},
)
# run audio transcription # run audio transcription
if self.transcription_processor is not None: if self.transcription_processor is not None:
if self.camera_config.audio_transcription.live_enabled: if self.camera_config.audio_transcription.live_enabled:
@ -261,8 +261,6 @@ class AudioEventMaintainer(threading.Thread):
else: else:
self.transcription_processor.check_unload_model() self.transcription_processor.check_unload_model()
self.expire_detections()
def calculate_audio_levels(self, audio_as_float: np.float32) -> Tuple[float, float]: def calculate_audio_levels(self, audio_as_float: np.float32) -> Tuple[float, float]:
# Calculate RMS (Root-Mean-Square) which represents the average signal amplitude # Calculate RMS (Root-Mean-Square) which represents the average signal amplitude
# Note: np.float32 isn't serializable, we must use np.float64 to publish the message # Note: np.float32 isn't serializable, we must use np.float64 to publish the message
@ -279,75 +277,6 @@ class AudioEventMaintainer(threading.Thread):
return float(rms), float(dBFS) return float(rms), float(dBFS)
def handle_detection(self, label: str, score: float) -> None:
if self.detections.get(label):
self.detections[label]["last_detection"] = (
datetime.datetime.now().timestamp()
)
else:
now = datetime.datetime.now().timestamp()
rand_id = "".join(
random.choices(string.ascii_lowercase + string.digits, k=6)
)
event_id = f"{now}-{rand_id}"
self.requestor.send_data(f"{self.camera_config.name}/audio/{label}", "ON")
self.event_metadata_publisher.publish(
(
now,
self.camera_config.name,
label,
event_id,
True,
score,
None,
None,
"audio",
{},
),
EventMetadataTypeEnum.manual_event_create.value,
)
self.detections[label] = {
"id": event_id,
"label": label,
"last_detection": now,
}
def expire_detections(self) -> None:
now = datetime.datetime.now().timestamp()
for detection in self.detections.values():
if not detection:
continue
if (
now - detection.get("last_detection", now)
> self.camera_config.audio.max_not_heard
):
self.requestor.send_data(
f"{self.camera_config.name}/audio/{detection['label']}", "OFF"
)
self.event_metadata_publisher.publish(
(detection["id"], detection["last_detection"]),
EventMetadataTypeEnum.manual_event_end.value,
)
self.detections[detection["label"]] = None
def expire_all_detections(self) -> None:
"""Immediately end all current detections"""
now = datetime.datetime.now().timestamp()
for label, detection in list(self.detections.items()):
if detection:
self.requestor.send_data(
f"{self.camera_config.name}/audio/{label}", "OFF"
)
self.event_metadata_publisher.publish(
(detection["id"], now),
EventMetadataTypeEnum.manual_event_end.value,
)
self.detections[label] = None
def start_or_restart_ffmpeg(self) -> None: def start_or_restart_ffmpeg(self) -> None:
self.audio_listener = start_or_restart_ffmpeg( self.audio_listener = start_or_restart_ffmpeg(
self.ffmpeg_cmd, self.ffmpeg_cmd,
@ -356,6 +285,7 @@ class AudioEventMaintainer(threading.Thread):
self.chunk_size, self.chunk_size,
self.audio_listener, self.audio_listener,
) )
self.requestor.send_data(f"{self.camera_config.name}/status/audio", "online")
def read_audio(self) -> None: def read_audio(self) -> None:
def log_and_restart() -> None: def log_and_restart() -> None:
@ -371,6 +301,9 @@ class AudioEventMaintainer(threading.Thread):
if not chunk: if not chunk:
if self.audio_listener.poll() is not None: if self.audio_listener.poll() is not None:
self.requestor.send_data(
f"{self.camera_config.name}/status/audio", "offline"
)
self.logger.error("ffmpeg process is not running, restarting...") self.logger.error("ffmpeg process is not running, restarting...")
log_and_restart() log_and_restart()
return return
@ -396,10 +329,15 @@ class AudioEventMaintainer(threading.Thread):
) )
self.start_or_restart_ffmpeg() self.start_or_restart_ffmpeg()
else: else:
self.requestor.send_data(
f"{self.camera_config.name}/status/audio", "disabled"
)
self.logger.debug( self.logger.debug(
f"Disabling audio detections for {self.camera_config.name}, ending events" f"Disabling audio detections for {self.camera_config.name}, ending events"
) )
self.expire_all_detections() self.requestor.send_data(
EXPIRE_AUDIO_ACTIVITY, self.camera_config.name
)
stop_ffmpeg(self.audio_listener, self.logger) stop_ffmpeg(self.audio_listener, self.logger)
self.audio_listener = None self.audio_listener = None
self.was_enabled = enabled self.was_enabled = enabled

View File

@ -7,6 +7,7 @@ from typing import Any
from frigate.const import ( from frigate.const import (
FFMPEG_HVC1_ARGS, FFMPEG_HVC1_ARGS,
FFMPEG_HWACCEL_AMF,
FFMPEG_HWACCEL_NVIDIA, FFMPEG_HWACCEL_NVIDIA,
FFMPEG_HWACCEL_RKMPP, FFMPEG_HWACCEL_RKMPP,
FFMPEG_HWACCEL_VAAPI, FFMPEG_HWACCEL_VAAPI,
@ -74,6 +75,7 @@ PRESETS_HW_ACCEL_DECODE = {
f"{FFMPEG_HWACCEL_RKMPP}-no-dump_extra": "-hwaccel rkmpp -hwaccel_output_format drm_prime", f"{FFMPEG_HWACCEL_RKMPP}-no-dump_extra": "-hwaccel rkmpp -hwaccel_output_format drm_prime",
# experimental presets # experimental presets
FFMPEG_HWACCEL_VULKAN: "-hwaccel vulkan -init_hw_device vulkan=gpu:0 -filter_hw_device gpu -hwaccel_output_format vulkan", FFMPEG_HWACCEL_VULKAN: "-hwaccel vulkan -init_hw_device vulkan=gpu:0 -filter_hw_device gpu -hwaccel_output_format vulkan",
FFMPEG_HWACCEL_AMF: "-hwaccel amf -init_hw_device amf=gpu:0 -filter_hw_device gpu -hwaccel_output_format amf",
} }
PRESETS_HW_ACCEL_DECODE["preset-nvidia-h264"] = PRESETS_HW_ACCEL_DECODE[ PRESETS_HW_ACCEL_DECODE["preset-nvidia-h264"] = PRESETS_HW_ACCEL_DECODE[
FFMPEG_HWACCEL_NVIDIA FFMPEG_HWACCEL_NVIDIA
@ -108,6 +110,7 @@ PRESETS_HW_ACCEL_SCALE = {
"default": "-r {0} -vf fps={0},scale={1}:{2}", "default": "-r {0} -vf fps={0},scale={1}:{2}",
# experimental presets # experimental presets
FFMPEG_HWACCEL_VULKAN: "-r {0} -vf fps={0},hwupload,scale_vulkan=w={1}:h={2},hwdownload", FFMPEG_HWACCEL_VULKAN: "-r {0} -vf fps={0},hwupload,scale_vulkan=w={1}:h={2},hwdownload",
FFMPEG_HWACCEL_AMF: "-r {0} -vf fps={0},hwupload,scale_amf=w={1}:h={2},hwdownload",
} }
PRESETS_HW_ACCEL_SCALE["preset-nvidia-h264"] = PRESETS_HW_ACCEL_SCALE[ PRESETS_HW_ACCEL_SCALE["preset-nvidia-h264"] = PRESETS_HW_ACCEL_SCALE[
FFMPEG_HWACCEL_NVIDIA FFMPEG_HWACCEL_NVIDIA
@ -133,6 +136,7 @@ PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = {
"preset-jetson-h265": "{0} -hide_banner {1} -c:v h264_nvmpi -profile main {2}", "preset-jetson-h265": "{0} -hide_banner {1} -c:v h264_nvmpi -profile main {2}",
FFMPEG_HWACCEL_RKMPP: "{0} -hide_banner {1} -c:v h264_rkmpp -profile:v high {2}", FFMPEG_HWACCEL_RKMPP: "{0} -hide_banner {1} -c:v h264_rkmpp -profile:v high {2}",
"preset-rk-h265": "{0} -hide_banner {1} -c:v hevc_rkmpp -profile:v main {2}", "preset-rk-h265": "{0} -hide_banner {1} -c:v hevc_rkmpp -profile:v main {2}",
FFMPEG_HWACCEL_AMF: "{0} -hide_banner {1} -c:v h264_amf -g 50 -profile:v high {2}",
"default": "{0} -hide_banner {1} -c:v libx264 -g 50 -profile:v high -level:v 4.1 -preset:v superfast -tune:v zerolatency {2}", "default": "{0} -hide_banner {1} -c:v libx264 -g 50 -profile:v high -level:v 4.1 -preset:v superfast -tune:v zerolatency {2}",
} }
PRESETS_HW_ACCEL_ENCODE_BIRDSEYE["preset-nvidia-h264"] = ( PRESETS_HW_ACCEL_ENCODE_BIRDSEYE["preset-nvidia-h264"] = (
@ -161,6 +165,7 @@ PRESETS_HW_ACCEL_ENCODE_TIMELAPSE = {
"preset-jetson-h265": "{0} -hide_banner {1} -c:v hevc_nvmpi -profile main {2}", "preset-jetson-h265": "{0} -hide_banner {1} -c:v hevc_nvmpi -profile main {2}",
FFMPEG_HWACCEL_RKMPP: "{0} -hide_banner {1} -c:v h264_rkmpp -profile:v high {2}", FFMPEG_HWACCEL_RKMPP: "{0} -hide_banner {1} -c:v h264_rkmpp -profile:v high {2}",
"preset-rk-h265": "{0} -hide_banner {1} -c:v hevc_rkmpp -profile:v main {2}", "preset-rk-h265": "{0} -hide_banner {1} -c:v hevc_rkmpp -profile:v main {2}",
FFMPEG_HWACCEL_AMF: "{0} -hide_banner {1} -c:v h264_amf -profile:v high {2}",
"default": "{0} -hide_banner {1} -c:v libx264 -preset:v ultrafast -tune:v zerolatency {2}", "default": "{0} -hide_banner {1} -c:v libx264 -preset:v ultrafast -tune:v zerolatency {2}",
} }
PRESETS_HW_ACCEL_ENCODE_TIMELAPSE["preset-nvidia-h264"] = ( PRESETS_HW_ACCEL_ENCODE_TIMELAPSE["preset-nvidia-h264"] = (

View File

@ -115,7 +115,7 @@ Sequence details:
response = self._send(context_prompt, thumbnails) response = self._send(context_prompt, thumbnails)
if debug_save: if debug_save and response:
with open( with open(
os.path.join( os.path.join(
CLIPS_DIR, "genai-requests", review_data["id"], "response.txt" CLIPS_DIR, "genai-requests", review_data["id"], "response.txt"

View File

@ -53,6 +53,9 @@ ignore_errors = false
[mypy-frigate.stats] [mypy-frigate.stats]
ignore_errors = false ignore_errors = false
[mypy-frigate.track.*]
ignore_errors = false
[mypy-frigate.types] [mypy-frigate.types]
ignore_errors = false ignore_errors = false

View File

@ -1,7 +1,10 @@
import datetime import datetime
import logging import logging
import queue import queue
import threading
import time
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from collections import deque
from multiprocessing import Queue, Value from multiprocessing import Queue, Value
from multiprocessing.synchronize import Event as MpEvent from multiprocessing.synchronize import Event as MpEvent
@ -34,7 +37,7 @@ class ObjectDetector(ABC):
pass pass
class LocalObjectDetector(ObjectDetector): class BaseLocalDetector(ObjectDetector):
def __init__( def __init__(
self, self,
detector_config: BaseDetectorConfig = None, detector_config: BaseDetectorConfig = None,
@ -56,6 +59,18 @@ class LocalObjectDetector(ObjectDetector):
self.detect_api = create_detector(detector_config) self.detect_api = create_detector(detector_config)
def _transform_input(self, tensor_input: np.ndarray) -> np.ndarray:
if self.input_transform:
tensor_input = np.transpose(tensor_input, self.input_transform)
if self.dtype == InputDTypeEnum.float:
tensor_input = tensor_input.astype(np.float32)
tensor_input /= 255
elif self.dtype == InputDTypeEnum.float_denorm:
tensor_input = tensor_input.astype(np.float32)
return tensor_input
def detect(self, tensor_input: np.ndarray, threshold=0.4): def detect(self, tensor_input: np.ndarray, threshold=0.4):
detections = [] detections = []
@ -73,19 +88,22 @@ class LocalObjectDetector(ObjectDetector):
self.fps.update() self.fps.update()
return detections return detections
class LocalObjectDetector(BaseLocalDetector):
def detect_raw(self, tensor_input: np.ndarray): def detect_raw(self, tensor_input: np.ndarray):
if self.input_transform: tensor_input = self._transform_input(tensor_input)
tensor_input = np.transpose(tensor_input, self.input_transform)
if self.dtype == InputDTypeEnum.float:
tensor_input = tensor_input.astype(np.float32)
tensor_input /= 255
elif self.dtype == InputDTypeEnum.float_denorm:
tensor_input = tensor_input.astype(np.float32)
return self.detect_api.detect_raw(tensor_input=tensor_input) return self.detect_api.detect_raw(tensor_input=tensor_input)
class AsyncLocalObjectDetector(BaseLocalDetector):
def async_send_input(self, tensor_input: np.ndarray, connection_id: str):
tensor_input = self._transform_input(tensor_input)
return self.detect_api.send_input(connection_id, tensor_input)
def async_receive_output(self):
return self.detect_api.receive_output()
class DetectorRunner(FrigateProcess): class DetectorRunner(FrigateProcess):
def __init__( def __init__(
self, self,
@ -160,6 +178,110 @@ class DetectorRunner(FrigateProcess):
logger.info("Exited detection process...") logger.info("Exited detection process...")
class AsyncDetectorRunner(FrigateProcess):
def __init__(
self,
name,
detection_queue: Queue,
cameras: list[str],
avg_speed: Value,
start_time: Value,
config: FrigateConfig,
detector_config: BaseDetectorConfig,
stop_event: MpEvent,
) -> None:
super().__init__(stop_event, PROCESS_PRIORITY_HIGH, name=name, daemon=True)
self.detection_queue = detection_queue
self.cameras = cameras
self.avg_speed = avg_speed
self.start_time = start_time
self.config = config
self.detector_config = detector_config
self.outputs: dict = {}
self._frame_manager: SharedMemoryFrameManager | None = None
self._publisher: ObjectDetectorPublisher | None = None
self._detector: AsyncLocalObjectDetector | None = None
self.send_times = deque()
def create_output_shm(self, name: str):
out_shm = UntrackedSharedMemory(name=f"out-{name}", create=False)
out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf)
self.outputs[name] = {"shm": out_shm, "np": out_np}
def _detect_worker(self) -> None:
logger.info("Starting Detect Worker Thread")
while not self.stop_event.is_set():
try:
connection_id = self.detection_queue.get(timeout=1)
except queue.Empty:
continue
input_frame = self._frame_manager.get(
connection_id,
(
1,
self.detector_config.model.height,
self.detector_config.model.width,
3,
),
)
if input_frame is None:
logger.warning(f"Failed to get frame {connection_id} from SHM")
continue
# mark start time and send to accelerator
self.send_times.append(time.perf_counter())
self._detector.async_send_input(input_frame, connection_id)
def _result_worker(self) -> None:
logger.info("Starting Result Worker Thread")
while not self.stop_event.is_set():
connection_id, detections = self._detector.async_receive_output()
if not self.send_times:
# guard; shouldn't happen if send/recv are balanced
continue
ts = self.send_times.popleft()
duration = time.perf_counter() - ts
# release input buffer
self._frame_manager.close(connection_id)
if connection_id not in self.outputs:
self.create_output_shm(connection_id)
# write results and publish
if detections is not None:
self.outputs[connection_id]["np"][:] = detections[:]
self._publisher.publish(connection_id)
# update timers
self.avg_speed.value = (self.avg_speed.value * 9 + duration) / 10
self.start_time.value = 0.0
def run(self) -> None:
self.pre_run_setup(self.config.logger)
self._frame_manager = SharedMemoryFrameManager()
self._publisher = ObjectDetectorPublisher()
self._detector = AsyncLocalObjectDetector(detector_config=self.detector_config)
for name in self.cameras:
self.create_output_shm(name)
t_detect = threading.Thread(target=self._detect_worker, daemon=True)
t_result = threading.Thread(target=self._result_worker, daemon=True)
t_detect.start()
t_result.start()
while not self.stop_event.is_set():
time.sleep(0.5)
self._publisher.stop()
logger.info("Exited async detection process...")
class ObjectDetectProcess: class ObjectDetectProcess:
def __init__( def __init__(
self, self,
@ -198,16 +320,30 @@ class ObjectDetectProcess:
self.detection_start.value = 0.0 self.detection_start.value = 0.0
if (self.detect_process is not None) and self.detect_process.is_alive(): if (self.detect_process is not None) and self.detect_process.is_alive():
self.stop() self.stop()
self.detect_process = DetectorRunner(
f"frigate.detector:{self.name}", # Async path for MemryX
self.detection_queue, if self.detector_config.type == "memryx":
self.cameras, self.detect_process = AsyncDetectorRunner(
self.avg_inference_speed, f"frigate.detector:{self.name}",
self.detection_start, self.detection_queue,
self.config, self.cameras,
self.detector_config, self.avg_inference_speed,
self.stop_event, self.detection_start,
) self.config,
self.detector_config,
self.stop_event,
)
else:
self.detect_process = DetectorRunner(
f"frigate.detector:{self.name}",
self.detection_queue,
self.cameras,
self.avg_inference_speed,
self.detection_start,
self.config,
self.detector_config,
self.stop_event,
)
self.detect_process.start() self.detect_process.start()

View File

@ -60,10 +60,10 @@ class PtzMotionEstimator:
def motion_estimator( def motion_estimator(
self, self,
detections: list[dict[str, Any]], detections: list[tuple[Any, Any, Any, Any, Any, Any]],
frame_name: str, frame_name: str,
frame_time: float, frame_time: float,
camera: str, camera: str | None,
): ):
# If we've just started up or returned to our preset, reset motion estimator for new tracking session # If we've just started up or returned to our preset, reset motion estimator for new tracking session
if self.ptz_metrics.reset.is_set(): if self.ptz_metrics.reset.is_set():

View File

@ -308,7 +308,12 @@ class RecordingCleanup(threading.Thread):
now - datetime.timedelta(days=config.record.continuous.days) now - datetime.timedelta(days=config.record.continuous.days)
).timestamp() ).timestamp()
motion_expire_date = ( motion_expire_date = (
now - datetime.timedelta(days=config.record.motion.days) now
- datetime.timedelta(
days=max(
config.record.motion.days, config.record.continuous.days
) # can't keep motion for less than continuous
)
).timestamp() ).timestamp()
# Get all the reviews to check against # Get all the reviews to check against

View File

@ -8,7 +8,6 @@ from json import JSONDecodeError
from multiprocessing.managers import DictProxy from multiprocessing.managers import DictProxy
from typing import Any, Optional from typing import Any, Optional
import psutil
import requests import requests
from requests.exceptions import RequestException from requests.exceptions import RequestException
@ -18,9 +17,11 @@ from frigate.data_processing.types import DataProcessorMetrics
from frigate.object_detection.base import ObjectDetectProcess from frigate.object_detection.base import ObjectDetectProcess
from frigate.types import StatsTrackingTypes from frigate.types import StatsTrackingTypes
from frigate.util.services import ( from frigate.util.services import (
calculate_shm_requirements,
get_amd_gpu_stats, get_amd_gpu_stats,
get_bandwidth_stats, get_bandwidth_stats,
get_cpu_stats, get_cpu_stats,
get_fs_type,
get_intel_gpu_stats, get_intel_gpu_stats,
get_jetson_stats, get_jetson_stats,
get_nvidia_gpu_stats, get_nvidia_gpu_stats,
@ -70,16 +71,6 @@ def stats_init(
return stats_tracking return stats_tracking
def get_fs_type(path: str) -> str:
bestMatch = ""
fsType = ""
for part in psutil.disk_partitions(all=True):
if path.startswith(part.mountpoint) and len(bestMatch) < len(part.mountpoint):
fsType = part.fstype
bestMatch = part.mountpoint
return fsType
def read_temperature(path: str) -> Optional[float]: def read_temperature(path: str) -> Optional[float]:
if os.path.isfile(path): if os.path.isfile(path):
with open(path) as f: with open(path) as f:
@ -389,7 +380,7 @@ def stats_snapshot(
"last_updated": int(time.time()), "last_updated": int(time.time()),
} }
for path in [RECORD_DIR, CLIPS_DIR, CACHE_DIR, "/dev/shm"]: for path in [RECORD_DIR, CLIPS_DIR, CACHE_DIR]:
try: try:
storage_stats = shutil.disk_usage(path) storage_stats = shutil.disk_usage(path)
except (FileNotFoundError, OSError): except (FileNotFoundError, OSError):
@ -403,6 +394,8 @@ def stats_snapshot(
"mount_type": get_fs_type(path), "mount_type": get_fs_type(path),
} }
stats["service"]["storage"]["/dev/shm"] = calculate_shm_requirements(config)
stats["processes"] = {} stats["processes"] = {}
for name, pid in stats_tracking["processes"].items(): for name, pid in stats_tracking["processes"].items():
stats["processes"][name] = { stats["processes"][name] = {

View File

@ -11,6 +11,9 @@ class ObjectTracker(ABC):
@abstractmethod @abstractmethod
def match_and_update( def match_and_update(
self, frame_name: str, frame_time: float, detections: list[dict[str, Any]] self,
frame_name: str,
frame_time: float,
detections: list[tuple[Any, Any, Any, Any, Any, Any]],
) -> None: ) -> None:
pass pass

View File

@ -1,25 +1,26 @@
import random import random
import string import string
from collections import defaultdict from collections import defaultdict
from typing import Any
import numpy as np import numpy as np
from scipy.spatial import distance as dist from scipy.spatial import distance as dist
from frigate.config import DetectConfig from frigate.config import DetectConfig
from frigate.track import ObjectTracker from frigate.track import ObjectTracker
from frigate.util import intersection_over_union from frigate.util.image import intersection_over_union
class CentroidTracker(ObjectTracker): class CentroidTracker(ObjectTracker):
def __init__(self, config: DetectConfig): def __init__(self, config: DetectConfig):
self.tracked_objects = {} self.tracked_objects: dict[str, dict[str, Any]] = {}
self.untracked_object_boxes = [] self.untracked_object_boxes: list[tuple[int, int, int, int]] = []
self.disappeared = {} self.disappeared: dict[str, Any] = {}
self.positions = {} self.positions: dict[str, Any] = {}
self.max_disappeared = config.max_disappeared self.max_disappeared = config.max_disappeared
self.detect_config = config self.detect_config = config
def register(self, index, obj): def register(self, obj: dict[str, Any]) -> None:
rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
id = f"{obj['frame_time']}-{rand_id}" id = f"{obj['frame_time']}-{rand_id}"
obj["id"] = id obj["id"] = id
@ -39,13 +40,13 @@ class CentroidTracker(ObjectTracker):
"ymax": self.detect_config.height, "ymax": self.detect_config.height,
} }
def deregister(self, id): def deregister(self, id: str) -> None:
del self.tracked_objects[id] del self.tracked_objects[id]
del self.disappeared[id] del self.disappeared[id]
# tracks the current position of the object based on the last N bounding boxes # tracks the current position of the object based on the last N bounding boxes
# returns False if the object has moved outside its previous position # returns False if the object has moved outside its previous position
def update_position(self, id, box): def update_position(self, id: str, box: tuple[int, int, int, int]) -> bool:
position = self.positions[id] position = self.positions[id]
position_box = ( position_box = (
position["xmin"], position["xmin"],
@ -88,7 +89,7 @@ class CentroidTracker(ObjectTracker):
return True return True
def is_expired(self, id): def is_expired(self, id: str) -> bool:
obj = self.tracked_objects[id] obj = self.tracked_objects[id]
# get the max frames for this label type or the default # get the max frames for this label type or the default
max_frames = self.detect_config.stationary.max_frames.objects.get( max_frames = self.detect_config.stationary.max_frames.objects.get(
@ -108,7 +109,7 @@ class CentroidTracker(ObjectTracker):
return False return False
def update(self, id, new_obj): def update(self, id: str, new_obj: dict[str, Any]) -> None:
self.disappeared[id] = 0 self.disappeared[id] = 0
# update the motionless count if the object has not moved to a new position # update the motionless count if the object has not moved to a new position
if self.update_position(id, new_obj["box"]): if self.update_position(id, new_obj["box"]):
@ -129,25 +130,30 @@ class CentroidTracker(ObjectTracker):
self.tracked_objects[id].update(new_obj) self.tracked_objects[id].update(new_obj)
def update_frame_times(self, frame_name, frame_time): def update_frame_times(self, frame_name: str, frame_time: float) -> None:
for id in list(self.tracked_objects.keys()): for id in list(self.tracked_objects.keys()):
self.tracked_objects[id]["frame_time"] = frame_time self.tracked_objects[id]["frame_time"] = frame_time
self.tracked_objects[id]["motionless_count"] += 1 self.tracked_objects[id]["motionless_count"] += 1
if self.is_expired(id): if self.is_expired(id):
self.deregister(id) self.deregister(id)
def match_and_update(self, frame_time, detections): def match_and_update(
self,
frame_name: str,
frame_time: float,
detections: list[tuple[Any, Any, Any, Any, Any, Any]],
) -> None:
# group by name # group by name
detection_groups = defaultdict(lambda: []) detection_groups = defaultdict(lambda: [])
for obj in detections: for det in detections:
detection_groups[obj[0]].append( detection_groups[det[0]].append(
{ {
"label": obj[0], "label": det[0],
"score": obj[1], "score": det[1],
"box": obj[2], "box": det[2],
"area": obj[3], "area": det[3],
"ratio": obj[4], "ratio": det[4],
"region": obj[5], "region": det[5],
"frame_time": frame_time, "frame_time": frame_time,
} }
) )
@ -180,7 +186,7 @@ class CentroidTracker(ObjectTracker):
if len(current_objects) == 0: if len(current_objects) == 0:
for index, obj in enumerate(group): for index, obj in enumerate(group):
self.register(index, obj) self.register(obj)
continue continue
new_centroids = np.array([o["centroid"] for o in group]) new_centroids = np.array([o["centroid"] for o in group])
@ -238,4 +244,4 @@ class CentroidTracker(ObjectTracker):
# register each new input centroid as a trackable object # register each new input centroid as a trackable object
else: else:
for col in unusedCols: for col in unusedCols:
self.register(col, group[col]) self.register(group[col])

View File

@ -5,14 +5,10 @@ from typing import Any, Sequence
import cv2 import cv2
import numpy as np import numpy as np
from norfair import ( from norfair.drawing.draw_boxes import draw_boxes
Detection, from norfair.drawing.drawer import Drawable, Drawer
Drawable, from norfair.filter import OptimizedKalmanFilterFactory
OptimizedKalmanFilterFactory, from norfair.tracker import Detection, TrackedObject, Tracker
Tracker,
draw_boxes,
)
from norfair.drawing.drawer import Drawer
from rich import print from rich import print
from rich.console import Console from rich.console import Console
from rich.table import Table from rich.table import Table
@ -43,7 +39,7 @@ MAX_STATIONARY_HISTORY = 10
# - could be variable based on time since last_detection # - could be variable based on time since last_detection
# - include estimated velocity in the distance (car driving by of a parked car) # - include estimated velocity in the distance (car driving by of a parked car)
# - include some visual similarity factor in the distance for occlusions # - include some visual similarity factor in the distance for occlusions
def distance(detection: np.array, estimate: np.array) -> float: def distance(detection: np.ndarray, estimate: np.ndarray) -> float:
# ultimately, this should try and estimate distance in 3-dimensional space # ultimately, this should try and estimate distance in 3-dimensional space
# consider change in location, width, and height # consider change in location, width, and height
@ -73,14 +69,16 @@ def distance(detection: np.array, estimate: np.array) -> float:
change = np.append(distance, np.array([width_ratio, height_ratio])) change = np.append(distance, np.array([width_ratio, height_ratio]))
# calculate euclidean distance of the change vector # calculate euclidean distance of the change vector
return np.linalg.norm(change) return float(np.linalg.norm(change))
def frigate_distance(detection: Detection, tracked_object) -> float: def frigate_distance(detection: Detection, tracked_object: TrackedObject) -> float:
return distance(detection.points, tracked_object.estimate) return distance(detection.points, tracked_object.estimate)
def histogram_distance(matched_not_init_trackers, unmatched_trackers): def histogram_distance(
matched_not_init_trackers: TrackedObject, unmatched_trackers: TrackedObject
) -> float:
snd_embedding = unmatched_trackers.last_detection.embedding snd_embedding = unmatched_trackers.last_detection.embedding
if snd_embedding is None: if snd_embedding is None:
@ -110,17 +108,17 @@ class NorfairTracker(ObjectTracker):
ptz_metrics: PTZMetrics, ptz_metrics: PTZMetrics,
): ):
self.frame_manager = SharedMemoryFrameManager() self.frame_manager = SharedMemoryFrameManager()
self.tracked_objects = {} self.tracked_objects: dict[str, dict[str, Any]] = {}
self.untracked_object_boxes: list[list[int]] = [] self.untracked_object_boxes: list[list[int]] = []
self.disappeared = {} self.disappeared: dict[str, int] = {}
self.positions = {} self.positions: dict[str, dict[str, Any]] = {}
self.stationary_box_history: dict[str, list[list[int, int, int, int]]] = {} self.stationary_box_history: dict[str, list[list[int]]] = {}
self.camera_config = config self.camera_config = config
self.detect_config = config.detect self.detect_config = config.detect
self.ptz_metrics = ptz_metrics self.ptz_metrics = ptz_metrics
self.ptz_motion_estimator = {} self.ptz_motion_estimator: PtzMotionEstimator | None = None
self.camera_name = config.name self.camera_name = config.name
self.track_id_map = {} self.track_id_map: dict[str, str] = {}
# Define tracker configurations for static camera # Define tracker configurations for static camera
self.object_type_configs = { self.object_type_configs = {
@ -169,7 +167,7 @@ class NorfairTracker(ObjectTracker):
"distance_threshold": 3, "distance_threshold": 3,
} }
self.trackers = {} self.trackers: dict[str, dict[str, Tracker]] = {}
# Handle static trackers # Handle static trackers
for obj_type, tracker_config in self.object_type_configs.items(): for obj_type, tracker_config in self.object_type_configs.items():
if obj_type in self.camera_config.objects.track: if obj_type in self.camera_config.objects.track:
@ -195,19 +193,21 @@ class NorfairTracker(ObjectTracker):
self.default_tracker = { self.default_tracker = {
"static": Tracker( "static": Tracker(
distance_function=frigate_distance, distance_function=frigate_distance,
distance_threshold=self.default_tracker_config["distance_threshold"], distance_threshold=self.default_tracker_config[ # type: ignore[arg-type]
"distance_threshold"
],
initialization_delay=self.detect_config.min_initialized, initialization_delay=self.detect_config.min_initialized,
hit_counter_max=self.detect_config.max_disappeared, hit_counter_max=self.detect_config.max_disappeared, # type: ignore[arg-type]
filter_factory=self.default_tracker_config["filter_factory"], filter_factory=self.default_tracker_config["filter_factory"], # type: ignore[arg-type]
), ),
"ptz": Tracker( "ptz": Tracker(
distance_function=frigate_distance, distance_function=frigate_distance,
distance_threshold=self.default_ptz_tracker_config[ distance_threshold=self.default_ptz_tracker_config[
"distance_threshold" "distance_threshold"
], ], # type: ignore[arg-type]
initialization_delay=self.detect_config.min_initialized, initialization_delay=self.detect_config.min_initialized,
hit_counter_max=self.detect_config.max_disappeared, hit_counter_max=self.detect_config.max_disappeared, # type: ignore[arg-type]
filter_factory=self.default_ptz_tracker_config["filter_factory"], filter_factory=self.default_ptz_tracker_config["filter_factory"], # type: ignore[arg-type]
), ),
} }
@ -216,7 +216,7 @@ class NorfairTracker(ObjectTracker):
self.camera_config, self.ptz_metrics self.camera_config, self.ptz_metrics
) )
def _create_tracker(self, obj_type, tracker_config): def _create_tracker(self, obj_type: str, tracker_config: dict[str, Any]) -> Tracker:
"""Helper function to create a tracker with given configuration.""" """Helper function to create a tracker with given configuration."""
tracker_params = { tracker_params = {
"distance_function": tracker_config["distance_function"], "distance_function": tracker_config["distance_function"],
@ -258,7 +258,7 @@ class NorfairTracker(ObjectTracker):
return self.trackers[object_type][mode] return self.trackers[object_type][mode]
return self.default_tracker[mode] return self.default_tracker[mode]
def register(self, track_id, obj): def register(self, track_id: str, obj: dict[str, Any]) -> None:
rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
id = f"{obj['frame_time']}-{rand_id}" id = f"{obj['frame_time']}-{rand_id}"
self.track_id_map[track_id] = id self.track_id_map[track_id] = id
@ -270,7 +270,7 @@ class NorfairTracker(ObjectTracker):
# Get the correct tracker for this object's label # Get the correct tracker for this object's label
tracker = self.get_tracker(obj["label"]) tracker = self.get_tracker(obj["label"])
obj_match = next( obj_match = next(
(o for o in tracker.tracked_objects if o.global_id == track_id), None (o for o in tracker.tracked_objects if str(o.global_id) == track_id), None
) )
# if we don't have a match, we have a new object # if we don't have a match, we have a new object
obj["score_history"] = ( obj["score_history"] = (
@ -297,7 +297,7 @@ class NorfairTracker(ObjectTracker):
} }
self.stationary_box_history[id] = boxes self.stationary_box_history[id] = boxes
def deregister(self, id, track_id): def deregister(self, id: str, track_id: str) -> None:
obj = self.tracked_objects[id] obj = self.tracked_objects[id]
del self.tracked_objects[id] del self.tracked_objects[id]
@ -314,14 +314,14 @@ class NorfairTracker(ObjectTracker):
tracker.tracked_objects = [ tracker.tracked_objects = [
o o
for o in tracker.tracked_objects for o in tracker.tracked_objects
if o.global_id != track_id and o.hit_counter < 0 if str(o.global_id) != track_id and o.hit_counter < 0
] ]
del self.track_id_map[track_id] del self.track_id_map[track_id]
# tracks the current position of the object based on the last N bounding boxes # tracks the current position of the object based on the last N bounding boxes
# returns False if the object has moved outside its previous position # returns False if the object has moved outside its previous position
def update_position(self, id: str, box: list[int, int, int, int], stationary: bool): def update_position(self, id: str, box: list[int], stationary: bool) -> bool:
xmin, ymin, xmax, ymax = box xmin, ymin, xmax, ymax = box
position = self.positions[id] position = self.positions[id]
self.stationary_box_history[id].append(box) self.stationary_box_history[id].append(box)
@ -396,7 +396,7 @@ class NorfairTracker(ObjectTracker):
return True return True
def is_expired(self, id): def is_expired(self, id: str) -> bool:
obj = self.tracked_objects[id] obj = self.tracked_objects[id]
# get the max frames for this label type or the default # get the max frames for this label type or the default
max_frames = self.detect_config.stationary.max_frames.objects.get( max_frames = self.detect_config.stationary.max_frames.objects.get(
@ -416,7 +416,7 @@ class NorfairTracker(ObjectTracker):
return False return False
def update(self, track_id, obj): def update(self, track_id: str, obj: dict[str, Any]) -> None:
id = self.track_id_map[track_id] id = self.track_id_map[track_id]
self.disappeared[id] = 0 self.disappeared[id] = 0
stationary = ( stationary = (
@ -443,7 +443,7 @@ class NorfairTracker(ObjectTracker):
self.tracked_objects[id].update(obj) self.tracked_objects[id].update(obj)
def update_frame_times(self, frame_name: str, frame_time: float): def update_frame_times(self, frame_name: str, frame_time: float) -> None:
# if the object was there in the last frame, assume it's still there # if the object was there in the last frame, assume it's still there
detections = [ detections = [
( (
@ -460,10 +460,13 @@ class NorfairTracker(ObjectTracker):
self.match_and_update(frame_name, frame_time, detections=detections) self.match_and_update(frame_name, frame_time, detections=detections)
def match_and_update( def match_and_update(
self, frame_name: str, frame_time: float, detections: list[dict[str, Any]] self,
): frame_name: str,
frame_time: float,
detections: list[tuple[Any, Any, Any, Any, Any, Any]],
) -> None:
# Group detections by object type # Group detections by object type
detections_by_type = {} detections_by_type: dict[str, list[Detection]] = {}
for obj in detections: for obj in detections:
label = obj[0] label = obj[0]
if label not in detections_by_type: if label not in detections_by_type:
@ -551,28 +554,28 @@ class NorfairTracker(ObjectTracker):
estimate = ( estimate = (
max(0, estimate[0]), max(0, estimate[0]),
max(0, estimate[1]), max(0, estimate[1]),
min(self.detect_config.width - 1, estimate[2]), min(self.detect_config.width - 1, estimate[2]), # type: ignore[operator]
min(self.detect_config.height - 1, estimate[3]), min(self.detect_config.height - 1, estimate[3]), # type: ignore[operator]
) )
obj = { new_obj = {
**t.last_detection.data, **t.last_detection.data,
"estimate": estimate, "estimate": estimate,
"estimate_velocity": t.estimate_velocity, "estimate_velocity": t.estimate_velocity,
} }
active_ids.append(t.global_id) active_ids.append(str(t.global_id))
if t.global_id not in self.track_id_map: if str(t.global_id) not in self.track_id_map:
self.register(t.global_id, obj) self.register(str(t.global_id), new_obj)
# if there wasn't a detection in this frame, increment disappeared # if there wasn't a detection in this frame, increment disappeared
elif t.last_detection.data["frame_time"] != frame_time: elif t.last_detection.data["frame_time"] != frame_time:
id = self.track_id_map[t.global_id] id = self.track_id_map[str(t.global_id)]
self.disappeared[id] += 1 self.disappeared[id] += 1
# sometimes the estimate gets way off # sometimes the estimate gets way off
# only update if the upper left corner is actually upper left # only update if the upper left corner is actually upper left
if estimate[0] < estimate[2] and estimate[1] < estimate[3]: if estimate[0] < estimate[2] and estimate[1] < estimate[3]:
self.tracked_objects[id]["estimate"] = obj["estimate"] self.tracked_objects[id]["estimate"] = new_obj["estimate"]
# else update it # else update it
else: else:
self.update(t.global_id, obj) self.update(str(t.global_id), new_obj)
# clear expired tracks # clear expired tracks
expired_ids = [k for k in self.track_id_map.keys() if k not in active_ids] expired_ids = [k for k in self.track_id_map.keys() if k not in active_ids]
@ -585,7 +588,7 @@ class NorfairTracker(ObjectTracker):
o[2] for o in detections if o[2] not in tracked_object_boxes o[2] for o in detections if o[2] not in tracked_object_boxes
] ]
def print_objects_as_table(self, tracked_objects: Sequence): def print_objects_as_table(self, tracked_objects: Sequence) -> None:
"""Used for helping in debugging""" """Used for helping in debugging"""
print() print()
console = Console() console = Console()
@ -605,13 +608,13 @@ class NorfairTracker(ObjectTracker):
) )
console.print(table) console.print(table)
def debug_draw(self, frame, frame_time): def debug_draw(self, frame: np.ndarray, frame_time: float) -> None:
# Collect all tracked objects from each tracker # Collect all tracked objects from each tracker
all_tracked_objects = [] all_tracked_objects: list[TrackedObject] = []
# print a table to the console with norfair tracked object info # print a table to the console with norfair tracked object info
if False: if False:
if len(self.trackers["license_plate"]["static"].tracked_objects) > 0: if len(self.trackers["license_plate"]["static"].tracked_objects) > 0: # type: ignore[unreachable]
self.print_objects_as_table( self.print_objects_as_table(
self.trackers["license_plate"]["static"].tracked_objects self.trackers["license_plate"]["static"].tracked_objects
) )
@ -638,9 +641,9 @@ class NorfairTracker(ObjectTracker):
# draw the estimated bounding box # draw the estimated bounding box
draw_boxes(frame, all_tracked_objects, color="green", draw_ids=True) draw_boxes(frame, all_tracked_objects, color="green", draw_ids=True)
# draw the detections that were detected in the current frame # draw the detections that were detected in the current frame
draw_boxes(frame, active_detections, color="blue", draw_ids=True) draw_boxes(frame, active_detections, color="blue", draw_ids=True) # type: ignore[arg-type]
# draw the detections that are missing in the current frame # draw the detections that are missing in the current frame
draw_boxes(frame, missing_detections, color="red", draw_ids=True) draw_boxes(frame, missing_detections, color="red", draw_ids=True) # type: ignore[arg-type]
# draw the distance calculation for the last detection # draw the distance calculation for the last detection
# estimate vs detection # estimate vs detection
@ -648,8 +651,8 @@ class NorfairTracker(ObjectTracker):
ld = obj.last_detection ld = obj.last_detection
# bottom right # bottom right
text_anchor = ( text_anchor = (
ld.points[1, 0], ld.points[1, 0], # type: ignore[index]
ld.points[1, 1], ld.points[1, 1], # type: ignore[index]
) )
frame = Drawer.text( frame = Drawer.text(
frame, frame,
@ -662,7 +665,7 @@ class NorfairTracker(ObjectTracker):
if False: if False:
# draw the current formatted time on the frame # draw the current formatted time on the frame
from datetime import datetime from datetime import datetime # type: ignore[unreachable]
formatted_time = datetime.fromtimestamp(frame_time).strftime( formatted_time = datetime.fromtimestamp(frame_time).strftime(
"%m/%d/%Y %I:%M:%S %p" "%m/%d/%Y %I:%M:%S %p"

View File

@ -6,6 +6,7 @@ import queue
import threading import threading
from collections import defaultdict from collections import defaultdict
from enum import Enum from enum import Enum
from multiprocessing import Queue as MpQueue
from multiprocessing.synchronize import Event as MpEvent from multiprocessing.synchronize import Event as MpEvent
from typing import Any from typing import Any
@ -39,6 +40,7 @@ from frigate.const import (
) )
from frigate.events.types import EventStateEnum, EventTypeEnum from frigate.events.types import EventStateEnum, EventTypeEnum
from frigate.models import Event, ReviewSegment, Timeline from frigate.models import Event, ReviewSegment, Timeline
from frigate.ptz.autotrack import PtzAutoTrackerThread
from frigate.track.tracked_object import TrackedObject from frigate.track.tracked_object import TrackedObject
from frigate.util.image import SharedMemoryFrameManager from frigate.util.image import SharedMemoryFrameManager
@ -56,10 +58,10 @@ class TrackedObjectProcessor(threading.Thread):
self, self,
config: FrigateConfig, config: FrigateConfig,
dispatcher: Dispatcher, dispatcher: Dispatcher,
tracked_objects_queue, tracked_objects_queue: MpQueue,
ptz_autotracker_thread, ptz_autotracker_thread: PtzAutoTrackerThread,
stop_event, stop_event: MpEvent,
): ) -> None:
super().__init__(name="detected_frames_processor") super().__init__(name="detected_frames_processor")
self.config = config self.config = config
self.dispatcher = dispatcher self.dispatcher = dispatcher
@ -98,8 +100,12 @@ class TrackedObjectProcessor(threading.Thread):
# } # }
# } # }
# } # }
self.zone_data = defaultdict(lambda: defaultdict(dict)) self.zone_data: dict[str, dict[str, Any]] = defaultdict(
self.active_zone_data = defaultdict(lambda: defaultdict(dict)) lambda: defaultdict(dict)
)
self.active_zone_data: dict[str, dict[str, Any]] = defaultdict(
lambda: defaultdict(dict)
)
for camera in self.config.cameras.keys(): for camera in self.config.cameras.keys():
self.create_camera_state(camera) self.create_camera_state(camera)
@ -107,7 +113,7 @@ class TrackedObjectProcessor(threading.Thread):
def create_camera_state(self, camera: str) -> None: def create_camera_state(self, camera: str) -> None:
"""Creates a new camera state.""" """Creates a new camera state."""
def start(camera: str, obj: TrackedObject, frame_name: str): def start(camera: str, obj: TrackedObject, frame_name: str) -> None:
self.event_sender.publish( self.event_sender.publish(
( (
EventTypeEnum.tracked_object, EventTypeEnum.tracked_object,
@ -118,7 +124,7 @@ class TrackedObjectProcessor(threading.Thread):
) )
) )
def update(camera: str, obj: TrackedObject, frame_name: str): def update(camera: str, obj: TrackedObject, frame_name: str) -> None:
obj.has_snapshot = self.should_save_snapshot(camera, obj) obj.has_snapshot = self.should_save_snapshot(camera, obj)
obj.has_clip = self.should_retain_recording(camera, obj) obj.has_clip = self.should_retain_recording(camera, obj)
after = obj.to_dict() after = obj.to_dict()
@ -139,10 +145,10 @@ class TrackedObjectProcessor(threading.Thread):
) )
) )
def autotrack(camera: str, obj: TrackedObject, frame_name: str): def autotrack(camera: str, obj: TrackedObject, frame_name: str) -> None:
self.ptz_autotracker_thread.ptz_autotracker.autotrack_object(camera, obj) self.ptz_autotracker_thread.ptz_autotracker.autotrack_object(camera, obj)
def end(camera: str, obj: TrackedObject, frame_name: str): def end(camera: str, obj: TrackedObject, frame_name: str) -> None:
# populate has_snapshot # populate has_snapshot
obj.has_snapshot = self.should_save_snapshot(camera, obj) obj.has_snapshot = self.should_save_snapshot(camera, obj)
obj.has_clip = self.should_retain_recording(camera, obj) obj.has_clip = self.should_retain_recording(camera, obj)
@ -211,7 +217,7 @@ class TrackedObjectProcessor(threading.Thread):
return False return False
def camera_activity(camera, activity): def camera_activity(camera: str, activity: dict[str, Any]) -> None:
last_activity = self.camera_activity.get(camera) last_activity = self.camera_activity.get(camera)
if not last_activity or activity != last_activity: if not last_activity or activity != last_activity:
@ -229,7 +235,7 @@ class TrackedObjectProcessor(threading.Thread):
camera_state.on("camera_activity", camera_activity) camera_state.on("camera_activity", camera_activity)
self.camera_states[camera] = camera_state self.camera_states[camera] = camera_state
def should_save_snapshot(self, camera, obj: TrackedObject): def should_save_snapshot(self, camera: str, obj: TrackedObject) -> bool:
if obj.false_positive: if obj.false_positive:
return False return False
@ -252,7 +258,7 @@ class TrackedObjectProcessor(threading.Thread):
return True return True
def should_retain_recording(self, camera: str, obj: TrackedObject): def should_retain_recording(self, camera: str, obj: TrackedObject) -> bool:
if obj.false_positive: if obj.false_positive:
return False return False
@ -272,7 +278,7 @@ class TrackedObjectProcessor(threading.Thread):
return True return True
def should_mqtt_snapshot(self, camera, obj: TrackedObject): def should_mqtt_snapshot(self, camera: str, obj: TrackedObject) -> bool:
# object never changed position # object never changed position
if obj.is_stationary(): if obj.is_stationary():
return False return False
@ -287,7 +293,9 @@ class TrackedObjectProcessor(threading.Thread):
return True return True
def update_mqtt_motion(self, camera, frame_time, motion_boxes): def update_mqtt_motion(
self, camera: str, frame_time: float, motion_boxes: list
) -> None:
# publish if motion is currently being detected # publish if motion is currently being detected
if motion_boxes: if motion_boxes:
# only send ON if motion isn't already active # only send ON if motion isn't already active
@ -313,11 +321,15 @@ class TrackedObjectProcessor(threading.Thread):
# reset the last_motion so redundant `off` commands aren't sent # reset the last_motion so redundant `off` commands aren't sent
self.last_motion_detected[camera] = 0 self.last_motion_detected[camera] = 0
def get_best(self, camera, label): def get_best(self, camera: str, label: str) -> dict[str, Any]:
# TODO: need a lock here # TODO: need a lock here
camera_state = self.camera_states[camera] camera_state = self.camera_states[camera]
if label in camera_state.best_objects: if label in camera_state.best_objects:
best_obj = camera_state.best_objects[label] best_obj = camera_state.best_objects[label]
if not best_obj.thumbnail_data:
return {}
best = best_obj.thumbnail_data.copy() best = best_obj.thumbnail_data.copy()
best["frame"] = camera_state.frame_cache.get( best["frame"] = camera_state.frame_cache.get(
best_obj.thumbnail_data["frame_time"] best_obj.thumbnail_data["frame_time"]
@ -340,7 +352,7 @@ class TrackedObjectProcessor(threading.Thread):
return self.camera_states[camera].get_current_frame(draw_options) return self.camera_states[camera].get_current_frame(draw_options)
def get_current_frame_time(self, camera) -> int: def get_current_frame_time(self, camera: str) -> float:
"""Returns the latest frame time for a given camera.""" """Returns the latest frame time for a given camera."""
return self.camera_states[camera].current_frame_time return self.camera_states[camera].current_frame_time
@ -348,7 +360,7 @@ class TrackedObjectProcessor(threading.Thread):
self, event_id: str, sub_label: str | None, score: float | None self, event_id: str, sub_label: str | None, score: float | None
) -> None: ) -> None:
"""Update sub label for given event id.""" """Update sub label for given event id."""
tracked_obj: TrackedObject = None tracked_obj: TrackedObject | None = None
for state in self.camera_states.values(): for state in self.camera_states.values():
tracked_obj = state.tracked_objects.get(event_id) tracked_obj = state.tracked_objects.get(event_id)
@ -357,7 +369,7 @@ class TrackedObjectProcessor(threading.Thread):
break break
try: try:
event: Event = Event.get(Event.id == event_id) event: Event | None = Event.get(Event.id == event_id)
except DoesNotExist: except DoesNotExist:
event = None event = None
@ -368,12 +380,12 @@ class TrackedObjectProcessor(threading.Thread):
tracked_obj.obj_data["sub_label"] = (sub_label, score) tracked_obj.obj_data["sub_label"] = (sub_label, score)
if event: if event:
event.sub_label = sub_label event.sub_label = sub_label # type: ignore[assignment]
data = event.data data = event.data
if sub_label is None: if sub_label is None:
data["sub_label_score"] = None data["sub_label_score"] = None # type: ignore[index]
elif score is not None: elif score is not None:
data["sub_label_score"] = score data["sub_label_score"] = score # type: ignore[index]
event.data = data event.data = data
event.save() event.save()
@ -402,7 +414,7 @@ class TrackedObjectProcessor(threading.Thread):
objects_list = [] objects_list = []
sub_labels = set() sub_labels = set()
events = Event.select(Event.id, Event.label, Event.sub_label).where( events = Event.select(Event.id, Event.label, Event.sub_label).where(
Event.id.in_(detection_ids) Event.id.in_(detection_ids) # type: ignore[call-arg, misc]
) )
for det_event in events: for det_event in events:
if det_event.sub_label: if det_event.sub_label:
@ -431,13 +443,11 @@ class TrackedObjectProcessor(threading.Thread):
f"Updated sub_label for event {event_id} in review segment {review_segment.id}" f"Updated sub_label for event {event_id} in review segment {review_segment.id}"
) )
except ReviewSegment.DoesNotExist: except DoesNotExist:
logger.debug( logger.debug(
f"No review segment found with event ID {event_id} when updating sub_label" f"No review segment found with event ID {event_id} when updating sub_label"
) )
return True
def set_object_attribute( def set_object_attribute(
self, self,
event_id: str, event_id: str,
@ -446,7 +456,7 @@ class TrackedObjectProcessor(threading.Thread):
score: float | None, score: float | None,
) -> None: ) -> None:
"""Update attribute for given event id.""" """Update attribute for given event id."""
tracked_obj: TrackedObject = None tracked_obj: TrackedObject | None = None
for state in self.camera_states.values(): for state in self.camera_states.values():
tracked_obj = state.tracked_objects.get(event_id) tracked_obj = state.tracked_objects.get(event_id)
@ -455,7 +465,7 @@ class TrackedObjectProcessor(threading.Thread):
break break
try: try:
event: Event = Event.get(Event.id == event_id) event: Event | None = Event.get(Event.id == event_id)
except DoesNotExist: except DoesNotExist:
event = None event = None
@ -470,16 +480,14 @@ class TrackedObjectProcessor(threading.Thread):
if event: if event:
data = event.data data = event.data
data[field_name] = field_value data[field_name] = field_value # type: ignore[index]
if field_value is None: if field_value is None:
data[f"{field_name}_score"] = None data[f"{field_name}_score"] = None # type: ignore[index]
elif score is not None: elif score is not None:
data[f"{field_name}_score"] = score data[f"{field_name}_score"] = score # type: ignore[index]
event.data = data event.data = data
event.save() event.save()
return True
def save_lpr_snapshot(self, payload: tuple) -> None: def save_lpr_snapshot(self, payload: tuple) -> None:
# save the snapshot image # save the snapshot image
(frame, event_id, camera) = payload (frame, event_id, camera) = payload
@ -638,7 +646,7 @@ class TrackedObjectProcessor(threading.Thread):
) )
self.ongoing_manual_events.pop(event_id) self.ongoing_manual_events.pop(event_id)
def force_end_all_events(self, camera: str, camera_state: CameraState): def force_end_all_events(self, camera: str, camera_state: CameraState) -> None:
"""Ends all active events on camera when disabling.""" """Ends all active events on camera when disabling."""
last_frame_name = camera_state.previous_frame_id last_frame_name = camera_state.previous_frame_id
for obj_id, obj in list(camera_state.tracked_objects.items()): for obj_id, obj in list(camera_state.tracked_objects.items()):
@ -656,7 +664,7 @@ class TrackedObjectProcessor(threading.Thread):
{"enabled": False, "motion": 0, "objects": []}, {"enabled": False, "motion": 0, "objects": []},
) )
def run(self): def run(self) -> None:
while not self.stop_event.is_set(): while not self.stop_event.is_set():
# check for config updates # check for config updates
updated_topics = self.camera_config_subscriber.check_for_updates() updated_topics = self.camera_config_subscriber.check_for_updates()
@ -698,11 +706,14 @@ class TrackedObjectProcessor(threading.Thread):
# check for sub label updates # check for sub label updates
while True: while True:
(raw_topic, payload) = self.sub_label_subscriber.check_for_update( update = self.sub_label_subscriber.check_for_update(timeout=0)
timeout=0
)
if not raw_topic: if not update:
break
(raw_topic, payload) = update
if not raw_topic or not payload:
break break
topic = str(raw_topic) topic = str(raw_topic)

View File

@ -5,18 +5,19 @@ import math
import os import os
from collections import defaultdict from collections import defaultdict
from statistics import median from statistics import median
from typing import Any, Optional from typing import Any, Optional, cast
import cv2 import cv2
import numpy as np import numpy as np
from frigate.config import ( from frigate.config import (
CameraConfig, CameraConfig,
ModelConfig, FilterConfig,
SnapshotsConfig, SnapshotsConfig,
UIConfig, UIConfig,
) )
from frigate.const import CLIPS_DIR, THUMB_DIR from frigate.const import CLIPS_DIR, THUMB_DIR
from frigate.detectors.detector_config import ModelConfig
from frigate.review.types import SeverityEnum from frigate.review.types import SeverityEnum
from frigate.util.builtin import sanitize_float from frigate.util.builtin import sanitize_float
from frigate.util.image import ( from frigate.util.image import (
@ -46,11 +47,11 @@ class TrackedObject:
model_config: ModelConfig, model_config: ModelConfig,
camera_config: CameraConfig, camera_config: CameraConfig,
ui_config: UIConfig, ui_config: UIConfig,
frame_cache, frame_cache: dict[float, dict[str, Any]],
obj_data: dict[str, Any], obj_data: dict[str, Any],
): ) -> None:
# set the score history then remove as it is not part of object state # set the score history then remove as it is not part of object state
self.score_history = obj_data["score_history"] self.score_history: list[float] = obj_data["score_history"]
del obj_data["score_history"] del obj_data["score_history"]
self.obj_data = obj_data self.obj_data = obj_data
@ -61,24 +62,24 @@ class TrackedObject:
self.frame_cache = frame_cache self.frame_cache = frame_cache
self.zone_presence: dict[str, int] = {} self.zone_presence: dict[str, int] = {}
self.zone_loitering: dict[str, int] = {} self.zone_loitering: dict[str, int] = {}
self.current_zones = [] self.current_zones: list[str] = []
self.entered_zones = [] self.entered_zones: list[str] = []
self.attributes = defaultdict(float) self.attributes: dict[str, float] = defaultdict(float)
self.false_positive = True self.false_positive = True
self.has_clip = False self.has_clip = False
self.has_snapshot = False self.has_snapshot = False
self.top_score = self.computed_score = 0.0 self.top_score = self.computed_score = 0.0
self.thumbnail_data = None self.thumbnail_data: dict[str, Any] | None = None
self.last_updated = 0 self.last_updated = 0
self.last_published = 0 self.last_published = 0
self.frame = None self.frame = None
self.active = True self.active = True
self.pending_loitering = False self.pending_loitering = False
self.speed_history = [] self.speed_history: list[float] = []
self.current_estimated_speed = 0 self.current_estimated_speed: float = 0
self.average_estimated_speed = 0 self.average_estimated_speed: float = 0
self.velocity_angle = 0 self.velocity_angle = 0
self.path_data = [] self.path_data: list[tuple[Any, float]] = []
self.previous = self.to_dict() self.previous = self.to_dict()
@property @property
@ -111,7 +112,7 @@ class TrackedObject:
return None return None
def _is_false_positive(self): def _is_false_positive(self) -> bool:
# once a true positive, always a true positive # once a true positive, always a true positive
if not self.false_positive: if not self.false_positive:
return False return False
@ -119,11 +120,13 @@ class TrackedObject:
threshold = self.camera_config.objects.filters[self.obj_data["label"]].threshold threshold = self.camera_config.objects.filters[self.obj_data["label"]].threshold
return self.computed_score < threshold return self.computed_score < threshold
def compute_score(self): def compute_score(self) -> float:
"""get median of scores for object.""" """get median of scores for object."""
return median(self.score_history) return median(self.score_history)
def update(self, current_frame_time: float, obj_data, has_valid_frame: bool): def update(
self, current_frame_time: float, obj_data: dict[str, Any], has_valid_frame: bool
) -> tuple[bool, bool, bool, bool]:
thumb_update = False thumb_update = False
significant_change = False significant_change = False
path_update = False path_update = False
@ -305,7 +308,7 @@ class TrackedObject:
k: self.attributes[k] for k in self.logos if k in self.attributes k: self.attributes[k] for k in self.logos if k in self.attributes
} }
if len(recognized_logos) > 0: if len(recognized_logos) > 0:
max_logo = max(recognized_logos, key=recognized_logos.get) max_logo = max(recognized_logos, key=recognized_logos.get) # type: ignore[arg-type]
# don't overwrite sub label if it is already set # don't overwrite sub label if it is already set
if ( if (
@ -342,28 +345,30 @@ class TrackedObject:
# update path # update path
width = self.camera_config.detect.width width = self.camera_config.detect.width
height = self.camera_config.detect.height height = self.camera_config.detect.height
bottom_center = (
round(obj_data["centroid"][0] / width, 4),
round(obj_data["box"][3] / height, 4),
)
# calculate a reasonable movement threshold (e.g., 5% of the frame diagonal) if width is not None and height is not None:
threshold = 0.05 * math.sqrt(width**2 + height**2) / max(width, height) bottom_center = (
round(obj_data["centroid"][0] / width, 4),
if not self.path_data: round(obj_data["box"][3] / height, 4),
self.path_data.append((bottom_center, obj_data["frame_time"]))
path_update = True
elif (
math.dist(self.path_data[-1][0], bottom_center) >= threshold
or len(self.path_data) == 1
):
# check Euclidean distance before appending
self.path_data.append((bottom_center, obj_data["frame_time"]))
path_update = True
logger.debug(
f"Point tracking: {obj_data['id']}, {bottom_center}, {obj_data['frame_time']}"
) )
# calculate a reasonable movement threshold (e.g., 5% of the frame diagonal)
threshold = 0.05 * math.sqrt(width**2 + height**2) / max(width, height)
if not self.path_data:
self.path_data.append((bottom_center, obj_data["frame_time"]))
path_update = True
elif (
math.dist(self.path_data[-1][0], bottom_center) >= threshold
or len(self.path_data) == 1
):
# check Euclidean distance before appending
self.path_data.append((bottom_center, obj_data["frame_time"]))
path_update = True
logger.debug(
f"Point tracking: {obj_data['id']}, {bottom_center}, {obj_data['frame_time']}"
)
self.obj_data.update(obj_data) self.obj_data.update(obj_data)
self.current_zones = current_zones self.current_zones = current_zones
logger.debug( logger.debug(
@ -371,7 +376,7 @@ class TrackedObject:
) )
return (thumb_update, significant_change, path_update, autotracker_update) return (thumb_update, significant_change, path_update, autotracker_update)
def to_dict(self): def to_dict(self) -> dict[str, Any]:
event = { event = {
"id": self.obj_data["id"], "id": self.obj_data["id"],
"camera": self.camera_config.name, "camera": self.camera_config.name,
@ -413,10 +418,8 @@ class TrackedObject:
return not self.is_stationary() return not self.is_stationary()
def is_stationary(self) -> bool: def is_stationary(self) -> bool:
return ( count = cast(int | float, self.obj_data["motionless_count"])
self.obj_data["motionless_count"] return count > (self.camera_config.detect.stationary.threshold or 50)
> self.camera_config.detect.stationary.threshold
)
def get_thumbnail(self, ext: str) -> bytes | None: def get_thumbnail(self, ext: str) -> bytes | None:
img_bytes = self.get_img_bytes( img_bytes = self.get_img_bytes(
@ -453,9 +456,9 @@ class TrackedObject:
def get_img_bytes( def get_img_bytes(
self, self,
ext: str, ext: str,
timestamp=False, timestamp: bool = False,
bounding_box=False, bounding_box: bool = False,
crop=False, crop: bool = False,
height: int | None = None, height: int | None = None,
quality: int | None = None, quality: int | None = None,
) -> bytes | None: ) -> bytes | None:
@ -532,18 +535,18 @@ class TrackedObject:
best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA
) )
if timestamp: if timestamp:
color = self.camera_config.timestamp_style.color colors = self.camera_config.timestamp_style.color
draw_timestamp( draw_timestamp(
best_frame, best_frame,
self.thumbnail_data["frame_time"], self.thumbnail_data["frame_time"],
self.camera_config.timestamp_style.format, self.camera_config.timestamp_style.format,
font_effect=self.camera_config.timestamp_style.effect, font_effect=self.camera_config.timestamp_style.effect,
font_thickness=self.camera_config.timestamp_style.thickness, font_thickness=self.camera_config.timestamp_style.thickness,
font_color=(color.blue, color.green, color.red), font_color=(colors.blue, colors.green, colors.red),
position=self.camera_config.timestamp_style.position, position=self.camera_config.timestamp_style.position,
) )
quality_params = None quality_params = []
if ext == "jpg": if ext == "jpg":
quality_params = [int(cv2.IMWRITE_JPEG_QUALITY), quality or 70] quality_params = [int(cv2.IMWRITE_JPEG_QUALITY), quality or 70]
@ -596,6 +599,9 @@ class TrackedObject:
p.write(png_bytes) p.write(png_bytes)
def write_thumbnail_to_disk(self) -> None: def write_thumbnail_to_disk(self) -> None:
if not self.camera_config.name:
return
directory = os.path.join(THUMB_DIR, self.camera_config.name) directory = os.path.join(THUMB_DIR, self.camera_config.name)
if not os.path.exists(directory): if not os.path.exists(directory):
@ -603,11 +609,14 @@ class TrackedObject:
thumb_bytes = self.get_thumbnail("webp") thumb_bytes = self.get_thumbnail("webp")
with open(os.path.join(directory, f"{self.obj_data['id']}.webp"), "wb") as f: if thumb_bytes:
f.write(thumb_bytes) with open(
os.path.join(directory, f"{self.obj_data['id']}.webp"), "wb"
) as f:
f.write(thumb_bytes)
def zone_filtered(obj: TrackedObject, object_config): def zone_filtered(obj: TrackedObject, object_config: dict[str, FilterConfig]) -> bool:
object_name = obj.obj_data["label"] object_name = obj.obj_data["label"]
if object_name in object_config: if object_name in object_config:
@ -657,9 +666,9 @@ class TrackedObjectAttribute:
def find_best_object(self, objects: list[dict[str, Any]]) -> Optional[str]: def find_best_object(self, objects: list[dict[str, Any]]) -> Optional[str]:
"""Find the best attribute for each object and return its ID.""" """Find the best attribute for each object and return its ID."""
best_object_area = None best_object_area: float | None = None
best_object_id = None best_object_id: str | None = None
best_object_label = None best_object_label: str | None = None
for obj in objects: for obj in objects:
if not box_inside(obj["box"], self.box): if not box_inside(obj["box"], self.box):

View File

@ -363,6 +363,10 @@ def migrate_017_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]
if days: if days:
if mode == "all": if mode == "all":
continuous["days"] = days continuous["days"] = days
# if a user was keeping all for number of days
# we need to keep motion and all for that number of days
motion["days"] = days
else: else:
motion["days"] = days motion["days"] = days
@ -380,7 +384,7 @@ def migrate_017_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]
new_object_config["genai"] = {} new_object_config["genai"] = {}
for key in global_genai.keys(): for key in global_genai.keys():
if key not in ["provider", "base_url", "api_key"]: if key not in ["enabled", "model", "provider", "base_url", "api_key"]:
new_object_config["genai"][key] = global_genai[key] new_object_config["genai"][key] = global_genai[key]
else: else:
new_genai_config[key] = global_genai[key] new_genai_config[key] = global_genai[key]

View File

@ -284,7 +284,7 @@ def post_process_yolox(
def get_ort_providers( def get_ort_providers(
force_cpu: bool = False, device: str = "AUTO", requires_fp16: bool = False force_cpu: bool = False, device: str | None = "AUTO", requires_fp16: bool = False
) -> tuple[list[str], list[dict[str, Any]]]: ) -> tuple[list[str], list[dict[str, Any]]]:
if force_cpu: if force_cpu:
return ( return (
@ -301,7 +301,7 @@ def get_ort_providers(
for provider in ort.get_available_providers(): for provider in ort.get_available_providers():
if provider == "CUDAExecutionProvider": if provider == "CUDAExecutionProvider":
device_id = 0 if not device.isdigit() else int(device) device_id = 0 if (not device or not device.isdigit()) else int(device)
providers.append(provider) providers.append(provider)
options.append( options.append(
{ {

View File

@ -0,0 +1,470 @@
"""RKNN model conversion utility for Frigate."""
import fcntl
import logging
import os
import subprocess
import sys
import time
from pathlib import Path
from typing import Optional
logger = logging.getLogger(__name__)
# Preprocessing parameters handed to RKNN.config() for each supported model
# family. mean_values/std_values are expressed on the 0-255 pixel scale; the
# CLIP and ArcFace entries fold their usual 0-1 normalization constants into
# that scale. target_platform is filled in at conversion time from the
# detected SoC (see convert_onnx_to_rknn).
MODEL_TYPE_CONFIGS = {
    "yolo-generic": {
        "mean_values": [[0, 0, 0]],
        "std_values": [[255, 255, 255]],
        "target_platform": None,  # Will be set dynamically
    },
    "yolonas": {
        "mean_values": [[0, 0, 0]],
        "std_values": [[255, 255, 255]],
        "target_platform": None,  # Will be set dynamically
    },
    "yolox": {
        "mean_values": [[0, 0, 0]],
        "std_values": [[255, 255, 255]],
        "target_platform": None,  # Will be set dynamically
    },
    "jina-clip-v1-vision": {
        # CLIP ImageNet-style normalization scaled to 0-255 pixels.
        "mean_values": [[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255]],
        "std_values": [[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255]],
        "target_platform": None,  # Will be set dynamically
    },
    "arcface-r100": {
        # Maps pixels to the [-1, 1] range expected by ArcFace.
        "mean_values": [[127.5, 127.5, 127.5]],
        "std_values": [[127.5, 127.5, 127.5]],
        "target_platform": None,  # Will be set dynamically
    },
}
def get_rknn_model_type(model_path: str) -> str | None:
if all(keyword in str(model_path) for keyword in ["jina-clip-v1", "vision"]):
return "jina-clip-v1-vision"
model_name = os.path.basename(str(model_path)).lower()
if "arcface" in model_name:
return "arcface-r100"
if any(keyword in model_name for keyword in ["yolo", "yolox", "yolonas"]):
return model_name
return None
def is_rknn_compatible(model_path: str, model_type: str | None = None) -> bool:
    """
    Check if a model is compatible with RKNN conversion.

    Args:
        model_path: Path to the model file
        model_type: Type of the model (if known)

    Returns:
        True if the model is RKNN-compatible, False otherwise
    """
    # Conversion is only possible on a recognized Rockchip SoC.
    if get_soc_type() is None:
        return False

    resolved_type = model_type or get_rknn_model_type(model_path)
    return bool(resolved_type and resolved_type in MODEL_TYPE_CONFIGS)
def ensure_torch_dependencies() -> bool:
    """Ensure PyTorch is importable, installing it on demand.

    Attempts a plain import first; on failure, runs pip (with
    --break-system-packages, output suppressed) to install torch and
    torchvision, then retries the import.

    Returns:
        True when torch is importable (already present or freshly
        installed), False when installation or the re-import fails.
    """
    try:
        import torch  # type: ignore

        logger.debug("PyTorch is already available")
        return True
    except ImportError:
        pass

    logger.info("PyTorch not found, attempting to install...")

    install_cmd = [
        sys.executable,
        "-m",
        "pip",
        "install",
        "--break-system-packages",
        "torch",
        "torchvision",
    ]
    try:
        subprocess.check_call(
            install_cmd,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )

        import torch  # type: ignore # noqa: F401

        logger.info("PyTorch installed successfully")
        return True
    except (subprocess.CalledProcessError, ImportError) as e:
        logger.error(f"Failed to install PyTorch: {e}")
        return False
def ensure_rknn_toolkit() -> bool:
    """Report whether the RKNN toolkit is importable.

    Unlike ensure_torch_dependencies(), no installation is attempted; the
    toolkit is expected to be pre-installed in the image.

    Returns:
        True when `rknn.api.RKNN` can be imported, False otherwise.
    """
    try:
        from rknn.api import RKNN  # type: ignore # noqa: F401
    except ImportError as e:
        logger.error(f"RKNN toolkit not found. Please ensure it's installed. {e}")
        return False

    logger.debug("RKNN toolkit is already available")
    return True
def get_soc_type() -> Optional[str]:
    """Get the SoC type from the device tree.

    Reads /proc/device-tree/compatible and returns the last comma-separated
    entry with its trailing NUL byte stripped (e.g. "rk3588").

    Returns:
        The SoC identifier string, or None when it cannot be determined
        (file absent on non-device-tree hardware, or any other read error).
    """
    try:
        with open("/proc/device-tree/compatible") as file:
            # Entries look like "vendor,model\x00"; keep the model part.
            return file.read().split(",")[-1].strip("\x00")
    except OSError:
        # Broadened from FileNotFoundError: permission or I/O errors on
        # /proc should also mean "SoC unknown" rather than crash callers
        # such as is_rknn_compatible().
        logger.debug("Could not determine SoC type from device tree")
        return None
def convert_onnx_to_rknn(
    onnx_path: str,
    output_path: str,
    model_type: str,
    quantization: bool = False,
    soc: Optional[str] = None,
) -> bool:
    """
    Convert ONNX model to RKNN format.

    Args:
        onnx_path: Path to input ONNX model
        output_path: Path for output RKNN model
        model_type: Type of model (one of the MODEL_TYPE_CONFIGS keys)
        quantization: Whether to use 8-bit quantization (i8) or 16-bit float (fp16)
        soc: Target SoC platform (auto-detected if None)

    Returns:
        True if conversion successful, False otherwise
    """
    # torch/torchvision are required by the RKNN toolkit at conversion time.
    if not ensure_torch_dependencies():
        logger.debug("PyTorch dependencies not available")
        return False

    if not ensure_rknn_toolkit():
        logger.debug("RKNN toolkit not available")
        return False

    # Get SoC type if not provided
    if soc is None:
        soc = get_soc_type()
        if soc is None:
            logger.debug("Could not determine SoC type")
            return False

    # Get model config for the specified type
    if model_type not in MODEL_TYPE_CONFIGS:
        logger.debug(f"Unsupported model type: {model_type}")
        return False

    # Shallow copy is sufficient: only the scalar target_platform key is
    # replaced; the mean/std lists are shared but never mutated.
    config = MODEL_TYPE_CONFIGS[model_type].copy()
    config["target_platform"] = soc

    try:
        from rknn.api import RKNN  # type: ignore

        logger.info(f"Converting {onnx_path} to RKNN format for {soc}")

        rknn = RKNN(verbose=True)
        rknn.config(**config)

        # Embedding/face models need explicit input names and static shapes;
        # the detection models rely on the shapes baked into the ONNX graph.
        if model_type == "jina-clip-v1-vision":
            load_output = rknn.load_onnx(
                model=onnx_path,
                inputs=["pixel_values"],
                input_size_list=[[1, 3, 224, 224]],
            )
        elif model_type == "arcface-r100":
            load_output = rknn.load_onnx(
                model=onnx_path,
                inputs=["data"],
                input_size_list=[[1, 3, 112, 112]],
            )
        else:
            load_output = rknn.load_onnx(model=onnx_path)

        # RKNN API calls signal success with a 0 return code.
        if load_output != 0:
            logger.error("Failed to load ONNX model")
            return False

        if rknn.build(do_quantization=quantization) != 0:
            logger.error("Failed to build RKNN model")
            return False

        if rknn.export_rknn(output_path) != 0:
            logger.error("Failed to export RKNN model")
            return False

        logger.info(f"Successfully converted model to {output_path}")
        return True

    except Exception as e:
        logger.error(f"Error during RKNN conversion: {e}")
        return False
def cleanup_stale_lock(lock_file_path: Path) -> bool:
    """Remove a conversion lock file that has gone stale.

    A lock is treated as stale once its mtime is more than 10 minutes old,
    which covers processes that died while holding the lock.

    Args:
        lock_file_path: Path to the lock file.

    Returns:
        True when a stale lock was removed, False otherwise (including
        when the file does not exist or an error occurred).
    """
    try:
        if not lock_file_path.exists():
            return False

        age_seconds = time.time() - lock_file_path.stat().st_mtime
        if age_seconds > 600:  # stale after 10 minutes
            logger.warning(
                f"Removing stale lock file: {lock_file_path} (age: {age_seconds:.1f}s)"
            )
            lock_file_path.unlink()
            return True
    except Exception as e:
        logger.error(f"Error cleaning up stale lock: {e}")

    return False
def acquire_conversion_lock(lock_file_path: Path, timeout: int = 300) -> bool:
    """
    Acquire a file-based lock for model conversion.

    Takes an exclusive non-blocking flock() on the lock file, polling once
    per second until it succeeds or the timeout elapses.

    NOTE(review): on success the file descriptor is neither returned nor
    closed, so the flock is held until this process exits;
    release_conversion_lock() only unlinks the file. Confirm this lifetime
    is intended.

    Args:
        lock_file_path: Path to the lock file
        timeout: Maximum time to wait for lock in seconds

    Returns:
        True if lock acquired, False if timeout or error
    """
    try:
        lock_file_path.parent.mkdir(parents=True, exist_ok=True)

        # Clear any leftover lock from a crashed process before waiting on it.
        cleanup_stale_lock(lock_file_path)

        lock_fd = os.open(lock_file_path, os.O_CREAT | os.O_RDWR)

        # Try to acquire exclusive lock
        start_time = time.time()
        while time.time() - start_time < timeout:
            try:
                fcntl.flock(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)

                # Lock acquired successfully
                logger.debug(f"Acquired conversion lock: {lock_file_path}")
                return True
            except (OSError, IOError):
                # Lock is held by another process, wait and retry
                if time.time() - start_time >= timeout:
                    logger.warning(
                        f"Timeout waiting for conversion lock: {lock_file_path}"
                    )
                    os.close(lock_fd)
                    return False

                logger.debug("Waiting for conversion lock to be released...")
                time.sleep(1)

        # Poll loop exhausted without acquiring the lock.
        os.close(lock_fd)
        return False

    except Exception as e:
        logger.error(f"Error acquiring conversion lock: {e}")
        return False
def release_conversion_lock(lock_file_path: Path) -> None:
    """Release the conversion lock by deleting its lock file.

    Safe to call when the lock file is already gone; errors are logged
    rather than raised.

    Args:
        lock_file_path: Path to the lock file
    """
    try:
        if not lock_file_path.exists():
            return

        lock_file_path.unlink()
        logger.debug(f"Released conversion lock: {lock_file_path}")
    except Exception as e:
        logger.error(f"Error releasing conversion lock: {e}")
def is_lock_stale(lock_file_path: Path, max_age: int = 600) -> bool:
    """
    Check if a lock file is stale (older than max_age seconds).

    Args:
        lock_file_path: Path to the lock file
        max_age: Maximum age in seconds before considering lock stale

    Returns:
        True if lock is stale, False otherwise (including when the file
        is missing or cannot be stat'ed).
    """
    try:
        if not lock_file_path.exists():
            return False
        return (time.time() - lock_file_path.stat().st_mtime) > max_age
    except Exception:
        return False
def wait_for_conversion_completion(
    model_type: str, rknn_path: Path, lock_file_path: Path, timeout: int = 300
) -> bool:
    """
    Wait for another process to complete the conversion.

    Polls once per second for the RKNN file to appear. If the other
    process's lock goes stale, this process takes over the conversion.

    Args:
        model_type: Type of the model (used if this process has to retry
            the conversion after cleaning up a stale lock)
        rknn_path: Path to the expected RKNN model
        lock_file_path: Path to the lock file to monitor
        timeout: Maximum time to wait in seconds

    Returns:
        True if RKNN model appears, False if timeout
    """
    start_time = time.time()

    while time.time() - start_time < timeout:
        # Check if RKNN model appeared
        if rknn_path.exists():
            logger.info(f"RKNN model appeared: {rknn_path}")
            return True

        # Check if lock file is gone (conversion completed or failed)
        if not lock_file_path.exists():
            logger.info("Lock file removed, checking for RKNN model...")
            if rknn_path.exists():
                logger.info(f"RKNN model found after lock removal: {rknn_path}")
                return True
            else:
                logger.warning(
                    "Lock file removed but RKNN model not found, conversion may have failed"
                )
                return False

        # Check if lock is stale
        if is_lock_stale(lock_file_path):
            logger.warning("Lock file is stale, attempting to clean up and retry...")
            cleanup_stale_lock(lock_file_path)

            # Try to acquire lock again
            if acquire_conversion_lock(lock_file_path, timeout=60):
                try:
                    # Check if RKNN file appeared while waiting
                    if rknn_path.exists():
                        logger.info(f"RKNN model appeared while waiting: {rknn_path}")
                        return True

                    # Convert ONNX to RKNN
                    logger.info(
                        f"Retrying conversion of {rknn_path} after stale lock cleanup..."
                    )

                    # Get the original model path from rknn_path
                    # (foo.rknn -> foo.onnx via the stem + suffix swap below).
                    base_path = rknn_path.parent / rknn_path.stem
                    onnx_path = base_path.with_suffix(".onnx")

                    if onnx_path.exists():
                        if convert_onnx_to_rknn(
                            str(onnx_path), str(rknn_path), model_type, False
                        ):
                            return True

                    logger.error("Failed to convert model after stale lock cleanup")
                    return False
                finally:
                    release_conversion_lock(lock_file_path)

        logger.debug("Waiting for RKNN model to appear...")
        time.sleep(1)

    logger.warning(f"Timeout waiting for RKNN model: {rknn_path}")
    return False
def auto_convert_model(
    model_path: str, model_type: str | None = None, quantization: bool = False
) -> Optional[str]:
    """
    Automatically convert a model to RKNN format if needed.

    Only paths ending in ".onnx" (or with no extension) are considered;
    a pre-existing sibling .rknn file is reused without re-converting.
    A file lock serializes conversion across processes; losers of the
    lock race wait for the winner's output instead of converting twice.

    Args:
        model_path: Path to the model file
        model_type: Type of the model (inferred from the path when None)
        quantization: Whether to use quantization

    Returns:
        Path to the RKNN model if successful, None otherwise
    """
    # Already an RKNN model — nothing to do.
    if model_path.endswith(".rknn"):
        return model_path

    # Check if equivalent .rknn file exists
    base_path = Path(model_path)
    if base_path.suffix.lower() in [".onnx", ""]:
        base_name = base_path.stem if base_path.suffix else base_path.name
        rknn_path = base_path.parent / f"{base_name}.rknn"

        if rknn_path.exists():
            logger.info(f"Found existing RKNN model: {rknn_path}")
            return str(rknn_path)

        lock_file_path = base_path.parent / f"{base_name}.conversion.lock"

        if acquire_conversion_lock(lock_file_path):
            try:
                # Another process may have finished while we waited on the lock.
                if rknn_path.exists():
                    logger.info(
                        f"RKNN model appeared while waiting for lock: {rknn_path}"
                    )
                    return str(rknn_path)

                logger.info(f"Converting {model_path} to RKNN format...")
                rknn_path.parent.mkdir(parents=True, exist_ok=True)

                if not model_type:
                    model_type = get_rknn_model_type(base_path)

                # convert_onnx_to_rknn rejects unknown/None model types itself.
                if convert_onnx_to_rknn(
                    str(base_path), str(rknn_path), model_type, quantization
                ):
                    return str(rknn_path)
                else:
                    logger.error(f"Failed to convert {model_path} to RKNN format")
                    return None
            finally:
                release_conversion_lock(lock_file_path)
        else:
            logger.info(
                f"Another process is converting {model_path}, waiting for completion..."
            )
            if not model_type:
                model_type = get_rknn_model_type(base_path)
            if wait_for_conversion_completion(model_type, rknn_path, lock_file_path):
                return str(rknn_path)
            else:
                logger.error(f"Timeout waiting for conversion of {model_path}")
                return None

    # Not a convertible extension (e.g. .tflite): nothing we can do.
    return None

View File

@ -6,6 +6,7 @@ import logging
import os import os
import re import re
import resource import resource
import shutil
import signal import signal
import subprocess as sp import subprocess as sp
import traceback import traceback
@ -22,6 +23,7 @@ from frigate.const import (
DRIVER_ENV_VAR, DRIVER_ENV_VAR,
FFMPEG_HWACCEL_NVIDIA, FFMPEG_HWACCEL_NVIDIA,
FFMPEG_HWACCEL_VAAPI, FFMPEG_HWACCEL_VAAPI,
SHM_FRAMES_VAR,
) )
from frigate.util.builtin import clean_camera_user_pass, escape_special_characters from frigate.util.builtin import clean_camera_user_pass, escape_special_characters
@ -768,3 +770,65 @@ def set_file_limit() -> None:
logger.debug( logger.debug(
f"File limit set. New soft limit: {new_soft}, Hard limit remains: {current_hard}" f"File limit set. New soft limit: {new_soft}, Hard limit remains: {current_hard}"
) )
def get_fs_type(path: str) -> str:
bestMatch = ""
fsType = ""
for part in psutil.disk_partitions(all=True):
if path.startswith(part.mountpoint) and len(bestMatch) < len(part.mountpoint):
fsType = part.fstype
bestMatch = part.mountpoint
return fsType
def calculate_shm_requirements(config) -> dict:
try:
storage_stats = shutil.disk_usage("/dev/shm")
except (FileNotFoundError, OSError):
return {}
total_mb = round(storage_stats.total / pow(2, 20), 1)
used_mb = round(storage_stats.used / pow(2, 20), 1)
free_mb = round(storage_stats.free / pow(2, 20), 1)
# required for log files + nginx cache
min_req_shm = 40 + 10
if config.birdseye.restream:
min_req_shm += 8
available_shm = total_mb - min_req_shm
cam_total_frame_size = 0.0
for camera in config.cameras.values():
if camera.enabled_in_config and camera.detect.width and camera.detect.height:
cam_total_frame_size += round(
(camera.detect.width * camera.detect.height * 1.5 + 270480) / 1048576,
1,
)
# leave room for 2 cameras that are added dynamically, if a user wants to add more cameras they may need to increase the SHM size and restart after adding them.
cam_total_frame_size += 2 * round(
(1280 * 720 * 1.5 + 270480) / 1048576,
1,
)
shm_frame_count = min(
int(os.environ.get(SHM_FRAMES_VAR, "50")),
int(available_shm / cam_total_frame_size),
)
# minimum required shm recommendation
min_shm = round(min_req_shm + cam_total_frame_size * 20)
return {
"total": total_mb,
"used": used_mb,
"free": free_mb,
"mount_type": get_fs_type("/dev/shm"),
"available": round(available_shm, 1),
"camera_frame_size": cam_total_frame_size,
"shm_frame_count": shm_frame_count,
"min_shm": min_shm,
}

View File

@ -71,7 +71,7 @@ def stop_ffmpeg(ffmpeg_process: sp.Popen[Any], logger: logging.Logger):
def start_or_restart_ffmpeg( def start_or_restart_ffmpeg(
ffmpeg_cmd, logger, logpipe: LogPipe, frame_size=None, ffmpeg_process=None ffmpeg_cmd, logger, logpipe: LogPipe, frame_size=None, ffmpeg_process=None
): ) -> sp.Popen[Any]:
if ffmpeg_process is not None: if ffmpeg_process is not None:
stop_ffmpeg(ffmpeg_process, logger) stop_ffmpeg(ffmpeg_process, logger)
@ -96,7 +96,7 @@ def start_or_restart_ffmpeg(
def capture_frames( def capture_frames(
ffmpeg_process, ffmpeg_process: sp.Popen[Any],
config: CameraConfig, config: CameraConfig,
shm_frame_count: int, shm_frame_count: int,
frame_index: int, frame_index: int,
@ -107,7 +107,7 @@ def capture_frames(
skipped_fps: Value, skipped_fps: Value,
current_frame: Value, current_frame: Value,
stop_event: MpEvent, stop_event: MpEvent,
): ) -> None:
frame_size = frame_shape[0] * frame_shape[1] frame_size = frame_shape[0] * frame_shape[1]
frame_rate = EventsPerSecond() frame_rate = EventsPerSecond()
frame_rate.start() frame_rate.start()
@ -196,6 +196,7 @@ class CameraWatchdog(threading.Thread):
self.config_subscriber = CameraConfigUpdateSubscriber( self.config_subscriber = CameraConfigUpdateSubscriber(
None, {config.name: config}, [CameraConfigUpdateEnum.enabled] None, {config.name: config}, [CameraConfigUpdateEnum.enabled]
) )
self.requestor = InterProcessRequestor()
self.was_enabled = self.config.enabled self.was_enabled = self.config.enabled
def _update_enabled_state(self) -> bool: def _update_enabled_state(self) -> bool:
@ -245,6 +246,14 @@ class CameraWatchdog(threading.Thread):
else: else:
self.logger.debug(f"Disabling camera {self.config.name}") self.logger.debug(f"Disabling camera {self.config.name}")
self.stop_all_ffmpeg() self.stop_all_ffmpeg()
# update camera status
self.requestor.send_data(
f"{self.config.name}/status/detect", "disabled"
)
self.requestor.send_data(
f"{self.config.name}/status/record", "disabled"
)
self.was_enabled = enabled self.was_enabled = enabled
continue continue
@ -254,6 +263,7 @@ class CameraWatchdog(threading.Thread):
now = datetime.datetime.now().timestamp() now = datetime.datetime.now().timestamp()
if not self.capture_thread.is_alive(): if not self.capture_thread.is_alive():
self.requestor.send_data(f"{self.config.name}/status/detect", "offline")
self.camera_fps.value = 0 self.camera_fps.value = 0
self.logger.error( self.logger.error(
f"Ffmpeg process crashed unexpectedly for {self.config.name}." f"Ffmpeg process crashed unexpectedly for {self.config.name}."
@ -263,6 +273,9 @@ class CameraWatchdog(threading.Thread):
self.fps_overflow_count += 1 self.fps_overflow_count += 1
if self.fps_overflow_count == 3: if self.fps_overflow_count == 3:
self.requestor.send_data(
f"{self.config.name}/status/detect", "offline"
)
self.fps_overflow_count = 0 self.fps_overflow_count = 0
self.camera_fps.value = 0 self.camera_fps.value = 0
self.logger.info( self.logger.info(
@ -270,6 +283,7 @@ class CameraWatchdog(threading.Thread):
) )
self.reset_capture_thread(drain_output=False) self.reset_capture_thread(drain_output=False)
elif now - self.capture_thread.current_frame.value > 20: elif now - self.capture_thread.current_frame.value > 20:
self.requestor.send_data(f"{self.config.name}/status/detect", "offline")
self.camera_fps.value = 0 self.camera_fps.value = 0
self.logger.info( self.logger.info(
f"No frames received from {self.config.name} in 20 seconds. Exiting ffmpeg..." f"No frames received from {self.config.name} in 20 seconds. Exiting ffmpeg..."
@ -277,6 +291,7 @@ class CameraWatchdog(threading.Thread):
self.reset_capture_thread() self.reset_capture_thread()
else: else:
# process is running normally # process is running normally
self.requestor.send_data(f"{self.config.name}/status/detect", "online")
self.fps_overflow_count = 0 self.fps_overflow_count = 0
for p in self.ffmpeg_other_processes: for p in self.ffmpeg_other_processes:
@ -302,13 +317,27 @@ class CameraWatchdog(threading.Thread):
p["logpipe"], p["logpipe"],
ffmpeg_process=p["process"], ffmpeg_process=p["process"],
) )
for role in p["roles"]:
self.requestor.send_data(
f"{self.config.name}/status/{role}", "offline"
)
continue continue
else: else:
self.requestor.send_data(
f"{self.config.name}/status/record", "online"
)
p["latest_segment_time"] = latest_segment_time p["latest_segment_time"] = latest_segment_time
if poll is None: if poll is None:
continue continue
for role in p["roles"]:
self.requestor.send_data(
f"{self.config.name}/status/{role}", "offline"
)
p["logpipe"].dump() p["logpipe"].dump()
p["process"] = start_or_restart_ffmpeg( p["process"] = start_or_restart_ffmpeg(
p["cmd"], self.logger, p["logpipe"], ffmpeg_process=p["process"] p["cmd"], self.logger, p["logpipe"], ffmpeg_process=p["process"]

View File

@ -19,8 +19,8 @@
}, },
"outputs": [], "outputs": [],
"source": [ "source": [
"! sed -i 's/sghub.deci.ai/sg-hub-nv.s3.amazonaws.com/' /usr/local/lib/python3.11/dist-packages/super_gradients/training/pretrained_models.py\n", "! sed -i 's/sghub.deci.ai/sg-hub-nv.s3.amazonaws.com/' /usr/local/lib/python3.12/dist-packages/super_gradients/training/pretrained_models.py\n",
"! sed -i 's/sghub.deci.ai/sg-hub-nv.s3.amazonaws.com/' /usr/local/lib/python3.11/dist-packages/super_gradients/training/utils/checkpoint_utils.py" "! sed -i 's/sghub.deci.ai/sg-hub-nv.s3.amazonaws.com/' /usr/local/lib/python3.12/dist-packages/super_gradients/training/utils/checkpoint_utils.py"
] ]
}, },
{ {

View File

@ -106,5 +106,7 @@
}, },
"label": { "label": {
"back": "Върни се" "back": "Върни се"
} },
"selectItem": "Избери {{item}}",
"readTheDocumentation": "Прочетете документацията"
} }

View File

@ -261,5 +261,6 @@
"title": "404", "title": "404",
"desc": "Pàgina no trobada" "desc": "Pàgina no trobada"
}, },
"selectItem": "Selecciona {{item}}" "selectItem": "Selecciona {{item}}",
"readTheDocumentation": "Llegir la documentació"
} }

View File

@ -110,5 +110,12 @@
"error": "No s'ha pogut suprimir: {{error}}" "error": "No s'ha pogut suprimir: {{error}}"
} }
} }
},
"imagePicker": {
"selectImage": "Selecciona la miniatura d'un objecte rastrejat",
"search": {
"placeholder": "Cerca per etiqueta o subetiqueta..."
},
"noImages": "No s'han trobat miniatures per a aquesta càmera"
} }
} }

View File

@ -122,5 +122,13 @@
}, },
"motion": { "motion": {
"showMotionOnly": "Mostar només el moviment" "showMotionOnly": "Mostar només el moviment"
},
"classes": {
"label": "Classes",
"all": {
"title": "Totes les classes"
},
"count_one": "{{count}} Classe",
"count_other": "{{count}} Classes"
} }
} }

View File

@ -12,5 +12,7 @@
"savingError": "Error al desar la configuració" "savingError": "Error al desar la configuració"
} }
}, },
"confirm": "Sortir sense desar?" "confirm": "Sortir sense desar?",
"safeConfigEditor": "Editor de Configuració (Mode Segur)",
"safeModeDescription": "Frigate està en mode segur a causa d'un error de validació de la configuració."
} }

View File

@ -97,7 +97,8 @@
"success": { "success": {
"updatedSublabel": "Subetiqueta actualitzada amb èxit.", "updatedSublabel": "Subetiqueta actualitzada amb èxit.",
"updatedLPR": "Matrícula actualitzada amb èxit.", "updatedLPR": "Matrícula actualitzada amb èxit.",
"regenerate": "El {{provider}} ha sol·licitat una nova descripció. En funció de la velocitat del vostre proveïdor, la nova descripció pot trigar un temps a regenerar-se." "regenerate": "El {{provider}} ha sol·licitat una nova descripció. En funció de la velocitat del vostre proveïdor, la nova descripció pot trigar un temps a regenerar-se.",
"audioTranscription": "Transcripció d'àudio sol·licitada amb èxit."
}, },
"error": { "error": {
"regenerate": "No s'ha pogut contactar amb {{provider}} per obtenir una nova descripció: {{errorMessage}}", "regenerate": "No s'ha pogut contactar amb {{provider}} per obtenir una nova descripció: {{errorMessage}}",

View File

@ -32,7 +32,15 @@
"label": "Fer clic a la imatge per centrar la càmera PTZ" "label": "Fer clic a la imatge per centrar la càmera PTZ"
} }
}, },
"presets": "Predefinits de la càmera PTZ" "presets": "Predefinits de la càmera PTZ",
"focus": {
"in": {
"label": "Enfoca la càmera PTZ aprop"
},
"out": {
"label": "Enfoca la càmera PTZ lluny"
}
}
}, },
"documentTitle": "Directe - Frigate", "documentTitle": "Directe - Frigate",
"documentTitle.withCamera": "{{camera}} - Directe - Frigate", "documentTitle.withCamera": "{{camera}} - Directe - Frigate",
@ -154,5 +162,9 @@
"label": "Editar grup de càmeres" "label": "Editar grup de càmeres"
}, },
"exitEdit": "Sortir de l'edició" "exitEdit": "Sortir de l'edició"
},
"transcription": {
"enable": "Habilita la transcripció d'àudio en temps real",
"disable": "Deshabilita la transcripció d'àudio en temps real"
} }
} }

View File

@ -261,5 +261,7 @@
"admin": "Správce", "admin": "Správce",
"viewer": "Divák", "viewer": "Divák",
"desc": "Správci mají plný přístup ke všem funkcím v uživatelském rozhraní Frigate. Diváci jsou omezeni na sledování kamer, položek přehledu a historických záznamů v UI." "desc": "Správci mají plný přístup ke všem funkcím v uživatelském rozhraní Frigate. Diváci jsou omezeni na sledování kamer, položek přehledu a historických záznamů v UI."
} },
"selectItem": "Vybrat {{item}}",
"readTheDocumentation": "Přečtěte si dokumentaci"
} }

View File

@ -254,5 +254,6 @@
"title": "404", "title": "404",
"desc": "Side ikke fundet" "desc": "Side ikke fundet"
}, },
"selectItem": "Vælg {{item}}" "selectItem": "Vælg {{item}}",
"readTheDocumentation": "Læs dokumentationen"
} }

View File

@ -38,7 +38,7 @@
"1hour": "1 Stunde", "1hour": "1 Stunde",
"lastWeek": "Letzte Woche", "lastWeek": "Letzte Woche",
"h": "{{time}} Stunde", "h": "{{time}} Stunde",
"ago": "{{timeAgo}} her", "ago": "vor {{timeAgo}}",
"untilRestart": "Bis zum Neustart", "untilRestart": "Bis zum Neustart",
"justNow": "Gerade", "justNow": "Gerade",
"pm": "nachmittags", "pm": "nachmittags",
@ -160,7 +160,15 @@
"sk": "Slowakisch", "sk": "Slowakisch",
"yue": "粵語 (Kantonesisch)", "yue": "粵語 (Kantonesisch)",
"th": "ไทย (Thailändisch)", "th": "ไทย (Thailändisch)",
"ca": "Català (Katalanisch)" "ca": "Català (Katalanisch)",
"ur": "اردو (Urdu)",
"ptBR": "Portugiesisch (Brasilianisch)",
"sr": "Српски (Serbisch)",
"sl": "Slovenščina (Slowenisch)",
"lt": "Lietuvių (Litauisch)",
"bg": "Български (bulgarisch)",
"gl": "Galego (Galicisch)",
"id": "Bahasa Indonesia (Indonesisch)"
}, },
"appearance": "Erscheinung", "appearance": "Erscheinung",
"theme": { "theme": {
@ -168,7 +176,7 @@
"blue": "Blau", "blue": "Blau",
"green": "Grün", "green": "Grün",
"default": "Standard", "default": "Standard",
"nord": "Norden", "nord": "Nord",
"red": "Rot", "red": "Rot",
"contrast": "Hoher Kontrast", "contrast": "Hoher Kontrast",
"highcontrast": "Hoher Kontrast" "highcontrast": "Hoher Kontrast"
@ -260,6 +268,7 @@
"documentTitle": "Nicht gefunden - Frigate" "documentTitle": "Nicht gefunden - Frigate"
}, },
"selectItem": "Wähle {{item}}", "selectItem": "Wähle {{item}}",
"readTheDocumentation": "Dokumentation lesen",
"accessDenied": { "accessDenied": {
"desc": "Du hast keine Berechtigung diese Seite anzuzeigen.", "desc": "Du hast keine Berechtigung diese Seite anzuzeigen.",
"documentTitle": "Zugang verweigert - Frigate", "documentTitle": "Zugang verweigert - Frigate",

View File

@ -119,5 +119,12 @@
"markAsReviewed": "Als geprüft markieren", "markAsReviewed": "Als geprüft markieren",
"deleteNow": "Jetzt löschen" "deleteNow": "Jetzt löschen"
} }
},
"imagePicker": {
"selectImage": "Vorschaubild eines verfolgten Objekts selektieren",
"search": {
"placeholder": "Nach Label oder Unterlabel suchen..."
},
"noImages": "Kein Vorschaubild für diese Kamera gefunden"
} }
} }

View File

@ -122,5 +122,13 @@
"loading": "Lade bekannte Nummernschilder…", "loading": "Lade bekannte Nummernschilder…",
"placeholder": "Tippe, um Kennzeichen zu suchen…", "placeholder": "Tippe, um Kennzeichen zu suchen…",
"selectPlatesFromList": "Wählen eine oder mehrere Kennzeichen aus der Liste aus." "selectPlatesFromList": "Wählen eine oder mehrere Kennzeichen aus der Liste aus."
},
"classes": {
"label": "Klassen",
"all": {
"title": "Alle Klassen"
},
"count_one": "{{count}} Klasse",
"count_other": "{{count}} Klassen"
} }
} }

View File

@ -12,5 +12,7 @@
} }
}, },
"documentTitle": "Konfigurationseditor Frigate", "documentTitle": "Konfigurationseditor Frigate",
"confirm": "Verlassen ohne zu Speichern?" "confirm": "Verlassen ohne zu Speichern?",
"safeConfigEditor": "Konfiguration Editor (abgesicherter Modus)",
"safeModeDescription": "Frigate ist aufgrund eines Konfigurationsvalidierungsfehlers im abgesicherten Modus."
} }

View File

@ -34,5 +34,7 @@
"markAsReviewed": "Als geprüft kennzeichnen", "markAsReviewed": "Als geprüft kennzeichnen",
"selected_one": "{{count}} ausgewählt", "selected_one": "{{count}} ausgewählt",
"selected_other": "{{count}} ausgewählt", "selected_other": "{{count}} ausgewählt",
"detected": "erkannt" "detected": "erkannt",
"suspiciousActivity": "Verdächtige Aktivität",
"threateningActivity": "Bedrohliche Aktivität"
} }

View File

@ -17,12 +17,14 @@
"success": { "success": {
"updatedSublabel": "Unterkategorie erfolgreich aktualisiert.", "updatedSublabel": "Unterkategorie erfolgreich aktualisiert.",
"updatedLPR": "Nummernschild erfolgreich aktualisiert.", "updatedLPR": "Nummernschild erfolgreich aktualisiert.",
"regenerate": "Eine neue Beschreibung wurde von {{provider}} angefordert. Je nach Geschwindigkeit des Anbieters kann es einige Zeit dauern, bis die neue Beschreibung generiert ist." "regenerate": "Eine neue Beschreibung wurde von {{provider}} angefordert. Je nach Geschwindigkeit des Anbieters kann es einige Zeit dauern, bis die neue Beschreibung generiert ist.",
"audioTranscription": "Audio Transkription erfolgreich angefordert."
}, },
"error": { "error": {
"regenerate": "Der Aufruf von {{provider}} für eine neue Beschreibung ist fehlgeschlagen: {{errorMessage}}", "regenerate": "Der Aufruf von {{provider}} für eine neue Beschreibung ist fehlgeschlagen: {{errorMessage}}",
"updatedSublabelFailed": "Untekategorie konnte nicht aktualisiert werden: {{errorMessage}}", "updatedSublabelFailed": "Untekategorie konnte nicht aktualisiert werden: {{errorMessage}}",
"updatedLPRFailed": "Aktualisierung des Kennzeichens fehlgeschlagen: {{errorMessage}}" "updatedLPRFailed": "Aktualisierung des Kennzeichens fehlgeschlagen: {{errorMessage}}",
"audioTranscription": "Die Anforderung der Audio Transkription ist fehlgeschlagen: {{errorMessage}}"
} }
} }
}, },
@ -67,6 +69,9 @@
}, },
"snapshotScore": { "snapshotScore": {
"label": "Schnappschuss Bewertung" "label": "Schnappschuss Bewertung"
},
"score": {
"label": "Ergebnis"
} }
}, },
"documentTitle": "Erkunde - Frigate", "documentTitle": "Erkunde - Frigate",
@ -182,6 +187,14 @@
}, },
"deleteTrackedObject": { "deleteTrackedObject": {
"label": "Dieses verfolgte Objekt löschen" "label": "Dieses verfolgte Objekt löschen"
},
"audioTranscription": {
"aria": "Audio Transkription anfordern",
"label": "Transkribieren"
},
"addTrigger": {
"aria": "Einen Trigger für dieses verfolgte Objekt hinzufügen",
"label": "Trigger hinzufügen"
} }
}, },
"dialog": { "dialog": {
@ -203,5 +216,11 @@
"fetchingTrackedObjectsFailed": "Fehler beim Abrufen von verfolgten Objekten: {{errorMessage}}", "fetchingTrackedObjectsFailed": "Fehler beim Abrufen von verfolgten Objekten: {{errorMessage}}",
"trackedObjectsCount_one": "{{count}} verfolgtes Objekt ", "trackedObjectsCount_one": "{{count}} verfolgtes Objekt ",
"trackedObjectsCount_other": "{{count}} verfolgte Objekte ", "trackedObjectsCount_other": "{{count}} verfolgte Objekte ",
"exploreMore": "Erkunde mehr {{label}} Objekte" "exploreMore": "Erkunde mehr {{label}} Objekte",
"aiAnalysis": {
"title": "KI-Analyse"
},
"concerns": {
"label": "Bedenken"
}
} }

View File

@ -46,7 +46,7 @@
"train": { "train": {
"title": "Trainiere", "title": "Trainiere",
"aria": "Wähle Training", "aria": "Wähle Training",
"empty": "Es gibt keine aktuellen Versuche zurGesichtserkennung" "empty": "Es gibt keine aktuellen Versuche zur Gesichtserkennung"
}, },
"deleteFaceLibrary": { "deleteFaceLibrary": {
"title": "Lösche Name", "title": "Lösche Name",

View File

@ -41,6 +41,14 @@
"center": { "center": {
"label": "Klicken Sie in den Rahmen, um die PTZ-Kamera zu zentrieren" "label": "Klicken Sie in den Rahmen, um die PTZ-Kamera zu zentrieren"
} }
},
"focus": {
"in": {
"label": "PTZ Kamera hinein fokussieren"
},
"out": {
"label": "PTZ Kamera hinaus fokussieren"
}
} }
}, },
"documentTitle": "Live - Frigate", "documentTitle": "Live - Frigate",
@ -100,7 +108,7 @@
"tips": "Ihr Gerät muss die Funktion unterstützen und WebRTC muss für die bidirektionale Kommunikation konfiguriert sein.", "tips": "Ihr Gerät muss die Funktion unterstützen und WebRTC muss für die bidirektionale Kommunikation konfiguriert sein.",
"tips.documentation": "Dokumentation lesen ", "tips.documentation": "Dokumentation lesen ",
"available": "Für diesen Stream ist eine Zwei-Wege-Sprechfunktion verfügbar", "available": "Für diesen Stream ist eine Zwei-Wege-Sprechfunktion verfügbar",
"unavailable": "Für diesen Stream ist keine Zwei-Wege-Kommunikation möglich." "unavailable": "Zwei-Wege-Kommunikation für diesen Stream nicht verfügbar"
}, },
"lowBandwidth": { "lowBandwidth": {
"tips": "Die Live-Ansicht befindet sich aufgrund von Puffer- oder Stream-Fehlern im Modus mit geringer Bandbreite.", "tips": "Die Live-Ansicht befindet sich aufgrund von Puffer- oder Stream-Fehlern im Modus mit geringer Bandbreite.",
@ -146,7 +154,8 @@
"cameraEnabled": "Kamera aktiviert", "cameraEnabled": "Kamera aktiviert",
"autotracking": "Autotracking", "autotracking": "Autotracking",
"audioDetection": "Audioerkennung", "audioDetection": "Audioerkennung",
"title": "{{camera}} Einstellungen" "title": "{{camera}} Einstellungen",
"transcription": "Audio Transkription"
}, },
"history": { "history": {
"label": "Historisches Filmmaterial zeigen" "label": "Historisches Filmmaterial zeigen"
@ -154,5 +163,9 @@
"audio": "Audio", "audio": "Audio",
"suspend": { "suspend": {
"forTime": "Aussetzen für: " "forTime": "Aussetzen für: "
},
"transcription": {
"enable": "Live Audio Transkription einschalten",
"disable": "Live Audio Transkription ausschalten"
} }
} }

View File

@ -58,7 +58,7 @@
"title": "Wie man Textfilter verwendet" "title": "Wie man Textfilter verwendet"
}, },
"searchType": { "searchType": {
"thumbnail": "Miniaturansicht", "thumbnail": "Vorschaubild",
"description": "Beschreibung" "description": "Beschreibung"
} }
}, },

View File

@ -8,7 +8,7 @@
"general": "Allgemeine Einstellungen Frigate", "general": "Allgemeine Einstellungen Frigate",
"frigatePlus": "Frigate+ Einstellungen Frigate", "frigatePlus": "Frigate+ Einstellungen Frigate",
"classification": "Klassifizierungseinstellungen Frigate", "classification": "Klassifizierungseinstellungen Frigate",
"motionTuner": "Bewegungstuner Frigate", "motionTuner": "Bewegungserkennungs-Optimierer Frigate",
"notifications": "Benachrichtigungs-Einstellungen", "notifications": "Benachrichtigungs-Einstellungen",
"enrichments": "Erweiterte Statistiken - Frigate" "enrichments": "Erweiterte Statistiken - Frigate"
}, },
@ -17,12 +17,12 @@
"cameras": "Kameraeinstellungen", "cameras": "Kameraeinstellungen",
"classification": "Klassifizierung", "classification": "Klassifizierung",
"masksAndZones": "Maskierungen / Zonen", "masksAndZones": "Maskierungen / Zonen",
"motionTuner": "Bewegungstuner", "motionTuner": "Bewegungserkennungs-Optimierer",
"debug": "Debug", "debug": "Debug",
"frigateplus": "Frigate+", "frigateplus": "Frigate+",
"users": "Benutzer", "users": "Benutzer",
"notifications": "Benachrichtigungen", "notifications": "Benachrichtigungen",
"enrichments": "Verbesserungen" "enrichments": "Erkennungsfunktionen"
}, },
"dialog": { "dialog": {
"unsavedChanges": { "unsavedChanges": {
@ -178,7 +178,44 @@
"detections": "Erkennungen ", "detections": "Erkennungen ",
"desc": "Aktiviere/deaktiviere Benachrichtigungen und Erkennungen für diese Kamera vorübergehend, bis Frigate neu gestartet wird. Wenn deaktiviert, werden keine neuen Überprüfungseinträge erstellt. " "desc": "Aktiviere/deaktiviere Benachrichtigungen und Erkennungen für diese Kamera vorübergehend, bis Frigate neu gestartet wird. Wenn deaktiviert, werden keine neuen Überprüfungseinträge erstellt. "
}, },
"title": "Kamera-Einstellungen" "title": "Kameraeinstellungen",
"object_descriptions": {
"title": "Generative KI-Objektbeschreibungen",
"desc": "Generativen KI-Objektbeschreibungen für diese Kamera vorübergehend aktivieren/deaktivieren. Wenn diese Funktion deaktiviert ist, werden keine KI-generierten Beschreibungen für verfolgte Objekte auf dieser Kamera angefordert."
},
"cameraConfig": {
"ffmpeg": {
"roles": "Rollen",
"pathRequired": "Stream-Pfad ist erforderlich",
"path": "Stream-Pfad",
"inputs": "Eingabe Streams",
"pathPlaceholder": "rtsp://...",
"rolesRequired": "Mindestens eine Rolle ist erforderlich",
"rolesUnique": "Jede Rolle (Audio, Erkennung, Aufzeichnung) kann nur einem Stream zugewiesen werden",
"addInput": "Eingabe-Stream hinzufügen",
"removeInput": "Eingabe-Stream entfernen",
"inputsRequired": "Mindestens ein Eingabe-Stream ist erforderlich"
},
"enabled": "Aktiviert",
"namePlaceholder": "z. B., Vorder_Türe",
"nameInvalid": "Der Name der Kamera darf nur Buchstaben, Zahlen, Unterstriche oder Bindestriche enthalten",
"name": "Kamera Name",
"edit": "Kamera bearbeiten",
"add": "Kamera hinzufügen",
"description": "Kameraeinstellungen einschließlich Stream-Eingänge und Rollen konfigurieren.",
"nameRequired": "Kameraname ist erforderlich",
"toast": {
"success": "Kamera {{cameraName}} erfolgreich gespeichert"
}
},
"backToSettings": "Zurück zu den Kamera Einstellungen",
"selectCamera": "Kamera wählen",
"editCamera": "Kamera bearbeiten:",
"addCamera": "Neue Kamera hinzufügen",
"review_descriptions": {
"desc": "Generativen KI-Objektbeschreibungen für diese Kamera vorübergehend aktivieren/deaktivieren. Wenn diese Funktion deaktiviert ist, werden keine KI-generierten Beschreibungen für Überprüfungselemente auf dieser Kamera angefordert.",
"title": "Beschreibungen zur generativen KI-Überprüfung"
}
}, },
"masksAndZones": { "masksAndZones": {
"form": { "form": {
@ -397,7 +434,12 @@
"desc": "Einen Rahmen für den an den Objektdetektor übermittelten Interessensbereich anzeigen" "desc": "Einen Rahmen für den an den Objektdetektor übermittelten Interessensbereich anzeigen"
}, },
"title": "Debug", "title": "Debug",
"desc": "Die Debug-Ansicht zeigt eine Echtzeitansicht der verfolgten Objekte und ihrer Statistiken. Die Objektliste zeigt eine zeitverzögerte Zusammenfassung der erkannten Objekte." "desc": "Die Debug-Ansicht zeigt eine Echtzeitansicht der verfolgten Objekte und ihrer Statistiken. Die Objektliste zeigt eine zeitverzögerte Zusammenfassung der erkannten Objekte.",
"paths": {
"title": "Pfade",
"desc": "Wichtige Punkte des Pfads des verfolgten Objekts anzeigen",
"tips": "<p><strong>Pfade</strong></p><br><p>Linien und Kreise zeigen wichtige Punkte an, an denen sich das verfolgte Objekt während seines Lebenszyklus bewegt hat.</p>"
}
}, },
"motionDetectionTuner": { "motionDetectionTuner": {
"Threshold": { "Threshold": {
@ -420,7 +462,7 @@
"desc": "Der Wert für die Konturfläche wird verwendet, um zu bestimmen, welche Gruppen von veränderten Pixeln als Bewegung gelten. <em>Standard: 10</em>" "desc": "Der Wert für die Konturfläche wird verwendet, um zu bestimmen, welche Gruppen von veränderten Pixeln als Bewegung gelten. <em>Standard: 10</em>"
}, },
"title": "Bewegungserkennungs-Optimierer", "title": "Bewegungserkennungs-Optimierer",
"unsavedChanges": "Nicht gespeicherte Änderungen am Bewegungstuner ({{camera}})" "unsavedChanges": "Nicht gespeicherte Änderungen im Bewegungserkennungs-Optimierer ({{camera}})"
}, },
"users": { "users": {
"addUser": "Benutzer hinzufügen", "addUser": "Benutzer hinzufügen",
@ -679,5 +721,100 @@
"success": "Die Einstellungen für die Verbesserungen wurden gespeichert. Starten Sie Frigate neu, um Ihre Änderungen zu übernehmen.", "success": "Die Einstellungen für die Verbesserungen wurden gespeichert. Starten Sie Frigate neu, um Ihre Änderungen zu übernehmen.",
"error": "Konfigurationsänderungen konnten nicht gespeichert werden: {{errorMessage}}" "error": "Konfigurationsänderungen konnten nicht gespeichert werden: {{errorMessage}}"
} }
},
"triggers": {
"documentTitle": "Auslöser",
"management": {
"title": "Auslöser Verwaltung",
"desc": "Auslöser für {{camera}} verwalten. Verwenden Sie den Vorschaubild Typ, um ähnliche Vorschaubilder wie das ausgewählte verfolgte Objekt auszulösen, und den Beschreibungstyp, um ähnliche Beschreibungen wie den von Ihnen angegebenen Text auszulösen."
},
"addTrigger": "Auslöser hinzufügen",
"table": {
"name": "Name",
"type": "Typ",
"content": "Inhalt",
"threshold": "Schwellenwert",
"actions": "Aktionen",
"noTriggers": "Für diese Kamera sind keine Auslöser konfiguriert.",
"edit": "Bearbeiten",
"deleteTrigger": "Auslöser löschen",
"lastTriggered": "Zuletzt ausgelöst"
},
"type": {
"thumbnail": "Vorschaubild",
"description": "Beschreibung"
},
"actions": {
"alert": "Als Alarm markieren",
"notification": "Benachrichtigung senden"
},
"dialog": {
"createTrigger": {
"title": "Auslöser erstellen",
"desc": "Auslöser für Kamera {{camera}} erstellen"
},
"editTrigger": {
"title": "Auslöser bearbeiten",
"desc": "Einstellungen für Kamera {{camera}} bearbeiten"
},
"deleteTrigger": {
"title": "Auslöser löschen",
"desc": "Sind Sie sicher, dass Sie den Auslöser <strong>{{triggerName}}</strong> löschen wollen? Dies kann nicht Rückgängig gemacht werden."
},
"form": {
"name": {
"title": "Name",
"placeholder": "Auslöser Name eingeben",
"error": {
"minLength": "Der Name muss mindestens 2 Zeichen lang sein.",
"invalidCharacters": "Der Name darf nur Buchstaben, Zahlen, Unterstriche und Bindestriche enthalten.",
"alreadyExists": "Ein Auslöser mit diesem Namen existiert bereits für diese Kamera."
}
},
"enabled": {
"description": "Diesen Auslöser aktivieren oder deaktivieren"
},
"type": {
"title": "Typ",
"placeholder": "Auslöser Typ wählen"
},
"content": {
"title": "Inhalt",
"imagePlaceholder": "Ein Bild auswählen",
"textPlaceholder": "Inhaltstext eingeben",
"imageDesc": "Ein Bild auswählen, um diese Aktion auszulösen, wenn ein ähnliches Bild erkannt wird.",
"textDesc": "Einen Text eingeben, um diese Aktion auszulösen, wenn eine ähnliche Beschreibung eines verfolgten Objekts erkannt wird.",
"error": {
"required": "Inhalt ist erforderlich."
}
},
"threshold": {
"title": "Schwellenwert",
"error": {
"min": "Schwellenwert muss mindestens 0 sein",
"max": "Schwellenwert darf höchstens 1 sein"
}
},
"actions": {
"title": "Aktionen",
"desc": "Standardmäßig sendet Frigate eine MQTT-Nachricht für alle Trigger. Wähle eine zusätzliche Aktion aus, die ausgeführt werden soll, wenn dieser Trigger ausgelöst wird.",
"error": {
"min": "Mindesten eine Aktion muss ausgewählt sein."
}
}
}
},
"toast": {
"success": {
"createTrigger": "Auslöser {{name}} erfolgreich erstellt.",
"updateTrigger": "Auslöser {{name}} erfolgreich aktualisiert.",
"deleteTrigger": "Auslöser {{name}} erfolgreich gelöscht."
},
"error": {
"createTriggerFailed": "Auslöser konnte nicht erstellt werden: {{errorMessage}}",
"updateTriggerFailed": "Auslöser könnte nicht aktualisiert werden: {{errorMessage}}",
"deleteTriggerFailed": "Auslöser konnte nicht gelöscht werden: {{errorMessage}}"
}
}
} }
} }

View File

@ -3,6 +3,7 @@
"untilForTime": "Ως{{time}}", "untilForTime": "Ως{{time}}",
"untilForRestart": "Μέχρι να γίνει επανεκίννηση του Frigate.", "untilForRestart": "Μέχρι να γίνει επανεκίννηση του Frigate.",
"untilRestart": "Μέχρι να γίνει επανεκκίνηση", "untilRestart": "Μέχρι να γίνει επανεκκίνηση",
"justNow": "Μόλις τώρα" "justNow": "Μόλις τώρα",
"ago": "{{timeAgo}} Πριν"
} }
} }

View File

@ -1,6 +1,10 @@
{ {
"group": { "group": {
"add": "Προσθήκη ομάδας καμερών", "add": "Προσθήκη ομάδας καμερών",
"label": "Ομάδες καμερών" "label": "Ομάδες καμερών",
"edit": "Επεξεργασία ομάδας καμερών",
"delete": {
"label": "Διαγραφή ομάδας κάμερας"
}
} }
} }

View File

@ -1,3 +1,8 @@
{ {
"documentTitle": "Εξερευνήστε - Frigate" "documentTitle": "Εξερευνήστε - Frigate",
"generativeAI": "Παραγωγική τεχνητή νοημοσύνη",
"exploreMore": "Εξερευνήστε περισσότερα αντικείμενα {{label}}",
"exploreIsUnavailable": {
"title": "Η εξερεύνηση δεν είναι διαθέσιμη"
}
} }

View File

@ -1,5 +1,6 @@
{ {
"documentTitle": "Εξαγωγή - Frigate", "documentTitle": "Εξαγωγή - Frigate",
"search": "Αναζήτηση", "search": "Αναζήτηση",
"deleteExport": "Διαγραφή εξαγωγής" "deleteExport": "Διαγραφή εξαγωγής",
"noExports": "Δεν βρέθηκαν εξαγωγές"
} }

View File

@ -2,5 +2,6 @@
"documentTitle": "Ζωντανά - Frigate", "documentTitle": "Ζωντανά - Frigate",
"twoWayTalk": { "twoWayTalk": {
"enable": "Ενεργοποίηση αμφίδρομης επικοινωνίας" "enable": "Ενεργοποίηση αμφίδρομης επικοινωνίας"
} },
"documentTitle.withCamera": "{{camera}} - Live - Frigate"
} }

View File

@ -3,5 +3,6 @@
"savedSearches": "Αποθηκευμένες Αναζητήσεις", "savedSearches": "Αποθηκευμένες Αναζητήσεις",
"button": { "button": {
"clear": "Εκαθάρηση αναζήτησης" "clear": "Εκαθάρηση αναζήτησης"
} },
"searchFor": "Αναζήτηση {{inputValue}}"
} }

View File

@ -1,5 +1,7 @@
{ {
"documentTitle": { "documentTitle": {
"cameras": "Στατιστικά Καμερών - Frigate" "cameras": "Στατιστικά Καμερών - Frigate",
"storage": "Στατιστικά αποθήκευσης - Frigate",
"general": "Γενικά στατιστικά στοιχεία - Frigate"
} }
} }

View File

@ -262,5 +262,6 @@
"title": "404", "title": "404",
"desc": "Page not found" "desc": "Page not found"
}, },
"selectItem": "Select {{item}}" "selectItem": "Select {{item}}",
"readTheDocumentation": "Read the documentation"
} }

View File

@ -36,8 +36,7 @@
"audioIsUnavailable": "Audio is unavailable for this stream", "audioIsUnavailable": "Audio is unavailable for this stream",
"audio": { "audio": {
"tips": { "tips": {
"title": "Audio must be output from your camera and configured in go2rtc for this stream.", "title": "Audio must be output from your camera and configured in go2rtc for this stream."
"document": "Read the documentation "
} }
}, },
"stream": "Stream", "stream": "Stream",

View File

@ -69,8 +69,7 @@
"restreaming": { "restreaming": {
"disabled": "Restreaming is not enabled for this camera.", "disabled": "Restreaming is not enabled for this camera.",
"desc": { "desc": {
"title": "Set up go2rtc for additional live view options and audio for this camera.", "title": "Set up go2rtc for additional live view options and audio for this camera."
"readTheDocumentation": "Read the documentation"
} }
}, },
"showStats": { "showStats": {

View File

@ -34,5 +34,7 @@
"selected_one": "{{count}} selected", "selected_one": "{{count}} selected",
"selected_other": "{{count}} selected", "selected_other": "{{count}} selected",
"camera": "Camera", "camera": "Camera",
"detected": "detected" "detected": "detected",
"suspiciousActivity": "Suspicious Activity",
"threateningActivity": "Threatening Activity"
} }

View File

@ -24,8 +24,7 @@
"textTokenizer": "Text tokenizer" "textTokenizer": "Text tokenizer"
}, },
"tips": { "tips": {
"context": "You may want to reindex the embeddings of your tracked objects once the models are downloaded.", "context": "You may want to reindex the embeddings of your tracked objects once the models are downloaded."
"documentation": "Read the documentation"
}, },
"error": "An error has occurred. Check Frigate logs." "error": "An error has occurred. Check Frigate logs."
} }
@ -73,7 +72,6 @@
"offset": { "offset": {
"label": "Annotation Offset", "label": "Annotation Offset",
"desc": "This data comes from your camera's detect feed but is overlayed on images from the the record feed. It is unlikely that the two streams are perfectly in sync. As a result, the bounding box and the footage will not line up perfectly. However, the <code>annotation_offset</code> field can be used to adjust this.", "desc": "This data comes from your camera's detect feed but is overlayed on images from the the record feed. It is unlikely that the two streams are perfectly in sync. As a result, the bounding box and the footage will not line up perfectly. However, the <code>annotation_offset</code> field can be used to adjust this.",
"documentation": "Read the documentation ",
"millisecondsToOffset": "Milliseconds to offset detect annotations by. <em>Default: 0</em>", "millisecondsToOffset": "Milliseconds to offset detect annotations by. <em>Default: 0</em>",
"tips": "TIP: Imagine there is an event clip with a person walking from left to right. If the event timeline bounding box is consistently to the left of the person then the value should be decreased. Similarly, if a person is walking from left to right and the bounding box is consistently ahead of the person then the value should be increased.", "tips": "TIP: Imagine there is an event clip with a person walking from left to right. If the event timeline bounding box is consistently to the left of the person then the value should be decreased. Similarly, if a person is walking from left to right and the bounding box is consistently ahead of the person then the value should be increased.",
"toast": { "toast": {
@ -132,6 +130,9 @@
"label": "Top Score", "label": "Top Score",
"info": "The top score is the highest median score for the tracked object, so this may differ from the score shown on the search result thumbnail." "info": "The top score is the highest median score for the tracked object, so this may differ from the score shown on the search result thumbnail."
}, },
"score": {
"label": "Score"
},
"recognizedLicensePlate": "Recognized License Plate", "recognizedLicensePlate": "Recognized License Plate",
"estimatedSpeed": "Estimated Speed", "estimatedSpeed": "Estimated Speed",
"objects": "Objects", "objects": "Objects",
@ -213,5 +214,11 @@
"error": "Failed to delete tracked object: {{errorMessage}}" "error": "Failed to delete tracked object: {{errorMessage}}"
} }
} }
},
"aiAnalysis": {
"title": "AI Analysis"
},
"concerns": {
"label": "Concerns"
} }
} }

View File

@ -71,7 +71,6 @@
}, },
"nofaces": "No faces available", "nofaces": "No faces available",
"pixels": "{{area}}px", "pixels": "{{area}}px",
"readTheDocs": "Read the documentation",
"trainFaceAs": "Train Face as:", "trainFaceAs": "Train Face as:",
"trainFace": "Train Face", "trainFace": "Train Face",
"toast": { "toast": {

Some files were not shown because too many files have changed in this diff Show More