Compare commits


14 Commits

Author SHA1 Message Date
ivanshi1108
8cd8b21542
Merge acb17a7b50 into 0a8f499640 2026-01-18 15:52:18 +00:00
Josh Hawkins
0a8f499640
Miscellaneous fixes (0.17 beta) (#21683)
* misc triggers tweaks
  - i18n fixes
  - fix toaster color
  - fix clicking on labels selecting incorrect checkbox

* update copilot instructions

* lpr docs tweaks

* add retry params to gemini

* i18n fix

* ensure users only see recognized plates from accessible cameras in explore

* ensure all zone filters are converted to pixels

zone-level filters were never converted from percentage area to pixels. RuntimeFilterConfig was only applied to filters at the camera level, not zone.filters.

Fixes https://github.com/blakeblackshear/frigate/discussions/21694

* add test for percentage based zone filters

* use export id for key instead of name

* update gemini docs
2026-01-18 06:36:27 -07:00
shizhicheng
acb17a7b50 Format code based on the results of Python Checks
2025-12-01 04:47:39 +00:00
ivanshi1108
7933a83a42
Update docs/docs/configuration/object_detectors.md
Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
2025-11-24 23:04:19 +08:00
shizhicheng
2eef58aa1d Modify the description of AXERA in the documentation. 2025-11-24 07:04:42 +00:00
ivanshi1108
6659b7cb0f
Merge branch 'dev' into AXERA-axcl 2025-11-24 10:55:09 +08:00
shizhicheng
f134796913 format code with ruff 2025-11-24 02:42:04 +00:00
shizhicheng
b4abbd7d3b Modify the document based on review suggestions 2025-11-24 02:20:40 +00:00
shizhicheng
438df7d484 The model inference time has been changed to the time displayed on the Frigate UI 2025-11-16 22:22:38 +08:00
shizhicheng
e27a94ae0b Fix logical errors caused by code formatting 2025-11-11 05:54:19 +00:00
shizhicheng
1dee548dbc Modifications to the YOLOv9 object detection model:
The model is now dynamically downloaded to the cache directory.
Post-processing is now done using Frigate's built-in `post_process_yolo`.
Configuration in the relevant documentation has been updated.
2025-11-11 05:42:28 +00:00
shizhicheng
91e17e12b7 Change the default detection model to YOLOv9 2025-11-09 13:21:17 +00:00
ivanshi1108
bb45483e9e
Modify AXERA section from hardware.md
Modify AXERA section and related content from hardware documentation.
2025-10-28 09:54:00 +08:00
shizhicheng
7b4eaf2d10 Initial commit for AXERA AI accelerators 2025-10-24 09:03:13 +00:00
21 changed files with 472 additions and 30 deletions


@@ -1,2 +1,3 @@
-Never write strings in the frontend directly, always write to and reference the relevant translations file.
-Always conform new and refactored code to the existing coding style in the project.
+- For Frigate NVR, never write strings in the frontend directly. Since the project uses `react-i18next`, use `t()` and write the English string in the relevant translations file in `web/public/locales/en`.
+- Always conform new and refactored code to the existing coding style in the project.
+- Always have a way to test your work and confirm your changes. When running backend tests, use `python3 -u -m unittest`.
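In practice the new testing guidance maps to invocations like these (a sketch; the `frigate.test.test_config` module path is an assumption based on the `TestConfig` changes later in this diff):

```bash
# Run the full backend suite with unbuffered output
python3 -u -m unittest

# Or target a single module, e.g. the config tests this compare touches
python3 -u -m unittest frigate.test.test_config
```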


@@ -224,3 +224,29 @@ jobs:
           sources: |
             ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ env.SHORT_SHA }}-amd64
             ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ env.SHORT_SHA }}-rpi
+  axera_build:
+    runs-on: ubuntu-22.04
+    name: AXERA Build
+    needs:
+      - amd64_build
+      - arm64_build
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v5
+        with:
+          persist-credentials: false
+      - name: Set up QEMU and Buildx
+        id: setup
+        uses: ./.github/actions/setup
+        with:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - name: Build and push Axera build
+        uses: docker/bake-action@v6
+        with:
+          source: .
+          push: true
+          targets: axcl
+          files: docker/axcl/axcl.hcl
+          set: |
+            axcl.tags=${{ steps.setup.outputs.image-name }}-axcl
+            *.cache-from=type=gha

docker/axcl/Dockerfile (new file, 55 lines)

@@ -0,0 +1,55 @@
# syntax=docker/dockerfile:1.6

# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive

# Globally set pip break-system-packages option to avoid having to specify it every time
ARG PIP_BREAK_SYSTEM_PACKAGES=1

FROM frigate AS frigate-axcl

ARG TARGETARCH
ARG PIP_BREAK_SYSTEM_PACKAGES

# Install axpyengine
RUN wget https://github.com/AXERA-TECH/pyaxengine/releases/download/0.1.3.rc1/axengine-0.1.3-py3-none-any.whl -O /axengine-0.1.3-py3-none-any.whl
RUN pip3 install -i https://mirrors.aliyun.com/pypi/simple/ /axengine-0.1.3-py3-none-any.whl \
    && rm /axengine-0.1.3-py3-none-any.whl

# Install axcl
RUN if [ "$TARGETARCH" = "amd64" ]; then \
        echo "Installing x86_64 version of axcl"; \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/axcl_host_x86_64_V3.6.5_20250908154509_NO4973.deb -O /axcl.deb; \
    else \
        echo "Installing aarch64 version of axcl"; \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/axcl_host_aarch64_V3.6.5_20250908154509_NO4973.deb -O /axcl.deb; \
    fi

RUN mkdir /unpack_axcl && \
    dpkg-deb -x /axcl.deb /unpack_axcl && \
    cp -R /unpack_axcl/usr/bin/axcl /usr/bin/ && \
    cp -R /unpack_axcl/usr/lib/axcl /usr/lib/ && \
    rm -rf /unpack_axcl /axcl.deb

# Install axcl ffmpeg
RUN mkdir -p /usr/lib/ffmpeg/axcl
RUN if [ "$TARGETARCH" = "amd64" ]; then \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/ffmpeg-x64 -O /usr/lib/ffmpeg/axcl/ffmpeg && \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/ffprobe-x64 -O /usr/lib/ffmpeg/axcl/ffprobe; \
    else \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/ffmpeg-aarch64 -O /usr/lib/ffmpeg/axcl/ffmpeg && \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/ffprobe-aarch64 -O /usr/lib/ffmpeg/axcl/ffprobe; \
    fi
RUN chmod +x /usr/lib/ffmpeg/axcl/ffmpeg /usr/lib/ffmpeg/axcl/ffprobe

# Set ldconfig path
RUN echo "/usr/lib/axcl" > /etc/ld.so.conf.d/ax.conf

# Set env
ENV PATH="$PATH:/usr/bin/axcl"
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib/axcl"

ENTRYPOINT ["sh", "-c", "ldconfig && exec /init"]

docker/axcl/axcl.hcl (new file, 13 lines)

@@ -0,0 +1,13 @@
target "frigate" {
  dockerfile = "docker/main/Dockerfile"
  platforms = ["linux/amd64", "linux/arm64"]
  target = "frigate"
}

target "axcl" {
  dockerfile = "docker/axcl/Dockerfile"
  contexts = {
    frigate = "target:frigate",
  }
  platforms = ["linux/amd64", "linux/arm64"]
}

docker/axcl/axcl.mk (new file, 15 lines)

@@ -0,0 +1,15 @@
BOARDS += axcl

local-axcl: version
	docker buildx bake --file=docker/axcl/axcl.hcl axcl \
		--set axcl.tags=frigate:latest-axcl \
		--load

build-axcl: version
	docker buildx bake --file=docker/axcl/axcl.hcl axcl \
		--set axcl.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-axcl

push-axcl: build-axcl
	docker buildx bake --file=docker/axcl/axcl.hcl axcl \
		--set axcl.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-axcl \
		--push
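For reference, these targets are meant to be driven from the repo root; a usage sketch, assuming Frigate's main Makefile includes the per-board `*.mk` files as it does for the other boards:

```bash
# Build and load a local image tagged frigate:latest-axcl
make local-axcl

# Run it, passing through the AXCL devices described in the installation docs below
docker run --rm \
  --device /dev/axcl_host --device /dev/ax_mmb_dev --device /dev/msg_userdev \
  frigate:latest-axcl
```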


@@ -0,0 +1,83 @@
#!/bin/bash

# Update package list and install dependencies
sudo apt-get update
sudo apt-get install -y build-essential cmake git wget pciutils kmod udev

# Check if gcc-12 is needed
current_gcc_version=$(gcc --version | head -n1 | awk '{print $NF}')
gcc_major_version=$(echo $current_gcc_version | cut -d'.' -f1)

if [[ $gcc_major_version -lt 12 ]]; then
    echo "Current GCC version ($current_gcc_version) is lower than 12, installing gcc-12..."
    sudo apt-get install -y gcc-12
    sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12
    echo "GCC-12 installed and set as default"
else
    echo "Current GCC version ($current_gcc_version) is sufficient, skipping GCC installation"
fi

# Determine architecture
arch=$(uname -m)
download_url=""

if [[ $arch == "x86_64" ]]; then
    download_url="https://github.com/ivanshi1108/assets/releases/download/v0.16.2/axcl_host_x86_64_V3.6.5_20250908154509_NO4973.deb"
    deb_file="axcl_host_x86_64_V3.6.5_20250908154509_NO4973.deb"
elif [[ $arch == "aarch64" ]]; then
    download_url="https://github.com/ivanshi1108/assets/releases/download/v0.16.2/axcl_host_aarch64_V3.6.5_20250908154509_NO4973.deb"
    deb_file="axcl_host_aarch64_V3.6.5_20250908154509_NO4973.deb"
else
    echo "Unsupported architecture: $arch"
    exit 1
fi

# Download AXCL driver
echo "Downloading AXCL driver for $arch..."
wget "$download_url" -O "$deb_file"
if [ $? -ne 0 ]; then
    echo "Failed to download AXCL driver"
    exit 1
fi

# Install AXCL driver
echo "Installing AXCL driver..."
sudo dpkg -i "$deb_file"
if [ $? -ne 0 ]; then
    echo "Failed to install AXCL driver, attempting to fix dependencies..."
    sudo apt-get install -f -y
    sudo dpkg -i "$deb_file"
    if [ $? -ne 0 ]; then
        echo "AXCL driver installation failed"
        exit 1
    fi
fi

# Update environment
echo "Updating environment..."
source /etc/profile

# Verify installation
echo "Verifying AXCL installation..."
if command -v axcl-smi &> /dev/null; then
    echo "AXCL driver detected, checking AI accelerator status..."
    axcl_output=$(axcl-smi 2>&1)
    axcl_exit_code=$?
    echo "$axcl_output"
    if [ $axcl_exit_code -eq 0 ]; then
        echo "AXCL driver installation completed successfully!"
    else
        echo "AXCL driver installed but no AI accelerator detected or communication failed."
        echo "Please check if the AI accelerator is properly connected and powered on."
        exit 1
    fi
else
    echo "axcl-smi command not found. AXCL driver installation may have failed."
    exit 1
fi


@@ -66,8 +66,6 @@ Some models are labeled as **hybrid** (capable of both thinking and instruct tas
 **Recommendation:**
 Always select the `-instruct` or documented instruct/tagged variant of any model you use in your Frigate configuration. If in doubt, refer to your model providers documentation or model library for guidance on the correct model variant to use.

 ### Supported Models

 You must use a vision capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/search?c=vision). Note that Frigate will not automatically download the model you specify in your config, you must download the model to your local instance of Ollama first i.e. by running `ollama pull qwen3-vl:2b-instruct` on your Ollama server/Docker container. Note that the model specified in Frigate's config must match the downloaded model tag.

@@ -93,7 +91,7 @@ genai:
 ## Google Gemini

-Google Gemini has a free tier allowing [15 queries per minute](https://ai.google.dev/pricing) to the API, which is more than sufficient for standard Frigate usage.
+Google Gemini has a [free tier](https://ai.google.dev/pricing) for the API, however the limits may not be sufficient for standard Frigate usage. Choose a plan appropriate for your installation.

 ### Supported Models

@@ -114,7 +112,7 @@ To start using Gemini, you must first get an API key from [Google AI Studio](htt
 genai:
   provider: gemini
   api_key: "{FRIGATE_GEMINI_API_KEY}"
-  model: gemini-2.0-flash
+  model: gemini-2.5-flash
 ```
 :::note


@@ -68,8 +68,8 @@ Fine-tune the LPR feature using these optional parameters at the global level of
   - Default: `1000` pixels. Note: this is intentionally set very low as it is an _area_ measurement (length x width). For reference, 1000 pixels represents a ~32x32 pixel square in your camera image.
   - Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant plates.
 - **`device`**: Device to use to run license plate detection _and_ recognition models.
-  - Default: `CPU`
-  - This can be `CPU`, `GPU`, or the GPU's device number. For users without a model that detects license plates natively, using a GPU may increase performance of the YOLOv9 license plate detector model. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation. However, for users who run a model that detects `license_plate` natively, there is little to no performance gain reported with running LPR on GPU compared to the CPU.
+  - Default: `None`
+  - This is auto-selected by Frigate and can be `CPU`, `GPU`, or the GPU's device number. For users without a model that detects license plates natively, using a GPU may increase performance of the YOLOv9 license plate detector model. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation. However, for users who run a model that detects `license_plate` natively, there is little to no performance gain reported with running LPR on GPU compared to the CPU.
 - **`model_size`**: The size of the model used to identify regions of text on plates.
   - Default: `small`
   - This can be `small` or `large`.

@@ -432,6 +432,6 @@ If you are using a model that natively detects `license_plate`, add an _object m
 If you are not using a model that natively detects `license_plate` or you are using dedicated LPR camera mode, only a _motion mask_ over your text is required.

-### I see "Error running ... model" in my logs. How can I fix this?
+### I see "Error running ... model" in my logs, or my inference time is very high. How can I fix this?

 This usually happens when your GPU is unable to compile or use one of the LPR models. Set your `device` to `CPU` and try again. GPU acceleration only provides a slight performance increase, and the models are lightweight enough to run without issue on most CPUs.
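Acting on that FAQ advice is a one-line change in the global `lpr` section these parameters belong to (a minimal sketch; the rest of the LPR config is assumed unchanged):

```yaml
lpr:
  enabled: true
  device: CPU # force CPU if the auto-selected GPU cannot run the LPR models
```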


@@ -49,6 +49,11 @@ Frigate supports multiple different detectors that work on different types of ha
 - [Synaptics](#synaptics): synap models can run on Synaptics devices(e.g astra machina) with included NPUs.

+**AXERA** <CommunityBadge />
+
+- [AXEngine](#axera): axmodels can run on AXERA AI accelerators.
+
 **For Testing**

 - [CPU Detector (not recommended for actual use](#cpu-detector-not-recommended): Use a CPU to run tflite model, this is not recommended and in most cases OpenVINO can be used in CPU mode with better results.

@@ -1474,6 +1479,41 @@ model:
   input_pixel_format: rgb/bgr # look at the model.json to figure out which to put here
 ```

+## AXERA
+
+Hardware accelerated object detection is supported on the following SoCs:
+
+- AX650N
+- AX8850N
+
+This implementation uses the [AXera Pulsar2 Toolchain](https://huggingface.co/AXERA-TECH/Pulsar2).
+
+See the [installation docs](../frigate/installation.md#axera) for information on configuring the AXEngine hardware.
+
+### Configuration
+
+When configuring the AXEngine detector, you have to specify the model name.
+
+#### yolov9
+
+A yolov9 model is provided in the container at `/axmodels` and is used by this detector type by default.
+
+Use the model configuration shown below when using the axengine detector with the default axmodel:
+
+```yaml
+detectors:
+  axengine:
+    type: axengine
+
+model:
+  path: frigate-yolov9-tiny
+  model_type: yolo-generic
+  width: 320
+  height: 320
+  tensor_format: bgr
+  labelmap_path: /labelmap/coco-80.txt
+```
+
 # Models

 Some model types are not included in Frigate by default.


@@ -106,6 +106,10 @@ Frigate supports multiple different detectors that work on different types of ha
 - [Synaptics](#synaptics): synap models can run on Synaptics devices(e.g astra machina) with included NPUs to provide efficient object detection.

+**AXERA** <CommunityBadge />
+
+- [AXEngine](#axera): axera models can run on AXERA NPUs via AXEngine, delivering highly efficient object detection.
+
 :::

 ### Hailo-8

@@ -295,6 +299,14 @@ The inference time of a rk3588 with all 3 cores enabled is typically 25-30 ms fo
 | ssd mobilenet | ~ 25 ms |
 | yolov5m | ~ 118 ms |

+### AXERA
+
+- **AXEngine**: the default model is **yolov9**
+
+| Name        | AXERA AX650N/AX8850N Inference Time |
+| ----------- | ----------------------------------- |
+| yolov9-tiny | ~ 4 ms                              |
+
 ## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version)

 This is taken from a [user question on reddit](https://www.reddit.com/r/homeassistant/comments/q8mgau/comment/hgqbxh5/?utm_source=share&utm_medium=web2x&context=3). Modified slightly for clarity.

@@ -315,4 +327,4 @@ Basically - When you increase the resolution and/or the frame rate of the stream
 YES! The Coral does not help with decoding video streams.

 Decompressing video streams takes a significant amount of CPU power. Video compression uses key frames (also known as I-frames) to send a full frame in the video stream. The following frames only include the difference from the key frame, and the CPU has to compile each frame by merging the differences with the key frame. [More detailed explanation](https://support.video.ibm.com/hc/en-us/articles/18106203580316-Keyframes-InterFrame-Video-Compression). Higher resolutions and frame rates mean more processing power is needed to decode the video stream, so try and set them on the camera to avoid unnecessary decoding work.


@@ -384,6 +384,42 @@ or add these options to your `docker run` command:
 Next, you should configure [hardware object detection](/configuration/object_detectors#synaptics) and [hardware video processing](/configuration/hardware_acceleration_video#synaptics).

+### AXERA
+
+<details>
+<summary>AXERA accelerators</summary>
+
+AXERA accelerators are available in an M.2 form factor, compatible with both Raspberry Pi and Orange Pi. This form factor has also been successfully tested on x86 platforms, making it a versatile choice for various computing environments.
+
+#### Installation
+
+Using AXERA accelerators requires the installation of the AXCL driver. We provide a convenient Linux script to complete this installation.
+
+Follow these steps for installation:
+
+1. Copy or download [this script](https://github.com/ivanshi1108/assets/releases/download/v0.16.2/user_installation.sh).
+2. Ensure it has execution permissions with `sudo chmod +x user_installation.sh`
+3. Run the script with `./user_installation.sh`
+
+#### Setup
+
+To set up Frigate, follow the default installation instructions, for example: `ghcr.io/blakeblackshear/frigate:stable`
+
+Next, grant Docker permissions to access your hardware by adding the following lines to your `docker-compose.yml` file:
+
+```yaml
+devices:
+  - /dev/axcl_host
+  - /dev/ax_mmb_dev
+  - /dev/msg_userdev
+```
+
+If you are using `docker run`, add these options to your command: `--device /dev/axcl_host --device /dev/ax_mmb_dev --device /dev/msg_userdev`
+
+#### Configuration
+
+Finally, configure [hardware object detection](/configuration/object_detectors#axera) to complete the setup.
+
+</details>
+
 ## Docker

 Running through Docker with Docker Compose is the recommended install method.
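Putting the image and the device mappings from this hunk together, a minimal compose sketch for an AXCL host could look like this (illustrative only, not a complete Frigate compose file):

```yaml
services:
  frigate:
    image: ghcr.io/blakeblackshear/frigate:stable
    devices:
      # AXCL accelerator devices from the installation steps above
      - /dev/axcl_host
      - /dev/ax_mmb_dev
      - /dev/msg_userdev
```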


@@ -23,7 +23,12 @@ from markupsafe import escape
 from peewee import SQL, fn, operator
 from pydantic import ValidationError

-from frigate.api.auth import allow_any_authenticated, allow_public, require_role
+from frigate.api.auth import (
+    allow_any_authenticated,
+    allow_public,
+    get_allowed_cameras_for_filter,
+    require_role,
+)
 from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters
 from frigate.api.defs.request.app_body import AppConfigSetBody
 from frigate.api.defs.tags import Tags

@@ -687,13 +692,19 @@ def plusModels(request: Request, filterByCurrentModelDetector: bool = False):
 @router.get(
     "/recognized_license_plates", dependencies=[Depends(allow_any_authenticated())]
 )
-def get_recognized_license_plates(split_joined: Optional[int] = None):
+def get_recognized_license_plates(
+    split_joined: Optional[int] = None,
+    allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
+):
     try:
         query = (
             Event.select(
                 SQL("json_extract(data, '$.recognized_license_plate') AS plate")
             )
-            .where(SQL("json_extract(data, '$.recognized_license_plate') IS NOT NULL"))
+            .where(
+                (SQL("json_extract(data, '$.recognized_license_plate') IS NOT NULL"))
+                & (Event.camera << allowed_cameras)
+            )
             .distinct()
         )
         recognized_license_plates = [row[0] for row in query.tuples()]


@@ -662,6 +662,13 @@ class FrigateConfig(FrigateBaseModel):
             # generate zone contours
             if len(camera_config.zones) > 0:
                 for zone in camera_config.zones.values():
+                    if zone.filters:
+                        for object_name, filter_config in zone.filters.items():
+                            zone.filters[object_name] = RuntimeFilterConfig(
+                                frame_shape=camera_config.frame_shape,
+                                **filter_config.model_dump(exclude_unset=True),
+                            )
                     zone.generate_contour(camera_config.frame_shape)

             # Set live view stream if none is set
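The effect of this change, which the new unit test below verifies: a fractional zone-filter value is now scaled by the camera's frame area, exactly as camera-level filters already were. A sketch of the arithmetic using the test's 1920x1080 example:

```python
# A percentage-based min_area on a zone filter is converted to pixels
width, height = 1920, 1080
min_area_fraction = 0.1

min_area_pixels = int(width * height * min_area_fraction)
print(min_area_pixels)  # 207360, the value the new test asserts
```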


@@ -0,0 +1,86 @@
import logging
import os.path
import re
import urllib.request
from typing import Literal

import axengine as axe

from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
from frigate.util.model import post_process_yolo

logger = logging.getLogger(__name__)

DETECTOR_KEY = "axengine"

supported_models = {
    ModelTypeEnum.yologeneric: "frigate-yolov9-.*$",
}

model_cache_dir = os.path.join(MODEL_CACHE_DIR, "axengine_cache/")


class AxengineDetectorConfig(BaseDetectorConfig):
    type: Literal[DETECTOR_KEY]


class Axengine(DetectionApi):
    type_key = DETECTOR_KEY

    def __init__(self, config: AxengineDetectorConfig):
        logger.info("__init__ axengine")
        super().__init__(config)
        self.height = config.model.height
        self.width = config.model.width
        model_path = config.model.path or "frigate-yolov9-tiny"
        model_props = self.parse_model_input(model_path)
        self.session = axe.InferenceSession(model_props["path"])

    def __del__(self):
        pass

    def parse_model_input(self, model_path):
        model_props = {}
        model_props["preset"] = True

        # Match the configured model name against the supported model patterns
        model_matched = False
        for model_type, pattern in supported_models.items():
            if re.match(pattern, model_path):
                model_matched = True
                model_props["model_type"] = model_type

        if model_matched:
            model_props["filename"] = model_path + ".axmodel"
            model_props["path"] = model_cache_dir + model_props["filename"]
            if not os.path.isfile(model_props["path"]):
                self.download_model(model_props["filename"])
        else:
            # Strip the regex tail for display, e.g. "frigate-yolov9-.*$" -> "frigate-yolov9-"
            supported_models_str = ", ".join(
                pattern.rstrip(".*$") for pattern in supported_models.values()
            )
            raise Exception(
                f"Model {model_path} is unsupported. Provide your own model or choose one of the following: {supported_models_str}"
            )
        return model_props

    def download_model(self, filename):
        if not os.path.isdir(model_cache_dir):
            os.mkdir(model_cache_dir)
        GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")
        urllib.request.urlretrieve(
            f"{GITHUB_ENDPOINT}/ivanshi1108/assets/releases/download/v0.16.2/{filename}",
            model_cache_dir + filename,
        )

    def detect_raw(self, tensor_input):
        results = self.session.run(None, {"images": tensor_input})

        if self.detector_config.model.model_type == ModelTypeEnum.yologeneric:
            return post_process_yolo(results, self.width, self.height)
        else:
            raise ValueError(
                f'Model type "{self.detector_config.model.model_type}" is currently not supported.'
            )


@@ -24,6 +24,14 @@ class GeminiClient(GenAIClient):
         http_options_dict = {
             "api_version": "v1",
             "timeout": int(self.timeout * 1000),  # requires milliseconds
+            "retry_options": types.HttpRetryOptions(
+                attempts=3,
+                initial_delay=1.0,
+                max_delay=60.0,
+                exp_base=2.0,
+                jitter=1.0,
+                http_status_codes=[429, 500, 502, 503, 504],
+            ),
         }

         if isinstance(self.genai_config.provider_options, dict):
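Assuming `HttpRetryOptions` follows conventional exponential-backoff semantics (an assumption; the exact behavior is defined by the google-genai SDK), the parameters above produce a wait schedule roughly like this:

```python
import random

# Parameters from the hunk above
attempts, initial_delay, max_delay, exp_base, jitter = 3, 1.0, 60.0, 2.0, 1.0

for attempt in range(attempts):
    # Exponential growth capped at max_delay, plus up to `jitter` seconds of noise
    delay = min(initial_delay * exp_base**attempt, max_delay) + random.uniform(0, jitter)
    print(f"retry {attempt + 1}: wait ~{delay:.1f}s")  # ~1s, ~2s, ~4s before jitter
```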


@@ -632,6 +632,49 @@ class TestConfig(unittest.TestCase):
         )
         assert frigate_config.cameras["back"].zones["test"].color != (0, 0, 0)

+    def test_zone_filter_area_percent_converts_to_pixels(self):
+        config = {
+            "mqtt": {"host": "mqtt"},
+            "record": {
+                "alerts": {
+                    "retain": {
+                        "days": 20,
+                    }
+                }
+            },
+            "cameras": {
+                "back": {
+                    "ffmpeg": {
+                        "inputs": [
+                            {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
+                        ]
+                    },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
+                    "zones": {
+                        "notification": {
+                            "coordinates": "0.03,1,0.025,0,0.626,0,0.643,1",
+                            "objects": ["person"],
+                            "filters": {"person": {"min_area": 0.1}},
+                        }
+                    },
+                }
+            },
+        }
+
+        frigate_config = FrigateConfig(**config)
+
+        expected_min_area = int(1080 * 1920 * 0.1)
+        assert (
+            frigate_config.cameras["back"]
+            .zones["notification"]
+            .filters["person"]
+            .min_area
+            == expected_min_area
+        )
+
     def test_zone_relative_matches_explicit(self):
         config = {
             "mqtt": {"host": "mqtt"},


@@ -3,6 +3,7 @@
   "untilForTime": "Until {{time}}",
   "untilForRestart": "Until Frigate restarts.",
   "untilRestart": "Until restart",
+  "never": "Never",
   "ago": "{{timeAgo}} ago",
   "justNow": "Just now",
   "today": "Today",


@@ -268,7 +268,7 @@ export default function CreateTriggerDialog({
               <FormItem className="flex flex-row items-center justify-between">
                 <div className="space-y-0.5">
                   <FormLabel className="text-base">
-                    {t("enabled", { ns: "common" })}
+                    {t("button.enabled", { ns: "common" })}
                   </FormLabel>
                   <div className="text-sm text-muted-foreground">
                     {t("triggers.dialog.form.enabled.description")}

@@ -394,7 +394,10 @@
                   </FormLabel>
                   <div className="space-y-2">
                     {availableActions.map((action) => (
-                      <div key={action} className="flex items-center space-x-2">
+                      <label
+                        key={action}
+                        className="flex cursor-pointer items-center space-x-2"
+                      >
                         <FormControl>
                           <Checkbox
                             checked={form

@@ -416,10 +419,10 @@
                             }}
                           />
                         </FormControl>
-                        <FormLabel className="text-sm font-normal">
+                        <span className="text-sm font-normal">
                           {t(`triggers.actions.${action}`)}
-                        </FormLabel>
-                      </div>
+                        </span>
+                      </label>
                     ))}
                   </div>
                   <FormDescription>


@@ -142,7 +142,10 @@ export default function Step3ThresholdAndActions({
           <FormLabel>{t("triggers.dialog.form.actions.title")}</FormLabel>
           <div className="space-y-2">
             {availableActions.map((action) => (
-              <div key={action} className="flex items-center space-x-2">
+              <label
+                key={action}
+                className="flex cursor-pointer items-center space-x-2"
+              >
                 <FormControl>
                   <Checkbox
                     checked={form

@@ -164,10 +167,10 @@
                     }}
                   />
                 </FormControl>
-                <FormLabel className="text-sm font-normal">
+                <span className="text-sm font-normal">
                   {t(`triggers.actions.${action}`)}
-                </FormLabel>
-              </div>
+                </span>
+              </label>
             ))}
           </div>
           <FormDescription>

@@ -197,9 +200,7 @@
           {isLoading && <ActivityIndicator className="mr-2 size-5" />}
           {isLoading
             ? t("button.saving", { ns: "common" })
-            : t("triggers.dialog.form.save", {
-                defaultValue: "Save Trigger",
-              })}
+            : t("button.save", { ns: "common" })}
         </Button>
       </div>
     </form>


@@ -206,7 +206,7 @@ function Exports() {
       >
         {Object.values(exports).map((item) => (
           <ExportCard
-            key={item.name}
+            key={item.id}
             className={
               search == "" || filteredExports.includes(item) ? "" : "hidden"
             }


@@ -1,6 +1,7 @@
 import { useCallback, useEffect, useMemo, useState } from "react";
 import { Trans, useTranslation } from "react-i18next";
-import { Toaster, toast } from "sonner";
+import { toast } from "sonner";
+import { Toaster } from "@/components/ui/sonner";
 import useSWR from "swr";
 import axios from "axios";
 import { Button } from "@/components/ui/button";

@@ -598,7 +599,7 @@
                       date_style: "medium",
                     },
                   )
-                : "Never"}
+                : t("never", { ns: "common" })}
             </span>
             {trigger_status?.triggers[trigger.name]
               ?.triggering_event_id && (

@@ -663,7 +664,9 @@
           <TableHeader className="sticky top-0 bg-muted/50">
             <TableRow>
               <TableHead className="w-4"></TableHead>
-              <TableHead>{t("name", { ns: "common" })}</TableHead>
+              <TableHead>
+                {t("name", { ns: "triggers.table.name" })}
+              </TableHead>
               <TableHead>{t("triggers.table.type")}</TableHead>
               <TableHead>
                 {t("triggers.table.lastTriggered")}

@@ -759,7 +762,7 @@
                       date_style: "medium",
                     },
                   )
-                : "Never"}
+                : t("time.never", { ns: "common" })}
             </span>
             {trigger_status?.triggers[trigger.name]
               ?.triggering_event_id && (
?.triggering_event_id && ( ?.triggering_event_id && (