Compare commits

...

17 Commits

Author SHA1 Message Date
mathieu-d
c4c3540a3b
Merge 0f3dd097ec into b6fd86a066 2026-05-03 14:45:52 +02:00
Pedro Diogo
b6fd86a066
feat(genai): add api_key auth support for ollama cloud (#23096)
- Add _auth_headers() helper to pass Bearer token when api_key is set
- Wire headers into all Ollama client instantiations (sync + async)
- Update docs with Ollama Cloud direct connection example and yaml config
2026-05-02 17:55:25 -06:00
Josh Hawkins
147cd5cc2b
Miscellaneous fixes (#23092)
* lpr fixes

- remove duplicate code
- fix min_area check for non frigate+ code path
- move log outside of non frigate+ code path

* only show chat link when a genai provider is configured with the chat role

* respect ui.timezone when generating fallback export names

* reapply radix pointer events fix to call sites that use navigate()

* formatting

* fall back to prior preview frame for short export thumbnails

* fix typing

* fix e2e test for chat navigation

* batch annotation offset to seek atomically and throttle slider drag

* add debug replay loading toast for explore actions

* Improve handling of webpush missing shortSummary

---------

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
2026-05-02 16:35:42 -06:00
Blake Blackshear
6a2b914b10 Merge remote-tracking branch 'origin/master' into dev
2026-05-02 10:08:36 -05:00
Josh Hawkins
2cfb530dbf
fix yolonas colab notebook (#22936)
2026-04-21 11:08:10 -06:00
Josh Hawkins
81b0d94793
fix broken docs links with hash fragments that resolve wrong on reload (#22925) 2026-04-18 16:50:28 -06:00
matieu-d
0f3dd097ec Prepare for pull request. Remove specific configurations 2026-04-17 22:25:46 +02:00
matieu-d
2a4d7e4766 Prepare for pull request. Remove specific configurations 2026-04-14 23:14:31 +02:00
matieu-d
46415ffeb5 Add Hailo-10H detector configuration to global.json 2026-04-14 22:54:58 +02:00
matieu-d
e35ab0b8a1 Add support of temperature reading for hailo 10H 2026-04-14 22:54:58 +02:00
matieu-d
837373547d H10 support patch 2026-04-14 22:54:58 +02:00
Mark
67837f61d0
Update restream.md docs and clarify output config (#22860)
* Update restream.md

Clarified that exec output must be put in curly braces ONLY in the case of RTSP, not pipe, as per the go2rtc docs. Added an additional example use case for the exec function (rpi5b cam set-up).

* Cleanup

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>

---------

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
2026-04-14 14:00:18 -05:00
Josh Hawkins
58c93c2e9e
clarify emergency cleanup (#22864) 2026-04-13 07:07:09 -06:00
Abinila Siva
6b71feffab
Memryx docs update (#22746)
* docs: update MemryX documentation section

* docs: update MemryX documentation section
2026-04-03 11:32:32 -06:00
Abinila Siva
1c26bc289e
docs: update MemryX docs (#22712)
2026-03-31 12:22:23 -05:00
Josh Hawkins
0371b60c71
limit access to admin-only websocket topics for viewer users (#22710) 2026-03-31 08:51:55 -05:00
Nicolas Mowen
01392e03ac
Update docs for DEIMv2 support (#22598) 2026-03-23 16:16:54 -06:00
34 changed files with 1356 additions and 208 deletions

View File

@@ -21,6 +21,13 @@ local: version
		--tag frigate:latest \
		--load

+localh10: version
+	docker buildx build --target=frigate --file docker/main/Dockerfile . \
+		--build-arg HAILORT_VERSION=5.1.1 \
+		--build-arg HAILORT_GIT_REPO=mathieu-d/hailort \
+		--tag frigate:latest \
+		--load
+
debug: version
	docker buildx build --target=frigate --file docker/main/Dockerfile . \
		--build-arg DEBUG=true \

View File

@@ -12,6 +12,11 @@ services:
    build:
      context: .
      dockerfile: docker/main/Dockerfile
+      # Use args to specify hailort version and location
+      # args:
+      #   HAILORT_VERSION: "5.1.1"
+      #   HAILORT_GIT_REPO: "mathieu-d/hailort"
      # Use target devcontainer-trt for TensorRT dev
      target: devcontainer
      cache_from:
@@ -29,6 +34,7 @@ services:
    # devices:
    #   - /dev/bus/usb:/dev/bus/usb # Uncomment for Google Coral USB
    #   - /dev/dri:/dev/dri # for intel hwaccel, needs to be updated for your hardware
+
    volumes:
      - .:/workspace/frigate:cached
      - ./web/dist:/opt/frigate/web:cached

View File

@@ -0,0 +1,7 @@
#!/bin/bash
# Update package list and install hailo driver version 5.1.1 for Hailo-10H
sudo apt update
sudo apt install -y hailo-h10-all=5.1.1

View File

@@ -157,6 +157,8 @@ FROM base AS wheels

ARG DEBIAN_FRONTEND
ARG TARGETARCH
ARG DEBUG=false
+ARG HAILORT_VERSION=4.21.0
+ARG HAILORT_GIT_REPO=frigate-nvr/hailort

# Use a separate container to build wheels to prevent build dependencies in final image
RUN apt-get -qq update \

View File

@@ -2,13 +2,11 @@

set -euxo pipefail

-hailo_version="4.21.0"
-
if [[ "${TARGETARCH}" == "amd64" ]]; then
    arch="x86_64"
elif [[ "${TARGETARCH}" == "arm64" ]]; then
    arch="aarch64"
fi

-wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-debian12-${TARGETARCH}.tar.gz" | tar -C / -xzf -
-wget -P /wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp311-cp311-linux_${arch}.whl"
+wget -qO- "https://github.com/${HAILORT_GIT_REPO}/releases/download/v${HAILORT_VERSION}/hailort-debian12-${TARGETARCH}.tar.gz" | tar -C / -xzf -
+wget -P /wheels/ "https://github.com/${HAILORT_GIT_REPO}/releases/download/v${HAILORT_VERSION}/hailort-${HAILORT_VERSION}-cp311-cp311-linux_${arch}.whl"

View File

@@ -19,7 +19,7 @@ Face recognition requires a one-time internet connection to download detection a

### Face Detection

-When running a Frigate+ model (or any custom model that natively detects faces), you should ensure that `face` is added to the [list of objects to track](../plus/#available-label-types) either globally or for a specific camera. This will allow face detection to run at the same time as object detection and be more efficient.
+When running a Frigate+ model (or any custom model that natively detects faces), you should ensure that `face` is added to the [list of objects to track](../plus/index.md#available-label-types) either globally or for a specific camera. This will allow face detection to run at the same time as object detection and be more efficient.

When running a default COCO model or another model that does not include `face` as a detectable label, face detection will run via CV2 using a lightweight DNN model that runs on the CPU. In this case, you should _not_ define `face` in your list of objects to track.

View File

@@ -201,7 +201,7 @@ Cloud Generative AI providers require an active internet connection to send imag

### Ollama Cloud

-Ollama also supports [cloud models](https://ollama.com/cloud), where your local Ollama instance handles requests from Frigate, but model inference is performed in the cloud. Set up Ollama locally, sign in with your Ollama account, and specify the cloud model name in your Frigate config. For more details, see the Ollama cloud model [docs](https://docs.ollama.com/cloud).
+Ollama also supports [cloud models](https://ollama.com/cloud), where model inference is performed in the cloud. You can connect directly to Ollama Cloud by setting `base_url` to `https://ollama.com` and providing an API key. Alternatively, you can run Ollama locally and use a cloud model name so your local instance forwards requests to the cloud. For more details, see the Ollama cloud model [docs](https://docs.ollama.com/cloud).

#### Configuration

@@ -210,7 +210,8 @@ Ollama also supports [cloud models](https://ollama.com/cloud), where your local

1. Navigate to <NavPath path="Settings > Enrichments > Generative AI" />.
   - Set **Provider** to `ollama`
-   - Set **Base URL** to your local Ollama address (e.g., `http://localhost:11434`)
+   - Set **Base URL** to your local Ollama address (e.g., `http://localhost:11434`) or `https://ollama.com` for direct cloud inference
+   - Set **API key** if required by your endpoint (e.g., when using `https://ollama.com`)
   - Set **Model** to the cloud model name

</TabItem>

@@ -223,6 +224,16 @@ genai:
  model: cloud-model-name
```

+or when using Ollama Cloud directly:
+
+```yaml
+genai:
+  provider: ollama
+  base_url: https://ollama.com
+  model: cloud-model-name
+  api_key: your-api-key
+```

</TabItem>
</ConfigTabs>

View File

@@ -494,7 +494,7 @@ detectors:

| [YOLO-NAS](#yolo-nas) | ✅ | ✅ | |
| [MobileNet v2](#ssdlite-mobilenet-v2) | ✅ | ✅ | Fast and lightweight model, less accurate than larger models |
| [YOLOX](#yolox) | ✅ | ? | |
-| [D-FINE](#d-fine) | ❌ | ❌ | |
+| [D-FINE / DEIMv2](#d-fine--deimv2) | ❌ | ❌ | |
#### SSDLite MobileNet v2

@@ -710,13 +710,13 @@ model:

</details>

-#### D-FINE
+#### D-FINE / DEIMv2

-[D-FINE](https://github.com/Peterande/D-FINE) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate.
+[D-FINE](https://github.com/Peterande/D-FINE) and [DEIMv2](https://github.com/Intellindust-AI-Lab/DEIMv2) are DETR based models that share the same ONNX input/output format. The ONNX exported models are supported, but not included by default. See the models section for downloading [D-FINE](#downloading-d-fine-model) or [DEIMv2](#downloading-deimv2-model) for use in Frigate.

:::warning

-Currently, D-FINE models only run on OpenVINO in CPU mode; GPUs fail to compile the model.
+Currently, D-FINE / DEIMv2 models only run on OpenVINO in CPU mode; GPUs fail to compile the model.

:::
@@ -766,6 +766,31 @@ Note that the labelmap uses a subset of the complete COCO label set that has onl

</details>

+<details>
+<summary>DEIMv2 Setup & Config</summary>
+
+After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration:
+
+```yaml
+detectors:
+  ov:
+    type: openvino
+    device: CPU
+
+model:
+  model_type: dfine
+  width: 640
+  height: 640
+  input_tensor: nchw
+  input_dtype: float
+  path: /config/model_cache/deimv2_hgnetv2_n.onnx
+  labelmap_path: /labelmap/coco-80.txt
+```
+
+Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
+
+</details>
## Apple Silicon detector

The NPU in Apple Silicon can't be accessed from within a container, so the [Apple Silicon detector client](https://github.com/frigate-nvr/apple-silicon-detector) must first be set up. It is recommended to use the Frigate docker image with the `-standard-arm64` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-standard-arm64`.

@@ -947,7 +972,7 @@ The AMD GPU kernel is known problematic especially when converting models to mxr

See [ONNX supported models](#supported-models) for supported models; there are some caveats:

-- D-FINE models are not supported
+- D-FINE / DEIMv2 models are not supported
- YOLO-NAS models are known to not run well on integrated GPUs
## ONNX

@@ -1003,7 +1028,7 @@ detectors:

| [RF-DETR](#rf-detr) | ✅ | ❌ | Supports CUDA Graphs for optimal Nvidia performance |
| [YOLO-NAS](#yolo-nas-1) | ⚠️ | ⚠️ | Not supported by CUDA Graphs |
| [YOLOX](#yolox-1) | ✅ | ✅ | Supports CUDA Graphs for optimal Nvidia performance |
-| [D-FINE](#d-fine) | ⚠️ | ❌ | Not supported by CUDA Graphs |
+| [D-FINE / DEIMv2](#d-fine--deimv2-1) | ⚠️ | ❌ | Not supported by CUDA Graphs |

There is no default model provided; the following formats are supported:
@@ -1215,9 +1240,9 @@ model:

</details>

-#### D-FINE
+#### D-FINE / DEIMv2

-[D-FINE](https://github.com/Peterande/D-FINE) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate.
+[D-FINE](https://github.com/Peterande/D-FINE) and [DEIMv2](https://github.com/Intellindust-AI-Lab/DEIMv2) are DETR based models that share the same ONNX input/output format. The ONNX exported models are supported, but not included by default. See the models section for downloading [D-FINE](#downloading-d-fine-model) or [DEIMv2](#downloading-deimv2-model) for use in Frigate.

<details>
<summary>D-FINE Setup & Config</summary>

@@ -1262,6 +1287,28 @@ model:

</details>
+<details>
+<summary>DEIMv2 Setup & Config</summary>
+
+After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration:
+
+```yaml
+detectors:
+  onnx:
+    type: onnx
+
+model:
+  model_type: dfine
+  width: 640
+  height: 640
+  input_tensor: nchw
+  input_dtype: float
+  path: /config/model_cache/deimv2_hgnetv2_n.onnx
+  labelmap_path: /labelmap/coco-80.txt
+```
+
+</details>

Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.

## CPU Detector (not recommended)
@@ -1405,7 +1452,7 @@ MemryX `.dfp` models are automatically downloaded at runtime, if enabled, to the

#### YOLO-NAS

-The [YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) model included in this detector is downloaded from the [Models Section](#downloading-yolo-nas-model) and compiled to DFP with [mx_nc](https://developer.memryx.com/tools/neural_compiler.html#usage).
+The [YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) model included in this detector is downloaded from the [Models Section](#downloading-yolo-nas-model) and compiled to DFP with [mx_nc](https://developer.memryx.com/2p1/tools/neural_compiler.html#usage).

**Note:** The default model for the MemryX detector is YOLO-NAS 320x320.

@@ -1459,7 +1506,7 @@ model:

#### YOLOv9

-The YOLOv9s model included in this detector is downloaded from [the original GitHub](https://github.com/WongKinYiu/yolov9) like in the [Models Section](#yolov9-1) and compiled to DFP with [mx_nc](https://developer.memryx.com/tools/neural_compiler.html#usage).
+The YOLOv9s model included in this detector is downloaded from [the original GitHub](https://github.com/WongKinYiu/yolov9) like in the [Models Section](#yolov9-1) and compiled to DFP with [mx_nc](https://developer.memryx.com/2p1/tools/neural_compiler.html#usage).

##### Configuration
@@ -1601,19 +1648,39 @@ model:

#### Using a Custom Model

-To use your own model:
+To use your own custom model, first compile it into a [.dfp](https://developer.memryx.com/2p1/specs/files.html#dataflow-program) file, which is the format used by MemryX.

-1. Package your compiled model into a `.zip` file.
-2. The `.zip` must contain the compiled `.dfp` file.
-3. Depending on the model, the compiler may also generate a cropped post-processing network. If present, it will be named with the suffix `_post.onnx`.
-4. Bind-mount the `.zip` file into the container and specify its path using `model.path` in your config.
-5. Update the `labelmap_path` to match your custom model's labels.
-
-For detailed instructions on compiling models, refer to the [MemryX Compiler](https://developer.memryx.com/tools/neural_compiler.html#usage) docs and [Tutorials](https://developer.memryx.com/tutorials/tutorials.html).
+#### Compile the Model
+
+Custom models must be compiled using **MemryX SDK 2.1**.
+
+Before compiling your model, install the MemryX Neural Compiler tools from the [Install Tools](https://developer.memryx.com/2p1/get_started/install_tools.html) page on the **host**.
+
+> **Note:** It is recommended to compile the model on the host machine, or on another separate machine, rather than inside the Frigate Docker container, since installing the compiler inside Docker may conflict with container packages. Create a Python virtual environment and install the compiler there.
+
+Once the SDK 2.1 environment is set up, follow the [MemryX Compiler](https://developer.memryx.com/2p1/tools/neural_compiler.html#usage) documentation to compile your model.
+
+Example:
+
+```bash
+mx_nc -m yolonas.onnx -c 4 --autocrop -v --dfp_fname yolonas.dfp
+```
+
+For detailed instructions on compiling models, refer to the [MemryX Compiler](https://developer.memryx.com/2p1/tools/neural_compiler.html#usage) docs and [Tutorials](https://developer.memryx.com/2p1/tutorials/tutorials.html).
+
+#### Package the Compiled Model
+
+1. Package your compiled model into a `.zip` file.
+2. The `.zip` file must contain the compiled `.dfp` file.
+3. Depending on the model, the compiler may also generate a cropped post-processing network. If present, it will be named with the suffix `_post.onnx`.
+4. Bind-mount the `.zip` file into the container and specify its path using `model.path` in your config.
+5. Update `labelmap_path` to match your custom model's labels.

```yaml
# The detector automatically selects the default model if nothing is provided in the config.
@@ -2274,6 +2341,49 @@ COPY --from=build /dfine/output/dfine_${MODEL_SIZE}_obj2coco.onnx /dfine-${MODEL
EOF
```

+### Downloading DEIMv2 Model
+
+[DEIMv2](https://github.com/Intellindust-AI-Lab/DEIMv2) can be exported as ONNX by running the command below. Pretrained weights are available on Hugging Face for two backbone families:
+
+- **HGNetv2** (smaller/faster): `atto`, `femto`, `pico`, `n`
+- **DINOv3** (larger/more accurate): `s`, `m`, `l`, `x`
+
+Set `BACKBONE` and `MODEL_SIZE` in the first line to match your desired variant. Hugging Face model names use uppercase (e.g. `HGNetv2_N`, `DINOv3_S`), while config files use lowercase (e.g. `hgnetv2_n`, `dinov3_s`).
+
+```sh
+docker build . --rm --build-arg BACKBONE=hgnetv2 --build-arg MODEL_SIZE=n --output . -f- <<'EOF'
+FROM python:3.11-slim AS build
+RUN apt-get update && apt-get install --no-install-recommends -y git libgl1 libglib2.0-0 && rm -rf /var/lib/apt/lists/*
+COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/
+WORKDIR /deimv2
+RUN git clone https://github.com/Intellindust-AI-Lab/DEIMv2.git .
+# Install CPU-only PyTorch first to avoid pulling CUDA variant
+RUN uv pip install --no-cache --system torch torchvision --index-url https://download.pytorch.org/whl/cpu
+RUN uv pip install --no-cache --system -r requirements.txt
+RUN uv pip install --no-cache --system onnx safetensors huggingface_hub
+RUN mkdir -p output
+ARG BACKBONE
+ARG MODEL_SIZE
+# Download from Hugging Face and convert safetensors to pth
+RUN python3 -c "\
+from huggingface_hub import hf_hub_download; \
+from safetensors.torch import load_file; \
+import torch; \
+backbone = '${BACKBONE}'.replace('hgnetv2','HGNetv2').replace('dinov3','DINOv3'); \
+size = '${MODEL_SIZE}'.upper(); \
+st = load_file(hf_hub_download('Intellindust/DEIMv2_' + backbone + '_' + size + '_COCO', 'model.safetensors')); \
+torch.save({'model': st}, 'output/deimv2.pth')"
+RUN sed -i "s/data = torch.rand(2/data = torch.rand(1/" tools/deployment/export_onnx.py
+# HuggingFace safetensors omits frozen constants that the model constructor initializes
+RUN sed -i "s/cfg.model.load_state_dict(state)/cfg.model.load_state_dict(state, strict=False)/" tools/deployment/export_onnx.py
+RUN python3 tools/deployment/export_onnx.py -c configs/deimv2/deimv2_${BACKBONE}_${MODEL_SIZE}_coco.yml -r output/deimv2.pth
+FROM scratch
+ARG BACKBONE
+ARG MODEL_SIZE
+COPY --from=build /deimv2/output/deimv2.onnx /deimv2_${BACKBONE}_${MODEL_SIZE}.onnx
+EOF
+```

### Downloading RF-DETR Model
RF-DETR can be exported as ONNX by running the command below. You can copy and paste the whole thing to your terminal and execute, altering `MODEL_SIZE=Nano` in the first line to `Nano`, `Small`, or `Medium`.

View File

@@ -195,7 +195,7 @@ Pre and post capture footage is included in the **recording timeline**, visible

## Will Frigate delete old recordings if my storage runs out?

-As of Frigate 0.12 if there is less than an hour left of storage, the oldest 2 hours of recordings will be deleted.
+If there is less than an hour left of storage, the oldest hour of recordings will be deleted and a message will be printed in the Frigate logs. This emergency cleanup deletes the oldest recordings first regardless of retention settings to reclaim space as quickly as possible.
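A minimal sketch of this behavior, using hypothetical names (Frigate's actual logic lives in its storage maintenance code):

```python
import os

def emergency_cleanup(segments: list[str], free_bytes: int, one_hour_bytes: int) -> None:
    """Sketch: segments is assumed sorted oldest-first; one_hour_bytes is an
    estimate of how much disk one hour of recordings occupies."""
    if free_bytes >= one_hour_bytes:
        return  # more than an hour of headroom left, nothing to do
    reclaimed = 0
    # delete oldest first, ignoring per-camera retention settings
    for segment in segments:
        if reclaimed >= one_hour_bytes:
            break
        reclaimed += os.path.getsize(segment)
        os.remove(segment)
    # the real implementation logs a warning at this point
```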
## Configuring Recording Retention

View File

@@ -236,7 +236,7 @@ Enabling arbitrary exec sources allows execution of arbitrary commands through g

## Advanced Restream Configurations

-The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
+The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-exec) source in go2rtc can be used for custom ffmpeg commands and other applications. An example is below:

:::warning

@@ -244,16 +244,11 @@ The `exec:`, `echo:`, and `expr:` sources are disabled by default for security.

:::

-:::warning
-
-The `exec:`, `echo:`, and `expr:` sources are disabled by default for security. You must set `GO2RTC_ALLOW_ARBITRARY_EXEC=true` to use them. See [Security: Restricted Stream Sources](#security-restricted-stream-sources) for more information.
-
-:::
-
-NOTE: The output will need to be passed with two curly braces `{{output}}`
+NOTE: RTSP output will need to be passed with two curly braces `{{output}}`, whereas pipe output must be passed without curly braces.

```yaml
go2rtc:
  streams:
    stream1: exec:ffmpeg -hide_banner -re -stream_loop -1 -i /media/BigBuckBunny.mp4 -c copy -rtsp_transport tcp -f rtsp {{output}}
+    stream2: exec:rpicam-vid -t 0 --libav-format h264 -o -
```

View File

@@ -9,7 +9,7 @@ Frigate is a Docker container that can be run on any Docker host including as a

:::tip

-If you already have Frigate installed as a Home Assistant App, check out the [getting started guide](../guides/getting_started#configuring-frigate) to configure Frigate.
+If you already have Frigate installed as a Home Assistant App, check out the [getting started guide](../guides/getting_started.md#configuring-frigate) to configure Frigate.

:::

@@ -286,7 +286,7 @@ The MemryX MX3 Accelerator is available in the M.2 2280 form factor (like an NVM

#### Installation

-To get started with MX3 hardware setup for your system, refer to the [Hardware Setup Guide](https://developer.memryx.com/get_started/hardware_setup.html).
+To get started with MX3 hardware setup for your system, refer to the [Hardware Setup Guide](https://developer.memryx.com/2p1/get_started/install_hardware.html).

Then follow these steps for installing the correct driver/runtime configuration:

@@ -295,6 +295,12 @@ Then follow these steps for installing the correct driver/runtime configuration:

3. Run the script with `./user_installation.sh`
4. **Restart your computer** to complete driver installation.

+:::warning
+
+For manual setup, use **MemryX SDK 2.1** only. Other SDK versions are not supported for this setup. See the [SDK 2.1 documentation](https://developer.memryx.com/2p1/index.html).
+
+:::

#### Setup

To set up Frigate, follow the default installation instructions, for example: `ghcr.io/blakeblackshear/frigate:stable`

View File

@@ -429,7 +429,10 @@ class WebPushClient(Communicator):
            else:
                title = base_title

-            message = payload["after"]["data"]["metadata"]["shortSummary"]
+            if payload["after"]["data"]["metadata"].get("shortSummary"):
+                message = payload["after"]["data"]["metadata"]["shortSummary"]
+            else:
+                message = f"Detected on {camera_name}"
        else:
            zone_names = payload["after"]["data"]["zones"]
            formatted_zone_names = []

View File

@@ -17,9 +17,90 @@ from ws4py.websocket import WebSocket as WebSocket_

from frigate.comms.base_communicator import Communicator
from frigate.config import FrigateConfig
+from frigate.const import (
+    CLEAR_ONGOING_REVIEW_SEGMENTS,
+    EXPIRE_AUDIO_ACTIVITY,
+    INSERT_MANY_RECORDINGS,
+    INSERT_PREVIEW,
+    NOTIFICATION_TEST,
+    REQUEST_REGION_GRID,
+    UPDATE_AUDIO_ACTIVITY,
+    UPDATE_AUDIO_TRANSCRIPTION_STATE,
+    UPDATE_BIRDSEYE_LAYOUT,
+    UPDATE_CAMERA_ACTIVITY,
+    UPDATE_EMBEDDINGS_REINDEX_PROGRESS,
+    UPDATE_EVENT_DESCRIPTION,
+    UPDATE_MODEL_STATE,
+    UPDATE_REVIEW_DESCRIPTION,
+    UPSERT_REVIEW_SEGMENT,
+)

logger = logging.getLogger(__name__)

+# Internal IPC topics — NEVER allowed from WebSocket, regardless of role
+_WS_BLOCKED_TOPICS = frozenset(
+    {
+        INSERT_MANY_RECORDINGS,
+        INSERT_PREVIEW,
+        REQUEST_REGION_GRID,
+        UPSERT_REVIEW_SEGMENT,
+        CLEAR_ONGOING_REVIEW_SEGMENTS,
+        UPDATE_CAMERA_ACTIVITY,
+        UPDATE_AUDIO_ACTIVITY,
+        EXPIRE_AUDIO_ACTIVITY,
+        UPDATE_EVENT_DESCRIPTION,
+        UPDATE_REVIEW_DESCRIPTION,
+        UPDATE_MODEL_STATE,
+        UPDATE_EMBEDDINGS_REINDEX_PROGRESS,
+        UPDATE_BIRDSEYE_LAYOUT,
+        UPDATE_AUDIO_TRANSCRIPTION_STATE,
+        NOTIFICATION_TEST,
+    }
+)
+
+# Read-only topics any authenticated user (including viewer) can send
+_WS_VIEWER_TOPICS = frozenset(
+    {
+        "onConnect",
+        "modelState",
+        "audioTranscriptionState",
+        "birdseyeLayout",
+        "embeddingsReindexProgress",
+    }
+)
+
+
+def _check_ws_authorization(
+    topic: str,
+    role_header: str | None,
+    separator: str,
+) -> bool:
+    """Check if a WebSocket message is authorized.
+
+    Args:
+        topic: The message topic.
+        role_header: The HTTP_REMOTE_ROLE header value, or None.
+        separator: The role separator character from proxy config.
+
+    Returns:
+        True if authorized, False if blocked.
+    """
+    # Block IPC-only topics unconditionally
+    if topic in _WS_BLOCKED_TOPICS:
+        return False
+
+    # No role header: default to viewer (fail-closed)
+    if role_header is None:
+        return topic in _WS_VIEWER_TOPICS
+
+    # Check if any role is admin
+    roles = [r.strip() for r in role_header.split(separator)]
+    if "admin" in roles:
+        return True
+
+    # Non-admin: only viewer topics allowed
+    return topic in _WS_VIEWER_TOPICS
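A quick illustration of the fail-closed semantics defined above (using the constants imported at the top of this file):

```python
# Viewer-safe topics pass even without a role header (fail-closed default),
# admin-only topics require "admin" in the separated role list, and
# internal IPC topics are rejected for everyone, including admins.
assert _check_ws_authorization("onConnect", None, ",")
assert not _check_ws_authorization("restart", None, ",")
assert _check_ws_authorization("restart", "viewer,admin", ",")
assert not _check_ws_authorization(INSERT_MANY_RECORDINGS, "admin", ",")
```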
class WebSocket(WebSocket_):  # type: ignore[misc]
    def unhandled_error(self, error: Any) -> None:

@@ -49,6 +130,7 @@ class WebSocketClient(Communicator):

        class _WebSocketHandler(WebSocket):
            receiver = self._dispatcher
+            role_separator = self.config.proxy.separator or ","

            def received_message(self, message: WebSocket.received_message) -> None:  # type: ignore[name-defined]
                try:
@@ -63,11 +145,25 @@ class WebSocketClient(Communicator):
                    )
                    return

-                logger.debug(
-                    f"Publishing mqtt message from websockets at {json_message['topic']}."
-                )
+                topic = json_message["topic"]
+
+                # Authorization check (skip when environ is None — direct internal connection)
+                role_header = (
+                    self.environ.get("HTTP_REMOTE_ROLE") if self.environ else None
+                )
+                if self.environ is not None and not _check_ws_authorization(
+                    topic, role_header, self.role_separator
+                ):
+                    logger.warning(
+                        "Blocked unauthorized WebSocket message: topic=%s, role=%s",
+                        topic,
+                        role_header,
+                    )
+                    return
+
+                logger.debug(f"Publishing mqtt message from websockets at {topic}.")
                self.receiver(
-                    json_message["topic"],
+                    topic,
                    json_message["payload"],
                )

View File

@@ -1073,10 +1073,6 @@ class LicensePlateProcessingMixin:
                    top_score = score
                    top_box = bbox

-            if score > top_score:
-                top_score = score
-                top_box = bbox
-
        # Return the top scoring bounding box if found
        if top_box is not None:
            # expand box by 5% to help with OCR
@@ -1092,9 +1088,6 @@ class LicensePlateProcessingMixin:
                ]
            ).clip(0, [input.shape[1], input.shape[0]] * 2)

-            logger.debug(
-                f"{camera}: Found license plate. Bounding box: {expanded_box.astype(int)}"
-            )
            return tuple(int(x) for x in expanded_box)  # type: ignore[return-value]
        else:
            return None  # No detection above the threshold
@@ -1360,8 +1353,8 @@ class LicensePlateProcessingMixin:
        )

        # check that license plate is valid
-        # double the value because we've doubled the size of the car
-        if license_plate_area < self.config.cameras[camera].lpr.min_area * 2:
+        # quadruple the value because we've doubled both dimensions of the car
+        if license_plate_area < self.config.cameras[camera].lpr.min_area * 4:
            logger.debug(f"{camera}: License plate is less than min_area")
            return
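The factor of 4 here is straight area scaling: doubling both dimensions of the car crop scales any area measured inside it by 2 × 2. A quick check:

```python
# plate occupies w x h pixels in the original car crop
w, h = 40, 20
area = w * h                 # 800
doubled = (2 * w) * (2 * h)  # 3200 after doubling both dimensions
assert doubled == 4 * area   # so min_area must scale by 4, not 2
```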
@@ -1465,6 +1458,7 @@ class LicensePlateProcessingMixin:
            license_plate_frame,
        )

+        logger.debug(f"{camera}: Found license plate. Bounding box: {list(plate_box)}")
        logger.debug(f"{camera}: Running plate recognition for id: {id}.")

        # run detection, returns results sorted by confidence, best first

View File

@@ -0,0 +1,415 @@
import logging
import os
import subprocess
import threading
import urllib.request
from functools import partial
from typing import Dict, List, Optional, Tuple

import cv2
import numpy as np
from pydantic import ConfigDict, Field
from typing_extensions import Literal

from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import (
    BaseDetectorConfig,
)
from frigate.object_detection.util import RequestStore, ResponseStore

logger = logging.getLogger(__name__)


# ----------------- Utility Functions ----------------- #
def preprocess_tensor(image: np.ndarray, model_w: int, model_h: int) -> np.ndarray:
    """
    Resize an image with unchanged aspect ratio using padding.
    Assumes input image shape is (H, W, 3).
    """
    if image.ndim == 4 and image.shape[0] == 1:
        image = image[0]

    h, w = image.shape[:2]
    scale = min(model_w / w, model_h / h)
    new_w, new_h = int(w * scale), int(h * scale)

    resized_image = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_CUBIC)

    padded_image = np.full((model_h, model_w, 3), 114, dtype=image.dtype)
    x_offset = (model_w - new_w) // 2
    y_offset = (model_h - new_h) // 2
    padded_image[y_offset : y_offset + new_h, x_offset : x_offset + new_w] = (
        resized_image
    )

    return padded_image
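A quick illustration of the letterboxing above, assuming a 1280x720 frame and a 640x640 model input:

```python
import numpy as np

frame = np.zeros((720, 1280, 3), dtype=np.uint8)
out = preprocess_tensor(frame, 640, 640)
assert out.shape == (640, 640, 3)
# 1280x720 scales by 0.5 to 640x360; the leftover 280 rows become
# 140-pixel gray (value 114) bands above and below the image.
assert (out[:140] == 114).all() and (out[-140:] == 114).all()
```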
# ----------------- Global Constants ----------------- #
DETECTOR_KEY = "hailo10h"
ARCH = None
H10H_DEFAULT_MODEL = "yolov6n.hef"
H10H_DEFAULT_URL = "https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/ModelZoo/Compiled/v5.2.0/hailo10h/yolov6n.hef"


def detect_hailo_arch():
    try:
        result = subprocess.run(
            ["hailortcli", "fw-control", "identify"], capture_output=True, text=True
        )
        if result.returncode != 0:
            logger.error(f"Inference error: {result.stderr}")
            return None

        for line in result.stdout.split("\n"):
            if "Device Architecture" in line:
                if "HAILO10H" in line:
                    return "hailo10h"

        logger.error("Inference error: Could not determine Hailo architecture.")
        return None
    except Exception as e:
        logger.error(f"Inference error: {e}")
        return None


# ----------------- HailoAsyncInference Class ----------------- #
class HailoAsyncInference:
    def __init__(
        self,
        hef_path: str,
        input_store: RequestStore,
        output_store: ResponseStore,
        batch_size: int = 1,
        input_type: Optional[str] = None,
        output_type: Optional[Dict[str, str]] = None,
        send_original_frame: bool = False,
    ) -> None:
        # when importing hailo it activates the driver
        # which leaves processes running even though it may not be used.
        try:
            from hailo_platform import (
                HEF,
                FormatType,
                HailoSchedulingAlgorithm,
                VDevice,
            )
        except ModuleNotFoundError:
            pass

        self.input_store = input_store
        self.output_store = output_store

        params = VDevice.create_params()
        params.scheduling_algorithm = HailoSchedulingAlgorithm.ROUND_ROBIN

        self.hef = HEF(hef_path)
        self.target = VDevice(params)
        self.infer_model = self.target.create_infer_model(hef_path)
        self.infer_model.set_batch_size(batch_size)
        if input_type is not None:
            self.infer_model.input().set_format_type(getattr(FormatType, input_type))
        if output_type is not None:
            for output_name, output_type in output_type.items():
                self.infer_model.output(output_name).set_format_type(
                    getattr(FormatType, output_type)
                )
        self.output_type = output_type
        self.send_original_frame = send_original_frame

    def callback(
        self,
        completion_info,
        bindings_list: List,
        input_batch: List,
        request_ids: List[int],
    ):
        if completion_info.exception:
            logger.error(f"Inference error: {completion_info.exception}")
        else:
            for i, bindings in enumerate(bindings_list):
                if len(bindings._output_names) == 1:
                    result = bindings.output().get_buffer()
                else:
                    result = {
                        name: np.expand_dims(bindings.output(name).get_buffer(), axis=0)
                        for name in bindings._output_names
                    }
                self.output_store.put(request_ids[i], (input_batch[i], result))

    def _create_bindings(self, configured_infer_model) -> object:
        if self.output_type is None:
            output_buffers = {
                output_info.name: np.empty(
                    self.infer_model.output(output_info.name).shape,
                    dtype=getattr(
                        np, str(output_info.format.type).split(".")[1].lower()
                    ),
                )
                for output_info in self.hef.get_output_vstream_infos()
            }
        else:
            output_buffers = {
                name: np.empty(
                    self.infer_model.output(name).shape,
                    dtype=getattr(np, self.output_type[name].lower()),
                )
                for name in self.output_type
            }
        return configured_infer_model.create_bindings(output_buffers=output_buffers)

    def get_input_shape(self) -> Tuple[int, ...]:
        return self.hef.get_input_vstream_infos()[0].shape

    def run(self) -> None:
        job = None
        with self.infer_model.configure() as configured_infer_model:
            while True:
                batch_data = self.input_store.get()
                if batch_data is None:
                    break

                request_id, frame_data = batch_data
                preprocessed_batch = [frame_data]
                request_ids = [request_id]
                input_batch = preprocessed_batch  # non-send_original_frame mode

                bindings_list = []
                for frame in preprocessed_batch:
                    bindings = self._create_bindings(configured_infer_model)
                    bindings.input().set_buffer(np.array(frame))
                    bindings_list.append(bindings)

                configured_infer_model.wait_for_async_ready(timeout_ms=10000)
                job = configured_infer_model.run_async(
                    bindings_list,
                    partial(
                        self.callback,
                        input_batch=input_batch,
                        request_ids=request_ids,
                        bindings_list=bindings_list,
                    ),
                )
            if job is not None:
                job.wait(100)


# ----------------- HailoDetector Class ----------------- #
class HailoDetector(DetectionApi):
    type_key = DETECTOR_KEY

    def __init__(self, detector_config: "HailoDetectorConfig"):
        global ARCH
        ARCH = detect_hailo_arch()
        self.cache_dir = MODEL_CACHE_DIR
        self.device_type = detector_config.device
        self.model_height = (
            detector_config.model.height
            if hasattr(detector_config.model, "height")
            else None
        )
        self.model_width = (
            detector_config.model.width
            if hasattr(detector_config.model, "width")
            else None
        )
        self.model_type = (
            detector_config.model.model_type
            if hasattr(detector_config.model, "model_type")
            else None
        )
        self.tensor_format = (
            detector_config.model.input_tensor
            if hasattr(detector_config.model, "input_tensor")
            else None
        )
        self.pixel_format = (
            detector_config.model.input_pixel_format
            if hasattr(detector_config.model, "input_pixel_format")
            else None
        )
        self.input_dtype = (
            detector_config.model.input_dtype
            if hasattr(detector_config.model, "input_dtype")
            else None
        )
        self.output_type = "FLOAT32"
        self.set_path_and_url(detector_config.model.path)
        self.working_model_path = self.check_and_prepare()
        self.batch_size = 1
        self.input_store = RequestStore()
        self.response_store = ResponseStore()

        try:
            logger.debug(f"[INIT] Loading HEF model from {self.working_model_path}")
            self.inference_engine = HailoAsyncInference(
                self.working_model_path,
                self.input_store,
                self.response_store,
                self.batch_size,
            )
            self.input_shape = self.inference_engine.get_input_shape()
            logger.debug(f"[INIT] Model input shape: {self.input_shape}")
            self.inference_thread = threading.Thread(
                target=self.inference_engine.run, daemon=True
            )
            self.inference_thread.start()
        except Exception as e:
            logger.error(f"[INIT] Failed to initialize HailoAsyncInference: {e}")
            raise

    def set_path_and_url(self, path: str = None):
        if not path:
            self.model_path = None
            self.url = None
            return
        if self.is_url(path):
            self.url = path
            self.model_path = None
        else:
            self.model_path = path
            self.url = None

    def is_url(self, url: str) -> bool:
        return (
            url.startswith("http://")
            or url.startswith("https://")
            or url.startswith("www.")
        )

    @staticmethod
    def extract_model_name(path: str = None, url: str = None) -> str:
        if path and path.endswith(".hef"):
            return os.path.basename(path)
        elif url and url.endswith(".hef"):
            return os.path.basename(url)
        else:
            return H10H_DEFAULT_MODEL

    @staticmethod
    def download_model(url: str, destination: str):
        if not url.endswith(".hef"):
            raise ValueError("Invalid model URL. Only .hef files are supported.")
        try:
            urllib.request.urlretrieve(url, destination)
            logger.debug(f"Downloaded model to {destination}")
        except Exception as e:
            raise RuntimeError(f"Failed to download model from {url}: {str(e)}")

    def check_and_prepare(self) -> str:
        if not os.path.exists(self.cache_dir):
            os.makedirs(self.cache_dir)
        model_name = self.extract_model_name(self.model_path, self.url)
        cached_model_path = os.path.join(self.cache_dir, model_name)
        if not self.model_path and not self.url:
            if os.path.exists(cached_model_path):
                logger.debug(f"Model found in cache: {cached_model_path}")
                return cached_model_path
            else:
                logger.debug(f"Downloading default model: {model_name}")
                self.download_model(H10H_DEFAULT_URL, cached_model_path)
        elif self.url:
            logger.debug(f"Downloading model from URL: {self.url}")
            self.download_model(self.url, cached_model_path)
        elif self.model_path:
            if os.path.exists(self.model_path):
                logger.debug(f"Using existing model at: {self.model_path}")
                return self.model_path
            else:
                raise FileNotFoundError(f"Model file not found at: {self.model_path}")
        return cached_model_path

    def detect_raw(self, tensor_input):
        tensor_input = self.preprocess(tensor_input)
        if isinstance(tensor_input, np.ndarray) and len(tensor_input.shape) == 3:
            tensor_input = np.expand_dims(tensor_input, axis=0)

        request_id = self.input_store.put(tensor_input)

        try:
            _, infer_results = self.response_store.get(request_id, timeout=1.0)
        except TimeoutError:
            logger.error(
                f"Timeout waiting for inference results for request {request_id}"
            )
            if not self.inference_thread.is_alive():
                raise RuntimeError(
                    "HailoRT inference thread has stopped, restart required."
                )
            return np.zeros((20, 6), dtype=np.float32)

        if isinstance(infer_results, list) and len(infer_results) == 1:
            infer_results = infer_results[0]

        threshold = 0.4
        all_detections = []
        for class_id, detection_set in enumerate(infer_results):
            if not isinstance(detection_set, np.ndarray) or detection_set.size == 0:
                continue
            for det in detection_set:
                if det.shape[0] < 5:
                    continue
                score = float(det[4])
                if score < threshold:
                    continue
                all_detections.append([class_id, score, det[0], det[1], det[2], det[3]])

        if len(all_detections) == 0:
            detections_array = np.zeros((20, 6), dtype=np.float32)
        else:
            detections_array = np.array(all_detections, dtype=np.float32)
            if detections_array.shape[0] > 20:
                detections_array = detections_array[:20, :]
            elif detections_array.shape[0] < 20:
                pad = np.zeros((20 - detections_array.shape[0], 6), dtype=np.float32)
                detections_array = np.vstack((detections_array, pad))

        return detections_array

    def preprocess(self, image):
        if isinstance(image, np.ndarray):
            processed = preprocess_tensor(
                image, self.input_shape[1], self.input_shape[0]
            )
            return np.expand_dims(processed, axis=0)
        else:
            raise ValueError("Unsupported image format for preprocessing")

    def close(self):
        """Properly shuts down the inference engine and releases the VDevice."""
        logger.debug("[CLOSE] Closing HailoDetector")
        try:
            if hasattr(self, "inference_engine"):
                if hasattr(self.inference_engine, "target"):
                    self.inference_engine.target.release()
                    logger.debug("Hailo VDevice released successfully")
        except Exception as e:
            logger.error(f"Failed to close Hailo device: {e}")
            raise

    def __del__(self):
        """Destructor to ensure cleanup when the object is deleted."""
        self.close()


# ----------------- HailoDetectorConfig Class ----------------- #
class HailoDetectorConfig(BaseDetectorConfig):
    """Hailo10H detector using HEF models and the HailoRT SDK for inference on Hailo hardware."""

    model_config = ConfigDict(
        title="Hailo-10H",
    )
    type: Literal[DETECTOR_KEY]
    device: str = Field(
        default="PCIe",
        title="Device Type",
        description="The device to use for Hailo inference (e.g. 'PCIe', 'M.2').",
    )

View File

@@ -31,6 +31,12 @@ class OllamaClient(GenAIClient):
    provider: ApiClient | None
    provider_options: dict[str, Any]

+    def _auth_headers(self) -> dict | None:
+        if self.genai_config.api_key:
+            return {"Authorization": "Bearer " + self.genai_config.api_key}
+        return None
+
    def _init_provider(self) -> ApiClient | None:
        """Initialize the client."""
        self.provider_options = {
@@ -39,7 +45,11 @@ class OllamaClient(GenAIClient):
        }

        try:
-            client = ApiClient(host=self.genai_config.base_url, timeout=self.timeout)
+            client = ApiClient(
+                host=self.genai_config.base_url,
+                timeout=self.timeout,
+                headers=self._auth_headers(),
+            )
            # ensure the model is available locally
            response = client.show(self.genai_config.model)
            if response.get("error"):
@@ -166,7 +176,9 @@ class OllamaClient(GenAIClient):
            return []
        try:
            client = ApiClient(
-                host=self.genai_config.base_url, timeout=self.timeout
+                host=self.genai_config.base_url,
+                timeout=self.timeout,
+                headers=self._auth_headers(),
            )
        except Exception:
            return []
@@ -344,6 +356,7 @@ class OllamaClient(GenAIClient):
            async_client = OllamaAsyncClient(
                host=self.genai_config.base_url,
                timeout=self.timeout,
+                headers=self._auth_headers(),
            )
            response = await async_client.chat(**request_params)
            result = self._message_from_response(response)
@@ -359,6 +372,7 @@ class OllamaClient(GenAIClient):
            async_client = OllamaAsyncClient(
                host=self.genai_config.base_url,
                timeout=self.timeout,
+                headers=self._auth_headers(),
            )
            content_parts: list[str] = []
            final_message: dict[str, Any] | None = None
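A minimal sketch of the resulting behavior, assuming (as the diff above shows) that the ollama `Client` accepts a `headers` mapping and forwards it to the underlying HTTP client:

```python
from ollama import Client

# hypothetical values for illustration
base_url = "https://ollama.com"
api_key = "your-api-key"

# mirror of _auth_headers(): only attach the header when a key is set
headers = {"Authorization": f"Bearer {api_key}"} if api_key else None
client = Client(host=base_url, timeout=60, headers=headers)

# every request (show, chat, etc.) now carries the Bearer token,
# which is what a direct Ollama Cloud connection requires
print(client.show("cloud-model-name"))
```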

View File

@@ -13,6 +13,7 @@ from enum import Enum
from pathlib import Path
from typing import Callable, Optional

+import pytz  # type: ignore[import-untyped]
from peewee import DoesNotExist

from frigate.config import FfmpegConfig, FrigateConfig
@@ -344,7 +345,19 @@ class RecordingExporter(threading.Thread):
        return proc.returncode, "".join(captured)

    def get_datetime_from_timestamp(self, timestamp: int) -> str:
-        # return in iso format
+        # return in iso format using the configured ui.timezone when set,
+        # so the auto-generated export name reflects local time rather
+        # than the container's UTC clock
+        tz_name = self.config.ui.timezone
+        if tz_name:
+            try:
+                tz = pytz.timezone(tz_name)
+            except pytz.UnknownTimeZoneError:
+                tz = None
+            if tz is not None:
+                return datetime.datetime.fromtimestamp(timestamp, tz=tz).strftime(
+                    "%Y-%m-%d %H:%M:%S"
+                )
        return datetime.datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S")

    def _chapter_metadata_path(self) -> str:
@@ -538,12 +551,18 @@ class RecordingExporter(threading.Thread):
        start_file = f"{file_start}{self.start_time}.{PREVIEW_FRAME_TYPE}"
        end_file = f"{file_start}{self.end_time}.{PREVIEW_FRAME_TYPE}"
        selected_preview = None
+        # Preview frames are written at most 1-2 fps during activity
+        # and as little as one every 30s during quiet periods, so a
+        # short export window can contain zero frames. Track the most
+        # recent frame before the window as a fallback.
+        fallback_preview = None

        for file in sorted(os.listdir(preview_dir)):
            if not file.startswith(file_start):
                continue

            if file < start_file:
+                fallback_preview = os.path.join(preview_dir, file)
                continue

            if file > end_file:
@@ -552,6 +571,9 @@ class RecordingExporter(threading.Thread):
            selected_preview = os.path.join(preview_dir, file)
            break

+        if not selected_preview:
+            selected_preview = fallback_preview
+
        if not selected_preview:
            return ""

View File

@@ -123,6 +123,15 @@ def get_detector_temperature(
        if index < len(hailo_device_names):
            device_name = hailo_device_names[index]
            return hailo_temps[device_name]
+    elif detector_type == "hailo10h":
+        # Get temperatures for Hailo devices
+        hailo_temps = get_hailo_temps()
+        if hailo_temps:
+            hailo_device_names = sorted(hailo_temps.keys())
+            index = detector_index_by_type.get("hailo10h", 0)
+            if index < len(hailo_device_names):
+                device_name = hailo_device_names[index]
+                return hailo_temps[device_name]
    elif detector_type == "rknn":
        # Rockchip temperatures are handled by the GPU / NPU stats
        # as there are not detector specific temperatures

View File

@@ -1,6 +1,9 @@
"""Tests for export progress tracking, broadcast, and FFmpeg parsing."""

import io
+import os
+import shutil
+import tempfile
import unittest
from unittest.mock import MagicMock, patch

@@ -363,6 +366,121 @@ class TestBroadcastAggregation(unittest.TestCase):
        assert job.progress_percent == 33.0


+class TestGetDatetimeFromTimestamp(unittest.TestCase):
+    """Auto-generated export name should honor config.ui.timezone, not
+    fall back to the container's UTC clock when a timezone is configured.
+    """
+
+    def test_uses_configured_ui_timezone(self) -> None:
+        exporter = _make_exporter()
+        exporter.config.ui.timezone = "America/New_York"
+        # 2025-01-15 12:00:00 UTC is 07:00:00 EST
+        assert exporter.get_datetime_from_timestamp(1736942400) == "2025-01-15 07:00:00"
+
+    def test_falls_back_to_local_when_timezone_unset(self) -> None:
+        exporter = _make_exporter()
+        exporter.config.ui.timezone = None
+        # No assertion on the exact wall-clock value — just confirm no
+        # exception and that pytz isn't required when the field is unset.
+        assert isinstance(exporter.get_datetime_from_timestamp(1736942400), str)
+
+    def test_invalid_timezone_falls_back_to_local(self) -> None:
+        exporter = _make_exporter()
+        exporter.config.ui.timezone = "Not/A_Real_Zone"
+        assert isinstance(exporter.get_datetime_from_timestamp(1736942400), str)
+
+
+class TestSaveThumbnailFromPreviewFrames(unittest.TestCase):
+    """Short exports in the current hour can fall between preview frame
+    writes (1-2 fps during activity, every 30s otherwise). When no frame
+    falls inside the export window, save_thumbnail should fall back to
+    the most recent prior frame instead of returning no thumbnail."""
+
+    def setUp(self) -> None:
+        self.tmp_root = tempfile.mkdtemp(prefix="frigate_thumb_test_")
+        self.preview_dir = os.path.join(self.tmp_root, "cache", "preview_frames")
+        self.export_clips = os.path.join(self.tmp_root, "clips", "export")
+        os.makedirs(self.preview_dir, exist_ok=True)
+        os.makedirs(self.export_clips, exist_ok=True)
+
+    def tearDown(self) -> None:
+        shutil.rmtree(self.tmp_root, ignore_errors=True)
+
+    def _write_frame(self, camera: str, frame_time: float) -> str:
+        path = os.path.join(self.preview_dir, f"preview_{camera}-{frame_time}.webp")
+        with open(path, "wb") as f:
+            f.write(b"fake-webp-bytes")
+        return path
+
+    def _make_short_current_hour_exporter(self) -> RecordingExporter:
+        # Use a "now-ish" timestamp so save_thumbnail's start-of-hour
+        # comparison takes the current-hour branch (preview frames).
+        import datetime
+
+        now = datetime.datetime.now(datetime.timezone.utc).timestamp()
+        exporter = _make_exporter()
+        exporter.export_id = "thumb_short"
+        exporter.start_time = now
+        exporter.end_time = now + 3
+        return exporter
+
+    def test_short_export_falls_back_to_prior_preview_frame(self) -> None:
+        exporter = self._make_short_current_hour_exporter()
+        # Most recent preview frame is 10s before the export window
+        prior = self._write_frame(exporter.camera, exporter.start_time - 10.0)
+        thumb_target = os.path.join(self.export_clips, f"{exporter.export_id}.webp")
+
+        with (
+            patch(
+                "frigate.record.export.CACHE_DIR", os.path.join(self.tmp_root, "cache")
+            ),
+            patch(
+                "frigate.record.export.CLIPS_DIR", os.path.join(self.tmp_root, "clips")
+            ),
+        ):
+            result = exporter.save_thumbnail(exporter.export_id)
+
+        assert result == thumb_target
+        assert os.path.isfile(thumb_target)
+        with open(thumb_target, "rb") as f, open(prior, "rb") as src:
+            assert f.read() == src.read()
+
+    def test_returns_empty_when_no_preview_frames_exist(self) -> None:
+        exporter = self._make_short_current_hour_exporter()
+
+        with (
+            patch(
+                "frigate.record.export.CACHE_DIR", os.path.join(self.tmp_root, "cache")
+            ),
+            patch(
+                "frigate.record.export.CLIPS_DIR", os.path.join(self.tmp_root, "clips")
+            ),
+        ):
+            result = exporter.save_thumbnail(exporter.export_id)
+
+        assert result == ""
+
+    def test_prefers_in_window_frame_over_prior_frame(self) -> None:
+        exporter = self._make_short_current_hour_exporter()
+        self._write_frame(exporter.camera, exporter.start_time - 10.0)
+        in_window = self._write_frame(exporter.camera, exporter.start_time + 1.0)
+        thumb_target = os.path.join(self.export_clips, f"{exporter.export_id}.webp")
+
+        with (
+            patch(
+                "frigate.record.export.CACHE_DIR", os.path.join(self.tmp_root, "cache")
+            ),
+            patch(
+                "frigate.record.export.CLIPS_DIR", os.path.join(self.tmp_root, "clips")
+            ),
+        ):
+            result = exporter.save_thumbnail(exporter.export_id)
+
+        assert result == thumb_target
+        with open(thumb_target, "rb") as f, open(in_window, "rb") as src:
+            assert f.read() == src.read()
+
+
class TestSchedulesCleanup(unittest.TestCase):
    def test_schedule_job_cleanup_removes_after_delay(self) -> None:
        config = MagicMock()

View File

@@ -0,0 +1,166 @@
"""Tests for WebSocket authorization checks."""

import unittest

from frigate.comms.ws import _check_ws_authorization
from frigate.const import INSERT_MANY_RECORDINGS, UPDATE_CAMERA_ACTIVITY


class TestCheckWsAuthorization(unittest.TestCase):
    """Tests for the _check_ws_authorization pure function."""

    DEFAULT_SEPARATOR = ","

    # --- IPC topic blocking (unconditional, regardless of role) ---

    def test_ipc_topic_blocked_for_admin(self):
        self.assertFalse(
            _check_ws_authorization(
                INSERT_MANY_RECORDINGS, "admin", self.DEFAULT_SEPARATOR
            )
        )

    def test_ipc_topic_blocked_for_viewer(self):
        self.assertFalse(
            _check_ws_authorization(
                UPDATE_CAMERA_ACTIVITY, "viewer", self.DEFAULT_SEPARATOR
            )
        )

    def test_ipc_topic_blocked_when_no_role(self):
        self.assertFalse(
            _check_ws_authorization(
                INSERT_MANY_RECORDINGS, None, self.DEFAULT_SEPARATOR
            )
        )

    # --- Viewer allowed topics ---

    def test_viewer_can_send_on_connect(self):
        self.assertTrue(
            _check_ws_authorization("onConnect", "viewer", self.DEFAULT_SEPARATOR)
        )

    def test_viewer_can_send_model_state(self):
        self.assertTrue(
            _check_ws_authorization("modelState", "viewer", self.DEFAULT_SEPARATOR)
        )

    def test_viewer_can_send_audio_transcription_state(self):
        self.assertTrue(
            _check_ws_authorization(
                "audioTranscriptionState", "viewer", self.DEFAULT_SEPARATOR
            )
        )

    def test_viewer_can_send_birdseye_layout(self):
        self.assertTrue(
            _check_ws_authorization("birdseyeLayout", "viewer", self.DEFAULT_SEPARATOR)
        )

    def test_viewer_can_send_embeddings_reindex_progress(self):
        self.assertTrue(
            _check_ws_authorization(
                "embeddingsReindexProgress", "viewer", self.DEFAULT_SEPARATOR
            )
        )

    # --- Viewer blocked from admin topics ---

    def test_viewer_blocked_from_restart(self):
        self.assertFalse(
            _check_ws_authorization("restart", "viewer", self.DEFAULT_SEPARATOR)
        )

    def test_viewer_blocked_from_camera_detect_set(self):
        self.assertFalse(
            _check_ws_authorization(
                "front_door/detect/set", "viewer", self.DEFAULT_SEPARATOR
            )
        )

    def test_viewer_blocked_from_camera_ptz(self):
        self.assertFalse(
            _check_ws_authorization("front_door/ptz", "viewer", self.DEFAULT_SEPARATOR)
        )

    def test_viewer_blocked_from_global_notifications_set(self):
        self.assertFalse(
            _check_ws_authorization(
                "notifications/set", "viewer", self.DEFAULT_SEPARATOR
            )
        )

    def test_viewer_blocked_from_camera_notifications_suspend(self):
        self.assertFalse(
            _check_ws_authorization(
                "front_door/notifications/suspend", "viewer", self.DEFAULT_SEPARATOR
            )
        )

    def test_viewer_blocked_from_arbitrary_unknown_topic(self):
        self.assertFalse(
            _check_ws_authorization(
                "some_random_topic", "viewer", self.DEFAULT_SEPARATOR
            )
        )

    # --- Admin access ---

    def test_admin_can_send_restart(self):
        self.assertTrue(
            _check_ws_authorization("restart", "admin", self.DEFAULT_SEPARATOR)
        )

    def test_admin_can_send_camera_detect_set(self):
        self.assertTrue(
            _check_ws_authorization(
                "front_door/detect/set", "admin", self.DEFAULT_SEPARATOR
            )
        )

    def test_admin_can_send_camera_ptz(self):
        self.assertTrue(
            _check_ws_authorization("front_door/ptz", "admin", self.DEFAULT_SEPARATOR)
        )

    # --- Comma-separated roles ---

    def test_comma_separated_admin_viewer_grants_admin(self):
        self.assertTrue(
            _check_ws_authorization("restart", "admin,viewer", self.DEFAULT_SEPARATOR)
        )

    def test_comma_separated_viewer_admin_grants_admin(self):
        self.assertTrue(
            _check_ws_authorization("restart", "viewer,admin", self.DEFAULT_SEPARATOR)
        )

    def test_comma_separated_with_spaces(self):
        self.assertTrue(
            _check_ws_authorization("restart", "viewer, admin", self.DEFAULT_SEPARATOR)
        )

    # --- Custom separator ---

    def test_pipe_separator(self):
        self.assertTrue(_check_ws_authorization("restart", "viewer|admin", "|"))

    def test_pipe_separator_no_admin(self):
        self.assertFalse(_check_ws_authorization("restart", "viewer|editor", "|"))

    # --- No role header (fail-closed) ---

    def test_no_role_header_blocks_admin_topics(self):
        self.assertFalse(
            _check_ws_authorization("restart", None, self.DEFAULT_SEPARATOR)
        )

    def test_no_role_header_allows_viewer_topics(self):
        self.assertTrue(
            _check_ws_authorization("onConnect", None, self.DEFAULT_SEPARATOR)
        )


if __name__ == "__main__":
    unittest.main()
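
The suite above encodes three rules: internal IPC topics are blocked unconditionally, a small allow-list of read-only topics is open even without a role header, and everything else requires the admin role, failing closed when the header is absent. A hypothetical TypeScript restatement of those rules (the real implementation is the Python _check_ws_authorization function; the two IPC topic strings below are placeholders for the values exported by frigate.const):

// Placeholder values; the real topic strings come from frigate.const.
const INSERT_MANY_RECORDINGS = "insert_many_recordings";
const UPDATE_CAMERA_ACTIVITY = "update_camera_activity";

const IPC_TOPICS = new Set([INSERT_MANY_RECORDINGS, UPDATE_CAMERA_ACTIVITY]);
const VIEWER_TOPICS = new Set([
  "onConnect",
  "modelState",
  "audioTranscriptionState",
  "birdseyeLayout",
  "embeddingsReindexProgress",
]);

function checkWsAuthorization(
  topic: string,
  roleHeader: string | null,
  separator: string,
): boolean {
  // Internal IPC topics are never accepted from a websocket client.
  if (IPC_TOPICS.has(topic)) return false;
  // A small allow-list of read-only topics is open to any connection.
  if (VIEWER_TOPICS.has(topic)) return true;
  // Everything else requires the admin role; a missing header fails closed.
  if (roleHeader === null) return false;
  return roleHeader
    .split(separator)
    .map((role) => role.trim())
    .includes("admin");
}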

View File

@@ -1,88 +1,95 @@
 {
   "cells": [
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "runtime-notice"
+      },
+      "source": [
+        "**Before running:** go to **Runtime → Change runtime type → Fallback runtime version: 2025.07** (Python 3.11). The current Colab default (Python 3.12+) is incompatible with `super-gradients`."
+      ]
+    },
     {
       "cell_type": "code",
       "execution_count": null,
       "metadata": {
         "id": "rmuF9iKWTbdk"
       },
       "outputs": [],
       "source": [
+        "! pip install -q \"jedi>=0.16\"\n",
         "! pip install -q git+https://github.com/Deci-AI/super-gradients.git"
       ]
     },
     {
       "cell_type": "code",
       "execution_count": null,
       "metadata": {
         "id": "NiRCt917KKcL"
       },
       "outputs": [],
-      "source": [
-        "! sed -i 's/sghub.deci.ai/sg-hub-nv.s3.amazonaws.com/' /usr/local/lib/python3.12/dist-packages/super_gradients/training/pretrained_models.py\n",
-        "! sed -i 's/sghub.deci.ai/sg-hub-nv.s3.amazonaws.com/' /usr/local/lib/python3.12/dist-packages/super_gradients/training/utils/checkpoint_utils.py"
-      ]
+      "source": "! sed -i 's/sghub\\.deci\\.ai/d2gjn4b69gu75n.cloudfront.net/g; s/sg-hub-nv\\.s3\\.amazonaws\\.com/d2gjn4b69gu75n.cloudfront.net/g' /usr/local/lib/python*/dist-packages/super_gradients/training/pretrained_models.py\n! sed -i 's/sghub\\.deci\\.ai/d2gjn4b69gu75n.cloudfront.net/g; s/sg-hub-nv\\.s3\\.amazonaws\\.com/d2gjn4b69gu75n.cloudfront.net/g' /usr/local/lib/python*/dist-packages/super_gradients/training/utils/checkpoint_utils.py"
     },
     {
       "cell_type": "code",
       "execution_count": null,
       "metadata": {
         "id": "dTB0jy_NNSFz"
       },
       "outputs": [],
       "source": [
         "from super_gradients.common.object_names import Models\n",
         "from super_gradients.conversion import DetectionOutputFormatMode\n",
         "from super_gradients.training import models\n",
         "\n",
         "model = models.get(Models.YOLO_NAS_S, pretrained_weights=\"coco\")"
       ]
     },
     {
       "cell_type": "code",
       "execution_count": null,
       "metadata": {
         "id": "GymUghyCNXem"
       },
       "outputs": [],
       "source": [
         "# export the model for compatibility with Frigate\n",
         "\n",
         "model.export(\"yolo_nas_s.onnx\",\n",
         "             output_predictions_format=DetectionOutputFormatMode.FLAT_FORMAT,\n",
         "             max_predictions_per_image=20,\n",
         "             num_pre_nms_predictions=300,\n",
         "             confidence_threshold=0.4,\n",
         "             input_image_shape=(320,320),\n",
         "             )"
       ]
     },
     {
       "cell_type": "code",
       "execution_count": null,
       "metadata": {
         "id": "uBhXV5g4Nh42"
       },
       "outputs": [],
       "source": [
         "from google.colab import files\n",
         "\n",
         "files.download('yolo_nas_s.onnx')"
       ]
     }
   ],
   "metadata": {
     "colab": {
       "provenance": []
     },
     "kernelspec": {
       "display_name": "Python 3",
       "name": "python3"
     },
     "language_info": {
       "name": "python"
     }
   },
   "nbformat": 4,
   "nbformat_minor": 0
 }

View File

@@ -69,17 +69,18 @@ test.describe("Navigation — conditional items @critical", () => {
     ).toBeVisible();
   });

-  test("/chat is hidden when genai.model is none (desktop)", async ({
+  test("/chat is hidden when no agent has the chat role (desktop)", async ({
     frigateApp,
   }) => {
     test.skip(frigateApp.isMobile, "Desktop sidebar");
     await frigateApp.installDefaults({
       config: {
         genai: {
-          enabled: false,
-          provider: "ollama",
-          model: "none",
-          base_url: "",
+          descriptions_only: {
+            provider: "ollama",
+            model: "llava",
+            roles: ["descriptions"],
+          },
         },
       },
     });
@@ -89,12 +90,20 @@
     ).toHaveCount(0);
   });

-  test("/chat is visible when genai.model is set (desktop)", async ({
+  test("/chat is visible when an agent has the chat role (desktop)", async ({
     frigateApp,
   }) => {
     test.skip(frigateApp.isMobile, "Desktop sidebar");
     await frigateApp.installDefaults({
-      config: { genai: { enabled: true, model: "llava" } },
+      config: {
+        genai: {
+          chat_agent: {
+            provider: "ollama",
+            model: "llava",
+            roles: ["chat"],
+          },
+        },
+      },
     });
     await frigateApp.goto("/");
     await expect(

View File

@@ -397,6 +397,14 @@
       "description": "The device to use for Hailo inference (e.g. 'PCIe', 'M.2')."
     }
   },
+  "hailo10h": {
+    "label": "Hailo-10H",
+    "description": "Hailo-10H detector using HEF models and the HailoRT SDK for inference on Hailo hardware.",
+    "device": {
+      "label": "Device Type",
+      "description": "The device to use for Hailo inference (e.g. 'PCIe', 'M.2')."
+    }
+  },
   "memryx": {
     "label": "MemryX",
     "description": "MemryX MX3 detector that runs compiled DFP models on MemryX accelerators.",

View File

@@ -93,6 +93,14 @@ export default function GeneralSettings({ className }: GeneralSettingsProps) {
     useSWR<ProfilesApiResponse>("profiles");
   const logoutUrl = config?.proxy?.logout_url || "/api/logout";

+  const hasChatAgent = useMemo(
+    () =>
+      Object.values(config?.genai ?? {}).some((agent) =>
+        agent?.roles?.includes("chat"),
+      ),
+    [config?.genai],
+  );
+
   // languages
   const languages = useMemo(() => {
@@ -511,7 +519,7 @@ export default function GeneralSettings({ className }: GeneralSettingsProps) {
                 <span>{t("menu.classification")}</span>
               </MenuItem>
             </Link>
-            {config?.genai?.model !== "none" && (
+            {hasChatAgent && (
              <Link to="/chat">
                <MenuItem
                  className="flex w-full items-center p-2 text-sm"

View File

@@ -90,6 +90,10 @@ export default function SearchResultActions({
   const handleDebugReplay = useCallback(
     (event: SearchResult) => {
       setIsStarting(true);
+      const toastId = toast.loading(
+        t("dialog.starting", { ns: "views/replay" }),
+        { position: "top-center" },
+      );

       axios
         .post("debug_replay/start", {
@@ -100,6 +104,7 @@
         .then((response) => {
           if (response.status === 200) {
             toast.success(t("dialog.toast.success", { ns: "views/replay" }), {
+              id: toastId,
               position: "top-center",
             });
             navigate("/replay");
@@ -115,6 +120,7 @@
             toast.error(
               t("dialog.toast.alreadyActive", { ns: "views/replay" }),
               {
+                id: toastId,
                 position: "top-center",
                 closeButton: true,
                 dismissible: false,
@@ -129,6 +135,7 @@
             );
           } else {
             toast.error(t("dialog.toast.error", { error: errorMessage }), {
+              id: toastId,
               position: "top-center",
             });
           }
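
The loading, success, and error toasts above all share one id, which is the standard sonner pattern for updating a pending toast in place rather than stacking a second notification. A condensed sketch of the flow, where startReplay is a hypothetical stand-in for the POST to debug_replay/start:

import { toast } from "sonner";

async function runWithLoadingToast(startReplay: () => Promise<void>) {
  // toast.loading returns an id; passing that id to a later toast call
  // replaces the pending toast instead of adding a new one.
  const toastId = toast.loading("Starting debug replay...", {
    position: "top-center",
  });
  try {
    await startReplay();
    toast.success("Replay started", { id: toastId, position: "top-center" });
  } catch (error) {
    toast.error(`Failed to start replay: ${error}`, {
      id: toastId,
      position: "top-center",
    });
  }
}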

View File

@@ -1,4 +1,6 @@
-import { useCallback, useState } from "react";
+import { useCallback, useEffect, useMemo, useState } from "react";
+import { flushSync } from "react-dom";
+import { throttle } from "lodash";
 import { Slider } from "@/components/ui/slider";
 import { Button } from "@/components/ui/button";
 import { Popover, PopoverContent, PopoverTrigger } from "../../ui/popover";
@@ -19,11 +21,21 @@ import { useIsAdmin } from "@/hooks/use-is-admin";
 import { useDocDomain } from "@/hooks/use-doc-domain";
 import { Link } from "react-router-dom";

+const SLIDER_DRAG_THROTTLE_MS = 80;
+
 type Props = {
   className?: string;
+  // Optional side-effect invoked atomically with setAnnotationOffset (inside
+  // flushSync) so callers like the timeline panel can re-seek the video in the
+  // same React commit as the offset state update — preventing a one-frame
+  // overlay mismatch where annotationOffset has changed but currentTime has not.
+  onApplyOffset?: (newOffset: number) => void;
 };

-export default function AnnotationOffsetSlider({ className }: Props) {
+export default function AnnotationOffsetSlider({
+  className,
+  onApplyOffset,
+}: Props) {
   const { annotationOffset, setAnnotationOffset, camera } = useDetailStream();
   const isAdmin = useIsAdmin();
   const { getLocaleDocUrl } = useDocDomain();
@@ -31,31 +43,62 @@ export default function AnnotationOffsetSlider({ className }: Props) {
   const { t } = useTranslation(["views/explore"]);
   const [isSaving, setIsSaving] = useState(false);

+  const applyOffset = useCallback(
+    (newOffset: number) => {
+      flushSync(() => {
+        setAnnotationOffset(newOffset);
+        onApplyOffset?.(newOffset);
+      });
+    },
+    [setAnnotationOffset, onApplyOffset],
+  );
+
+  const throttledApplyOffset = useMemo(
+    () =>
+      throttle(applyOffset, SLIDER_DRAG_THROTTLE_MS, {
+        leading: true,
+        trailing: true,
+      }),
+    [applyOffset],
+  );
+
+  useEffect(() => () => throttledApplyOffset.cancel(), [throttledApplyOffset]);
+
   const handleChange = useCallback(
     (values: number[]) => {
       if (!values || values.length === 0) return;
-      const valueMs = values[0];
-      setAnnotationOffset(valueMs);
+      throttledApplyOffset(values[0]);
     },
-    [setAnnotationOffset],
+    [throttledApplyOffset],
+  );
+
+  const handleCommit = useCallback(
+    (values: number[]) => {
+      if (!values || values.length === 0) return;
+      // Ensure the final value lands even if it would otherwise be discarded
+      // by the trailing edge of the throttle window.
+      throttledApplyOffset.cancel();
+      applyOffset(values[0]);
+    },
+    [throttledApplyOffset, applyOffset],
   );

   const stepOffset = useCallback(
     (delta: number) => {
-      setAnnotationOffset((prev) => {
-        const next = prev + delta;
-        return Math.max(
-          ANNOTATION_OFFSET_MIN,
-          Math.min(ANNOTATION_OFFSET_MAX, next),
-        );
-      });
+      const next = Math.max(
+        ANNOTATION_OFFSET_MIN,
+        Math.min(ANNOTATION_OFFSET_MAX, annotationOffset + delta),
+      );
+      throttledApplyOffset.cancel();
+      applyOffset(next);
     },
-    [setAnnotationOffset],
+    [annotationOffset, applyOffset, throttledApplyOffset],
   );

   const reset = useCallback(() => {
-    setAnnotationOffset(0);
-  }, [setAnnotationOffset]);
+    throttledApplyOffset.cancel();
+    applyOffset(0);
+  }, [applyOffset, throttledApplyOffset]);

   const save = useCallback(async () => {
     setIsSaving(true);
@@ -130,6 +173,7 @@ export default function AnnotationOffsetSlider({ className }: Props) {
             max={ANNOTATION_OFFSET_MAX}
             step={ANNOTATION_OFFSET_STEP}
             onValueChange={handleChange}
+            onValueCommit={handleCommit}
           />
         </div>
         <Button
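
Condensed, the slider pattern above pairs a leading-plus-trailing lodash throttle for continuous drag events with an unthrottled, flushSync-wrapped commit handler for the release. A self-contained sketch under those assumptions (the hook name and setOffset parameter are illustrative, not the component's API):

import { useCallback, useEffect, useMemo } from "react";
import { flushSync } from "react-dom";
import { throttle } from "lodash";

const THROTTLE_MS = 80; // matches SLIDER_DRAG_THROTTLE_MS above

export function useThrottledOffset(setOffset: (value: number) => void) {
  // flushSync makes the state commit synchronous, so side effects tied to
  // the offset land in the same React commit as the state update.
  const apply = useCallback(
    (value: number) => flushSync(() => setOffset(value)),
    [setOffset],
  );

  // leading: fire immediately on drag start; trailing: settle on the last
  // value when the drag pauses longer than THROTTLE_MS.
  const throttledApply = useMemo(
    () => throttle(apply, THROTTLE_MS, { leading: true, trailing: true }),
    [apply],
  );

  // Drop any pending trailing call on unmount.
  useEffect(() => () => throttledApply.cancel(), [throttledApply]);

  const onDrag = useCallback(
    (value: number) => throttledApply(value),
    [throttledApply],
  );

  // On release, cancel the stale trailing call and apply the final value
  // unthrottled so it can never be lost to the throttle window.
  const onCommit = useCallback(
    (value: number) => {
      throttledApply.cancel();
      apply(value);
    },
    [throttledApply, apply],
  );

  return { onDrag, onCommit };
}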

View File

@@ -1,7 +1,9 @@
 import { Event } from "@/types/event";
 import { FrigateConfig } from "@/types/frigateConfig";
 import axios from "axios";
-import { useCallback, useState } from "react";
+import { useCallback, useEffect, useMemo, useState } from "react";
+import { flushSync } from "react-dom";
+import { throttle } from "lodash";
 import { LuExternalLink, LuMinus, LuPlus } from "react-icons/lu";
 import { Link } from "react-router-dom";
 import { toast } from "sonner";
@@ -19,6 +21,8 @@ import {
   ANNOTATION_OFFSET_STEP,
 } from "@/lib/const";

+const SLIDER_DRAG_THROTTLE_MS = 80;
+
 type AnnotationSettingsPaneProps = {
   event: Event;
   annotationOffset: number;
@@ -38,30 +42,64 @@ export function AnnotationSettingsPane({
   const [isLoading, setIsLoading] = useState(false);

-  const handleSliderChange = useCallback(
-    (values: number[]) => {
-      if (!values || values.length === 0) return;
-      setAnnotationOffset(values[0]);
-    },
-    [setAnnotationOffset],
-  );
-
-  const stepOffset = useCallback(
-    (delta: number) => {
-      setAnnotationOffset((prev) => {
-        const next = prev + delta;
-        return Math.max(
-          ANNOTATION_OFFSET_MIN,
-          Math.min(ANNOTATION_OFFSET_MAX, next),
-        );
-      });
-    },
-    [setAnnotationOffset],
-  );
+  // flushSync ensures setAnnotationOffset commits synchronously so the
+  // useLayoutEffect in TrackingDetails (which seeks the video and sets
+  // currentTime in response) runs before the browser paints — preventing a
+  // one-frame overlay mismatch where annotationOffset has changed but
+  // currentTime has not.
+  const applyOffset = useCallback(
+    (newOffset: number) => {
+      flushSync(() => {
+        setAnnotationOffset(newOffset);
+      });
+    },
+    [setAnnotationOffset],
+  );
+
+  const throttledApplyOffset = useMemo(
+    () =>
+      throttle(applyOffset, SLIDER_DRAG_THROTTLE_MS, {
+        leading: true,
+        trailing: true,
+      }),
+    [applyOffset],
+  );
+
+  useEffect(() => () => throttledApplyOffset.cancel(), [throttledApplyOffset]);
+
+  const handleSliderChange = useCallback(
+    (values: number[]) => {
+      if (!values || values.length === 0) return;
+      throttledApplyOffset(values[0]);
+    },
+    [throttledApplyOffset],
+  );
+
+  const handleSliderCommit = useCallback(
+    (values: number[]) => {
+      if (!values || values.length === 0) return;
+      throttledApplyOffset.cancel();
+      applyOffset(values[0]);
+    },
+    [throttledApplyOffset, applyOffset],
+  );
+
+  const stepOffset = useCallback(
+    (delta: number) => {
+      const next = Math.max(
+        ANNOTATION_OFFSET_MIN,
+        Math.min(ANNOTATION_OFFSET_MAX, annotationOffset + delta),
+      );
+      throttledApplyOffset.cancel();
+      applyOffset(next);
+    },
+    [annotationOffset, applyOffset, throttledApplyOffset],
+  );

   const reset = useCallback(() => {
-    setAnnotationOffset(0);
-  }, [setAnnotationOffset]);
+    throttledApplyOffset.cancel();
+    applyOffset(0);
+  }, [applyOffset, throttledApplyOffset]);

   const saveToConfig = useCallback(async () => {
     if (!config || !event) return;
@@ -143,6 +181,7 @@ export function AnnotationSettingsPane({
             max={ANNOTATION_OFFSET_MAX}
             step={ANNOTATION_OFFSET_STEP}
             onValueChange={handleSliderChange}
+            onValueCommit={handleSliderCommit}
             className="flex-1"
           />
           <Button

View File

@@ -73,7 +73,7 @@ export default function DetailActionsMenu({
   }

   return (
-    <DropdownMenu open={isOpen} onOpenChange={setIsOpen}>
+    <DropdownMenu modal={false} open={isOpen} onOpenChange={setIsOpen}>
       <DropdownMenuTrigger>
         <div className="rounded" role="button">
           <HiDotsHorizontal className="size-4 text-muted-foreground" />

View File

@@ -957,8 +957,9 @@
         toast.success(
           t("details.item.toast.success.regenerate", {
             provider: capitalizeAll(
-              config?.genai.provider.replaceAll("_", " ") ??
-                t("generativeAI"),
+              Object.values(config?.genai ?? {})
+                .find((agent) => agent?.roles?.includes("descriptions"))
+                ?.provider?.replaceAll("_", " ") ?? t("generativeAI"),
             ),
           }),
           {
@@ -976,8 +977,9 @@
         toast.error(
           t("details.item.toast.error.regenerate", {
             provider: capitalizeAll(
-              config?.genai.provider.replaceAll("_", " ") ??
-                t("generativeAI"),
+              Object.values(config?.genai ?? {})
+                .find((agent) => agent?.roles?.includes("descriptions"))
+                ?.provider?.replaceAll("_", " ") ?? t("generativeAI"),
             ),
             errorMessage,
           }),

View File

@@ -1,5 +1,13 @@
 import useSWR from "swr";
-import { useCallback, useEffect, useMemo, useRef, useState } from "react";
+import {
+  useCallback,
+  useEffect,
+  useLayoutEffect,
+  useMemo,
+  useRef,
+  useState,
+} from "react";
+import { flushSync } from "react-dom";
 import { useResizeObserver } from "@/hooks/resize-observer";
 import { useFullscreen } from "@/hooks/use-fullscreen";
 import { Event } from "@/types/event";
@@ -389,7 +397,12 @@ export function TrackingDetails({
   // When the pinned timestamp or offset changes, re-seek the video and
   // explicitly update currentTime so the overlay shows the pinned event's box.
-  useEffect(() => {
+  // useLayoutEffect + flushSync force the setCurrentTime commit to land before
+  // the browser paints, so the overlay never shows a frame where
+  // annotationOffset has changed but currentTime has not — that mismatch would
+  // resolve effectiveCurrentTime away from the pinned detect timestamp and
+  // make the bounding box disappear or jump for one frame.
+  useLayoutEffect(() => {
     const pinned = pinnedDetectTimestampRef.current;
     if (!isAnnotationSettingsOpen || pinned == null) return;
     if (!videoRef.current || displaySource !== "video") return;
@@ -398,10 +411,9 @@
     const relativeTime = timestampToVideoTime(targetTimeRecord);
     videoRef.current.currentTime = relativeTime;

-    // Explicitly update currentTime state so the overlay's effectiveCurrentTime
-    // resolves back to the pinned detect timestamp:
-    // effectiveCurrentTime = targetTimeRecord - annotationOffset/1000 = pinned
-    setCurrentTime(targetTimeRecord);
+    flushSync(() => {
+      setCurrentTime(targetTimeRecord);
+    });
   }, [
     isAnnotationSettingsOpen,
     annotationOffset,
@@ -1204,7 +1216,11 @@ function LifecycleIconRow({
       <div className="flex flex-row items-center gap-3">
         <div className="whitespace-nowrap">{formattedEventTimestamp}</div>
         {isAdmin && (config?.plus?.enabled || item.data.box) && (
-          <DropdownMenu open={isOpen} onOpenChange={setIsOpen}>
+          <DropdownMenu
+            modal={false}
+            open={isOpen}
+            onOpenChange={setIsOpen}
+          >
             <DropdownMenuTrigger>
               <div className="rounded p-1 pr-2" role="button">
                 <HiDotsHorizontal className="size-4 text-muted-foreground" />
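
The switch from useEffect to useLayoutEffect matters because layout effects run after DOM mutation but before the browser paints; combined with flushSync, the video seek and the currentTime state commit both land before any frame can be drawn. A stripped-down sketch of the same pattern (a hypothetical component, not the actual TrackingDetails code):

import { useLayoutEffect, useRef, useState } from "react";
import { flushSync } from "react-dom";

// Hypothetical component: re-seek whenever the offset changes, before paint.
function SeekOnOffsetChange({ pinned, offset }: { pinned: number; offset: number }) {
  const videoRef = useRef<HTMLVideoElement>(null);
  const [currentTime, setCurrentTime] = useState(pinned);

  useLayoutEffect(() => {
    const target = pinned + offset / 1000;
    if (videoRef.current) {
      // (the real code first converts this to a video-relative time)
      videoRef.current.currentTime = target;
    }
    // Commit the state update before paint so no rendered frame ever pairs
    // the new offset with a stale currentTime.
    flushSync(() => setCurrentTime(target));
  }, [pinned, offset]);

  return <video ref={videoRef} data-current-time={currentTime} />;
}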

View File

@@ -126,13 +126,20 @@
     // eslint-disable-next-line react-hooks/exhaustive-deps
   }, [controlsExpanded]);

-  // Re-seek on annotation offset change while settings panel is open
-  useEffect(() => {
-    const pinned = pinnedDetectTimestampRef.current;
-    if (!controlsExpanded || pinned == null) return;
-    const recordTime = pinned + annotationOffset / 1000;
-    onSeek(recordTime, false);
-  }, [controlsExpanded, annotationOffset, onSeek]);
+  // The slider invokes this atomically with setAnnotationOffset (inside the
+  // same flushSync) so currentTime advances in the same React commit as the
+  // offset. Without this, the overlay would render one frame with the new
+  // offset but the old currentTime, briefly resolving effectiveCurrentTime to
+  // the wrong detect-stream timestamp and making the bounding box vanish or
+  // jump.
+  const handleApplyOffset = useCallback(
+    (newOffset: number) => {
+      const pinned = pinnedDetectTimestampRef.current;
+      if (!controlsExpanded || pinned == null) return;
+      onSeek(pinned + newOffset / 1000, false);
+    },
+    [controlsExpanded, onSeek],
+  );

   // Ensure we initialize the active review when reviewItems first arrive.
   // This helps when the component mounts while the video is already
@@ -337,7 +344,7 @@
           </button>
           {controlsExpanded && (
             <div className="space-y-4 px-3 pb-5 pt-2">
-              <AnnotationOffsetSlider />
+              <AnnotationOffsetSlider onApplyOffset={handleApplyOffset} />
               <Separator />
               <div className="flex flex-col gap-1">
                 <div className="flex items-center justify-between">

View File

@@ -53,6 +53,10 @@ export default function EventMenu({
   const handleDebugReplay = useCallback(
     (event: Event) => {
       setIsStarting(true);
+      const toastId = toast.loading(
+        t("dialog.starting", { ns: "views/replay" }),
+        { position: "top-center" },
+      );

       axios
         .post("debug_replay/start", {
@@ -63,6 +67,7 @@
         .then((response) => {
           if (response.status === 200) {
             toast.success(t("dialog.toast.success", { ns: "views/replay" }), {
+              id: toastId,
               position: "top-center",
             });
             navigate("/replay");
@@ -78,6 +83,7 @@
             toast.error(
               t("dialog.toast.alreadyActive", { ns: "views/replay" }),
               {
+                id: toastId,
                 position: "top-center",
                 closeButton: true,
                 dismissible: false,
@@ -92,6 +98,7 @@
             );
           } else {
             toast.error(t("dialog.toast.error", { error: errorMessage }), {
+              id: toastId,
               position: "top-center",
             });
           }
@@ -106,7 +113,7 @@
   return (
     <>
       <span tabIndex={0} className="sr-only" />
-      <DropdownMenu open={isOpen} onOpenChange={setIsOpen}>
+      <DropdownMenu modal={false} open={isOpen} onOpenChange={setIsOpen}>
         <DropdownMenuTrigger>
           <div className="rounded p-1 pr-2" role="button">
             <HiDotsHorizontal className="size-4 text-muted-foreground" />

View File

@@ -28,6 +28,14 @@ export default function useNavigation(
   });

   const isAdmin = useIsAdmin();

+  const hasChatAgent = useMemo(
+    () =>
+      Object.values(config?.genai ?? {}).some((agent) =>
+        agent?.roles?.includes("chat"),
+      ),
+    [config?.genai],
+  );
+
   return useMemo(
     () =>
       [
@@ -89,9 +97,9 @@
           icon: MdChat,
           title: "menu.chat",
           url: "/chat",
-          enabled: isDesktop && isAdmin && config?.genai?.model !== "none",
+          enabled: isDesktop && isAdmin && hasChatAgent,
         },
       ] as NavData[],
-    [config?.face_recognition?.enabled, config?.genai?.model, variant, isAdmin],
+    [config?.face_recognition?.enabled, hasChatAgent, variant, isAdmin],
   );
 }

View File

@@ -382,6 +382,18 @@ export type AllGroupsStreamingSettings = {
   [groupName: string]: GroupStreamingSettings;
 };

+export type GenAIRole = "chat" | "descriptions" | "embeddings";
+
+export type GenAIAgentConfig = {
+  api_key?: string;
+  base_url?: string;
+  model: string;
+  provider?: string;
+  roles: GenAIRole[];
+  provider_options?: Record<string, unknown>;
+  runtime_options?: Record<string, unknown>;
+};
+
 export interface FrigateConfig {
   version: string;
   safe_mode: boolean;
@@ -478,12 +490,7 @@ export interface FrigateConfig {
     retry_interval: number;
   };

-  genai: {
-    provider: string;
-    base_url?: string;
-    api_key?: string;
-    model: string;
-  };
+  genai: Record<string, GenAIAgentConfig>;

   go2rtc: {
     streams: Record<string, string | string[]>;
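
With genai now typed as a map of named agents, role checks scan the agent entries instead of reading a single genai.model field. An illustrative object under the new type (the agent keys and model/provider values below are made up for the example):

import type { GenAIAgentConfig } from "@/types/frigateConfig";

// Agent keys are user-chosen names; values follow GenAIAgentConfig.
const genai: Record<string, GenAIAgentConfig> = {
  chat_agent: { provider: "ollama", model: "llava", roles: ["chat"] },
  describer: { provider: "ollama", model: "llava", roles: ["descriptions"] },
};

// Mirrors the hasChatAgent helpers added in GeneralSettings and useNavigation.
const hasChatAgent = Object.values(genai).some((agent) =>
  agent?.roles?.includes("chat"),
);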