diff --git a/docs/.gitignore b/docs/.gitignore
index b2d6de306..6e46bafc0 100644
--- a/docs/.gitignore
+++ b/docs/.gitignore
@@ -7,6 +7,7 @@
# Generated files
.docusaurus
.cache-loader
+docs/integrations/api/
# Misc
.DS_Store
diff --git a/docs/docs/configuration/advanced.md b/docs/docs/configuration/advanced.md
index 526207823..e6de72593 100644
--- a/docs/docs/configuration/advanced.md
+++ b/docs/docs/configuration/advanced.md
@@ -4,12 +4,29 @@ title: Advanced Options
sidebar_label: Advanced Options
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
### Logging
#### Frigate `logger`
Change the default log level for troubleshooting purposes.
+
+
+
+Navigate to .
+
+| Field | Description |
+| ------------------------- | ------------------------------------------------------- |
+| **Logging level** | The default log level for all modules (default: `info`) |
+| **Per-process log level** | Override the log level for specific modules |
+
+
+
+
```yaml
logger:
# Optional: default log level (default: shown below)
@@ -19,6 +36,9 @@ logger:
frigate.mqtt: error
```
+
+
+
Available log levels are: `debug`, `info`, `warning`, `error`, `critical`
Examples of available modules are:
@@ -48,7 +68,20 @@ This section can be used to set environment variables for those unable to modify
Variables prefixed with `FRIGATE_` can be referenced in config fields that support environment variable substitution (such as MQTT host and credentials, camera stream URLs, and ONVIF host and credentials) using the `{FRIGATE_VARIABLE_NAME}` syntax.
-Example:
+
+
+
+Navigate to to add or edit environment variables.
+
+| Field | Description |
+| --------- | --------------------------------------------------------- |
+| **Key** | The environment variable name (e.g., `FRIGATE_MQTT_USER`) |
+| **Value** | The value for the variable |
+
+Variables defined here can be referenced elsewhere in your configuration using the `{FRIGATE_VARIABLE_NAME}` syntax.
+
+
+
```yaml
environment_vars:
@@ -61,10 +94,27 @@ mqtt:
password: "{FRIGATE_MQTT_PASSWORD}"
```
+
+
+
#### TensorFlow Thread Configuration
If you encounter thread creation errors during classification model training, you can limit TensorFlow's thread usage:
+
+
+
+Navigate to and add the following variables:
+
+| Variable | Description |
+| --------------------------------- | ---------------------------------------------- |
+| `TF_INTRA_OP_PARALLELISM_THREADS` | Threads within operations (`0` = use default) |
+| `TF_INTER_OP_PARALLELISM_THREADS` | Threads between operations (`0` = use default) |
+| `TF_DATASET_THREAD_POOL_SIZE` | Data pipeline threads (`0` = use default) |
+
+
+
+
```yaml
environment_vars:
TF_INTRA_OP_PARALLELISM_THREADS: "2" # Threads within operations (0 = use default)
@@ -72,19 +122,35 @@ environment_vars:
TF_DATASET_THREAD_POOL_SIZE: "2" # Data pipeline threads (0 = use default)
```
+
+
+
### `database`
Tracked object and recording information is managed in a sqlite database at `/config/frigate.db`. If that database is deleted, recordings will be orphaned and will need to be cleaned up manually. They also won't show up in the Media Browser within Home Assistant.
-If you are storing your database on a network share (SMB, NFS, etc), you may get a `database is locked` error message on startup. You can customize the location of the database in the config if necessary.
+If you are storing your database on a network share (SMB, NFS, etc), you may get a `database is locked` error message on startup. You can customize the location of the database if necessary.
This may need to be in a custom location if network storage is used for the media folder.
+
+
+
+Navigate to .
+
+- Set **Database path** to the custom path for the Frigate database file (default: `/config/frigate.db`)
+
+
+
+
```yaml
database:
path: /path/to/frigate.db
```
+
+
+
### `model`
If using a custom model, the width and height will need to be specified.
@@ -103,6 +169,22 @@ Custom models may also require different input tensor formats. The colorspace co
| "nhwc" |
| "nchw" |
+
+
+
+Navigate to to configure the model path, dimensions, and input format.
+
+| Field | Description |
+| --------------------------------------------- | ------------------------------------ |
+| **Custom object detector model path** | Path to the custom model file |
+| **Object detection model input width** | Model input width (default: 320) |
+| **Object detection model input height** | Model input height (default: 320) |
+| **Advanced > Model Input Tensor Shape** | Input tensor shape: `nhwc` or `nchw` |
+| **Advanced > Model Input Pixel Color Format** | Pixel format: `rgb`, `bgr`, or `yuv` |
+
+
+
+
```yaml
# Optional: model config
model:
@@ -113,6 +195,9 @@ model:
input_pixel_format: "bgr"
```
+
+
+
#### `labelmap`
:::warning
@@ -163,7 +248,15 @@ services:
### Enabling IPv6
-IPv6 is disabled by default, to enable IPv6 modify your Frigate configuration as follows:
+IPv6 is disabled by default. Enable it in the Frigate configuration.
+
+
+
+
+Navigate to and expand **IPv6 configuration**, then enable **Enable IPv6**.
+
+
+
```yaml
networking:
@@ -171,11 +264,25 @@ networking:
enabled: True
```
+
+
+
### Listen on different ports
-You can change the ports Nginx uses for listening using Frigate's configuration file. The internal port (unauthenticated) and external port (authenticated) can be changed independently. You can also specify an IP address using the format `ip:port` if you wish to bind the port to a specific interface. This may be useful for example to prevent exposing the internal port outside the container.
+You can change the ports Nginx uses for listening. The internal port (unauthenticated) and external port (authenticated) can be changed independently. You can also specify an IP address using the format `ip:port` if you wish to bind the port to a specific interface. This may be useful for example to prevent exposing the internal port outside the container.
-For example:
+
+
+
+Navigate to to configure the listen ports.
+
+| Field | Description |
+| ----------------- | --------------------------------------------------------- |
+| **Internal port** | The unauthenticated listen address/port (default: `5000`) |
+| **External port** | The authenticated listen address/port (default: `8971`) |
+
+
+
```yaml
networking:
@@ -184,6 +291,9 @@ networking:
external: 8971
```
+
+
+
:::warning
This setting is for advanced users. For the majority of use cases it's recommended to change the `ports` section of your Docker compose file or use the Docker `run` `--publish` option instead, e.g. `-p 443:8971`. Changing Frigate's ports may break some integrations.
diff --git a/docs/docs/configuration/audio_detectors.md b/docs/docs/configuration/audio_detectors.md
index 957667914..bb646e677 100644
--- a/docs/docs/configuration/audio_detectors.md
+++ b/docs/docs/configuration/audio_detectors.md
@@ -3,6 +3,10 @@ id: audio_detectors
title: Audio Detectors
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
Frigate provides a builtin audio detector which runs on the CPU. Compared to object detection in images, audio detection is a relatively lightweight operation so the only option is to run the detection on a CPU.
## Configuration
@@ -11,7 +15,17 @@ Audio events work by detecting a type of audio and creating an event, the event
### Enabling Audio Events
-Audio events can be enabled for all cameras or only for specific cameras.
+Audio events can be enabled globally or for specific cameras.
+
+
+
+
+**Global:** Navigate to and set **Enable audio detection** to on.
+
+**Per-camera:** Navigate to and set **Enable audio detection** to on for the desired camera.
+
+
+
```yaml
@@ -26,6 +40,9 @@ cameras:
enabled: True # <- enable audio events for the front_camera
```
+
+
+
If you are using multiple streams then you must set the `audio` role on the stream that is going to be used for audio detection, this can be any stream but the stream must have audio included.
:::note
@@ -34,6 +51,14 @@ The ffmpeg process for capturing audio will be a separate connection to the came
:::
+
+
+
+Navigate to and add an input with the `audio` role pointing to a stream that includes audio.
+
+
+
+
```yaml
cameras:
front_camera:
@@ -48,6 +73,9 @@ cameras:
- detect
```
+
+
+
### Configuring Minimum Volume
The audio detector uses volume levels in the same way that motion in a camera feed is used for object detection. This means that Frigate will not run audio detection unless the audio volume is above the configured level in order to reduce resource usage. Audio levels can vary widely between camera models so it is important to run tests to see what volume levels are. The Debug view in the Frigate UI has an Audio tab for cameras that have the `audio` role assigned where a graph and the current levels are displayed. The `min_volume` parameter should be set to the minimum `RMS` level required to run audio detection.
@@ -62,6 +90,17 @@ Volume is considered motion for recordings, this means when the `record -> retai
The included audio model has over [500 different types](https://github.com/blakeblackshear/frigate/blob/dev/audio-labelmap.txt) of audio that can be detected, many of which are not practical. By default `bark`, `fire_alarm`, `scream`, `speech`, and `yell` are enabled but these can be customized.
+
+
+
+Navigate to .
+
+- Set **Enable audio detection** to on
+- Set **Listen types** to include the audio types you want to detect
+
+
+
+
```yaml
audio:
enabled: True
@@ -73,15 +112,32 @@ audio:
- yell
```
+
+
+
### Audio Transcription
-Frigate supports fully local audio transcription using either `sherpa-onnx` or OpenAI’s open-source Whisper models via `faster-whisper`. The goal of this feature is to support Semantic Search for `speech` audio events. Frigate is not intended to act as a continuous, fully-automatic speech transcription service — automatically transcribing all speech (or queuing many audio events for transcription) requires substantial CPU (or GPU) resources and is impractical on most systems. For this reason, transcriptions for events are initiated manually from the UI or the API rather than being run continuously in the background.
+Frigate supports fully local audio transcription using either `sherpa-onnx` or OpenAI's open-source Whisper models via `faster-whisper`. The goal of this feature is to support Semantic Search for `speech` audio events. Frigate is not intended to act as a continuous, fully-automatic speech transcription service — automatically transcribing all speech (or queuing many audio events for transcription) requires substantial CPU (or GPU) resources and is impractical on most systems. For this reason, transcriptions for events are initiated manually from the UI or the API rather than being run continuously in the background.
Transcription accuracy also depends heavily on the quality of your camera's microphone and recording conditions. Many cameras use inexpensive microphones, and distance to the speaker, low audio bitrate, or background noise can significantly reduce transcription quality. If you need higher accuracy, more robust long-running queues, or large-scale automatic transcription, consider using the HTTP API in combination with an automation platform and a cloud transcription service.
#### Configuration
-To enable transcription, enable it in your config. Note that audio detection must also be enabled as described above in order to use audio transcription features.
+To enable transcription, configure it globally and optionally disable for specific cameras. Audio detection must also be enabled as described above.
+
+
+
+
+**Global:** Navigate to .
+
+- Set **Enable audio transcription** to on
+- Set **Transcription device** to the desired device
+- Set **Model size** to the desired size
+
+**Per-camera:** Navigate to to enable or disable transcription for a specific camera.
+
+
+
```yaml
audio_transcription:
@@ -100,6 +156,9 @@ cameras:
enabled: False
```
+
+
+
:::note
Audio detection must be enabled and configured as described above in order to use audio transcription features.
@@ -146,7 +205,7 @@ If you have CUDA hardware, you can experiment with the `large` `whisper` model o
Any `speech` events in Explore can be transcribed and/or translated through the Transcribe button in the Tracked Object Details pane.
-In order to use transcription and translation for past events, you must enable audio detection and define `speech` as an audio type to listen for in your config. To have `speech` events translated into the language of your choice, set the `language` config parameter with the correct [language code](https://github.com/openai/whisper/blob/main/whisper/tokenizer.py#L10).
+In order to use transcription and translation for past events, you must enable audio detection and define `speech` as an audio type to listen for. To have `speech` events translated into the language of your choice, set the `language` config parameter with the correct [language code](https://github.com/openai/whisper/blob/main/whisper/tokenizer.py#L10).
The transcribed/translated speech will appear in the description box in the Tracked Object Details pane. If Semantic Search is enabled, embeddings are generated for the transcription text and are fully searchable using the description search type.
@@ -162,16 +221,16 @@ Recorded `speech` events will always use a `whisper` model, regardless of the `m
1. Why doesn't Frigate automatically transcribe all `speech` events?
- Frigate does not implement a queue mechanism for speech transcription, and adding one is not trivial. A proper queue would need backpressure, prioritization, memory/disk buffering, retry logic, crash recovery, and safeguards to prevent unbounded growth when events outpace processing. That’s a significant amount of complexity for a feature that, in most real-world environments, would mostly just churn through low-value noise.
+ Frigate does not implement a queue mechanism for speech transcription, and adding one is not trivial. A proper queue would need backpressure, prioritization, memory/disk buffering, retry logic, crash recovery, and safeguards to prevent unbounded growth when events outpace processing. That's a significant amount of complexity for a feature that, in most real-world environments, would mostly just churn through low-value noise.
Because transcription is **serialized (one event at a time)** and speech events can be generated far faster than they can be processed, an auto-transcribe toggle would very quickly create an ever-growing backlog and degrade core functionality. For the amount of engineering and risk involved, it adds **very little practical value** for the majority of deployments, which are often on low-powered, edge hardware.
- If you hear speech that’s actually important and worth saving/indexing for the future, **just press the transcribe button in Explore** on that specific `speech` event - that keeps things explicit, reliable, and under your control.
+ If you hear speech that's actually important and worth saving/indexing for the future, **just press the transcribe button in Explore** on that specific `speech` event - that keeps things explicit, reliable, and under your control.
Other options are being considered for future versions of Frigate to add transcription options that support external `whisper` Docker containers. A single transcription service could then be shared by Frigate and other applications (for example, Home Assistant Voice), and run on more powerful machines when available.
2. Why don't you save live transcription text and use that for `speech` events?
- There’s no guarantee that a `speech` event is even created from the exact audio that went through the transcription model. Live transcription and `speech` event creation are **separate, asynchronous processes**. Even when both are correctly configured, trying to align the **precise start and end time of a speech event** with whatever audio the model happened to be processing at that moment is unreliable.
+ There's no guarantee that a `speech` event is even created from the exact audio that went through the transcription model. Live transcription and `speech` event creation are **separate, asynchronous processes**. Even when both are correctly configured, trying to align the **precise start and end time of a speech event** with whatever audio the model happened to be processing at that moment is unreliable.
- Automatically persisting that data would often result in **misaligned, partial, or irrelevant transcripts**, while still incurring all of the CPU, storage, and privacy costs of transcription. That’s why Frigate treats transcription as an **explicit, user-initiated action** rather than an automatic side-effect of every `speech` event.
+ Automatically persisting that data would often result in **misaligned, partial, or irrelevant transcripts**, while still incurring all of the CPU, storage, and privacy costs of transcription. That's why Frigate treats transcription as an **explicit, user-initiated action** rather than an automatic side-effect of every `speech` event.
diff --git a/docs/docs/configuration/authentication.md b/docs/docs/configuration/authentication.md
index 694c4bada..0d80d80ce 100644
--- a/docs/docs/configuration/authentication.md
+++ b/docs/docs/configuration/authentication.md
@@ -3,6 +3,10 @@ id: authentication
title: Authentication
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
# Authentication
Frigate stores user information in its database. Password hashes are generated using industry standard PBKDF2-SHA256 with 600,000 iterations. Upon successful login, a JWT token is issued with an expiration date and set as a cookie. The cookie is refreshed as needed automatically. This JWT token can also be passed in the Authorization header as a bearer token.
@@ -22,13 +26,26 @@ On startup, an admin user and password are generated and printed in the logs. It
## Resetting admin password
-In the event that you are locked out of your instance, you can tell Frigate to reset the admin password and print it in the logs on next startup using the `reset_admin_password` setting in your config file.
+In the event that you are locked out of your instance, you can tell Frigate to reset the admin password and print it in the logs on next startup.
+
+
+
+
+Navigate to .
+
+- Set **Reset admin password** to on to reset the admin password and print it in the logs on next startup
+
+
+
```yaml
auth:
reset_admin_password: true
```
+
+
+
## Password guidance
Constructing secure passwords and managing them properly is important. Frigate requires a minimum length of 12 characters. For guidance on password standards see [NIST SP 800-63B](https://pages.nist.gov/800-63-3/sp800-63b.html). To learn what makes a password truly secure, read this [article](https://medium.com/peerio/how-to-build-a-billion-dollar-password-3d92568d9277).
@@ -47,7 +64,20 @@ Restarting Frigate will reset the rate limits.
If you are running Frigate behind a proxy, you will want to set `trusted_proxies` or these rate limits will apply to the upstream proxy IP address. This means that a brute force attack will rate limit login attempts from other devices and could temporarily lock you out of your instance. In order to ensure rate limits only apply to the actual IP address where the requests are coming from, you will need to list the upstream networks that you want to trust. These trusted proxies are checked against the `X-Forwarded-For` header when looking for the IP address where the request originated.
-If you are running a reverse proxy in the same Docker Compose file as Frigate, here is an example of how your auth config might look:
+If you are running a reverse proxy in the same Docker Compose file as Frigate, configure rate limiting and trusted proxies as follows:
+
+
+
+
+Navigate to .
+
+| Field | Description |
+| ----------------------- | ------------------------------------------------------------------------------------------------------------------------- |
+| **Failed login limits** | Rate limit string for login failures (e.g., `1/second;5/minute;20/hour`) |
+| **Trusted proxies** | List of upstream network CIDRs to trust for `X-Forwarded-For` (e.g., `172.18.0.0/16` for internal Docker Compose network) |
+
+
+
```yaml
auth:
@@ -56,6 +86,9 @@ auth:
- 172.18.0.0/16 # <---- this is the subnet for the internal Docker Compose network
```
+
+
+
## Session Length
The default session length for user authentication in Frigate is 24 hours. This setting determines how long a user's authenticated session remains active before a token refresh is required — otherwise, the user will need to log in again.
@@ -67,11 +100,24 @@ The default value of `86400` will expire the authentication session after 24 hou
- `0`: Setting the session length to 0 will require a user to log in every time they access the application or after a very short, immediate timeout.
- `604800`: Setting the session length to 604800 will require a user to log in if the token is not refreshed for 7 days.
+
+
+
+Navigate to .
+
+- Set **Session length** to the duration in seconds before the authentication session expires (default: 86400 / 24 hours)
+
+
+
+
```yaml
auth:
session_length: 86400
```
+
+
+
## JWT Token Secret
The JWT token secret needs to be kept secure. Anyone with this secret can generate valid JWT tokens to authenticate with Frigate. This should be a cryptographically random string of at least 64 characters.
@@ -99,7 +145,18 @@ Frigate can be configured to leverage features of common upstream authentication
If you are leveraging the authentication of an upstream proxy, you likely want to disable Frigate's authentication as there is no correspondence between users in Frigate's database and users authenticated via the proxy. Optionally, if communication between the reverse proxy and Frigate is over an untrusted network, you should set an `auth_secret` in the `proxy` config and configure the proxy to send the secret value as a header named `X-Proxy-Secret`. Assuming this is an untrusted network, you will also want to [configure a real TLS certificate](tls.md) to ensure the traffic can't simply be sniffed to steal the secret.
-Here is an example of how to disable Frigate's authentication and also ensure the requests come only from your known proxy.
+To disable Frigate's authentication and ensure requests come only from your known proxy:
+
+
+
+
+1. Navigate to .
+ - Set **Enable authentication** to off
+2. Navigate to .
+ - Set **Proxy secret** to ``
+
+
+
```yaml
auth:
@@ -109,6 +166,9 @@ proxy:
auth_secret:
```
+
+
+
You can use the following code to generate a random secret.
```shell
@@ -119,6 +179,20 @@ python3 -c 'import secrets; print(secrets.token_hex(64))'
If you have disabled Frigate's authentication and your proxy supports passing a header with authenticated usernames and/or roles, you can use the `header_map` config to specify the header name so it is passed to Frigate. For example, the following will map the `X-Forwarded-User` and `X-Forwarded-Groups` values. Header names are not case sensitive. Multiple values can be included in the role header. Frigate expects that the character separating the roles is a comma, but this can be specified using the `separator` config entry.
+
+
+
+Navigate to and configure the header mapping and separator settings.
+
+| Field | Description |
+| -------------------------------- | ---------------------------------------------------------------------------------------------------- |
+| **Separator character** | Character separating multiple roles in the role header (default: comma). Authentik uses a pipe `\|`. |
+| **Header mapping > User header** | Header name for the authenticated username (e.g., `x-forwarded-user`) |
+| **Header mapping > Role header** | Header name for the authenticated role/groups (e.g., `x-forwarded-groups`) |
+
+
+
+
```yaml
proxy:
...
@@ -128,19 +202,37 @@ proxy:
role: x-forwarded-groups
```
+
+
+
Frigate supports `admin`, `viewer`, and custom roles (see below). When using port `8971`, Frigate validates these headers and subsequent requests use the headers `remote-user` and `remote-role` for authorization.
A default role can be provided. Any value in the mapped `role` header will override the default.
+
+
+
+Navigate to and set the default role.
+
+| Field | Description |
+| ---------------- | ------------------------------------------------------------- |
+| **Default role** | Fallback role when no role header is present (e.g., `viewer`) |
+
+
+
+
```yaml
proxy:
...
default_role: viewer
```
+
+
+
## Role mapping
-In some environments, upstream identity providers (OIDC, SAML, LDAP, etc.) do not pass a Frigate-compatible role directly, but instead pass one or more group claims. To handle this, Frigate supports a `role_map` that translates upstream group names into Frigate’s internal roles (`admin`, `viewer`, or custom).
+In some environments, upstream identity providers (OIDC, SAML, LDAP, etc.) do not pass a Frigate-compatible role directly, but instead pass one or more group claims. To handle this, Frigate supports a `role_map` that translates upstream group names into Frigate's internal roles (`admin`, `viewer`, or custom). This is configurable via YAML in the configuration file:
```yaml
proxy:
@@ -175,7 +267,7 @@ In this example:
**Authenticated Port (8971)**
- Header mapping is **fully supported**.
-- The `remote-role` header determines the user’s privileges:
+- The `remote-role` header determines the user's privileges:
- **admin** → Full access (user management, configuration changes).
- **viewer** → Read-only access.
- **Custom roles** → Read-only access limited to the cameras defined in `auth.roles[role]`.
@@ -232,6 +324,14 @@ The viewer role provides read-only access to all cameras in the UI and API. Cust
### Role Configuration Example
+
+
+
+Navigate to to define custom roles and assign which cameras each role can access.
+
+
+
+
```yaml {11-16}
cameras:
front_door:
@@ -251,13 +351,16 @@ auth:
- side_yard
```
+
+
+
If you want to provide access to all cameras to a specific user, just use the **viewer** role.
### Managing User Roles
1. Log in as an **admin** user via port `8971` (preferred), or unauthenticated via port `5000`.
2. Navigate to **Settings**.
-3. In the **Users** section, edit a user’s role by selecting from available roles (admin, viewer, or custom).
+3. In the **Users** section, edit a user's role by selecting from available roles (admin, viewer, or custom).
4. In the **Roles** section, add/edit/delete custom roles (select cameras via switches). Deleting a role auto-reassigns users to "viewer".
### Role Enforcement
@@ -277,7 +380,7 @@ To use role-based access control, you must connect to Frigate via the **authenti
1. Log in as an **admin** user via port `8971`.
2. Navigate to **Settings > Users**.
-3. Edit a user’s role by selecting **admin** or **viewer**.
+3. Edit a user's role by selecting **admin** or **viewer**.
## API Authentication Guide
diff --git a/docs/docs/configuration/autotracking.md b/docs/docs/configuration/autotracking.md
index 6dddef458..27312eaa9 100644
--- a/docs/docs/configuration/autotracking.md
+++ b/docs/docs/configuration/autotracking.md
@@ -3,6 +3,10 @@ id: autotracking
title: Camera Autotracking
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
An ONVIF-capable, PTZ (pan-tilt-zoom) camera that supports relative movement within the field of view (FOV) can be configured to automatically track moving objects and keep them in the center of the frame.

@@ -29,12 +33,44 @@ A growing list of cameras and brands that have been reported by users to work wi
First, set up a PTZ preset in your camera's firmware and give it a name. If you're unsure how to do this, consult the documentation for your camera manufacturer's firmware. Some tutorials for common brands: [Amcrest](https://www.youtube.com/watch?v=lJlE9-krmrM), [Reolink](https://www.youtube.com/watch?v=VAnxHUY5i5w), [Dahua](https://www.youtube.com/watch?v=7sNbc5U-k54).
-Edit your Frigate configuration file and enter the ONVIF parameters for your camera. Specify the object types to track, a required zone the object must enter to begin autotracking, and the camera preset name you configured in your camera's firmware to return to when tracking has ended. Optionally, specify a delay in seconds before Frigate returns the camera to the preset.
+Configure the ONVIF connection and autotracking parameters for your camera. Specify the object types to track, a required zone the object must enter to begin autotracking, and the camera preset name you configured in your camera's firmware to return to when tracking has ended. Optionally, specify a delay in seconds before Frigate returns the camera to the preset.
An [ONVIF connection](cameras.md) is required for autotracking to function. Also, a [motion mask](masks.md) over your camera's timestamp and any overlay text is recommended to ensure they are completely excluded from scene change calculations when the camera is moving.
Note that `autotracking` is disabled by default but can be enabled in the configuration or by MQTT.
+
+
+
+Navigate to for the desired camera.
+
+**ONVIF Connection**
+
+| Field | Description |
+| ---------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| **ONVIF host** | Host of the camera being connected to. HTTP is assumed by default; prefix with `https://` for HTTPS. |
+| **ONVIF port** | ONVIF port for device (default: 8000) |
+| **ONVIF username** | Username for login. Some devices require admin to access ONVIF. |
+| **ONVIF password** | Password for login |
+| **Disable TLS verify** | Skip TLS verification and disable digest auth for ONVIF (default: false) |
+| **ONVIF profile** | ONVIF media profile to use for PTZ control, matched by token or name. If not set, the first profile with valid PTZ configuration is selected automatically. |
+
+**Autotracking**
+
+| Field | Description |
+| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------ |
+| **Enable Autotracking** | Enable or disable object autotracking (default: false) |
+| **Calibrate on start** | Calibrate the camera on startup by measuring PTZ motor speed (default: false) |
+| **Zoom mode** | Zoom mode during autotracking: `disabled`, `absolute`, or `relative` (default: disabled) |
+| **Zoom Factor** | Controls zoom behavior on tracked objects, between 0.1 and 0.75. Lower keeps more scene visible; higher zooms in more (default: 0.3) |
+| **Tracked objects** | List of object types to track (default: person) |
+| **Required Zones** | Zones an object must enter to begin autotracking |
+| **Return Preset** | Name of ONVIF preset in camera firmware to return to when tracking ends (default: home) |
+| **Return timeout** | Seconds to delay before returning to preset (default: 10) |
+
+
+
+
```yaml
cameras:
ptzcamera:
@@ -92,13 +128,16 @@ cameras:
movement_weights: []
```
+
+
+
## Calibration
PTZ motors operate at different speeds. Performing a calibration will direct Frigate to measure this speed over a variety of movements and use those measurements to better predict the amount of movement necessary to keep autotracked objects in the center of the frame.
Calibration is optional, but will greatly assist Frigate in autotracking objects that move across the camera's field of view more quickly.
-To begin calibration, set the `calibrate_on_startup` for your camera to `True` and restart Frigate. Frigate will then make a series of small and large movements with your camera. Don't move the PTZ manually while calibration is in progress. Once complete, camera motion will stop and your config file will be automatically updated with a `movement_weights` parameter to be used in movement calculations. You should not modify this parameter manually.
+To begin calibration, set `calibrate_on_startup` for your camera to `True` and restart Frigate. Frigate will then make a series of small and large movements with your camera. Don't move the PTZ manually while calibration is in progress. Once complete, camera motion will stop and your config file will be automatically updated with a `movement_weights` parameter to be used in movement calculations. You should not modify this parameter manually.
After calibration has ended, your PTZ will be moved to the preset specified by `return_preset`.
diff --git a/docs/docs/configuration/bird_classification.md b/docs/docs/configuration/bird_classification.md
index 398729290..75c0b8306 100644
--- a/docs/docs/configuration/bird_classification.md
+++ b/docs/docs/configuration/bird_classification.md
@@ -3,6 +3,10 @@ id: bird_classification
title: Bird Classification
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
Bird classification identifies known birds using a quantized Tensorflow model. When a known bird is recognized, its common name will be added as a `sub_label`. This information is included in the UI, filters, as well as in notifications.
## Minimum System Requirements
@@ -15,7 +19,18 @@ The classification model used is the MobileNet INat Bird Classification, [availa
## Configuration
-Bird classification is disabled by default, it must be enabled in your config file before it can be used. Bird classification is a global configuration setting.
+Bird classification is disabled by default and must be enabled before it can be used. Bird classification is a global configuration setting.
+
+
+
+
+Navigate to .
+
+- Set **Bird classification config > Bird classification** to on
+- Set **Bird classification config > Minimum score** to the desired confidence score (default: 0.9)
+
+
+
```yaml
classification:
@@ -23,6 +38,9 @@ classification:
enabled: true
```
+
+
+
## Advanced Configuration
Fine-tune bird classification with these optional parameters:
diff --git a/docs/docs/configuration/birdseye.md b/docs/docs/configuration/birdseye.md
index f48299aec..810449478 100644
--- a/docs/docs/configuration/birdseye.md
+++ b/docs/docs/configuration/birdseye.md
@@ -1,5 +1,9 @@
# Birdseye
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
In addition to Frigate's Live camera dashboard, Birdseye allows a portable heads-up view of your cameras to see what is going on around your property / space without having to watch all cameras that may have nothing happening. Birdseye allows specific modes that intelligently show and disappear based on what you care about.
Birdseye can be viewed by adding the "Birdseye" camera to a Camera Group in the Web UI. Add a Camera Group by pressing the "+" icon on the Live page, and choose "Birdseye" as one of the cameras.
@@ -22,7 +26,22 @@ A custom icon can be added to the birdseye background by providing a 180x180 ima
### Birdseye view override at camera level
-If you want to include a camera in Birdseye view only for specific circumstances, or just don't include it at all, the Birdseye setting can be set at the camera level.
+To include a camera in Birdseye view only for specific circumstances, or exclude it entirely, configure Birdseye at the camera level.
+
+
+
+
+**Global settings:** Navigate to to configure the default Birdseye behavior for all cameras.
+
+**Per-camera overrides:** Navigate to to override the mode or disable Birdseye for a specific camera.
+
+| Field | Description |
+|-------|-------------|
+| **Enable Birdseye** | Whether this camera appears in Birdseye view |
+| **Tracking mode** | When to show the camera: `continuous`, `motion`, or `objects` |
+
+
+
```yaml {8-10,12-14}
# Include all cameras by default in Birdseye view
@@ -41,9 +60,24 @@ cameras:
enabled: False
```
+
+
+
### Birdseye Inactivity
-By default birdseye shows all cameras that have had the configured activity in the last 30 seconds, this can be configured:
+By default birdseye shows all cameras that have had the configured activity in the last 30 seconds. This threshold can be configured.
+
+
+
+
+Navigate to .
+
+| Field | Description |
+|-------|-------------|
+| **Inactivity threshold** | Seconds of inactivity before a camera is hidden from Birdseye (default: 30) |
+
+
+
```yaml
birdseye:
@@ -52,12 +86,28 @@ birdseye:
inactivity_threshold: 15
```
+
+
+
## Birdseye Layout
### Birdseye Dimensions
The resolution and aspect ratio of birdseye can be configured. Resolution will increase the quality but does not affect the layout. Changing the aspect ratio of birdseye does affect how cameras are laid out.
+
+
+
+Navigate to .
+
+| Field | Description |
+|-------|-------------|
+| **Width** | Birdseye output width in pixels (default: 1280) |
+| **Height** | Birdseye output height in pixels (default: 720) |
+
+
+
+
```yaml
birdseye:
enabled: True
@@ -65,10 +115,20 @@ birdseye:
height: 720
```
+
+
+
### Sorting cameras in the Birdseye view
-It is possible to override the order of cameras that are being shown in the Birdseye view.
-The order needs to be set at the camera level.
+It is possible to override the order of cameras that are being shown in the Birdseye view. The order is set at the camera level.
+
+
+
+
+Navigate to for each camera and set the **Position** field to control the display order.
+
+
+
```yaml
# Include all cameras by default in Birdseye view
@@ -87,13 +147,26 @@ cameras:
order: 2
```
+
+
+
_Note_: Cameras are sorted by default using their name to ensure a constant view inside Birdseye.
### Birdseye Cameras
It is possible to limit the number of cameras shown on birdseye at one time. When this is enabled, birdseye will show the cameras with most recent activity. There is a cooldown to ensure that cameras do not switch too frequently.
-For example, this can be configured to only show the most recently active camera.
+
+
+
+Navigate to .
+
+| Field | Description |
+|-------|-------------|
+| **Layout > Max cameras** | Maximum number of cameras shown at once (e.g., `1` for only the most active camera) |
+
+
+
```yaml {3-4}
birdseye:
@@ -102,13 +175,31 @@ birdseye:
max_cameras: 1
```
+
+
+
### Birdseye Scaling
By default birdseye tries to fit 2 cameras in each row and then double in size until a suitable layout is found. The scaling can be configured with a value between 1.0 and 5.0 depending on use case.
+
+
+
+Navigate to .
+
+| Field | Description |
+|-------|-------------|
+| **Layout > Scaling factor** | Camera scaling factor between 1.0 and 5.0 (default: 2.0) |
+
+
+
+
```yaml {3-4}
birdseye:
enabled: True
layout:
scaling_factor: 3.0
```
+
+
+
diff --git a/docs/docs/configuration/cameras.md b/docs/docs/configuration/cameras.md
index 84cf318e4..8094c9f1c 100644
--- a/docs/docs/configuration/cameras.md
+++ b/docs/docs/configuration/cameras.md
@@ -3,6 +3,10 @@ id: cameras
title: Camera Configuration
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
## Setting Up Camera Inputs
Several inputs can be configured for each camera and the role of each input can be mixed and matched based on your needs. This allows you to use a lower resolution stream for object detection, but create recordings from a higher resolution stream, or vice versa.
@@ -17,6 +21,25 @@ Each role can only be assigned to one input per camera. The options for roles ar
| `record` | Saves segments of the video feed based on configuration settings. [docs](record.md) |
| `audio` | Feed for audio based detection. [docs](audio_detectors.md) |
+
+
+
+Navigate to .
+
+| Field | Description |
+| ----------------- | ------------------------------------------------------------------- |
+| **Camera inputs** | List of input stream definitions (paths and roles) for this camera. |
+
+Navigate to .
+
+| Field | Description |
+| ----------------- | ------------------------------------------------------------------------------------------------------ |
+| **Detect width** | Width (pixels) of frames used for the detect stream; leave empty to use the native stream resolution. |
+| **Detect height** | Height (pixels) of frames used for the detect stream; leave empty to use the native stream resolution. |
+
+
+
+
```yaml
mqtt:
host: mqtt.server.com
@@ -36,7 +59,18 @@ cameras:
height: 720 # <- optional, by default Frigate tries to automatically detect resolution
```
-Additional cameras are simply added to the config under the `cameras` entry.
+
+
+
+Additional cameras are simply added under the camera configuration section.
+
+
+
+
+Navigate to and use the **Add Camera** button to configure each additional camera.
+
+
+
```yaml
mqtt: ...
@@ -46,6 +80,9 @@ cameras:
side: ...
```
+
+
+
:::note
If you only define one stream in your `inputs` and do not assign a `detect` role to it, Frigate will automatically assign it the `detect` role. Frigate will always decode a stream to support motion detection, Birdseye, the API image endpoints, and other features, even if you have disabled object detection with `enabled: False` in your config's `detect` section.
@@ -64,7 +101,19 @@ Not every PTZ supports ONVIF, which is the standard protocol Frigate uses to com
:::
-Add the onvif section to your camera in your configuration file:
+Configure the ONVIF connection for your camera to enable PTZ controls.
+
+
+
+
+1. Navigate to and select your camera.
+ - Set **ONVIF host** to your camera's IP address, e.g.: `10.0.10.10`
+ - Set **ONVIF port** to your camera's ONVIF port, e.g.: `8000`
+ - Set **ONVIF username** to your camera's ONVIF username, e.g.: `admin`
+ - Set **ONVIF password** to your camera's ONVIF password, e.g.: `password`
+
+
+
```yaml {4-8}
cameras:
@@ -77,6 +126,9 @@ cameras:
password: password
```
+
+
+
If the ONVIF connection is successful, PTZ controls will be available in the camera's WebUI.
:::note
@@ -130,13 +182,15 @@ The FeatureList on the [ONVIF Conformant Products Database](https://www.onvif.or
## Setting up camera groups
-:::tip
+Camera groups let you organize cameras together with a shared name and icon, making it easier to review and filter them. A default group for all cameras is always available.
-It is recommended to set up camera groups using the UI.
+
+
-:::
+On the Live dashboard, press the **+** icon in the main navigation to add a new camera group. Configure the group name, select which cameras to include, choose an icon, and set the display order.
-Cameras can be grouped together and assigned a name and icon, this allows them to be reviewed and filtered together. There will always be the default group for all cameras.
+
+
```yaml
camera_groups:
@@ -148,6 +202,9 @@ camera_groups:
order: 0
```
+
+
+
## Two-Way Audio
See the guide [here](/configuration/live/#two-way-talk)
diff --git a/docs/docs/configuration/custom_classification/object_classification.md b/docs/docs/configuration/custom_classification/object_classification.md
index 7b5d73d75..6e68d4ba9 100644
--- a/docs/docs/configuration/custom_classification/object_classification.md
+++ b/docs/docs/configuration/custom_classification/object_classification.md
@@ -3,13 +3,17 @@ id: object_classification
title: Object Classification
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
Object classification allows you to train a custom MobileNetV2 classification model to run on tracked objects (persons, cars, animals, etc.) to identify a finer category or attribute for that object. Classification results are visible in the Tracked Object Details pane in Explore, through the `frigate/tracked_object_details` MQTT topic, in Home Assistant sensors via the official Frigate integration, or through the event endpoints in the HTTP API.
## Minimum System Requirements
Object classification models are lightweight and run very fast on CPU.
-Training the model does briefly use a high amount of system resources for about 1–3 minutes per training run. On lower-power devices, training may take longer.
+Training the model does briefly use a high amount of system resources for about 1-3 minutes per training run. On lower-power devices, training may take longer.
A CPU with AVX + AVX2 instructions is required for training and inference.
@@ -27,7 +31,7 @@ For object classification:
### Classification Type
- **Sub label**:
- - Applied to the object’s `sub_label` field.
+ - Applied to the object's `sub_label` field.
- Ideal for a single, more specific identity or type.
- Example: `cat` → `Leo`, `Charlie`, `None`.
@@ -55,7 +59,7 @@ This two-step verification prevents false positives by requiring consistent pred
### Sub label
-- **Known pet vs unknown**: For `dog` objects, set sub label to your pet’s name (e.g., `buddy`) or `none` for others.
+- **Known pet vs unknown**: For `dog` objects, set sub label to your pet's name (e.g., `buddy`) or `none` for others.
- **Mail truck vs normal car**: For `car`, classify as `mail_truck` vs `car` to filter important arrivals.
- **Delivery vs non-delivery person**: For `person`, classify `delivery` vs `visitor` based on uniform/props.
@@ -68,7 +72,27 @@ This two-step verification prevents false positives by requiring consistent pred
## Configuration
-Object classification is configured as a custom classification model. Each model has its own name and settings. You must list which object labels should be classified.
+Object classification is configured as a custom classification model. Each model has its own name and settings. Specify which object labels should be classified.
+
+
+
+
+Navigate to the **Classification** page from the main navigation sidebar, then click **Add Classification**.
+
+In the **Create New Classification** dialog:
+
+| Field | Description |
+| ----------------------- | ------------------------------------------------------------- |
+| **Name** | A name for your classification model (e.g., `dog`) |
+| **Type** | Select **Object** for object classification |
+| **Object Label** | The object label to classify (e.g., `dog`, `person`, `car`) |
+| **Classification Type** | Whether to assign results as a **Sub Label** or **Attribute** |
+| **Classes** | The class names the model will learn to distinguish between |
+
+The `threshold` (default: `0.8`) can be adjusted in the YAML configuration.
+
+
+
```yaml
classification:
@@ -82,6 +106,9 @@ classification:
An optional config, `save_attempts`, can be set as a key under the model name. This defines the number of classification attempts to save in the Recent Classifications tab. For object classification models, the default is 200.
+
+
+
## Training the model
Creating and training the model is done within the Frigate UI using the `Classification` page. The process consists of two steps:
@@ -104,18 +131,18 @@ If examples for some of your classes do not appear in the grid, you can continue
:::tip Diversity matters far more than volume
-Selecting dozens of nearly identical images is one of the fastest ways to degrade model performance. MobileNetV2 can overfit quickly when trained on homogeneous data — the model learns what *that exact moment* looked like rather than what actually defines the class. **This is why Frigate does not implement bulk training in the UI.**
+Selecting dozens of nearly identical images is one of the fastest ways to degrade model performance. MobileNetV2 can overfit quickly when trained on homogeneous data — the model learns what _that exact moment_ looked like rather than what actually defines the class. **This is why Frigate does not implement bulk training in the UI.**
For more detail, see [Frigate Tip: Best Practices for Training Face and Custom Classification Models](https://github.com/blakeblackshear/frigate/discussions/21374).
:::
- **Start small and iterate**: Begin with a small, representative set of images per class. Models often begin working well with surprisingly few examples and improve naturally over time.
-- **Favor hard examples**: When images appear in the Recent Classifications tab, prioritize images scoring below 90–100% or those captured under new lighting, weather, or distance conditions.
+- **Favor hard examples**: When images appear in the Recent Classifications tab, prioritize images scoring below 90-100% or those captured under new lighting, weather, or distance conditions.
- **Avoid bulk training similar images**: Training large batches of images that already score 100% (or close) adds little new information and increases the risk of overfitting.
-- **The wizard is just the starting point**: You don’t need to find and label every class upfront. Missing classes will naturally appear in Recent Classifications, and those images tend to be more valuable because they represent new conditions and edge cases.
+- **The wizard is just the starting point**: You don't need to find and label every class upfront. Missing classes will naturally appear in Recent Classifications, and those images tend to be more valuable because they represent new conditions and edge cases.
- **Problem framing**: Keep classes visually distinct and relevant to the chosen object types.
-- **Preprocessing**: Ensure examples reflect object crops similar to Frigate’s boxes; keep the subject centered.
+- **Preprocessing**: Ensure examples reflect object crops similar to Frigate's boxes; keep the subject centered.
- **Labels**: Keep label names short and consistent; include a `none` class if you plan to ignore uncertain predictions for sub labels.
- **Threshold**: Tune `threshold` per model to reduce false assignments. Start at `0.8` and adjust based on validation.
@@ -125,6 +152,17 @@ To troubleshoot issues with object classification models, enable debug logging t
Enable debug logs for classification models by adding `frigate.data_processing.real_time.custom_classification: debug` to your `logger` configuration. These logs are verbose, so only keep this enabled when necessary. Restart Frigate after this change.
+
+
+
+Navigate to .
+
+- Set **Logging level** to `debug`
+- Set **Per-process log level > `frigate.data_processing.real_time.custom_classification`** to `debug` for verbose classification logging
+
+
+
+
```yaml
logger:
default: info
@@ -133,6 +171,9 @@ logger:
frigate.data_processing.real_time.custom_classification: debug
```
+
+
+
The debug logs will show:
- Classification probabilities for each attempt
diff --git a/docs/docs/configuration/custom_classification/state_classification.md b/docs/docs/configuration/custom_classification/state_classification.md
index 3cadd9054..688b8bb0d 100644
--- a/docs/docs/configuration/custom_classification/state_classification.md
+++ b/docs/docs/configuration/custom_classification/state_classification.md
@@ -3,13 +3,17 @@ id: state_classification
title: State Classification
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
State classification allows you to train a custom MobileNetV2 classification model on a fixed region of your camera frame(s) to determine a current state. The model can be configured to run on a schedule and/or when motion is detected in that region. Classification results are available through the `frigate//classification/` MQTT topic and in Home Assistant sensors via the official Frigate integration.
## Minimum System Requirements
State classification models are lightweight and run very fast on CPU.
-Training the model does briefly use a high amount of system resources for about 1–3 minutes per training run. On lower-power devices, training may take longer.
+Training the model does briefly use a high amount of system resources for about 1-3 minutes per training run. On lower-power devices, training may take longer.
A CPU with AVX + AVX2 instructions is required for training and inference.
@@ -33,7 +37,25 @@ For state classification:
## Configuration
-State classification is configured as a custom classification model. Each model has its own name and settings. You must provide at least one camera crop under `state_config.cameras`.
+State classification is configured as a custom classification model. Each model has its own name and settings. Provide at least one camera crop under `state_config.cameras`.
+
+
+
+
+Navigate to the **Classification** page from the main navigation sidebar, select the **States** tab, then click **Add Classification**.
+
+In the **Create New Classification** dialog:
+
+| Field | Description |
+| ----------- | ------------------------------------------------------------------------------------ |
+| **Name** | A name for your state classification model (e.g., `front_door`) |
+| **Type** | Select **State** for state classification |
+| **Classes** | The state names the model will learn to distinguish between (e.g., `open`, `closed`) |
+
+After creating the model, the wizard will guide you through selecting the camera crop area and assigning training examples. The `threshold` (default: `0.8`), `motion`, and `interval` settings can be adjusted in the YAML configuration.
+
+
+
```yaml
classification:
@@ -50,6 +72,9 @@ classification:
An optional config, `save_attempts`, can be set as a key under the model name. This defines the number of classification attempts to save in the Recent Classifications tab. For state classification models, the default is 100.
+
+
+
## Training the model
Creating and training the model is done within the Frigate UI using the `Classification` page. The process consists of three steps:
@@ -72,7 +97,7 @@ Once some images are assigned, training will begin automatically.
:::tip Diversity matters far more than volume
-Selecting dozens of nearly identical images is one of the fastest ways to degrade model performance. MobileNetV2 can overfit quickly when trained on homogeneous data — the model learns what *that exact moment* looked like rather than what actually defines the state. This often leads to models that work perfectly under the original conditions but become unstable when day turns to night, weather changes, or seasonal lighting shifts. **This is why Frigate does not implement bulk training in the UI.**
+Selecting dozens of nearly identical images is one of the fastest ways to degrade model performance. MobileNetV2 can overfit quickly when trained on homogeneous data — the model learns what _that exact moment_ looked like rather than what actually defines the state. This often leads to models that work perfectly under the original conditions but become unstable when day turns to night, weather changes, or seasonal lighting shifts. **This is why Frigate does not implement bulk training in the UI.**
For more detail, see [Frigate Tip: Best Practices for Training Face and Custom Classification Models](https://github.com/blakeblackshear/frigate/discussions/21374).
@@ -82,7 +107,7 @@ For more detail, see [Frigate Tip: Best Practices for Training Face and Custom C
- **Problem framing**: Keep classes visually distinct and state-focused (e.g., `open`, `closed`, `unknown`). Avoid combining object identity with state in a single model unless necessary.
- **Data collection**: Use the model's Recent Classifications tab to gather balanced examples across times of day and weather.
- **When to train**: Focus on cases where the model is entirely incorrect or flips between states when it should not. There's no need to train additional images when the model is already working consistently.
-- **Favor hard examples**: When images appear in the Recent Classifications tab, prioritize images scoring below 90–100% or those captured under new conditions (e.g., first snow of the year, seasonal changes, objects temporarily in view, insects at night). These represent scenarios different from the default state and help prevent overfitting.
+- **Favor hard examples**: When images appear in the Recent Classifications tab, prioritize images scoring below 90-100% or those captured under new conditions (e.g., first snow of the year, seasonal changes, objects temporarily in view, insects at night). These represent scenarios different from the default state and help prevent overfitting.
- **Avoid bulk training similar images**: Training large batches of images that already score 100% (or close) adds little new information and increases the risk of overfitting.
- **The wizard is just the starting point**: You don't need to find and label every state upfront. Missing states will naturally appear in Recent Classifications, and those images tend to be more valuable because they represent new conditions and edge cases.
@@ -92,6 +117,17 @@ To troubleshoot issues with state classification models, enable debug logging to
Enable debug logs for classification models by adding `frigate.data_processing.real_time.custom_classification: debug` to your `logger` configuration. These logs are verbose, so only keep this enabled when necessary. Restart Frigate after this change.
+
+
+
+Navigate to .
+
+- Set **Logging level** to `debug`
+- Set **Per-process log level > `frigate.data_processing.real_time.custom_classification`** to `debug` for verbose classification logging
+
+
+
+
```yaml
logger:
default: info
@@ -100,6 +136,9 @@ logger:
frigate.data_processing.real_time.custom_classification: debug
```
+
+
+
The debug logs will show:
- Classification probabilities for each attempt
diff --git a/docs/docs/configuration/face_recognition.md b/docs/docs/configuration/face_recognition.md
index c44f76dea..7c23884cc 100644
--- a/docs/docs/configuration/face_recognition.md
+++ b/docs/docs/configuration/face_recognition.md
@@ -3,6 +3,10 @@ id: face_recognition
title: Face Recognition
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
Face recognition identifies known individuals by matching detected faces with previously learned facial data. When a known `person` is recognized, their name will be added as a `sub_label`. This information is included in the UI, filters, as well as in notifications.
## Model Requirements
@@ -40,50 +44,101 @@ The `large` model is optimized for accuracy, an integrated or discrete GPU / NPU
## Configuration
-Face recognition is disabled by default, face recognition must be enabled in the UI or in your config file before it can be used. Face recognition is a global configuration setting.
+Face recognition is disabled by default and must be enabled before it can be used. Face recognition is a global configuration setting.
+
+
+
+
+Navigate to .
+
+- Set **Enable face recognition** to on
+
+
+
```yaml
face_recognition:
enabled: true
```
+
+
+
Like the other real-time processors in Frigate, face recognition runs on the camera stream defined by the `detect` role in your config. To ensure optimal performance, select a suitable resolution for this stream in your camera's firmware that fits your specific scene and requirements.
## Advanced Configuration
-Fine-tune face recognition with these optional parameters at the global level of your config. The only optional parameters that can be set at the camera level are `enabled` and `min_area`.
+Fine-tune face recognition with these optional parameters. The only optional parameters that can be set at the camera level are `enabled` and `min_area`.
### Detection
-- `detection_threshold`: Face detection confidence score required before recognition runs:
+
+
+
+Navigate to .
+
+- **Detection threshold**: Face detection confidence score required before recognition runs. This field only applies to the standalone face detection model; `min_score` should be used to filter for models that have face detection built in.
- Default: `0.7`
- - Note: This is field only applies to the standalone face detection model, `min_score` should be used to filter for models that have face detection built in.
-- `min_area`: Defines the minimum size (in pixels) a face must be before recognition runs.
- - Default: `500` pixels.
- - Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant faces.
+- **Minimum face area**: Minimum size (in pixels) a face must be before recognition runs. Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant faces.
+ - Default: `500` pixels
+
+
+
+
+```yaml
+face_recognition:
+ enabled: true
+ detection_threshold: 0.7
+ min_area: 500
+```
+
+
+
### Recognition
-- `model_size`: Which model size to use, options are `small` or `large`
-- `unknown_score`: Min score to mark a person as a potential match, matches at or below this will be marked as unknown.
- - Default: `0.8`.
-- `recognition_threshold`: Recognition confidence score required to add the face to the object as a sub label.
- - Default: `0.9`.
-- `min_faces`: Min face recognitions for the sub label to be applied to the person object.
+
+
+
+Navigate to .
+
+- **Model size**: Which model size to use, options are `small` or `large`.
+- **Unknown score threshold**: Min score to mark a person as a potential match; matches at or below this will be marked as unknown.
+ - Default: `0.8`
+- **Recognition threshold**: Recognition confidence score required to add the face to the object as a sub label.
+ - Default: `0.9`
+- **Minimum faces**: Min face recognitions for the sub label to be applied to the person object.
- Default: `1`
-- `save_attempts`: Number of images of recognized faces to save for training.
- - Default: `200`.
-- `blur_confidence_filter`: Enables a filter that calculates how blurry the face is and adjusts the confidence based on this.
- - Default: `True`.
-- `device`: Target a specific device to run the face recognition model on (multi-GPU installation).
- - Default: `None`.
- - Note: This setting is only applicable when using the `large` model. See [onnxruntime's provider options](https://onnxruntime.ai/docs/execution-providers/)
+- **Save attempts**: Number of images of recognized faces to save for training.
+ - Default: `200`
+- **Blur confidence filter**: Enables a filter that calculates how blurry the face is and adjusts the confidence based on this.
+ - Default: `True`
+- **Device**: Target a specific device to run the face recognition model on (multi-GPU installation). This setting is only applicable when using the `large` model. See [onnxruntime's provider options](https://onnxruntime.ai/docs/execution-providers/).
+ - Default: `None`
+
+
+
+
+```yaml
+face_recognition:
+ enabled: true
+ model_size: small
+ unknown_score: 0.8
+ recognition_threshold: 0.9
+ min_faces: 1
+ save_attempts: 200
+ blur_confidence_filter: true
+ # device: None # optional: only set to target a specific device in multi-GPU installations (large model only)
+```
+
+
+
## Usage
Follow these steps to begin:
-1. **Enable face recognition** in your configuration file and restart Frigate.
+1. **Enable face recognition** in your configuration and restart Frigate.
2. **Upload one face** using the **Add Face** button's wizard in the Face Library section of the Frigate UI. Read below for the best practices on expanding your training set.
3. When Frigate detects and attempts to recognize a face, it will appear in the **Train** tab of the Face Library, along with its associated recognition confidence.
4. From the **Train** tab, you can **assign the face** to a new or existing person to improve recognition accuracy for the future.
diff --git a/docs/docs/configuration/ffmpeg_presets.md b/docs/docs/configuration/ffmpeg_presets.md
index 8bba62e36..333388280 100644
--- a/docs/docs/configuration/ffmpeg_presets.md
+++ b/docs/docs/configuration/ffmpeg_presets.md
@@ -3,6 +3,10 @@ id: ffmpeg_presets
title: FFmpeg presets
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
Some presets of FFmpeg args are provided by default to make the configuration easier. All presets can be seen in [this file](https://github.com/blakeblackshear/frigate/blob/master/frigate/ffmpeg_presets.py).
### Hwaccel Presets
@@ -21,7 +25,31 @@ See [the hwaccel docs](/configuration/hardware_acceleration_video.md) for more i
| preset-nvidia | Nvidia GPU | |
| preset-jetson-h264 | Nvidia Jetson with h264 stream | |
| preset-jetson-h265 | Nvidia Jetson with h265 stream | |
-| preset-rkmpp | Rockchip MPP | Use image with \*-rk suffix and privileged mode |
+| preset-rkmpp | Rockchip MPP | Use image with \*-rk suffix and privileged mode |
+
+Select the appropriate hwaccel preset for your hardware.
+
+
+
+
+1. Navigate to and set **Hardware acceleration arguments** to the appropriate preset for your hardware.
+2. To override for a specific camera, navigate to and set **Hardware acceleration arguments** for that camera.
+
+
+
+
+```yaml
+ffmpeg:
+ hwaccel_args: preset-vaapi
+
+cameras:
+ front_door:
+ ffmpeg:
+ hwaccel_args: preset-nvidia
+```
+
+
+
### Input Args Presets
@@ -72,7 +100,7 @@ Output args presets help make the config more readable and handle use cases for
| Preset | Usage | Other Notes |
| -------------------------------- | --------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| preset-record-generic | Record WITHOUT audio | If your camera doesn’t have audio, or if you don’t want to record audio, use this option |
+| preset-record-generic | Record WITHOUT audio | If your camera doesn't have audio, or if you don't want to record audio, use this option |
| preset-record-generic-audio-copy | Record WITH original audio | Use this to enable audio in recordings |
| preset-record-generic-audio-aac | Record WITH transcoded aac audio | This is the default when no option is specified. Use it to transcode audio to AAC. If the source is already in AAC format, use preset-record-generic-audio-copy instead to avoid unnecessary re-encoding |
| preset-record-mjpeg | Record an mjpeg stream | Recommend restreaming mjpeg stream instead |
diff --git a/docs/docs/configuration/genai/config.md b/docs/docs/configuration/genai/config.md
index 4026158b7..56e8e2788 100644
--- a/docs/docs/configuration/genai/config.md
+++ b/docs/docs/configuration/genai/config.md
@@ -3,6 +3,10 @@ id: genai_config
title: Configuring Generative AI
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
## Configuration
A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 4 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI-Compatible section below.
@@ -69,6 +73,18 @@ You must use a vision capable model with Frigate. The llama.cpp server supports
All llama.cpp native options can be passed through `provider_options`, including `temperature`, `top_k`, `top_p`, `min_p`, `repeat_penalty`, `repeat_last_n`, `seed`, `grammar`, and more. See the [llama.cpp server documentation](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md) for a complete list of available parameters.
+
+
+
+1. Navigate to .
+ - Set **Provider** to `llamacpp`
+ - Set **Base URL** to your llama.cpp server address (e.g., `http://localhost:8080`)
+ - Set **Model** to the name of your model
+ - Under **Provider Options**, set `context_size` to tell Frigate your context size so it can send the appropriate amount of information
+
+
+
+
```yaml
genai:
provider: llamacpp
@@ -78,6 +94,9 @@ genai:
context_size: 16000 # Tell Frigate your context size so it can send the appropriate amount of information.
```
+
+
+
### Ollama
[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It is highly recommended to host this server on a machine with an Nvidia graphics card, or on a Apple silicon Mac for best performance.
@@ -96,6 +115,18 @@ Note that Frigate will not automatically download the model you specify in your
#### Configuration
+
+
+
+1. Navigate to .
+ - Set **Provider** to `ollama`
+ - Set **Base URL** to your Ollama server address (e.g., `http://localhost:11434`)
+ - Set **Model** to the model tag (e.g., `qwen3-vl:4b`)
+ - Under **Provider Options**, set `keep_alive` (e.g., `-1`) and `options.num_ctx` to match your desired context size
+
+
+
+
```yaml
genai:
provider: ollama
@@ -107,6 +138,9 @@ genai:
num_ctx: 8192 # make sure the context matches other services that are using ollama
```
+
+
+
### OpenAI-Compatible
Frigate supports any provider that implements the OpenAI API standard. This includes self-hosted solutions like [vLLM](https://docs.vllm.ai/), [LocalAI](https://localai.io/), and other OpenAI-compatible servers.
@@ -130,6 +164,18 @@ This ensures Frigate uses the correct context window size when generating prompt
#### Configuration
+
+
+
+1. Navigate to .
+ - Set **Provider** to `openai`
+ - Set **Base URL** to your server address (e.g., `http://your-server:port`)
+ - Set **API key** if required by your server
+ - Set **Model** to the model name
+
+
+
+
```yaml
genai:
provider: openai
@@ -138,6 +184,9 @@ genai:
model: your-model-name
```
+
+
+
To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` environment variable to your provider's API URL.
## Cloud Providers
@@ -150,6 +199,17 @@ Ollama also supports [cloud models](https://ollama.com/cloud), where your local
#### Configuration
+
+
+
+1. Navigate to .
+ - Set **Provider** to `ollama`
+ - Set **Base URL** to your local Ollama address (e.g., `http://localhost:11434`)
+ - Set **Model** to the cloud model name
+
+
+
+
```yaml
genai:
provider: ollama
@@ -157,6 +217,9 @@ genai:
model: cloud-model-name
```
+
+
+
### Google Gemini
Google Gemini has a [free tier](https://ai.google.dev/pricing) for the API, however the limits may not be sufficient for standard Frigate usage. Choose a plan appropriate for your installation.
@@ -176,6 +239,17 @@ To start using Gemini, you must first get an API key from [Google AI Studio](htt
#### Configuration
+
+
+
+1. Navigate to .
+ - Set **Provider** to `gemini`
+ - Set **API key** to your Gemini API key (or use an environment variable such as `{FRIGATE_GEMINI_API_KEY}`)
+ - Set **Model** to the desired model (e.g., `gemini-2.5-flash`)
+
+
+
+
```yaml
genai:
provider: gemini
@@ -183,6 +257,9 @@ genai:
model: gemini-2.5-flash
```
+
+
+
:::note
To use a different Gemini-compatible API endpoint, set the `provider_options` with the `base_url` key to your provider's API URL. For example:
@@ -213,6 +290,17 @@ To start using OpenAI, you must first [create an API key](https://platform.opena
#### Configuration
+
+
+
+1. Navigate to .
+ - Set **Provider** to `openai`
+ - Set **API key** to your OpenAI API key (or use an environment variable such as `{FRIGATE_OPENAI_API_KEY}`)
+ - Set **Model** to the desired model (e.g., `gpt-4o`)
+
+
+
+
```yaml
genai:
provider: openai
@@ -220,6 +308,9 @@ genai:
model: gpt-4o
```
+
+
+
:::note
To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` environment variable to your provider's API URL.
@@ -257,6 +348,18 @@ To start using Azure OpenAI, you must first [create a resource](https://learn.mi
#### Configuration
+
+
+
+1. Navigate to .
+ - Set **Provider** to `azure_openai`
+ - Set **Base URL** to your Azure resource URL including the `api-version` parameter (e.g., `https://instance.cognitiveservices.azure.com/openai/responses?api-version=2025-04-01-preview`)
+ - Set **Model** to your deployed model name (e.g., `gpt-5-mini`)
+ - Set **API key** to your Azure OpenAI API key (or use an environment variable such as `{FRIGATE_OPENAI_API_KEY}`)
+
+
+
+
```yaml
genai:
provider: azure_openai
@@ -264,3 +367,6 @@ genai:
model: gpt-5-mini
api_key: "{FRIGATE_OPENAI_API_KEY}"
```
+
+
+
diff --git a/docs/docs/configuration/genai/objects.md b/docs/docs/configuration/genai/objects.md
index 3ed826d21..eb8dadef5 100644
--- a/docs/docs/configuration/genai/objects.md
+++ b/docs/docs/configuration/genai/objects.md
@@ -3,6 +3,10 @@ id: genai_objects
title: Object Descriptions
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
Generative AI can be used to automatically generate descriptive text based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate to provide more context about your tracked objects. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail.
Requests for a description are sent off automatically to your AI provider at the end of the tracked object's lifecycle, or can optionally be sent earlier after a number of significantly changed frames, for example in use in more real-time notifications. Descriptions can also be regenerated manually via the Frigate UI. Note that if you are manually entering a description for tracked objects prior to its end, this will be overwritten by the generated response.
@@ -15,9 +19,9 @@ Generative AI object descriptions can also be toggled dynamically for a camera v
## Usage and Best Practices
-Frigate's thumbnail search excels at identifying specific details about tracked objects – for example, using an "image caption" approach to find a "person wearing a yellow vest," "a white dog running across the lawn," or "a red car on a residential street." To enhance this further, Frigate’s default prompts are designed to ask your AI provider about the intent behind the object's actions, rather than just describing its appearance.
+Frigate's thumbnail search excels at identifying specific details about tracked objects -- for example, using an "image caption" approach to find a "person wearing a yellow vest," "a white dog running across the lawn," or "a red car on a residential street." To enhance this further, Frigate's default prompts are designed to ask your AI provider about the intent behind the object's actions, rather than just describing its appearance.
-While generating simple descriptions of detected objects is useful, understanding intent provides a deeper layer of insight. Instead of just recognizing "what" is in a scene, Frigate’s default prompts aim to infer "why" it might be there or "what" it could do next. Descriptions tell you what’s happening, but intent gives context. For instance, a person walking toward a door might seem like a visitor, but if they’re moving quickly after hours, you can infer a potential break-in attempt. Detecting a person loitering near a door at night can trigger an alert sooner than simply noting "a person standing by the door," helping you respond based on the situation’s context.
+While generating simple descriptions of detected objects is useful, understanding intent provides a deeper layer of insight. Instead of just recognizing "what" is in a scene, Frigate's default prompts aim to infer "why" it might be there or "what" it could do next. Descriptions tell you what's happening, but intent gives context. For instance, a person walking toward a door might seem like a visitor, but if they're moving quickly after hours, you can infer a potential break-in attempt. Detecting a person loitering near a door at night can trigger an alert sooner than simply noting "a person standing by the door," helping you respond based on the situation's context.
## Custom Prompts
@@ -33,7 +37,18 @@ Prompts can use variable replacements `{label}`, `{sub_label}`, and `{camera}` t
:::
-You are also able to define custom prompts in your configuration.
+You can define custom prompts at the global level and per-object type. To configure custom prompts:
+
+
+
+
+1. Navigate to .
+ - Expand the **GenAI object config** section
+ - Set **Caption prompt** to your custom prompt text
+ - Under **Object prompts**, add entries keyed by object type (e.g., `person`, `car`) with custom prompts for each
+
+
+
```yaml
genai:
@@ -49,7 +64,25 @@ objects:
car: "Observe the primary vehicle in these images. Focus on its movement, direction, or purpose (e.g., parking, approaching, circling). If it's a delivery vehicle, mention the company."
```
-Prompts can also be overridden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire.
+
+
+
+Prompts can also be overridden at the camera level to provide a more detailed prompt to the model about your specific camera. To configure camera-level overrides:
+
+
+
+
+1. Navigate to for the desired camera.
+ - Expand the **GenAI object config** section
+ - Set **Enable GenAI** to on
+ - Set **Use snapshots** to on if desired
+ - Set **Caption prompt** to a camera-specific prompt
+ - Under **Object prompts**, add entries keyed by object type with camera-specific prompts
+ - Set **GenAI objects** to the list of object types that should receive descriptions (e.g., `person`, `cat`)
+ - Set **Required zones** to limit descriptions to objects in specific zones (e.g., `steps`)
+
+
+
```yaml
cameras:
@@ -69,6 +102,9 @@ cameras:
- steps
```
+
+
+
### Experiment with prompts
Many providers also have a public facing chat interface for their models. Download a couple of different thumbnails or snapshots from Frigate and try new things in the playground to get descriptions to your liking before updating the prompt in Frigate.
diff --git a/docs/docs/configuration/genai/review_summaries.md b/docs/docs/configuration/genai/review_summaries.md
index 8045f5aa3..e492a4893 100644
--- a/docs/docs/configuration/genai/review_summaries.md
+++ b/docs/docs/configuration/genai/review_summaries.md
@@ -3,6 +3,10 @@ id: genai_review
title: Review Summaries
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
Generative AI can be used to automatically generate structured summaries of review items. These summaries will show up in Frigate's native notifications as well as in the UI. Generative AI can also be used to take a collection of summaries over a period of time and provide a report, which may be useful to get a quick report of everything that happened while out for some amount of time.
Requests for a summary are requested automatically to your AI provider for alert review items when the activity has ended, they can also be optionally enabled for detections as well.
@@ -28,6 +32,30 @@ This will show in multiple places in the UI to give additional context about eac
Each installation and even camera can have different parameters for what is considered suspicious activity. Frigate allows the `activity_context_prompt` to be defined globally and at the camera level, which allows you to define more specifically what should be considered normal activity. It is important that this is not overly specific as it can sway the output of the response.
+To configure the activity context prompt:
+
+
+
+
+Navigate to .
+
+- Set **GenAI config > Activity context prompt** to your custom activity context text
+
+
+
+
+```yaml
+review:
+ genai:
+ activity_context_prompt: |
+ ### Normal Activity Indicators (Level 0)
+ - Known/verified people in any zone at any time
+ ...
+```
+
+
+
+
Default Activity Context Prompt
@@ -74,7 +102,18 @@ review:
### Image Source
-By default, review summaries use preview images (cached preview frames) which have a lower resolution but use fewer tokens per image. For better image quality and more detailed analysis, you can configure Frigate to extract frames directly from recordings at a higher resolution:
+By default, review summaries use preview images (cached preview frames) which have a lower resolution but use fewer tokens per image. For better image quality and more detailed analysis, configure Frigate to extract frames directly from recordings at a higher resolution.
+
+
+
+
+Navigate to .
+
+- Set **GenAI config > Enable GenAI descriptions** to on
+- Set **GenAI config > Review image source** to `recordings` (default is `preview`)
+
+
+
```yaml
review:
@@ -84,6 +123,9 @@ review:
image_source: recordings # Options: "preview" (default) or "recordings"
```
+
+
+
When using `recordings`, frames are extracted at 480px height while maintaining the camera's original aspect ratio, providing better detail for the LLM while being mindful of context window size. This is particularly useful for scenarios where fine details matter, such as identifying license plates, reading text, or analyzing distant objects.
The number of frames sent to the LLM is dynamically calculated based on:
@@ -103,7 +145,17 @@ If recordings are not available for a given time period, the system will automat
### Additional Concerns
-Along with the concern of suspicious activity or immediate threat, you may have concerns such as animals in your garden or a gate being left open. These concerns can be configured so that the review summaries will make note of them if the activity requires additional review. For example:
+Along with the concern of suspicious activity or immediate threat, you may have concerns such as animals in your garden or a gate being left open. Configure these concerns so that review summaries will make note of them if the activity requires additional review.
+
+
+
+
+Navigate to .
+
+- Set **GenAI config > Additional concerns** to a list of your concerns (e.g., `animals in the garden`)
+
+
+
```yaml {4,5}
review:
@@ -113,9 +165,22 @@ review:
- animals in the garden
```
+
+
+
### Preferred Language
-By default, review summaries are generated in English. You can configure Frigate to generate summaries in your preferred language by setting the `preferred_language` option:
+By default, review summaries are generated in English. Configure Frigate to generate summaries in your preferred language by setting the `preferred_language` option.
+
+
+
+
+Navigate to .
+
+- Set **GenAI config > Preferred language** to the desired language (e.g., `Spanish`)
+
+
+
```yaml {4}
review:
@@ -124,6 +189,9 @@ review:
preferred_language: Spanish
```
+
+
+
## Review Reports
Along with individual review item summaries, Generative AI can also produce a single report of review items from all cameras marked "suspicious" over a specified time period (for example, a daily summary of suspicious activity while you're on vacation).
diff --git a/docs/docs/configuration/hardware_acceleration_video.md b/docs/docs/configuration/hardware_acceleration_video.md
index 318e1b23e..918c23e67 100644
--- a/docs/docs/configuration/hardware_acceleration_video.md
+++ b/docs/docs/configuration/hardware_acceleration_video.md
@@ -4,6 +4,9 @@ title: Video Decoding
---
import CommunityBadge from '@site/src/components/CommunityBadge';
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
# Video Decoding
@@ -78,27 +81,60 @@ See [The Intel Docs](https://www.intel.com/content/www/us/en/support/articles/00
VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams.
+
+
+
+Navigate to and set **Hardware acceleration arguments** to `VAAPI (Intel/AMD GPU)`. For per-camera overrides, navigate to .
+
+
+
+
```yaml
ffmpeg:
hwaccel_args: preset-vaapi
```
+
+
+
### Via Quicksync
#### H.264 streams
+
+
+
+Navigate to and set **Hardware acceleration arguments** to `Intel QuickSync (H.264)`. For per-camera overrides, navigate to .
+
+
+
+
```yaml
ffmpeg:
hwaccel_args: preset-intel-qsv-h264
```
+
+
+
#### H.265 streams
+
+
+
+Navigate to and set **Hardware acceleration arguments** to `Intel QuickSync (H.265)`. For per-camera overrides, navigate to .
+
+
+
+
```yaml
ffmpeg:
hwaccel_args: preset-intel-qsv-h265
```
+
+
+
### Configuring Intel GPU Stats in Docker
Additional configuration is needed for the Docker container to be able to access the `intel_gpu_top` command for GPU stats. There are two options:
@@ -196,11 +232,22 @@ You need to change the driver to `radeonsi` by adding the following environment
VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams.
+
+
+
+Navigate to and set **Hardware acceleration arguments** to `VAAPI (Intel/AMD GPU)`. For per-camera overrides, navigate to .
+
+
+
+
```yaml
ffmpeg:
hwaccel_args: preset-vaapi
```
+
+
+
## NVIDIA GPUs
While older GPUs may work, it is recommended to use modern, supported GPUs. NVIDIA provides a [matrix of supported GPUs and features](https://developer.nvidia.com/video-encode-and-decode-gpu-support-matrix-new). If your card is on the list and supports CUVID/NVDEC, it will most likely work with Frigate for decoding. However, you must also use [a driver version that will work with FFmpeg](https://github.com/FFmpeg/nv-codec-headers/blob/master/README). Older driver versions may be missing symbols and fail to work, and older cards are not supported by newer driver versions. The only way around this is to [provide your own FFmpeg](/configuration/advanced#custom-ffmpeg-build) that will work with your driver version, but this is unsupported and may not work well if at all.
@@ -244,11 +291,22 @@ docker run -d \
Using `preset-nvidia` ffmpeg will automatically select the necessary profile for the incoming video, and will log an error if the profile is not supported by your GPU.
+
+
+
+Navigate to and set **Hardware acceleration arguments** to `NVIDIA GPU`. For per-camera overrides, navigate to .
+
+
+
+
```yaml
ffmpeg:
hwaccel_args: preset-nvidia
```
+
+
+
If everything is working correctly, you should see a significant improvement in performance.
Verify that hardware decoding is working by running `nvidia-smi`, which should show `ffmpeg`
processes:
@@ -296,6 +354,14 @@ These instructions were originally based on the [Jellyfin documentation](https:/
Ensure you increase the allocated RAM for your GPU to at least 128 (`raspi-config` > Performance Options > GPU Memory).
If you are using the HA App, you may need to use the full access variant and turn off _Protection mode_ for hardware acceleration.
+
+
+
+Navigate to and set **Hardware acceleration arguments** to `Raspberry Pi (H.264)` (for H.264 streams) or `Raspberry Pi (H.265)` (for H.265/HEVC streams). For per-camera overrides, navigate to .
+
+
+
+
```yaml
# if you want to decode a h264 stream
ffmpeg:
@@ -306,6 +372,9 @@ ffmpeg:
hwaccel_args: preset-rpi-64-h265
```
+
+
+
:::note
If running Frigate through Docker, you either need to run in privileged mode or
@@ -405,11 +474,22 @@ A list of supported codecs (you can use `ffmpeg -decoders | grep nvmpi` in the c
For example, for H264 video, you'll select `preset-jetson-h264`.
+
+
+
+Navigate to and set **Hardware acceleration arguments** to `NVIDIA Jetson (H.264)` (or `NVIDIA Jetson (H.265)` for HEVC streams). For per-camera overrides, navigate to .
+
+
+
+
```yaml
ffmpeg:
hwaccel_args: preset-jetson-h264
```
+
+
+
If everything is working correctly, you should see a significant reduction in ffmpeg CPU load and power consumption.
Verify that hardware decoding is working by running `jtop` (`sudo pip3 install -U jetson-stats`), which should show
that NVDEC/NVDEC1 are in use.
@@ -424,13 +504,24 @@ Make sure to follow the [Rockchip specific installation instructions](/frigate/i
### Configuration
-Add one of the following FFmpeg presets to your `config.yml` to enable hardware video processing:
+Set the FFmpeg hwaccel preset to enable hardware video processing.
+
+
+
+
+Navigate to and set **Hardware acceleration arguments** to `Rockchip RKMPP`. For per-camera overrides, navigate to .
+
+
+
```yaml
ffmpeg:
hwaccel_args: preset-rkmpp
```
+
+
+
:::note
Make sure that your SoC supports hardware acceleration for your input stream. For example, if your camera streams with h265 encoding and a 4k resolution, your SoC must be able to de- and encode h265 with a 4k resolution or higher. If you are unsure whether your SoC meets the requirements, take a look at the datasheet.
@@ -480,7 +571,15 @@ Make sure to follow the [Synaptics specific installation instructions](/frigate/
### Configuration
-Add one of the following FFmpeg presets to your `config.yml` to enable hardware video processing:
+Set the FFmpeg hwaccel preset to enable hardware video processing.
+
+
+
+
+Navigate to and configure the hardware acceleration args and input args manually for Synaptics hardware. For per-camera overrides, navigate to .
+
+
+
```yaml {2}
ffmpeg:
@@ -490,6 +589,9 @@ output_args:
record: preset-record-generic-audio-aac
```
+
+
+
:::warning
Make sure that your SoC supports hardware acceleration for your input stream and your input stream is h264 encoding. For example, if your camera streams with h264 encoding, your SoC must be able to de- and encode with it. If you are unsure whether your SoC meets the requirements, take a look at the datasheet.
diff --git a/docs/docs/configuration/index.md b/docs/docs/configuration/index.md
index be546ca30..84f978078 100644
--- a/docs/docs/configuration/index.md
+++ b/docs/docs/configuration/index.md
@@ -3,13 +3,24 @@ id: index
title: Frigate Configuration
---
-For Home Assistant App installations, the config file should be at `/addon_configs/<addon_directory>/config.yml`, where `<addon_directory>` is specific to the variant of the Frigate App you are running. See the list of directories [here](#accessing-app-config-dir).
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
-For all other installation types, the config file should be mapped to `/config/config.yml` inside the container.
+Frigate can be configured through the **Settings UI** or by editing the YAML configuration file directly. The Settings UI is the recommended approach — it provides validation and a guided experience for all configuration options.
+
+It is recommended to start with a minimal configuration and add to it as described in [the getting started guide](../guides/getting_started.md).
+
+## Configuration File Location
+
+For users who prefer to edit the YAML configuration file directly:
+
+- **Home Assistant App:** `/addon_configs/<addon_directory>/config.yml` — see [directory list](#accessing-app-config-dir)
+- **All other installations:** Map to `/config/config.yml` inside the container
It can be named `config.yml` or `config.yaml`, but if both files exist `config.yml` will be preferred and `config.yaml` will be ignored.
-It is recommended to start with a minimal configuration and add to it as described in [this guide](../guides/getting_started.md) and use the built in configuration editor in Frigate's UI which supports validation.
+A minimal starting configuration:
```yaml
mqtt:
@@ -38,7 +49,7 @@ When running Frigate through the HA App, the Frigate `/config` directory is mapp
**Whenever you see `/config` in the documentation, it refers to this directory.**
-If for example you are running the standard App variant and use the [VS Code App](https://github.com/hassio-addons/addon-vscode) to browse your files, you can click _File_ > _Open folder..._ and navigate to `/addon_configs/ccab4aaf_frigate` to access the Frigate `/config` directory and edit the `config.yaml` file. You can also use the built-in file editor in the Frigate UI to edit the configuration file.
+If for example you are running the standard App variant and use the [VS Code App](https://github.com/hassio-addons/addon-vscode) to browse your files, you can click _File_ > _Open folder..._ and navigate to `/addon_configs/ccab4aaf_frigate` to access the Frigate `/config` directory and edit the `config.yaml` file. You can also use the built-in config editor in the Frigate UI.
## VS Code Configuration Schema
@@ -81,7 +92,7 @@ genai:
## Common configuration examples
-Here are some common starter configuration examples. Refer to the [reference config](./reference.md) for detailed information about all the config values.
+Here are some common starter configuration examples. These can be configured through the Settings UI or via YAML. Refer to the [reference config](./reference.md) for detailed information about all config values.
### Raspberry Pi Home Assistant App with USB Coral
@@ -94,6 +105,20 @@ Here are some common starter configuration examples. Refer to the [reference con
- Save snapshots for 30 days
- Motion mask for the camera timestamp
+
+
+
+1. Navigate to and configure the MQTT connection to your Home Assistant Mosquitto broker
+2. Navigate to and set **Hardware acceleration arguments** to `Raspberry Pi (H.264)`
+3. Navigate to and add a detector with **Type** `EdgeTPU` and **Device** `usb`
+4. Navigate to and set **Enable recording** to on, **Motion retention > Retention days** to `7`, **Alert retention > Event retention > Retention days** to `30`, **Alert retention > Event retention > Retention mode** to `motion`, **Detection retention > Event retention > Retention days** to `30`, **Detection retention > Event retention > Retention mode** to `motion`
+5. Navigate to and set **Enable snapshots** to on, **Snapshot retention > Default retention** to `30`
+6. Navigate to and add your camera with the appropriate RTSP stream URL
+7. Navigate to to add a motion mask for the camera timestamp
+
+
+
+
```yaml
mqtt:
host: core-mosquitto
@@ -145,10 +170,13 @@ cameras:
coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400"
```
+
+
+
### Standalone Intel Mini PC with USB Coral
- Single camera with 720p, 5fps stream for detect
-- MQTT disabled (not integrated with home assistant)
+- MQTT disabled (not integrated with Home Assistant)
- VAAPI hardware acceleration for decoding video
- USB Coral detector
- Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not
@@ -156,6 +184,20 @@ cameras:
- Save snapshots for 30 days
- Motion mask for the camera timestamp
+
+
+
+1. Navigate to and set **Enable MQTT** to off
+2. Navigate to and set **Hardware acceleration arguments** to `VAAPI (Intel/AMD GPU)`
+3. Navigate to and add a detector with **Type** `EdgeTPU` and **Device** `usb`
+4. Navigate to and set **Enable recording** to on, **Motion retention > Retention days** to `7`, **Alert retention > Event retention > Retention days** to `30`, **Alert retention > Event retention > Retention mode** to `motion`, **Detection retention > Event retention > Retention days** to `30`, **Detection retention > Event retention > Retention mode** to `motion`
+5. Navigate to and set **Enable snapshots** to on, **Snapshot retention > Default retention** to `30`
+6. Navigate to and add your camera with the appropriate RTSP stream URL
+7. Navigate to to add a motion mask for the camera timestamp
+
+
+
+
```yaml
mqtt:
enabled: False
@@ -205,17 +247,35 @@ cameras:
coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400"
```
-### Home Assistant integrated Intel Mini PC with OpenVino
+
+
+
+### Home Assistant integrated Intel Mini PC with OpenVINO
- Single camera with 720p, 5fps stream for detect
-- MQTT connected to same mqtt server as home assistant
+- MQTT connected to same MQTT server as Home Assistant
- VAAPI hardware acceleration for decoding video
-- OpenVino detector
+- OpenVINO detector
- Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not
- Continue to keep all video if it qualified as an alert or detection for 30 days
- Save snapshots for 30 days
- Motion mask for the camera timestamp
+
+
+
+1. Navigate to and configure the connection to your MQTT broker
+2. Navigate to and set **Hardware acceleration arguments** to `VAAPI (Intel/AMD GPU)`
+3. Navigate to and add a detector with **Type** `openvino` and **Device** `AUTO`
+4. Navigate to and configure the OpenVINO model path and settings
+5. Navigate to and set **Enable recording** to on, **Motion retention > Retention days** to `7`, **Alert retention > Event retention > Retention days** to `30`, **Alert retention > Event retention > Retention mode** to `motion`, **Detection retention > Event retention > Retention days** to `30`, **Detection retention > Event retention > Retention mode** to `motion`
+6. Navigate to and set **Enable snapshots** to on, **Snapshot retention > Default retention** to `30`
+7. Navigate to and add your camera with the appropriate RTSP stream URL
+8. Navigate to to add a motion mask for the camera timestamp
+
+
+
+
```yaml
mqtt:
host: 192.168.X.X # <---- same mqtt broker that home assistant uses
@@ -274,3 +334,6 @@ cameras:
enabled: true
coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400"
```
+
+
+
diff --git a/docs/docs/configuration/license_plate_recognition.md b/docs/docs/configuration/license_plate_recognition.md
index a44006b63..017cc5e16 100644
--- a/docs/docs/configuration/license_plate_recognition.md
+++ b/docs/docs/configuration/license_plate_recognition.md
@@ -3,6 +3,10 @@ id: license_plate_recognition
title: License Plate Recognition (LPR)
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
Frigate can recognize license plates on vehicles and automatically add the detected characters to the `recognized_license_plate` field or a [known](#matching) name as a `sub_label` to tracked objects of type `car` or `motorcycle`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street.
LPR works best when the license plate is clearly visible to the camera. For moving vehicles, Frigate continuously refines the recognition process, keeping the most confident result. When a vehicle becomes stationary, LPR continues to run for a short time after to attempt recognition.
@@ -34,14 +38,35 @@ License plate recognition works by running AI models locally on your system. The
## Configuration
-License plate recognition is disabled by default. Enable it in your config file:
+License plate recognition is disabled by default and must be enabled before it can be used.
+
+
+
+
+Navigate to .
+
+- Set **Enable LPR** to on
+
+
+
```yaml
lpr:
enabled: True
```
-Like other enrichments in Frigate, LPR **must be enabled globally** to use the feature. You should disable it for specific cameras at the camera level if you don't want to run LPR on cars on those cameras:
+
+
+
+Like other enrichments in Frigate, LPR **must be enabled globally** to use the feature. Disable it for specific cameras at the camera level if you don't want to run LPR on cars on those cameras.
+
+
+
+
+Navigate to for the desired camera and disable the **Enable LPR** toggle.
+
+
+
```yaml {4,5}
cameras:
@@ -51,65 +76,144 @@ cameras:
enabled: False
```
+
+
+
For non-dedicated LPR cameras, ensure that your camera is configured to detect objects of type `car` or `motorcycle`, and that a car or motorcycle is actually being detected by Frigate. Otherwise, LPR will not run.
Like the other real-time processors in Frigate, license plate recognition runs on the camera stream defined by the `detect` role in your config. To ensure optimal performance, select a suitable resolution for this stream in your camera's firmware that fits your specific scene and requirements.
## Advanced Configuration
-Fine-tune the LPR feature using these optional parameters at the global level of your config. The only optional parameters that can be set at the camera level are `enabled`, `min_area`, and `enhancement`.
+Fine-tune the LPR feature using these optional parameters. The only optional parameters that can be set at the camera level are `enabled`, `min_area`, and `enhancement`.
### Detection
-- **`detection_threshold`**: License plate object detection confidence score required before recognition runs.
+
+
+
+Navigate to .
+
+- **Detection threshold**: License plate object detection confidence score required before recognition runs. This field only applies to the standalone license plate detection model; `threshold` and `min_score` object filters should be used for models like Frigate+ that have license plate detection built in.
- Default: `0.7`
- - Note: This is field only applies to the standalone license plate detection model, `threshold` and `min_score` object filters should be used for models like Frigate+ that have license plate detection built in.
-- **`min_area`**: Defines the minimum area (in pixels) a license plate must be before recognition runs.
- - Default: `1000` pixels. Note: this is intentionally set very low as it is an _area_ measurement (length x width). For reference, 1000 pixels represents a ~32x32 pixel square in your camera image.
- - Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant plates.
-- **`device`**: Device to use to run license plate detection _and_ recognition models.
+- **Minimum plate area**: Minimum area (in pixels) a license plate must be before recognition runs. This is an _area_ measurement (length x width). For reference, 1000 pixels represents a ~32x32 pixel square in your camera image. Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant plates.
+ - Default: `1000` pixels
+- **Device**: Device to use to run license plate detection _and_ recognition models. Auto-selected by Frigate and can be `CPU`, `GPU`, or the GPU's device number. For users without a model that detects license plates natively, using a GPU may increase performance of the YOLOv9 license plate detector model. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.
- Default: `None`
- - This is auto-selected by Frigate and can be `CPU`, `GPU`, or the GPU's device number. For users without a model that detects license plates natively, using a GPU may increase performance of the YOLOv9 license plate detector model. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation. However, for users who run a model that detects `license_plate` natively, there is little to no performance gain reported with running LPR on GPU compared to the CPU.
-- **`model_size`**: The size of the model used to identify regions of text on plates.
+- **Model size**: The size of the model used to identify regions of text on plates. The `small` model is fast and identifies groups of Latin and Chinese characters. The `large` model identifies Latin characters only, and uses an enhanced text detector to find characters on multi-line plates. If your country or region does not use multi-line plates, you should use the `small` model.
- Default: `small`
- - This can be `small` or `large`.
- - The `small` model is fast and identifies groups of Latin and Chinese characters.
- - The `large` model identifies Latin characters only, and uses an enhanced text detector to find characters on multi-line plates. It is significantly slower than the `small` model.
- - If your country or region does not use multi-line plates, you should use the `small` model as performance is much better for single-line plates.
+
+
+
+
+```yaml
+lpr:
+ enabled: True
+ detection_threshold: 0.7
+ min_area: 1000
+ device: CPU
+ model_size: small
+```
+
+
+
### Recognition
-- **`recognition_threshold`**: Recognition confidence score required to add the plate to the object as a `recognized_license_plate` and/or `sub_label`.
- - Default: `0.9`.
-- **`min_plate_length`**: Specifies the minimum number of characters a detected license plate must have to be added as a `recognized_license_plate` and/or `sub_label` to an object.
- - Use this to filter out short, incomplete, or incorrect detections.
-- **`format`**: A regular expression defining the expected format of detected plates. Plates that do not match this format will be discarded.
- - `"^[A-Z]{1,3} [A-Z]{1,2} [0-9]{1,4}$"` matches plates like "B AB 1234" or "M X 7"
- - `"^[A-Z]{2}[0-9]{2} [A-Z]{3}$"` matches plates like "AB12 XYZ" or "XY68 ABC"
- - Websites like https://regex101.com/ can help test regular expressions for your plates.
+
+
+
+Navigate to .
+
+- **Recognition threshold**: Recognition confidence score required to add the plate to the object as a `recognized_license_plate` and/or `sub_label`.
+ - Default: `0.9`
+- **Min plate length**: Minimum number of characters a detected license plate must have to be added as a `recognized_license_plate` and/or `sub_label`. Use this to filter out short, incomplete, or incorrect detections.
+- **Plate format regex**: A regular expression defining the expected format of detected plates. Plates that do not match this format will be discarded. Websites like https://regex101.com/ can help test regular expressions for your plates.
+
+
+
+
+```yaml
+lpr:
+ enabled: True
+ recognition_threshold: 0.9
+ min_plate_length: 4
+ format: "^[A-Z]{2}[0-9]{2} [A-Z]{3}$"
+```
+
+
+
### Matching
-- **`known_plates`**: List of strings or regular expressions that assign custom a `sub_label` to `car` and `motorcycle` objects when a recognized plate matches a known value.
- - These labels appear in the UI, filters, and notifications.
- - Unknown plates are still saved but are added to the `recognized_license_plate` field rather than the `sub_label`.
-- **`match_distance`**: Allows for minor variations (missing/incorrect characters) when matching a detected plate to a known plate.
- - For example, setting `match_distance: 1` allows a plate `ABCDE` to match `ABCBE` or `ABCD`.
- - This parameter will _not_ operate on known plates that are defined as regular expressions. You should define the full string of your plate in `known_plates` in order to use `match_distance`.
+
+
+
+Navigate to .
+
+- **Known plates**: Assign custom `sub_label` values to `car` and `motorcycle` objects when a recognized plate matches a known value. These labels appear in the UI, filters, and notifications. Unknown plates are still saved but are added to the `recognized_license_plate` field rather than the `sub_label`.
+- **Match distance**: Allows for minor variations (missing/incorrect characters) when matching a detected plate to a known plate. For example, setting to `1` allows a plate `ABCDE` to match `ABCBE` or `ABCD`. This parameter will _not_ operate on known plates that are defined as regular expressions.
+
+
+
+
+```yaml
+lpr:
+ enabled: True
+ match_distance: 1
+ known_plates:
+ Wife's Car:
+ - "ABC-1234"
+ Johnny:
+ - "J*N-*234"
+```
+
+
+
### Image Enhancement
-- **`enhancement`**: A value between 0 and 10 that adjusts the level of image enhancement applied to captured license plates before they are processed for recognition. This preprocessing step can sometimes improve accuracy but may also have the opposite effect.
+
+
+
+Navigate to .
+
+- **Enhancement level**: A value between 0 and 10 that adjusts the level of image enhancement applied to captured license plates before they are processed for recognition. Higher values increase contrast, sharpen details, and reduce noise, but excessive enhancement can blur or distort characters. This setting is best adjusted at the camera level if running LPR on multiple cameras.
- Default: `0` (no enhancement)
- - Higher values increase contrast, sharpen details, and reduce noise, but excessive enhancement can blur or distort characters, actually making them much harder for Frigate to recognize.
- - This setting is best adjusted at the camera level if running LPR on multiple cameras.
- - If Frigate is already recognizing plates correctly, leave this setting at the default of `0`. However, if you're experiencing frequent character issues or incomplete plates and you can already easily read the plates yourself, try increasing the value gradually, starting at 5 and adjusting as needed. You should see how different enhancement levels affect your plates. Use the `debug_save_plates` configuration option (see below).
+
+
+
+
+```yaml
+lpr:
+ enabled: True
+ enhancement: 1
+```
+
+
+
+
+If Frigate is already recognizing plates correctly, leave enhancement at the default of `0`. However, if you're experiencing frequent character issues or incomplete plates and you can already easily read the plates yourself, try increasing the value gradually, starting at 3 and adjusting as needed. Use the `debug_save_plates` configuration option (see below) to see how different enhancement levels affect your plates.
### Normalization Rules
-- **`replace_rules`**: List of regex replacement rules to normalize detected plates. These rules are applied sequentially and are applied _before_ the `format` regex, if specified. Each rule must have a `pattern` (which can be a string or a regex) and `replacement` (a string, which also supports [backrefs](https://docs.python.org/3/library/re.html#re.sub) like `\1`). These rules are useful for dealing with common OCR issues like noise characters, separators, or confusions (e.g., 'O'→'0').
+
+
-These rules must be defined at the global level of your `lpr` config.
+Navigate to .
+
+Under **Replacement rules**, add regex rules to normalize detected plate strings before matching. Rules fire in order. For example:
+
+| Pattern | Replacement | Description |
+| ---------------- | ----------- | -------------------------------------------------- |
+| `[%#*?]` | _(empty)_ | Remove noise symbols |
+| `[= ]` | `-` | Normalize `=` or space to dash |
+| `O` | `0` | Swap `O` to `0` (common OCR error) |
+| `I` | `1` | Swap `I` to `1` |
+| `(\w{3})(\w{3})` | `\1-\2` | Split 6 chars into groups (e.g., ABC123 → ABC-123) |
+
+
+
```yaml
lpr:
@@ -126,6 +230,11 @@ lpr:
replacement: '\1-\2'
```
+
+
+
+These rules must be defined at the global level of your `lpr` config.
+
+- Rules fire in order — in the example above: clean noise first, then separators, then swaps, then splits.
- Backrefs (`\1`, `\2`) allow dynamic replacements (e.g., capture groups).
- Any changes made by the rules are printed to the LPR debug log.
@@ -133,13 +242,50 @@ lpr:
### Debugging
-- **`debug_save_plates`**: Set to `True` to save captured text on plates for debugging. These images are stored in `/media/frigate/clips/lpr`, organized into subdirectories by `/`, and named based on the capture timestamp.
- - These saved images are not full plates but rather the specific areas of text detected on the plates. It is normal for the text detection model to sometimes find multiple areas of text on the plate. Use them to analyze what text Frigate recognized and how image enhancement affects detection.
- - **Note:** Frigate does **not** automatically delete these debug images. Once LPR is functioning correctly, you should disable this option and manually remove the saved files to free up storage.
+
+
+
+Navigate to .
+
+- **Save debug plates**: Set to on to save captured text on plates for debugging. These images are stored in `/media/frigate/clips/lpr`, organized into subdirectories by `<camera_name>/<event_id>`, and named based on the capture timestamp.
+
+
+
+
+```yaml
+lpr:
+ enabled: True
+ debug_save_plates: True
+```
+
+
+
+
+The saved images are not full plates but rather the specific areas of text detected on the plates. It is normal for the text detection model to sometimes find multiple areas of text on the plate. Use them to analyze what text Frigate recognized and how image enhancement affects detection.
+
+**Note:** Frigate does **not** automatically delete these debug images. Once LPR is functioning correctly, you should disable this option and manually remove the saved files to free up storage.
## Configuration Examples
-These configuration parameters are available at the global level of your config. The only optional parameters that should be set at the camera level are `enabled`, `min_area`, and `enhancement`.
+These configuration parameters are available at the global level. The only optional parameters that should be set at the camera level are `enabled`, `min_area`, and `enhancement`.
+
+
+
+
+Navigate to .
+
+| Field | Description |
+| ------------------------------ | ----------------------------------------------------------------------------------------------------- |
+| **Enable LPR** | Set to on |
+| **Minimum plate area** | Set to `1500` — ignore plates with an area (length x width) smaller than 1500 pixels |
+| **Min plate length** | Set to `4` — only recognize plates with 4 or more characters |
+| **Known plates > Wife's Car** | `ABC-1234`, `ABC-I234` (accounts for potential confusion between the number one and capital letter I) |
+| **Known plates > Johnny** | `J*N-*234` (matches JHN-1234 and JMN-I234; `*` matches any number of characters) |
+| **Known plates > Sally** | `[S5]LL 1234` (matches both SLL 1234 and 5LL 1234) |
+| **Known plates > Work Trucks** | `EMP-[0-9]{3}[A-Z]` (matches plates like EMP-123A, EMP-456Z) |
+
+
+
```yaml
lpr:
@@ -158,28 +304,21 @@ lpr:
- "EMP-[0-9]{3}[A-Z]" # Matches plates like EMP-123A, EMP-456Z
```
-```yaml
-lpr:
- enabled: True
- min_area: 4000 # Run recognition on larger plates only (4000 pixels represents a 63x63 pixel square in your image)
- recognition_threshold: 0.85
- format: "^[A-Z]{2} [A-Z][0-9]{4}$" # Only recognize plates that are two letters, followed by a space, followed by a single letter and 4 numbers
- match_distance: 1 # Allow one character variation in plate matching
- replace_rules:
- - pattern: "O"
- replacement: "0" # Replace the letter O with the number 0 in every plate
- known_plates:
- Delivery Van:
- - "RJ K5678"
- - "UP A1234"
- Supervisor:
- - "MN D3163"
-```
+
+
:::note
If a camera is configured to detect `car` or `motorcycle` but you don't want Frigate to run LPR for that camera, disable LPR at the camera level:
+
+
+
+Navigate to for the desired camera and disable the **Enable LPR** toggle.
+
+
+
+
```yaml
cameras:
side_yard:
@@ -188,13 +327,16 @@ cameras:
...
```
+
+
+
:::
## Dedicated LPR Cameras
Dedicated LPR cameras are single-purpose cameras with powerful optical zoom to capture license plates on distant vehicles, often with fine-tuned settings to capture plates at night.
-To mark a camera as a dedicated LPR camera, add `type: "lpr"` the camera configuration.
+To mark a camera as a dedicated LPR camera, set `type: "lpr"` in the camera configuration.
:::note
@@ -210,6 +352,55 @@ Users running a Frigate+ model (or any model that natively detects `license_plat
An example configuration for a dedicated LPR camera using a `license_plate`-detecting model:
+
+
+
+Navigate to and set **Enable LPR** to on. Set **Device** to `CPU` (can also be `GPU` if available).
+
+Navigate to and add your camera streams.
+
+Navigate to .
+
+| Field | Description |
+| --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ |
+| **Enable object detection** | Set to on |
+| **Detect FPS** | Set to `5`. Increase to `10` if vehicles move quickly across your frame. Higher than 10 is unnecessary and is not recommended. |
+| **Minimum initialization frames** | Set to `2` |
+| **Detect width** | Set to `1920` |
+| **Detect height** | Set to `1080` |
+
+Navigate to .
+
+| Field | Description |
+| ---------------------------------------------- | ------------------- |
+| **Objects to track** | Add `license_plate` |
+| **Object filters > License Plate > Threshold** | Set to `0.7` |
+
+Navigate to .
+
+| Field | Description |
+| -------------------- | --------------------------------------------------------------------- |
+| **Motion threshold** | Set to `30` |
+| **Contour area** | Set to `60`. Use an increased value to tune out small motion changes. |
+| **Improve contrast** | Set to off |
+
+Also add a motion mask over your camera's timestamp so it is not incorrectly detected as a license plate.
+
+Navigate to .
+
+| Field | Description |
+| -------------------- | -------------------------------------------------------- |
+| **Enable recording** | Set to on. Disable recording if you only want snapshots. |
+
+Navigate to .
+
+| Field | Description |
+| -------------------- | ----------- |
+| **Enable snapshots** | Set to on |
+
+
+
+
```yaml
# LPR global configuration
lpr:
@@ -248,6 +439,9 @@ cameras:
- license_plate
```
+
+
+
With this setup:
- License plates are treated as normal objects in Frigate.
@@ -259,10 +453,65 @@ With this setup:
### Using the Secondary LPR Pipeline (Without Frigate+)
-If you are not running a Frigate+ model, you can use Frigate’s built-in secondary dedicated LPR pipeline. In this mode, Frigate bypasses the standard object detection pipeline and runs a local license plate detector model on the full frame whenever motion activity occurs.
+If you are not running a Frigate+ model, you can use Frigate's built-in secondary dedicated LPR pipeline. In this mode, Frigate bypasses the standard object detection pipeline and runs a local license plate detector model on the full frame whenever motion activity occurs.
An example configuration for a dedicated LPR camera using the secondary pipeline:
+
+
+
+Navigate to and set **Enable LPR** to on. Set **Device** to `CPU` (can also be `GPU` if available and the correct Docker image is used). Set **Detection threshold** to `0.7` (change if necessary).
+
+Navigate to for your dedicated LPR camera.
+
+| Field | Description |
+| --------------------- | -------------------------------------------------------------------------------- |
+| **Enable LPR** | Set to on |
+| **Enhancement level** | Set to `3` (optional — enhances the image before trying to recognize characters) |
+
+Navigate to and add your camera streams.
+
+Navigate to .
+
+| Field | Description |
+| --------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
+| **Enable object detection** | Set to off — disables Frigate's standard object detection pipeline |
+| **Detect FPS** | Set to `5`. Increase if necessary, though high values may slow down Frigate's enrichments pipeline and use considerable CPU. |
+| **Detect width** | Set to `1920` (recommended value, but depends on your camera) |
+| **Detect height** | Set to `1080` (recommended value, but depends on your camera) |
+
+Navigate to .
+
+| Field | Description |
+| -------------------- | -------------------------------------------------------------------------------------- |
+| **Objects to track** | Set to an empty list — required when not using a Frigate+ model for dedicated LPR mode |
+
+Navigate to .
+
+| Field | Description |
+| -------------------- | --------------------------------------------------------------------- |
+| **Motion threshold** | Set to `30` |
+| **Contour area** | Set to `60`. Use an increased value to tune out small motion changes. |
+| **Improve contrast** | Set to off |
+
+Navigate to and add a motion mask over your camera's timestamp so it is not incorrectly detected as a license plate.
+
+Navigate to .
+
+| Field | Description |
+| -------------------- | -------------------------------------------------------- |
+| **Enable recording** | Set to on. Disable recording if you only want snapshots. |
+
+Navigate to .
+
+| Field | Description |
+| ----------------------------------------- | --------------- |
+| **Detections config > Enable detections** | Set to on |
+| **Detections config > Retain > Default** | Set to `7` days |
+
+
+
+
```yaml
# LPR global configuration
lpr:
@@ -299,6 +548,9 @@ cameras:
default: 7
```
+
+
+
With this setup:
- The standard object detection pipeline is bypassed. Any detected license plates on dedicated LPR cameras are treated similarly to manual events in Frigate. You must **not** specify `license_plate` as an object to track.
@@ -377,12 +629,27 @@ Start with ["Why isn't my license plate being detected and recognized?"](#why-is
1. Start with a simplified LPR config.
- Remove or comment out everything in your LPR config, including `min_area`, `min_plate_length`, `format`, `known_plates`, or `enhancement` values so that the only values left are `enabled` and `debug_save_plates`. This will run LPR with Frigate's default values.
- ```yaml
- lpr:
- enabled: true
- device: CPU
- debug_save_plates: true
- ```
+
+
+
+Navigate to .
+
+- Set **Enable LPR** to on
+- Set **Device** to `CPU`
+- Set **Save debug plates** to on
+
+
+
+
+```yaml
+lpr:
+ enabled: true
+ device: CPU
+ debug_save_plates: true
+```
+
+
+
2. Enable debug logs to see exactly what Frigate is doing.
- Enable debug logs for LPR by adding `frigate.data_processing.common.license_plate: debug` to your `logger` configuration. These logs are _very_ verbose, so only keep this enabled when necessary. Restart Frigate after this change.
@@ -391,7 +658,7 @@ Start with ["Why isn't my license plate being detected and recognized?"](#why-is
logger:
default: info
logs:
- # highlight-next-line
+ # highlight-next-line
frigate.data_processing.common.license_plate: debug
```
diff --git a/docs/docs/configuration/live.md b/docs/docs/configuration/live.md
index 8e7eff163..18e2054c4 100644
--- a/docs/docs/configuration/live.md
+++ b/docs/docs/configuration/live.md
@@ -3,6 +3,10 @@ id: live
title: Live View
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
Frigate intelligently displays your camera streams on the Live view dashboard. By default, Frigate employs "smart streaming" where camera images update once per minute when no detectable activity is occurring to conserve bandwidth and resources. As soon as any motion or active objects are detected, cameras seamlessly switch to a live stream.
### Live View technologies
@@ -63,19 +67,26 @@ go2rtc:
### Setting Streams For Live UI
-You can configure Frigate to allow manual selection of the stream you want to view in the Live UI. For example, you may want to view your camera's substream on mobile devices, but the full resolution stream on desktop devices. Setting the `live -> streams` list will populate a dropdown in the UI's Live view that allows you to choose between the streams. This stream setting is _per device_ and is saved in your browser's local storage.
+You can configure Frigate to allow manual selection of the stream you want to view in the Live UI. For example, you may want to view your camera's substream on mobile devices, but the full resolution stream on desktop devices. Setting the streams list will populate a dropdown in the UI's Live view that allows you to choose between the streams. This stream setting is _per device_ and is saved in your browser's local storage.
Additionally, when creating and editing camera groups in the UI, you can choose the stream you want to use for your camera group's Live dashboard.
:::note
-Frigate's default dashboard ("All Cameras") will always use the first entry you've defined in `streams:` when playing live streams from your cameras.
+Frigate's default dashboard ("All Cameras") will always use the first entry you've defined in streams when playing live streams from your cameras.
:::
-Configure the `streams` option with a "friendly name" for your stream followed by the go2rtc stream name.
+Configure a "friendly name" for your stream followed by the go2rtc stream name. Using Frigate's internal version of go2rtc is required to use this feature. You cannot specify paths in the streams configuration, only go2rtc stream names.
-Using Frigate's internal version of go2rtc is required to use this feature. You cannot specify paths in the `streams` configuration, only go2rtc stream names.
+
+
+
+1. Navigate to , then select your camera.
+ - Under **Live stream names**, add entries mapping a friendly name to each go2rtc stream name (e.g., `Main Stream` mapped to `test_cam`, `Sub Stream` mapped to `test_cam_sub`).
+
+
+
```yaml {3,6,8,25-29}
go2rtc:
@@ -109,6 +120,9 @@ cameras:
Special Stream: test_cam_another_sub
```
+
+
+
### WebRTC extra configuration:
WebRTC works by creating a TCP or UDP connection on port `8555`. However, it requires additional configuration:
@@ -185,7 +199,7 @@ To prevent go2rtc from blocking other applications from accessing your camera's
Frigate provides a dialog in the Camera Group Edit pane with several options for streaming on a camera group's dashboard. These settings are _per device_ and are saved in your device's local storage.
-- Stream selection using the `live -> streams` configuration option (see _Setting Streams For Live UI_ above)
+- Stream selection using the streams configuration option (see _Setting Streams For Live UI_ above)
- Streaming type:
- _No streaming_: Camera images will only update once per minute and no live streaming will occur.
- _Smart Streaming_ (default, recommended setting): Smart streaming will update your camera image once per minute when no detectable activity is occurring to conserve bandwidth and resources, since a static picture is the same as a streaming image with no motion or objects. When motion or objects are detected, the image seamlessly switches to a live stream.
@@ -203,6 +217,40 @@ Use a camera group if you want to change any of these settings from the defaults
:::
+### jsmpeg Stream Quality
+
+The jsmpeg live view resolution and encoding quality can be adjusted globally or per camera. These settings only affect the jsmpeg player and do not apply when go2rtc is used for live view.
+
+
+
+
+Navigate to for global defaults, or and select a camera for per-camera overrides.
+
+| Field | Description |
+| ---------------- | --------------------------------------------------------------------------------------------------- |
+| **Live height** | Height in pixels for the jsmpeg live stream; must be less than or equal to the detect stream height |
+| **Live quality** | Encoding quality for the jsmpeg stream (1 = highest, 31 = lowest) |
+
+
+
+
+```yaml
+# Global defaults
+live:
+ height: 720
+ quality: 8
+
+# Per-camera override
+cameras:
+ front_door:
+ live:
+ height: 480
+ quality: 4
+```
+
+
+
+
### Disabling cameras
Cameras can be temporarily disabled through the Frigate UI and through [MQTT](/integrations/mqtt#frigatecamera_nameenabledset) to conserve system resources. When disabled, Frigate's ffmpeg processes are terminated — recording stops, object detection is paused, and the Live dashboard displays a blank image with a disabled message. Review items, tracked objects, and historical footage for disabled cameras can still be accessed via the UI.
@@ -276,7 +324,7 @@ When your browser runs into problems playing back your camera streams, it will l
4. Look for messages prefixed with the camera name.
These logs help identify if the issue is player-specific (MSE vs. WebRTC) or related to camera configuration (e.g., go2rtc streams, codecs). If you see frequent errors:
- - Verify your camera's H.264/AAC settings (see [Frigate's camera settings recommendations](#camera_settings_recommendations)).
+ - Verify your camera's H.264/AAC settings (see [Frigate's camera settings recommendations](#camera-settings-recommendations)).
- Check go2rtc configuration for transcoding (e.g., audio to AAC/OPUS).
- Test with a different stream via the UI dropdown (if `live -> streams` is configured).
    - For WebRTC-specific issues, ensure port 8555 is forwarded and candidates are set (see [WebRTC Extra Configuration](#webrtc-extra-configuration)).
diff --git a/docs/docs/configuration/masks.md b/docs/docs/configuration/masks.md
index 32280531d..e497de2c1 100644
--- a/docs/docs/configuration/masks.md
+++ b/docs/docs/configuration/masks.md
@@ -3,6 +3,10 @@ id: masks
title: Masks
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
## Motion masks
Motion masks are used to prevent unwanted types of motion from triggering detection. Try watching the Debug feed (Settings --> Debug) with `Motion Boxes` enabled to see what may be regularly detected as motion. For example, you want to mask out your timestamp, the sky, rooftops, etc. Keep in mind that this mask only prevents motion from being detected and does not prevent objects from being detected if object detection was started due to motion in unmasked areas. Motion is also used during object tracking to refine the object detection area in the next frame. _Over-masking will make it more difficult for objects to be tracked._
@@ -17,17 +21,15 @@ Object filter masks can be used to filter out stubborn false positives in fixed

-## Using the mask creator
+## Creating masks
-To create a poly mask:
+
+
-1. Visit the Web UI
-2. Click/tap the gear icon and open "Settings"
-3. Select "Mask / zone editor"
-4. At the top right, select the camera you wish to create a mask or zone for
-5. Click the plus icon under the type of mask or zone you would like to create
-6. Click on the camera's latest image to create the points for a masked area. Click the first point again to close the polygon.
-7. When you've finished creating your mask, press Save.
+Navigate to and select a camera. Use the mask editor to draw motion masks and object filter masks directly on the camera feed. Each mask can be given a friendly name and toggled on or off.
+
+
+
Your config file will be updated with the relative coordinates of the mask/zone:
@@ -59,7 +61,7 @@ motion:
coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456"
```
-Object filter masks can also be created through the UI or manually in the config. They are configured under the object filters section for each object type:
+Object filter masks are configured under the object filters section for each object type:
```yaml
objects:
@@ -78,6 +80,9 @@ objects:
coordinates: "0.000,0.700,1.000,0.700,1.000,1.000,0.000,1.000"
```
+
+
+
## Enabling/Disabling Masks
Both motion masks and object filter masks can be toggled on or off without removing them from the configuration. Disabled masks are completely ignored at runtime - they will not affect motion detection or object filtering. This is useful for temporarily disabling a mask during certain seasons or times of day without modifying the configuration.
diff --git a/docs/docs/configuration/metrics.md b/docs/docs/configuration/metrics.md
index 662404205..d857d5eee 100644
--- a/docs/docs/configuration/metrics.md
+++ b/docs/docs/configuration/metrics.md
@@ -3,19 +3,42 @@ id: metrics
title: Metrics
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
# Metrics
Frigate exposes Prometheus metrics at the `/api/metrics` endpoint that can be used to monitor the performance and health of your Frigate instance.
+## Enabling Telemetry
+
+Prometheus metrics are exposed via the telemetry configuration. Enable or configure telemetry to control metric availability.
+
+
+
+
+Navigate to to configure metrics and telemetry settings.
+
+
+
+
+Metrics are available at `/api/metrics` by default. No additional Frigate configuration is required to expose them.
+
+
+
+
## Available Metrics
### System Metrics
+
- `frigate_cpu_usage_percent{pid="", name="", process="", type="", cmdline=""}` - Process CPU usage percentage
- `frigate_mem_usage_percent{pid="", name="", process="", type="", cmdline=""}` - Process memory usage percentage
- `frigate_gpu_usage_percent{gpu_name=""}` - GPU utilization percentage
- `frigate_gpu_mem_usage_percent{gpu_name=""}` - GPU memory usage percentage
### Camera Metrics
+
- `frigate_camera_fps{camera_name=""}` - Frames per second being consumed from your camera
- `frigate_detection_fps{camera_name=""}` - Number of times detection is run per second
- `frigate_process_fps{camera_name=""}` - Frames per second being processed
@@ -25,21 +48,25 @@ Frigate exposes Prometheus metrics at the `/api/metrics` endpoint that can be us
- `frigate_audio_rms{camera_name=""}` - Audio RMS for camera
### Detector Metrics
+
- `frigate_detector_inference_speed_seconds{name=""}` - Time spent running object detection in seconds
- `frigate_detection_start{name=""}` - Detector start time (unix timestamp)
### Storage Metrics
+
- `frigate_storage_free_bytes{storage=""}` - Storage free bytes
- `frigate_storage_total_bytes{storage=""}` - Storage total bytes
- `frigate_storage_used_bytes{storage=""}` - Storage used bytes
- `frigate_storage_mount_type{mount_type="", storage=""}` - Storage mount type info
### Service Metrics
+
- `frigate_service_uptime_seconds` - Uptime in seconds
- `frigate_service_last_updated_timestamp` - Stats recorded time (unix timestamp)
- `frigate_device_temperature{device=""}` - Device Temperature
### Event Metrics
+
- `frigate_camera_events{camera="", label=""}` - Count of camera events since exporter started
## Configuring Prometheus
@@ -48,10 +75,10 @@ To scrape metrics from Frigate, add the following to your Prometheus configurati
```yaml
scrape_configs:
- - job_name: 'frigate'
- metrics_path: '/api/metrics'
+ - job_name: "frigate"
+ metrics_path: "/api/metrics"
static_configs:
- - targets: ['frigate:5000']
+ - targets: ["frigate:5000"]
scrape_interval: 15s
```
diff --git a/docs/docs/configuration/motion_detection.md b/docs/docs/configuration/motion_detection.md
index 53e63272a..3f31d27db 100644
--- a/docs/docs/configuration/motion_detection.md
+++ b/docs/docs/configuration/motion_detection.md
@@ -3,6 +3,10 @@ id: motion_detection
title: Motion Detection
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
# Tuning Motion Detection
Frigate uses motion detection as a first line check to see if there is anything happening in the frame worth checking with object detection.
@@ -21,7 +25,7 @@ First, mask areas with regular motion not caused by the objects you want to dete
## Prepare For Testing
-The easiest way to tune motion detection is to use the Frigate UI under Settings > Motion Tuner. This screen allows the changing of motion detection values live to easily see the immediate effect on what is detected as motion.
+The recommended way to tune motion detection is to use the built-in Motion Tuner. Navigate to and select the camera you want to tune. This screen lets you adjust motion detection values live and immediately see the effect on what is detected as motion, making it the fastest way to find optimal settings for each camera.
## Tuning Motion Detection During The Day
@@ -37,6 +41,20 @@ Remember that motion detection is just used to determine when object detection s
The threshold value dictates how much of a change in a pixel's luminance is required to be considered motion.
+
+
+
+Navigate to to set the threshold globally.
+
+To override for a specific camera, navigate to and select the camera, or use the to adjust it live.
+
+| Field | Description |
+| -------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| **Motion threshold** | The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive. The value should be between 1 and 255. (default: 30) |
+
+
+
+
```yaml
motion:
# Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below)
@@ -45,12 +63,29 @@ motion:
threshold: 30
```
+
+
+
Lower values mean motion detection is more sensitive to changes in color, making it more likely, for example, to detect motion when a brown dog blends in with a brown fence or a person wearing a red shirt blends in with a red car. If the threshold is too low, however, things like grass blowing in the wind, shadows, etc. may be detected as motion.
Watching the motion boxes in the debug view, increase the threshold until you only see motion that is visible to the eye. Once this is done, it is important to test and ensure that desired motion is still detected.
### Contour Area
+
+
+
+Navigate to to set the contour area globally.
+
+To override for a specific camera, navigate to and select the camera, or use the to adjust it live.
+
+| Field | Description |
+| ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| **Contour area** | Minimum size in pixels in the resized motion image that counts as motion. Increasing this value will prevent smaller areas of motion from being detected. Decreasing will make motion detection more sensitive to smaller moving objects. As a rule of thumb: 10 = high sensitivity, 30 = medium sensitivity, 50 = low sensitivity. (default: 10) |
+
+
+
+
```yaml
motion:
# Optional: Minimum size in pixels in the resized motion image that counts as motion (default: shown below)
@@ -63,6 +98,9 @@ motion:
contour_area: 10
```
+
+
+
Once the threshold calculation is run, the pixels that have changed are grouped together. The contour area value is used to decide which groups of changed pixels qualify as motion. Smaller values are more sensitive meaning people that are far away, small animals, etc. are more likely to be detected as motion, but it also means that small changes in shadows, leaves, etc. are detected as motion. Higher values are less sensitive meaning these things won't be detected as motion but with the risk that desired motion won't be detected until closer to the camera.
Watching the motion boxes in the debug view, adjust the contour area until there are no motion boxes smaller than the smallest moving object you'd expect Frigate to detect.
@@ -81,6 +119,20 @@ However, if the preferred day settings do not work well at night it is recommend
### Lightning Threshold
+
+
+
+Navigate to and expand the advanced fields to find the lightning threshold setting.
+
+To override for a specific camera, navigate to and select the camera.
+
+| Field | Description |
+| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| **Lightning threshold** | The percentage of the image used to detect lightning or other substantial changes where motion detection needs to recalibrate. Increasing this value will make motion detection more likely to consider lightning or IR mode changes as valid motion. Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching a doorbell camera. (default: 0.8) |
+
+
+
+
```yaml
motion:
# Optional: The percentage of the image used to detect lightning or
@@ -94,6 +146,9 @@ motion:
lightning_threshold: 0.8
```
+
+
+
Large changes in motion like PTZ moves and camera switches between Color and IR mode should result in a pause in object detection. `lightning_threshold` defines the percentage of the image used to detect these substantial changes. Increasing this value makes motion detection more likely to treat large changes (like IR mode switches) as valid motion. Decreasing it makes motion detection more likely to ignore large amounts of motion, such as a person approaching a doorbell camera.
Note that `lightning_threshold` does **not** stop motion-based recordings from being saved — it only prevents additional motion analysis after the threshold is exceeded, reducing false positive object detections during high-motion periods (e.g. storms or PTZ sweeps) without interfering with recordings.
@@ -106,6 +161,20 @@ Some cameras, like doorbell cameras, may have missed detections when someone wal
### Skip Motion On Large Scene Changes
+
+
+
+Navigate to and expand the advanced fields to find the skip motion threshold setting.
+
+To override for a specific camera, navigate to and select the camera.
+
+| Field | Description |
+| ------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| **Skip motion threshold** | Fraction of the frame that must change in a single update before Frigate will completely ignore any motion in that frame. Values range between 0.0 and 1.0; leave unset (null) to disable. For example, setting this to 0.7 causes Frigate to skip reporting motion boxes when more than 70% of the image appears to change (e.g. during lightning storms, IR/color mode switches, or other sudden lighting events). |
+
+
+
+
```yaml
motion:
# Optional: Fraction of the frame that must change in a single update
@@ -118,6 +187,9 @@ motion:
skip_motion_threshold: 0.7
```
+
+
+
This option is handy when you want to prevent large transient changes from triggering recordings or object detection. It differs from `lightning_threshold` because it completely suppresses motion instead of just forcing a recalibration.
:::warning
diff --git a/docs/docs/configuration/notifications.md b/docs/docs/configuration/notifications.md
index b5e1600e4..0ba84b8aa 100644
--- a/docs/docs/configuration/notifications.md
+++ b/docs/docs/configuration/notifications.md
@@ -3,6 +3,10 @@ id: notifications
title: Notifications
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
# Notifications
Frigate offers native notifications using the [WebPush Protocol](https://web.dev/articles/push-notifications-web-push-protocol) which uses the [VAPID spec](https://tools.ietf.org/html/draft-thomson-webpush-vapid) to deliver notifications to web apps using encryption.
@@ -18,15 +22,27 @@ In order to use notifications the following requirements must be met:
### Configuration
-To configure notifications, go to the Frigate WebUI -> Settings -> Notifications and enable, then fill out the fields and save.
+Enable notifications and fill out the required fields.
-Optionally, you can change the default cooldown period for notifications through the `cooldown` parameter in your config file. This parameter can also be overridden at the camera level.
+Optionally, change the default cooldown period for notifications. The cooldown can also be overridden at the camera level.
Notifications will be prevented if either:
- The global cooldown period hasn't elapsed since any camera's last notification
- The camera-specific cooldown period hasn't elapsed for the specific camera
+#### Global notifications
+
+
+
+
+1. Navigate to .
+ - Set **Email** to your email address
+ - Enable notifications for the desired cameras
+
+
+
+
```yaml
notifications:
enabled: True
@@ -34,6 +50,21 @@ notifications:
cooldown: 10 # wait 10 seconds before sending another notification from any camera
```
+
+
+
+#### Per-camera notifications
+
+
+
+
+1. Navigate to and select the desired camera.
+ - Set **Enable notifications** to on
+ - Set **Cooldown period** to the desired number of seconds to wait before sending another notification from this camera (e.g. `30`)
+
+
+
+
```yaml
cameras:
doorbell:
@@ -43,6 +74,9 @@ cameras:
cooldown: 30 # wait 30 seconds before sending another notification from the doorbell camera
```
+
+
+
### Registration
Once notifications are enabled, press the `Register for Notifications` button on all devices that you would like to receive notifications on. This will register the background worker. After this Frigate must be restarted and then notifications will begin to be sent.
diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md
index c16d3f5dc..5d74ec392 100644
--- a/docs/docs/configuration/object_detectors.md
+++ b/docs/docs/configuration/object_detectors.md
@@ -4,6 +4,9 @@ title: Object Detectors
---
import CommunityBadge from '@site/src/components/CommunityBadge';
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
# Supported Hardware
@@ -53,7 +56,6 @@ Frigate supports multiple different detectors that work on different types of ha
- [AXEngine](#axera): axmodels can run on AXERA AI acceleration.
-
**For Testing**
- [CPU Detector (not recommended for actual use)](#cpu-detector-not-recommended): Use a CPU to run a tflite model, this is not recommended and in most cases OpenVINO can be used in CPU mode with better results.
@@ -86,6 +88,14 @@ See [common Edge TPU troubleshooting steps](/troubleshooting/edgetpu) if the Edg
### Single USB Coral
+
+
+
+Navigate to and select the **EdgeTPU** detector type with device set to `usb`.
+
+
+
+
```yaml
detectors:
coral:
@@ -93,8 +103,19 @@ detectors:
device: usb
```
+
+
+
### Multiple USB Corals
+
+
+
+Navigate to and add multiple Edge TPU detectors, specifying `usb:0` and `usb:1` as the device for each.
+
+
+
+
```yaml
detectors:
coral1:
@@ -105,10 +126,21 @@ detectors:
device: usb:1
```
+
+
+
### Native Coral (Dev Board)
_warning: may have [compatibility issues](https://github.com/blakeblackshear/frigate/issues/1706) after `v0.9.x`_
+
+
+
+Navigate to and select the **EdgeTPU** detector type with the device field left empty.
+
+
+
+
```yaml
detectors:
coral:
@@ -116,8 +148,19 @@ detectors:
device: ""
```
+
+
+
### Single PCIE/M.2 Coral
+
+
+
+Navigate to and select the **EdgeTPU** detector type with device set to `pci`.
+
+
+
+
```yaml
detectors:
coral:
@@ -125,8 +168,19 @@ detectors:
device: pci
```
+
+
+
### Multiple PCIE/M.2 Corals
+
+
+
+Navigate to and add multiple Edge TPU detectors, specifying `pci:0` and `pci:1` as the device for each.
+
+
+
+
```yaml
detectors:
coral1:
@@ -137,8 +191,19 @@ detectors:
device: pci:1
```
+
+
+
### Mixing Corals
+
+
+
+Navigate to and add multiple Edge TPU detectors with different device types (e.g., `usb` and `pci`).
+
+
+
+
```yaml
detectors:
coral_usb:
@@ -149,6 +214,9 @@ detectors:
device: pci
```
+
+
+
### EdgeTPU Supported Models
| Model | Notes |
@@ -173,7 +241,23 @@ YOLOv9 models that are compiled for TensorFlow Lite and properly quantized are s
YOLOv9 Setup & Config
-After placing the downloaded files for the tflite model and labels in your config folder, you can use the following configuration:
+After placing the downloaded files for the tflite model and labels in your config folder, use the following configuration:
+
+
+
+
+Navigate to and select the **EdgeTPU** detector type with device set to `usb`. Then navigate to and configure the model settings:
+
+| Field | Value |
+| ---------------------------------------- | ----------------------------------------------------------------- |
+| **Object Detection Model Type** | `yolo-generic` |
+| **Object detection model input width** | `320` (should match the imgsize of the model) |
+| **Object detection model input height** | `320` (should match the imgsize of the model) |
+| **Custom object detector model path** | `/config/model_cache/yolov9-s-relu6-best_320_int8_edgetpu.tflite` |
+| **Label map for custom object detector** | `/config/labels-coco17.txt` |
+
+
+
```yaml
detectors:
@@ -189,6 +273,9 @@ model:
labelmap_path: /config/labels-coco17.txt
```
+
+
+
Note that due to hardware limitations of the Coral, the labelmap is a subset of the COCO labels and includes only 17 object classes.
@@ -199,7 +286,7 @@ Note that due to hardware limitations of the Coral, the labelmap is a subset of
This detector is available for use with both Hailo-8 and Hailo-8L AI Acceleration Modules. The integration automatically detects your hardware architecture via the Hailo CLI and selects the appropriate default model if no custom model is specified.
-See the [installation docs](../frigate/installation.md#hailo-8l) for information on configuring the Hailo hardware.
+See the [installation docs](../frigate/installation.md#hailo-8) for information on configuring the Hailo hardware.
### Configuration
@@ -213,6 +300,26 @@ Use this configuration for YOLO-based models. When no custom model path or URL i
- **Hailo-8 hardware:** Uses **YOLOv6n** (default: `yolov6n.hef`)
- **Hailo-8L hardware:** Uses **YOLOv6n** (default: `yolov6n.hef`)
+
+
+
+Navigate to and select the **Hailo-8/Hailo-8L** detector type with device set to `PCIe`. Then navigate to and configure the model settings:
+
+| Field | Value |
+| ---------------------------------------- | ----------------------- |
+| **Object detection model input width** | `320` |
+| **Object detection model input height** | `320` |
+| **Model Input Tensor Shape** | `nhwc` |
+| **Model Input Pixel Color Format** | `rgb` |
+| **Model Input D Type** | `int` |
+| **Object Detection Model Type** | `yolo-generic` |
+| **Label map for custom object detector** | `/labelmap/coco-80.txt` |
+
+The detector automatically selects the default model based on your hardware. Optionally, specify a local model path or URL to override.
+
+
+
+
```yaml
detectors:
hailo:
@@ -242,10 +349,31 @@ model:
# just make sure to give it the right configuration based on the model
```
+
+
+
#### SSD
For SSD-based models, provide either a model path or URL to your compiled SSD model. The integration will first check the local path before downloading if necessary.
+
+
+
+Navigate to and select the **Hailo-8/Hailo-8L** detector type with device set to `PCIe`. Then navigate to and configure the model settings:
+
+| Field | Value |
+| --------------------------------------- | ------ |
+| **Object detection model input width** | `300` |
+| **Object detection model input height** | `300` |
+| **Model Input Tensor Shape** | `nhwc` |
+| **Model Input Pixel Color Format** | `rgb` |
+| **Object Detection Model Type** | `ssd` |
+
+Specify the local model path or URL for SSD MobileNet v1.
+
+
+
+
```yaml
detectors:
hailo:
@@ -266,10 +394,21 @@ model:
# path: https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/ModelZoo/Compiled/v2.14.0/hailo8l/ssd_mobilenet_v1.hef
```
+
+
+
#### Custom Models
The Hailo detector supports all YOLO models compiled for Hailo hardware that include post-processing. You can specify a custom URL or a local path to download or use your model directly. If both are provided, the detector checks the local path first.
+
+
+
+Navigate to and select the **Hailo-8/Hailo-8L** detector type with device set to `PCIe`. Then navigate to and configure the model settings to match your custom model dimensions and format.
+
+
+
+
```yaml
detectors:
hailo:
@@ -291,6 +430,9 @@ model:
# path: https://custom-model-url.com/path/to/model.hef
```
+
+
+
For additional ready-to-use models, please visit: https://github.com/hailo-ai/hailo_model_zoo
Hailo8 supports all models in the Hailo Model Zoo that include HailoRT post-processing. You're welcome to choose any of these pre-configured models for your implementation.
@@ -314,6 +456,14 @@ OpenVINO is supported on 6th Gen Intel platforms (Skylake) and newer. It will al
When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming GPU resources are available. An example configuration would be:
+
+
+
+Navigate to and add multiple **OpenVINO** detectors, each targeting `GPU` or `NPU`.
+
+
+
+
```yaml
detectors:
ov_0:
@@ -324,6 +474,9 @@ detectors:
device: GPU # or NPU
```
+
+
+
:::
### OpenVINO Supported Models
@@ -346,6 +499,23 @@ An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobil
Use the model configuration shown below when using the OpenVINO detector with the default OpenVINO model:
+
+
+
+Navigate to and select the **OpenVINO** detector type with device set to `GPU` (or `NPU`). Then navigate to and configure:
+
+| Field | Value |
+| ---------------------------------------- | ------------------------------------------ |
+| **Object detection model input width** | `300` |
+| **Object detection model input height** | `300` |
+| **Model Input Tensor Shape** | `nhwc` |
+| **Model Input Pixel Color Format** | `bgr` |
+| **Custom object detector model path** | `/openvino-model/ssdlite_mobilenet_v2.xml` |
+| **Label map for custom object detector** | `/openvino-model/coco_91cl_bkgr.txt` |
+
+
+
+
```yaml
detectors:
ov:
@@ -361,6 +531,9 @@ model:
labelmap_path: /openvino-model/coco_91cl_bkgr.txt
```
+
+
+
#### YOLOX
@@ -374,7 +547,25 @@ This detector also supports YOLOX. Frigate does not come with any YOLOX models p
YOLO-NAS Setup & Config
-After placing the downloaded onnx model in your config folder, you can use the following configuration:
+After placing the downloaded onnx model in your config folder, use the following configuration:
+
+
+
+
+Navigate to and select the **OpenVINO** detector type with device set to `GPU`. Then navigate to and configure:
+
+| Field | Value |
+| ---------------------------------------- | ------------------------------------------------- |
+| **Object Detection Model Type** | `yolonas` |
+| **Object detection model input width** | `320` (should match whatever was set in notebook) |
+| **Object detection model input height** | `320` (should match whatever was set in notebook) |
+| **Model Input Tensor Shape** | `nchw` |
+| **Model Input Pixel Color Format** | `bgr` |
+| **Custom object detector model path** | `/config/yolo_nas_s.onnx` |
+| **Label map for custom object detector** | `/labelmap/coco-80.txt` |
+
+
+
```yaml
detectors:
@@ -392,6 +583,9 @@ model:
labelmap_path: /labelmap/coco-80.txt
```
+
+
+
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
@@ -415,7 +609,25 @@ If you are using a Frigate+ model, you should not define any of the below `model
:::
-After placing the downloaded onnx model in your config folder, you can use the following configuration:
+After placing the downloaded onnx model in your config folder, use the following configuration:
+
+
+
+
+Navigate to and select the **OpenVINO** detector type with device set to `GPU` (or `NPU`). Then navigate to and configure:
+
+| Field | Value |
+| ---------------------------------------- | -------------------------------------------------------- |
+| **Object Detection Model Type** | `yolo-generic` |
+| **Object detection model input width** | `320` (should match the imgsize set during model export) |
+| **Object detection model input height** | `320` (should match the imgsize set during model export) |
+| **Model Input Tensor Shape** | `nchw` |
+| **Model Input D Type** | `float` |
+| **Custom object detector model path** | `/config/model_cache/yolo.onnx` |
+| **Label map for custom object detector** | `/labelmap/coco-80.txt` |
+
+
+
```yaml
detectors:
@@ -433,6 +645,9 @@ model:
labelmap_path: /labelmap/coco-80.txt
```
+
+
+
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
@@ -450,7 +665,24 @@ Due to the size and complexity of the RF-DETR model, it is only recommended to b
RF-DETR Setup & Config
-After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration:
+After placing the downloaded onnx model in your `config/model_cache` folder, use the following configuration:
+
+
+
+
+Navigate to and select the **OpenVINO** detector type with device set to `GPU`. Then navigate to and configure:
+
+| Field | Value |
+| --------------------------------------- | --------------------------------- |
+| **Object Detection Model Type** | `rfdetr` |
+| **Object detection model input width** | `320` |
+| **Object detection model input height** | `320` |
+| **Model Input Tensor Shape** | `nchw` |
+| **Model Input D Type** | `float` |
+| **Custom object detector model path** | `/config/model_cache/rfdetr.onnx` |
+
+
+
```yaml
detectors:
@@ -467,6 +699,9 @@ model:
path: /config/model_cache/rfdetr.onnx
```
+
+
+
#### D-FINE
@@ -482,7 +717,25 @@ Currently D-FINE models only run on OpenVINO in CPU mode, GPUs currently fail to
D-FINE Setup & Config
-After placing the downloaded onnx model in your config/model_cache folder, you can use the following configuration:
+After placing the downloaded onnx model in your config/model_cache folder, use the following configuration:
+
+
+
+
+Navigate to and select the **OpenVINO** detector type with device set to `CPU`. Then navigate to and configure:
+
+| Field | Value |
+| ---------------------------------------- | ---------------------------------- |
+| **Object Detection Model Type** | `dfine` |
+| **Object detection model input width** | `640` |
+| **Object detection model input height** | `640` |
+| **Model Input Tensor Shape** | `nchw` |
+| **Model Input D Type** | `float` |
+| **Custom object detector model path** | `/config/model_cache/dfine-s.onnx` |
+| **Label map for custom object detector** | `/labelmap/coco-80.txt` |
+
+
+
```yaml
detectors:
@@ -500,6 +753,9 @@ model:
labelmap_path: /labelmap/coco-80.txt
```
+
+
+
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
@@ -517,6 +773,14 @@ The NPU in Apple Silicon can't be accessed from within a container, so the [Appl
Using the detector config below will connect to the client:
+
+
+
+Navigate to and select the **ZMQ IPC** detector type with the endpoint set to `tcp://host.docker.internal:5555`.
+
+
+
+
```yaml
detectors:
apple-silicon:
@@ -524,6 +788,9 @@ detectors:
endpoint: tcp://host.docker.internal:5555
```
+
+
+
### Apple Silicon Supported Models
There is no default model provided, the following formats are supported:
@@ -540,6 +807,24 @@ The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv
When Frigate is started with the following config it will connect to the detector client and transfer the model automatically:
+
+
+
+Navigate to and select the **ZMQ IPC** detector type with the endpoint set to `tcp://host.docker.internal:5555`. Then navigate to and configure:
+
+| Field | Value |
+| ---------------------------------------- | -------------------------------------------------------- |
+| **Object Detection Model Type** | `yolo-generic` |
+| **Object detection model input width** | `320` (should match the imgsize set during model export) |
+| **Object detection model input height** | `320` (should match the imgsize set during model export) |
+| **Model Input Tensor Shape** | `nchw` |
+| **Model Input D Type** | `float` |
+| **Custom object detector model path** | `/config/model_cache/yolo.onnx` |
+| **Label map for custom object detector** | `/labelmap/coco-80.txt` |
+
+
+
+
```yaml
detectors:
apple-silicon:
@@ -556,13 +841,16 @@ model:
labelmap_path: /labelmap/coco-80.txt
```
+
+
+
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
## AMD/ROCm GPU detector
### Setup
-Support for AMD GPUs is provided using the [ONNX detector](#ONNX). In order to utilize the AMD GPU for object detection use a frigate docker image with `-rocm` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-rocm`.
+Support for AMD GPUs is provided using the [ONNX detector](#onnx). In order to utilize the AMD GPU for object detection use a frigate docker image with `-rocm` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-rocm`.
### Docker settings for GPU access
@@ -680,6 +968,14 @@ If the correct build is used for your GPU then the GPU will be detected and used
When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming GPU resources are available. An example configuration would be:
+
+
+
+Navigate to and add multiple **ONNX** detectors.
+
+
+
+
```yaml
detectors:
onnx_0:
@@ -688,6 +984,9 @@ detectors:
type: onnx
```
+
+
+
:::
### ONNX Supported Models
@@ -715,7 +1014,25 @@ If you are using a Frigate+ YOLO-NAS model, you should not define any of the bel
:::
-After placing the downloaded onnx model in your config folder, you can use the following configuration:
+After placing the downloaded onnx model in your config folder, use the following configuration:
+
+
+
+
+Navigate to and select the **ONNX** detector type. Then navigate to and configure:
+
+| Field | Value |
+| ---------------------------------------- | ------------------------------------------------- |
+| **Object Detection Model Type** | `yolonas` |
+| **Object detection model input width** | `320` (should match whatever was set in notebook) |
+| **Object detection model input height** | `320` (should match whatever was set in notebook) |
+| **Model Input Pixel Color Format** | `bgr` |
+| **Model Input Tensor Shape** | `nchw` |
+| **Custom object detector model path** | `/config/yolo_nas_s.onnx` |
+| **Label map for custom object detector** | `/labelmap/coco-80.txt` |
+
+
+
```yaml
detectors:
@@ -732,6 +1049,9 @@ model:
labelmap_path: /labelmap/coco-80.txt
```
+
+
+
#### YOLO (v3, v4, v7, v9)
@@ -753,7 +1073,25 @@ If you are using a Frigate+ model, you should not define any of the below `model
:::
-After placing the downloaded onnx model in your config folder, you can use the following configuration:
+After placing the downloaded onnx model in your config folder, use the following configuration:
+
+
+
+
+Navigate to and select the **ONNX** detector type. Then navigate to and configure:
+
+| Field | Value |
+| ---------------------------------------- | -------------------------------------------------------- |
+| **Object Detection Model Type** | `yolo-generic` |
+| **Object detection model input width** | `320` (should match the imgsize set during model export) |
+| **Object detection model input height** | `320` (should match the imgsize set during model export) |
+| **Model Input Tensor Shape** | `nchw` |
+| **Model Input D Type** | `float` |
+| **Custom object detector model path** | `/config/model_cache/yolo.onnx` |
+| **Label map for custom object detector** | `/labelmap/coco-80.txt` |
+
+
+
```yaml
detectors:
@@ -770,6 +1108,9 @@ model:
labelmap_path: /labelmap/coco-80.txt
```
+
+
+
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
@@ -781,7 +1122,25 @@ Note that the labelmap uses a subset of the complete COCO label set that has onl
YOLOx Setup & Config
-After placing the downloaded onnx model in your config folder, you can use the following configuration:
+After placing the downloaded onnx model in your config folder, use the following configuration:
+
+
+
+
+Navigate to and select the **ONNX** detector type. Then navigate to and configure:
+
+| Field | Value |
+| ---------------------------------------- | -------------------------------------------------------- |
+| **Object Detection Model Type** | `yolox` |
+| **Object detection model input width** | `416` (should match the imgsize set during model export) |
+| **Object detection model input height** | `416` (should match the imgsize set during model export) |
+| **Model Input Tensor Shape** | `nchw` |
+| **Model Input D Type** | `float_denorm` |
+| **Custom object detector model path** | `/config/model_cache/yolox_tiny.onnx` |
+| **Label map for custom object detector** | `/labelmap/coco-80.txt` |
+
+
+
```yaml
detectors:
@@ -798,6 +1157,9 @@ model:
labelmap_path: /labelmap/coco-80.txt
```
+
+
+
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
@@ -809,7 +1171,24 @@ Note that the labelmap uses a subset of the complete COCO label set that has onl
RF-DETR Setup & Config
-After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration:
+After placing the downloaded onnx model in your `config/model_cache` folder, use the following configuration:
+
+
+
+
+Navigate to and select the **ONNX** detector type. Then navigate to and configure:
+
+| Field | Value |
+| --------------------------------------- | --------------------------------- |
+| **Object Detection Model Type** | `rfdetr` |
+| **Object detection model input width** | `320` |
+| **Object detection model input height** | `320` |
+| **Model Input Tensor Shape** | `nchw` |
+| **Model Input D Type** | `float` |
+| **Custom object detector model path** | `/config/model_cache/rfdetr.onnx` |
+
+
+
```yaml
detectors:
@@ -825,6 +1204,9 @@ model:
path: /config/model_cache/rfdetr.onnx
```
+
+
+
#### D-FINE
@@ -834,7 +1216,25 @@ model:
D-FINE Setup & Config
-After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration:
+After placing the downloaded onnx model in your `config/model_cache` folder, use the following configuration:
+
+
+
+
+Navigate to and select the **ONNX** detector type. Then navigate to and configure:
+
+| Field | Value |
+| ---------------------------------------- | ------------------------------------------- |
+| **Object Detection Model Type** | `dfine` |
+| **Object detection model input width** | `640` |
+| **Object detection model input height** | `640` |
+| **Model Input Tensor Shape** | `nchw` |
+| **Model Input D Type** | `float` |
+| **Custom object detector model path** | `/config/model_cache/dfine_m_obj2coco.onnx` |
+| **Label map for custom object detector** | `/labelmap/coco-80.txt` |
+
+
+
```yaml
detectors:
@@ -851,6 +1251,9 @@ model:
labelmap_path: /labelmap/coco-80.txt
```
+
+
+
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
@@ -869,6 +1272,14 @@ The number of threads used by the interpreter can be specified using the `"num_t
A TensorFlow Lite model is provided in the container at `/cpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`.
+
+
+
+Navigate to and select the **CPU** detector type. Configure the number of threads and add additional CPU detectors as needed (one per camera is recommended).
+
+
+
+
```yaml
detectors:
cpu1:
@@ -882,6 +1293,9 @@ model:
path: "/custom_model.tflite"
```
+
+
+
When using CPU detectors, you can add one CPU detector per camera. Adding more detectors than the number of cameras should not improve performance.
## Deepstack / CodeProject.AI Server Detector
@@ -892,7 +1306,15 @@ The Deepstack / CodeProject.AI Server detector for Frigate allows you to integra
To get started with CodeProject.AI, visit their [official website](https://www.codeproject.com/Articles/5322557/CodeProject-AI-Server-AI-the-easy-way) to follow the instructions to download and install the AI server on your preferred device. Detailed setup instructions for CodeProject.AI are outside the scope of the Frigate documentation.
-To integrate CodeProject.AI into Frigate, you'll need to make the following changes to your Frigate configuration file:
+To integrate CodeProject.AI into Frigate, configure the detector as follows:
+
+
+
+
+Navigate to and select the **DeepStack** detector type. Set the API URL to point to your CodeProject.AI server (e.g., `http://<ip>:<port>/v1/vision/detection`).
+
+
+
```yaml
detectors:
@@ -902,6 +1324,9 @@ detectors:
api_timeout: 0.1 # seconds
```
+
+
+
Replace `` and `` with the IP address and port of your CodeProject.AI server.
To verify that the integration is working correctly, start Frigate and observe the logs for any error messages related to CodeProject.AI. Additionally, you can check the Frigate web interface to see if the objects detected by CodeProject.AI are being displayed and tracked properly.
@@ -922,6 +1347,14 @@ To configure the MemryX detector, use the following example configuration:
#### Single PCIe MemryX MX3
+
+
+
+Navigate to and select the **MemryX** detector type with device set to `PCIe:0`.
+
+
+
+
```yaml
detectors:
memx0:
@@ -929,8 +1362,19 @@ detectors:
device: PCIe:0
```
+
+
+
#### Multiple PCIe MemryX MX3 Modules
+
+
+
+Navigate to and add multiple **MemryX** detectors, specifying `PCIe:0`, `PCIe:1`, `PCIe:2`, etc. as the device for each.
+
+
+
+
```yaml
detectors:
memx0:
@@ -946,6 +1390,9 @@ detectors:
device: PCIe:2
```
+
+
+
### Supported Models
MemryX `.dfp` models are automatically downloaded at runtime, if enabled, to the container at `/memryx_models/model_folder/`.
@@ -964,6 +1411,23 @@ The input size for **YOLO-NAS** can be set to either **320x320** (default) or **
Below is the recommended configuration for using the **YOLO-NAS** (small) model with the MemryX detector:
+
+
+
+Navigate to and select the **MemryX** detector type with device set to `PCIe:0`. Then navigate to and configure:
+
+| Field | Value |
+| ---------------------------------------- | ------------------------------------------------- |
+| **Object Detection Model Type** | `yolonas` |
+| **Object detection model input width** | `320` (can be set to `640` for higher resolution) |
+| **Object detection model input height** | `320` (can be set to `640` for higher resolution) |
+| **Model Input Tensor Shape** | `nchw` |
+| **Model Input D Type** | `float` |
+| **Label map for custom object detector** | `/labelmap/coco-80.txt` |
+
+
+
+
```yaml
detectors:
memx0:
@@ -984,6 +1448,9 @@ model:
# └── yolonas_post.onnx (optional; only if the model includes a cropped post-processing network)
```
+
+
+
#### YOLOv9
The YOLOv9s model included in this detector is downloaded from [the original GitHub](https://github.com/WongKinYiu/yolov9) like in the [Models Section](#yolov9-1) and compiled to DFP with [mx_nc](https://developer.memryx.com/tools/neural_compiler.html#usage).
@@ -992,6 +1459,23 @@ The YOLOv9s model included in this detector is downloaded from [the original Git
Below is the recommended configuration for using the **YOLOv9** (small) model with the MemryX detector:
+
+
+
+Navigate to and select the **MemryX** detector type with device set to `PCIe:0`. Then navigate to and configure:
+
+| Field | Value |
+| ---------------------------------------- | ------------------------------------------------- |
+| **Object Detection Model Type** | `yolo-generic` |
+| **Object detection model input width** | `320` (can be set to `640` for higher resolution) |
+| **Object detection model input height** | `320` (can be set to `640` for higher resolution) |
+| **Model Input Tensor Shape** | `nchw` |
+| **Model Input D Type** | `float` |
+| **Label map for custom object detector** | `/labelmap/coco-80.txt` |
+
+
+
+
```yaml
detectors:
memx0:
@@ -1011,6 +1495,9 @@ model:
# ├── yolov9.dfp (a file ending with .dfp)
```
+
+
+
#### YOLOX
The model is sourced from the [OpenCV Model Zoo](https://github.com/opencv/opencv_zoo) and precompiled to DFP.
@@ -1019,6 +1506,23 @@ The model is sourced from the [OpenCV Model Zoo](https://github.com/opencv/openc
Below is the recommended configuration for using the **YOLOX** (small) model with the MemryX detector:
+
+
+
+Navigate to and select the **MemryX** detector type with device set to `PCIe:0`. Then navigate to and configure:
+
+| Field | Value |
+| ---------------------------------------- | ----------------------- |
+| **Object Detection Model Type** | `yolox` |
+| **Object detection model input width** | `640` |
+| **Object detection model input height** | `640` |
+| **Model Input Tensor Shape** | `nchw` |
+| **Model Input D Type** | `float_denorm` |
+| **Label map for custom object detector** | `/labelmap/coco-80.txt` |
+
+
+
+
```yaml
detectors:
memx0:
@@ -1038,6 +1542,9 @@ model:
# ├── yolox.dfp (a file ending with .dfp)
```
+
+
+
#### SSDLite MobileNet v2
The model is sourced from the [OpenMMLab Model Zoo](https://mmdeploy-oss.openmmlab.com/model/mmdet-det/ssdlite-e8679f.onnx) and has been converted to DFP.
@@ -1046,6 +1553,23 @@ The model is sourced from the [OpenMMLab Model Zoo](https://mmdeploy-oss.openmml
Below is the recommended configuration for using the **SSDLite MobileNet v2** model with the MemryX detector:
+
+
+
+Navigate to and select the **MemryX** detector type with device set to `PCIe:0`. Then navigate to and configure:
+
+| Field | Value |
+| ---------------------------------------- | ----------------------- |
+| **Object Detection Model Type** | `ssd` |
+| **Object detection model input width** | `320` |
+| **Object detection model input height** | `320` |
+| **Model Input Tensor Shape** | `nchw` |
+| **Model Input D Type** | `float` |
+| **Label map for custom object detector** | `/labelmap/coco-80.txt` |
+
+
+
+
```yaml
detectors:
memx0:
@@ -1066,6 +1590,9 @@ model:
# └── ssdlite_mobilenet_post.onnx (optional; only if the model includes a cropped post-processing network)
```
+
+
+
#### Using a Custom Model
To use your own model:
@@ -1165,6 +1692,23 @@ The TensorRT detector uses `.trt` model files that are located in `/config/model
Use the config below to work with generated TRT models:
+
+
+
+Navigate to and select the **TensorRT** detector type with the device set to `0` (the default GPU index). Then navigate to and configure:
+
+| Field | Value |
+| ---------------------------------------- | ------------------------------------------------------------ |
+| **Custom object detector model path** | `/config/model_cache/tensorrt/yolov7-320.trt` |
+| **Label map for custom object detector** | `/labelmap/coco-80.txt` |
+| **Model Input Tensor Shape** | `nchw` |
+| **Model Input Pixel Color Format** | `rgb` |
+| **Object detection model input width** | `320` (MUST match the chosen model, e.g., yolov7-320 -> 320) |
+| **Object detection model input height** | `320` (MUST match the chosen model, e.g., yolov7-320 -> 320) |
+
+
+
+
```yaml
detectors:
tensorrt:
@@ -1180,6 +1724,9 @@ model:
height: 320 # MUST match the chosen model i.e yolov7-320 -> 320 yolov4-416 -> 416
```
+
+
+
## Synaptics
Hardware accelerated object detection is supported on the following SoCs:
@@ -1202,6 +1749,22 @@ A synap model is provided in the container at /mobilenet.synap and is used by th
Use the model configuration shown below when using the synaptics detector with the default synap model:
+
+
+
+Navigate to and select the **Synaptics** detector type. Then navigate to and configure:
+
+| Field | Value |
+| ---------------------------------------- | ---------------------------- |
+| **Custom object detector model path** | `/synaptics/mobilenet.synap` |
+| **Object detection model input width** | `224` |
+| **Object detection model input height** | `224` |
+| **Tensor format** | `nhwc` |
+| **Label map for custom object detector** | `/labelmap/coco-80.txt` |
+
+
+
+
```yaml
detectors: # required
synap_npu: # required
@@ -1215,6 +1778,9 @@ model: # required
labelmap_path: /labelmap/coco-80.txt # required
```
+
+
+
## Rockchip platform
Hardware accelerated object detection is supported on the following SoCs:
@@ -1231,6 +1797,14 @@ This implementation uses the [Rockchip's RKNN-Toolkit2](https://github.com/airoc
When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming NPU resources are available. An example configuration would be:
+
+
+
+Navigate to and add multiple **RKNN** detectors, each with `num_cores` set to `0` for automatic selection.
+
+
+
+
```yaml
detectors:
rknn_0:
@@ -1241,6 +1815,9 @@ detectors:
num_cores: 0
```
+
+
+
:::
### Prerequisites
@@ -1262,6 +1839,14 @@ $ cat /sys/kernel/debug/rknpu/load
This `config.yml` shows all relevant options to configure the detector and explains them. All values shown are the default values (except for two). Lines that are required at least to use the detector are labeled as required, all other lines are optional.
+
+
+
+Navigate to and select the **RKNN** detector type. Set `num_cores` to `0` for automatic selection (increase for better performance on multicore NPUs, e.g., set to `3` on rk3588).
+
+
+
+
```yaml
detectors: # required
rknn: # required
@@ -1272,6 +1857,9 @@ detectors: # required
num_cores: 0
```
+
+
+
The inference time was determined on a rk3588 with 3 NPU cores.
| Model | Size in mb | Inference time in ms |
@@ -1288,6 +1876,24 @@ The inference time was determined on a rk3588 with 3 NPU cores.
#### YOLO-NAS
+
+
+
+Navigate to and configure:
+
+| Field | Value |
+| ---------------------------------------- | ----------------------------------------------------------------------- |
+| **Custom object detector model path** | `deci-fp16-yolonas_s` (or `deci-fp16-yolonas_m`, `deci-fp16-yolonas_l`) |
+| **Object Detection Model Type** | `yolonas` |
+| **Object detection model input width** | `320` |
+| **Object detection model input height** | `320` |
+| **Model Input Pixel Color Format** | `bgr` |
+| **Model Input Tensor Shape** | `nhwc` |
+| **Label map for custom object detector** | `/labelmap/coco-80.txt` |
+
+
+
+
```yaml
model: # required
# name of model (will be automatically downloaded) or path to your own .rknn model file
@@ -1305,6 +1911,9 @@ model: # required
labelmap_path: /labelmap/coco-80.txt
```
+
+
+
:::warning
The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html
@@ -1313,6 +1922,23 @@ The pre-trained YOLO-NAS weights from DeciAI are subject to their license and ca
#### YOLO (v9)
+
+
+
+Navigate to and configure:
+
+| Field | Value |
+| ---------------------------------------- | -------------------------------------------------- |
+| **Custom object detector model path** | `frigate-fp16-yolov9-t` (or other yolov9 variants) |
+| **Object Detection Model Type** | `yolo-generic` |
+| **Object detection model input width** | `320` |
+| **Object detection model input height** | `320` |
+| **Model Input Tensor Shape** | `nhwc` |
+| **Label map for custom object detector** | `/labelmap/coco-80.txt` |
+
+
+
+
```yaml
model: # required
# name of model (will be automatically downloaded) or path to your own .rknn model file
@@ -1331,8 +1957,28 @@ model: # required
labelmap_path: /labelmap/coco-80.txt
```
+
+
+
#### YOLOx
+
+
+
+Navigate to and configure:
+
+| Field | Value |
+| ---------------------------------------- | ---------------------------------------------- |
+| **Custom object detector model path** | `rock-i8-yolox_nano` (or other yolox variants) |
+| **Object Detection Model Type** | `yolox` |
+| **Object detection model input width** | `416` |
+| **Object detection model input height** | `416` |
+| **Model Input Tensor Shape** | `nhwc` |
+| **Label map for custom object detector** | `/labelmap/coco-80.txt` |
+
+
+
+
```yaml
model: # required
# name of model (will be automatically downloaded) or path to your own .rknn model file
@@ -1350,6 +1996,9 @@ model: # required
labelmap_path: /labelmap/coco-80.txt
```
+
+
+
### Converting your own onnx model to rknn format
To convert a onnx model to the rknn format using the [rknn-toolkit2](https://github.com/airockchip/rknn-toolkit2/) you have to:
@@ -1405,7 +2054,15 @@ degirum_detector:
All supported hardware will automatically be found on your AI server host as long as relevant runtimes and drivers are properly installed on your machine. Refer to [DeGirum's docs site](https://docs.degirum.com/pysdk/runtimes-and-drivers) if you have any trouble.
-Once completed, changing the `config.yml` file is simple.
+Once completed, configure the detector as follows:
+
+
+
+
+Navigate to and select the **DeGirum** detector type. Set the location to your AI server (e.g., service name, container name, or `host:port`), the zoo to `degirum/public`, and provide your authentication token if needed.
+
+
+
```yaml
degirum_detector:
@@ -1415,6 +2072,9 @@ degirum_detector:
token: dg_example_token # For authentication with the AI Hub. Get this token through the "tokens" section on the main page of the [AI Hub](https://hub.degirum.com). This can be left blank if you're pulling a model from the public zoo and running inferences on your local hardware using @local or a local DeGirum AI Server
```
+
+
+
Setting up a model in the `config.yml` is similar to setting up an AI server.
You can set it to:
@@ -1437,7 +2097,15 @@ It is also possible to eliminate the need for an AI server and run the hardware
1. Ensuring that the frigate docker container has the runtime you want to use. So for instance, running `@local` for Hailo means making sure the container you're using has the Hailo runtime installed.
2. To double check the runtime is detected by the DeGirum detector, make sure the `degirum sys-info` command properly shows whatever runtimes you mean to install.
-3. Create a DeGirum detector in your `config.yml` file.
+3. Create a DeGirum detector in your configuration.
+
+
+
+
+Navigate to and select the **DeGirum** detector type. Set the location to `@local`, the zoo to `degirum/public`, and provide your authentication token if needed.
+
+
+
```yaml
degirum_detector:
@@ -1447,6 +2115,9 @@ degirum_detector:
token: dg_example_token # For authentication with the AI Hub. Get this token through the "tokens" section on the main page of the [AI Hub](https://hub.degirum.com). This can be left blank if you're pulling a model from the public zoo and running inferences on your local hardware using @local or a local DeGirum AI Server
```
+
+
+
Once `degirum_detector` is setup, you can choose a model through 'model' section in the `config.yml` file.
```yaml
@@ -1463,7 +2134,15 @@ If you do not possess whatever hardware you want to run, there's also the option
1. Sign up at [DeGirum's AI Hub](https://hub.degirum.com).
2. Get an access token.
-3. Create a DeGirum detector in your `config.yml` file.
+3. Create a DeGirum detector in your configuration.
+
+
+
+
+Navigate to and select the **DeGirum** detector type. Set the location to `@cloud`, the zoo to `degirum/public`, and provide your authentication token.
+
+
+
```yaml
degirum_detector:
@@ -1473,6 +2152,9 @@ degirum_detector:
token: dg_example_token # For authentication with the AI Hub. Get this token through the "tokens" section on the main page of the (AI Hub)[https://hub.degirum.com).
```
+
+
+
Once `degirum_detector` is setup, you can choose a model through 'model' section in the `config.yml` file.
```yaml
@@ -1504,6 +2186,24 @@ A yolov9 model is provided in the container at `/axmodels` and is used by this d
Use the model configuration shown below when using the axengine detector with the default axmodel:
+
+
+
+Navigate to and select the **AXEngine NPU** detector type. Then navigate to and configure:
+
+| Field | Value |
+| ---------------------------------------- | ----------------------- |
+| **Custom object detector model path** | `frigate-yolov9-tiny` |
+| **Object Detection Model Type** | `yolo-generic` |
+| **Object detection model input width** | `320` |
+| **Object detection model input height** | `320` |
+| **Model Input D Type** | `int` |
+| **Model Input Pixel Color Format** | `bgr` |
+| **Label map for custom object detector** | `/labelmap/coco-80.txt` |
+
+
+
+
```yaml
detectors:
axengine:
@@ -1519,6 +2219,9 @@ model:
labelmap_path: /labelmap/coco-80.txt
```
+
+
+
# Models
Some model types are not included in Frigate by default.
diff --git a/docs/docs/configuration/object_filters.md b/docs/docs/configuration/object_filters.md
index 3f36086c0..dfea51804 100644
--- a/docs/docs/configuration/object_filters.md
+++ b/docs/docs/configuration/object_filters.md
@@ -3,11 +3,15 @@ id: object_filters
title: Filters
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
There are several types of object filters that can be used to reduce false positive rates.
## Object Scores
-For object filters in your configuration, any single detection below `min_score` will be ignored as a false positive. `threshold` is based on the median of the history of scores (padded to 3 values) for a tracked object. Consider the following frames when `min_score` is set to 0.6 and threshold is set to 0.85:
+For object filters, any single detection below `min_score` will be ignored as a false positive. `threshold` is based on the median of the history of scores (padded to 3 values) for a tracked object. Consider the following frames when `min_score` is set to 0.6 and threshold is set to 0.85:
| Frame | Current Score | Score History | Computed Score | Detected Object |
| ----- | ------------- | --------------------------------- | -------------- | --------------- |
@@ -28,6 +32,46 @@ Any detection below `min_score` will be immediately thrown out and never tracked
`threshold` is used to determine that the object is a true positive. Once an object is detected with a score >= `threshold` object is considered a true positive. If `threshold` is too low then some higher scoring false positives may create an tracked object. If `threshold` is too high then true positive tracked objects may be missed due to the object never scoring high enough.
+## Configuring Object Scores
+
+
+
+
+Navigate to to set score filters globally.
+
+| Field | Description |
+| --------------------------------------- | ---------------------------------------------------------------- |
+| **Object filters > Person > Min Score** | Minimum score for a single detection to initiate tracking |
+| **Object filters > Person > Threshold** | Minimum computed (median) score to be considered a true positive |
+
+To override score filters for a specific camera, navigate to and select the camera.
+
+
+
+
+```yaml
+objects:
+ filters:
+ person:
+ min_score: 0.5
+ threshold: 0.7
+```
+
+To override at the camera level:
+
+```yaml
+cameras:
+ front_door:
+ objects:
+ filters:
+ person:
+ min_score: 0.5
+ threshold: 0.7
+```
+
+
+
+
## Object Shape
False positives can also be reduced by filtering a detection based on its shape.
@@ -46,6 +90,50 @@ Conceptually, a ratio of 1 is a square, 0.5 is a "tall skinny" box, and 2 is a "
:::
+### Configuring Shape Filters
+
+
+
+
+Navigate to to set shape filters globally.
+
+| Field | Description |
+| --------------------------------------- | ------------------------------------------------------------------------ |
+| **Object filters > Person > Min Area** | Minimum bounding box area in pixels (or decimal for percentage of frame) |
+| **Object filters > Person > Max Area** | Maximum bounding box area in pixels (or decimal for percentage of frame) |
+| **Object filters > Person > Min Ratio** | Minimum width/height ratio of the bounding box |
+| **Object filters > Person > Max Ratio** | Maximum width/height ratio of the bounding box |
+
+To override shape filters for a specific camera, navigate to and select the camera.
+
+
+
+
+```yaml
+objects:
+ filters:
+ person:
+ min_area: 5000
+ max_area: 100000
+ min_ratio: 0.5
+ max_ratio: 2.0
+```
+
+To override at the camera level:
+
+```yaml
+cameras:
+ front_door:
+ objects:
+ filters:
+ person:
+ min_area: 5000
+ max_area: 100000
+```
+
+
+
+
## Other Tools
### Zones
@@ -54,4 +142,4 @@ Conceptually, a ratio of 1 is a square, 0.5 is a "tall skinny" box, and 2 is a "
### Object Masks
-[Object Filter Masks](/configuration/masks) are a last resort but can be useful when false positives are in the relatively same place but can not be filtered due to their size or shape.
+[Object Filter Masks](/configuration/masks) are a last resort but can be useful when false positives are in roughly the same place but cannot be filtered due to their size or shape. Object filter masks can be configured in .
diff --git a/docs/docs/configuration/objects.md b/docs/docs/configuration/objects.md
index 796d31258..9925ae8fe 100644
--- a/docs/docs/configuration/objects.md
+++ b/docs/docs/configuration/objects.md
@@ -3,6 +3,9 @@ id: objects
title: Available Objects
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
import labels from "../../../labelmap.txt";
Frigate includes the object labels listed below from the Google Coral test data.
@@ -10,7 +13,7 @@ Frigate includes the object labels listed below from the Google Coral test data.
Please note:
- `car` is listed twice because `truck` has been renamed to `car` by default. These object types are frequently confused.
-- `person` is the only tracked object by default. See the [full configuration reference](reference.md) for an example of expanding the list of tracked objects.
+- `person` is the only tracked object by default. To track additional objects, configure them in the objects settings.
{labels.split("\n").map((label) => (
@@ -18,6 +21,135 @@ Please note:
))}
+## Configuring Tracked Objects
+
+By default, Frigate only tracks `person`. To track additional object types, add them to the tracked objects list.
+
+
+
+
+1. Navigate to .
+ - Add the desired object types to the **Objects to track** list (e.g., `person`, `car`, `dog`)
+
+To override the tracked objects list for a specific camera:
+
+1. Navigate to .
+ - Add the desired object types to the **Objects to track** list
+
+
+
+
+```yaml
+objects:
+ track:
+ - person
+ - car
+ - dog
+```
+
+To override at the camera level:
+
+```yaml
+cameras:
+ front_door:
+ objects:
+ track:
+ - person
+ - car
+```
+
+
+
+
+## Filtering Objects
+
+Object filters help reduce false positives by constraining the size, shape, and confidence thresholds for each object type. Filters can be configured globally or per camera.
+
+
+
+
+Navigate to .
+
+| Field | Description |
+| --------------------------------------- | ------------------------------------------------------------------------ |
+| **Object filters > Person > Min Area** | Minimum bounding box area in pixels (or decimal for percentage of frame) |
+| **Object filters > Person > Max Area** | Maximum bounding box area in pixels (or decimal for percentage of frame) |
+| **Object filters > Person > Min Ratio** | Minimum width/height ratio of the bounding box |
+| **Object filters > Person > Max Ratio** | Maximum width/height ratio of the bounding box |
+| **Object filters > Person > Min Score** | Minimum score for the object to initiate tracking |
+| **Object filters > Person > Threshold** | Minimum computed score to be considered a true positive |
+
+To override filters for a specific camera, navigate to .
+
+
+
+
+```yaml
+objects:
+ filters:
+ person:
+ min_area: 5000
+ max_area: 100000
+ min_ratio: 0.5
+ max_ratio: 2.0
+ min_score: 0.5
+ threshold: 0.7
+```
+
+To override at the camera level:
+
+```yaml
+cameras:
+ front_door:
+ objects:
+ filters:
+ person:
+ min_area: 5000
+ threshold: 0.7
+```
+
+
+
+
+## Object Filter Masks
+
+Object filter masks prevent specific object types from being detected in certain areas of the camera frame. These masks check the bottom center of the bounding box. A global mask applies to all object types, while per-object masks apply only to the specified type.
+
+
+
+
+Navigate to and select a camera. Use the mask editor to draw object filter masks directly on the camera feed. Global object masks and per-object masks can both be configured from this view.
+
+
+
+
+```yaml
+objects:
+ # Global mask applied to all object types
+ mask:
+ mask1:
+ friendly_name: "Object filter mask area"
+ enabled: true
+ coordinates: "0.000,0.000,0.781,0.000,0.781,0.278,0.000,0.278"
+ # Per-object mask
+ filters:
+ person:
+ mask:
+ mask1:
+ friendly_name: "Person filter mask"
+ enabled: true
+ coordinates: "0.000,0.000,0.781,0.000,0.781,0.278,0.000,0.278"
+```
+
+
+
+
+:::note
+
+The global mask is combined with any object-specific mask. Both are checked based on the bottom center of the bounding box.
+
+:::
+
## Custom Models
Models for both CPU and EdgeTPU (Coral) are bundled in the image. You can use your own models with volume mounts:
diff --git a/docs/docs/configuration/profiles.md b/docs/docs/configuration/profiles.md
index ef0778e18..c37fca7db 100644
--- a/docs/docs/configuration/profiles.md
+++ b/docs/docs/configuration/profiles.md
@@ -3,6 +3,10 @@ id: profiles
title: Profiles
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
Profiles allow you to define named sets of camera configuration overrides that can be activated and deactivated at runtime without restarting Frigate. This is useful for scenarios like switching between "Home" and "Away" modes, daytime and nighttime configurations, or any situation where you want to quickly change how multiple cameras behave.
## How Profiles Work
@@ -24,16 +28,18 @@ Profile changes are applied in-memory and take effect immediately — no restart
The easiest way to define profiles is to use the Frigate UI. Profiles can also be configured manually in your configuration file.
-### Using the UI
+### Creating and Managing Profiles
-To create and manage profiles from the UI, open **Settings**. From there you can:
+
+
-1. **Create a profile** — Navigate to **Profiles**. Click the **Add Profile** button, enter a name (and optionally a profile ID).
-2. **Configure overrides** — Navigate to a camera configuration section (e.g. Motion detection, Record, Notifications). In the top right, two buttons will appear - choose a camera and a profile from the profile selector to edit overrides for that camera and section. Only the fields you change will be stored as overrides — fields that require a restart are hidden since profiles are applied at runtime. You can click the **Remove Profile Override** button
-3. **Activate a profile** — Use the **Profiles** option in Frigate's main menu to choose a profile. Alternatively, in Settings, navigate to **Profiles**, then choose a profile in the Active Profile dropdown to activate it. The active profile is also shown in the status bar at the bottom of the screen on desktop browsers.
-4. **Delete a profile** — Navigate to **Profiles**, then click the trash icon for a profile. This removes the profile definition and all camera overrides associated with it.
+1. **Create a profile** — Navigate to . Click the **Add Profile** button, enter a name (and optionally a profile ID).
+2. **Configure overrides** — Navigate to a camera configuration section (e.g. Motion detection, Record, Notifications). In the top right, two buttons will appear - choose a camera and a profile from the profile selector to edit overrides for that camera and section. Only the fields you change will be stored as overrides — fields that require a restart are hidden since profiles are applied at runtime. You can click the **Remove Profile Override** button to clear overrides.
+3. **Activate a profile** — Use the **Profiles** option in Frigate's main menu to choose a profile. Alternatively, in Settings, navigate to , then choose a profile in the Active Profile dropdown to activate it. The active profile is also shown in the status bar at the bottom of the screen on desktop browsers.
+4. **Delete a profile** — Navigate to , then click the trash icon for a profile. This removes the profile definition and all camera overrides associated with it.
-### Defining Profiles in YAML
+
+
First, define your profiles at the top level of your Frigate config. Every profile name referenced by a camera must be defined here.
@@ -47,8 +53,6 @@ profiles:
friendly_name: Night Mode
```
-### Camera Profile Overrides
-
Under each camera, add a `profiles` section with overrides for each profile. You only need to include the settings you want to change.
```yaml
@@ -91,6 +95,9 @@ cameras:
- person
```
+
+
+
### Supported Override Sections
The following camera configuration sections can be overridden in a profile:
@@ -125,6 +132,17 @@ Profiles can be activated and deactivated from the Frigate UI. Open the Settings
A common use case is having different detection and notification settings based on whether you are home or away.
+
+
+
+1. Navigate to and create two profiles: **Home** and **Away**.
+2. For the **front_door** camera, configure the **Away** profile to enable notifications and set alert labels to `person` and `car`. Configure the **Home** profile to disable notifications.
+3. For the **indoor_cam** camera, configure the **Away** profile to enable the camera, detection, and recording. Configure the **Home** profile to disable the camera entirely for privacy.
+4. Activate the desired profile from or from the **Profiles** option in Frigate's main menu.
+
+
+
+
```yaml
profiles:
home:
@@ -181,6 +199,9 @@ cameras:
enabled: false
```
+
+
+
In this example:
- **Away profile**: The front door camera enables notifications and tracks specific alert labels. The indoor camera is fully enabled with detection and recording.
diff --git a/docs/docs/configuration/record.md b/docs/docs/configuration/record.md
index afd26c641..d98f51491 100644
--- a/docs/docs/configuration/record.md
+++ b/docs/docs/configuration/record.md
@@ -3,7 +3,11 @@ id: record
title: Recording
---
-Recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM-DD/HH//MM.SS.mp4` in **UTC time**. These recordings are written directly from your camera stream without re-encoding. Each camera supports a configurable retention policy in the config. Frigate chooses the largest matching retention value between the recording retention and the tracked object retention when determining if a recording should be removed.
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
+Recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM-DD/HH/<camera_name>/MM.SS.mp4` in **UTC time**. These recordings are written directly from your camera stream without re-encoding. Each camera supports a configurable retention policy. Frigate chooses the largest matching retention value between the recording retention and the tracked object retention when determining if a recording should be removed.
New recording segments are written from the camera stream to cache, they are only moved to disk if they match the setup recording retention policy.
@@ -13,7 +17,23 @@ H265 recordings can be viewed in Chrome 108+, Edge and Safari only. All other br
### Most conservative: Ensure all video is saved
-For users deploying Frigate in environments where it is important to have contiguous video stored even if there was no detectable motion, the following config will store all video for 3 days. After 3 days, only video containing motion will be saved for 7 days. After 7 days, only video containing motion and overlapping with alerts or detections will be retained until 30 days have passed.
+For users deploying Frigate in environments where it is important to have contiguous video stored even if there was no detectable motion, the following configuration will store all video for 3 days. After 3 days, only video containing motion will be saved for 7 days. After 7 days, only video containing motion and overlapping with alerts or detections will be retained until 30 days have passed.
+
+
+
+
+Navigate to .
+
+- Set **Enable recording** to on
+- Set **Continuous retention > Retention days** to `3`
+- Set **Motion retention > Retention days** to `7`
+- Set **Alert retention > Event retention > Retention days** to `30`
+- Set **Alert retention > Event retention > Retention mode** to `all`
+- Set **Detection retention > Event retention > Retention days** to `30`
+- Set **Detection retention > Event retention > Retention mode** to `all`
+
+
+
```yaml
record:
@@ -32,9 +52,27 @@ record:
mode: all
```
+
+
+
### Reduced storage: Only saving video when motion is detected
-In order to reduce storage requirements, you can adjust your config to only retain video where motion / activity was detected.
+To reduce storage requirements, configure recording to only retain video where motion or activity was detected.
+
+
+
+
+Navigate to .
+
+- Set **Enable recording** to on
+- Set **Motion retention > Retention days** to `3`
+- Set **Alert retention > Event retention > Retention days** to `30`
+- Set **Alert retention > Event retention > Retention mode** to `motion`
+- Set **Detection retention > Event retention > Retention days** to `30`
+- Set **Detection retention > Event retention > Retention mode** to `motion`
+
+
+
```yaml
record:
@@ -51,9 +89,25 @@ record:
mode: motion
```
+
+
+
### Minimum: Alerts only
-If you only want to retain video that occurs during activity caused by tracked object(s), this config will discard video unless an alert is ongoing.
+If you only want to retain video that occurs during activity caused by tracked object(s), this configuration will discard video unless an alert is ongoing.
+
+
+
+
+Navigate to .
+
+- Set **Enable recording** to on
+- Set **Continuous retention > Retention days** to `0`
+- Set **Alert retention > Event retention > Retention days** to `30`
+- Set **Alert retention > Event retention > Retention mode** to `motion`
+
+
+
```yaml
record:
@@ -66,6 +120,9 @@ record:
mode: motion
```
+
+
+
## Will Frigate delete old recordings if my storage runs out?
As of Frigate 0.12 if there is less than an hour left of storage, the oldest 2 hours of recordings will be deleted.
@@ -82,7 +139,21 @@ Retention configs support decimals meaning they can be configured to retain `0.5
### Continuous and Motion Recording
-The number of days to retain continuous and motion recordings can be set via the following config where X is a number, by default continuous recording is disabled.
+The number of days to retain continuous and motion recordings can be configured. By default, continuous recording is disabled.
+
+
+
+
+Navigate to .
+
+| Field | Description |
+| ----------------------------------------- | -------------------------------------------- |
+| **Enable recording** | Enable or disable recording for all cameras |
+| **Continuous retention > Retention days** | Number of days to keep continuous recordings |
+| **Motion retention > Retention days** | Number of days to keep motion recordings |
+
+
+
```yaml
record:
@@ -93,11 +164,28 @@ record:
days: 2 # <- number of days to keep motion recordings
```
-Continuous recording supports different retention modes [which are described below](#what-do-the-different-retain-modes-mean)
+
+
+
+Continuous recording supports different retention modes [which are described below](#configuring-recording-retention).
### Object Recording
-The number of days to record review items can be specified for review items classified as alerts as well as tracked objects.
+The number of days to retain recordings for review items can be specified for items classified as alerts as well as tracked objects.
+
+
+
+
+Navigate to .
+
+| Field | Description |
+| ---------------------------------------------------------- | ------------------------------------------- |
+| **Enable recording** | Enable or disable recording for all cameras |
+| **Alert retention > Event retention > Retention days** | Number of days to keep alert recordings |
+| **Detection retention > Event retention > Retention days** | Number of days to keep detection recordings |
+
+
+
```yaml
record:
@@ -110,9 +198,10 @@ record:
days: 10 # <- number of days to keep detections recordings
```
-This configuration will retain recording segments that overlap with alerts and detections for 10 days. Because multiple tracked objects can reference the same recording segments, this avoids storing duplicate footage for overlapping tracked objects and reduces overall storage needs.
+
+
-**WARNING**: Recordings still must be enabled in the config. If a camera has recordings disabled in the config, enabling via the methods listed above will have no effect.
+This configuration will retain recording segments that overlap with alerts and detections for 10 days. Because multiple tracked objects can reference the same recording segments, this avoids storing duplicate footage for overlapping tracked objects and reduces overall storage needs.
## Can I have "continuous" recordings, but only at certain times?
@@ -128,7 +217,7 @@ Time lapse exporting is available only via the [HTTP API](../integrations/api/ex
When exporting a time-lapse the default speed-up is 25x with 30 FPS. This means that every 25 seconds of (real-time) recording is condensed into 1 second of time-lapse video (always without audio) with a smoothness of 30 FPS.
-To configure the speed-up factor, the frame rate and further custom settings, the configuration parameter `timelapse_args` can be used. The below configuration example would change the time-lapse speed to 60x (for fitting 1 hour of recording into 1 minute of time-lapse) with 25 FPS:
+To configure the speed-up factor, the frame rate and further custom settings, use the `timelapse_args` parameter. The below configuration example would change the time-lapse speed to 60x (for fitting 1 hour of recording into 1 minute of time-lapse) with 25 FPS:
```yaml {3-4}
record:
@@ -139,7 +228,7 @@ record:
:::tip
-When using `hwaccel_args`, hardware encoding is used for timelapse generation. This setting can be overridden for a specific camera (e.g., when camera resolution exceeds hardware encoder limits); set `cameras..record.export.hwaccel_args` with the appropriate settings. Using an unrecognized value or empty string will fall back to software encoding (libx264).
+When using `hwaccel_args`, hardware encoding is used for timelapse generation. This setting can be overridden for a specific camera (e.g., when camera resolution exceeds hardware encoder limits); set the camera-level export `hwaccel_args` with the appropriate settings. Using an unrecognized value or empty string will fall back to software encoding (libx264).
:::
diff --git a/docs/docs/configuration/restream.md b/docs/docs/configuration/restream.md
index ac3bcc503..af4d635c6 100644
--- a/docs/docs/configuration/restream.md
+++ b/docs/docs/configuration/restream.md
@@ -3,6 +3,10 @@ id: restream
title: Restream
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
## RTSP
Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://:8554/`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate.
@@ -52,6 +56,16 @@ Some cameras only support one active connection or you may just want to have a s
One connection is made to the camera. One for the restream, `detect` and `record` connect to the restream.
+Configure the go2rtc stream and point the camera inputs at the local restream.
+
+
+
+
+Navigate to and add stream entries for each camera. Then navigate to for each camera and set the input paths to use the local restream URL (`rtsp://127.0.0.1:8554/<camera_name>`).
+
+
+
+
```yaml
go2rtc:
streams:
@@ -87,10 +101,21 @@ cameras:
- audio # <- only necessary if audio detection is enabled
```
+
+
+
### With Sub Stream
Two connections are made to the camera. One for the sub stream, one for the restream, `record` connects to the restream.
+
+
+
+Navigate to and add stream entries for each camera and its sub stream. Then navigate to for each camera and configure separate inputs for the main and sub streams using the local restream URLs.
+
+
+
+
```yaml
go2rtc:
streams:
@@ -138,6 +163,9 @@ cameras:
- detect
```
+
+
+
## Handling Complex Passwords
go2rtc expects URL-encoded passwords in the config, [urlencoder.org](https://urlencoder.org) can be used for this purpose.
diff --git a/docs/docs/configuration/review.md b/docs/docs/configuration/review.md
index d8769749b..4f39611db 100644
--- a/docs/docs/configuration/review.md
+++ b/docs/docs/configuration/review.md
@@ -3,6 +3,10 @@ id: review
title: Review
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
The Review page of the Frigate UI is for quickly reviewing historical footage of interest from your cameras. _Review items_ are indicated on a vertical timeline and displayed as a grid of previews - bandwidth-optimized, low frame rate, low resolution videos. Hovering over or swiping a preview plays the video and marks it as reviewed. If more in-depth analysis is required, the preview can be clicked/tapped and the full frame rate, full resolution recording is displayed.
Review items are filterable by date, object type, and camera.
@@ -23,7 +27,7 @@ Not every segment of video captured by Frigate may be of the same level of inter
:::note
-Alerts and detections categorize the tracked objects in review items, but Frigate must first detect those objects with your configured object detector (Coral, OpenVINO, etc). By default, the object tracker only detects `person`. Setting `labels` for `alerts` and `detections` does not automatically enable detection of new objects. To detect more than `person`, you should add the following to your config:
+Alerts and detections categorize the tracked objects in review items, but Frigate must first detect those objects with your configured object detector (Coral, OpenVINO, etc). By default, the object tracker only detects `person`. Setting `labels` for `alerts` and `detections` does not automatically enable detection of new objects. To detect more than `person`, you should add more labels via or and select your camera. Alternatively, add the following to your config:
```yaml
objects:
@@ -38,7 +42,17 @@ See the [objects documentation](objects.md) for the list of objects that Frigate
## Restricting alerts to specific labels
-By default a review item will only be marked as an alert if a person or car is detected. This can be configured to include any object or audio label using the following config:
+By default a review item will only be marked as an alert if a person or car is detected. Configure the alert labels to include any object or audio label.
+
+
+
+
+Navigate to or and select your camera.
+
+Expand **Alerts config** and configure which labels and zones should generate alerts.
+
+
+
```yaml
# can be overridden at the camera level
@@ -52,10 +66,23 @@ review:
- speech
```
+
+
+
## Restricting detections to specific labels
By default all detections that do not qualify as an alert qualify as a detection. However, detections can further be filtered to only include certain labels or certain zones.
+
+
+
+Navigate to or and select your camera.
+
+Expand **Detections config** and configure which labels should qualify as detections.
+
+
+
+
```yaml
# can be overridden at the camera level
review:
@@ -65,11 +92,23 @@ review:
- dog
```
+
+
+
## Excluding a camera from alerts or detections
-To exclude a specific camera from alerts or detections, simply provide an empty list to the alerts or detections field _at the camera level_.
+To exclude a specific camera from alerts or detections, provide an empty list to the alerts or detections labels field at the camera level.
-For example, to exclude objects on the camera _gatecamera_ from any detections, include this in your config:
+For example, to exclude objects on the camera _gatecamera_ from any detections:
+
+
+
+
+1. Navigate to and select the **gatecamera** camera.
+ - Expand **Detections config** and turn off all of the object label switches.
+
+
+
```yaml {3-5}
cameras:
@@ -79,6 +118,9 @@ cameras:
labels: []
```
+
+
+
## Restricting review items to specific zones
By default a review item will be created if any `review -> alerts -> labels` and `review -> detections -> labels` are detected anywhere in the camera frame. You will likely want to configure review items to only be created when the object enters an area of interest, [see the zone docs for more information](./zones.md#restricting-alerts-and-detections-to-specific-zones)
diff --git a/docs/docs/configuration/semantic_search.md b/docs/docs/configuration/semantic_search.md
index 4c646f79a..49e0db88a 100644
--- a/docs/docs/configuration/semantic_search.md
+++ b/docs/docs/configuration/semantic_search.md
@@ -3,6 +3,10 @@ id: semantic_search
title: Semantic Search
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
Semantic Search in Frigate allows you to find tracked objects within your review items using either the image itself, a user-defined text description, or an automatically generated one. This feature works by creating _embeddings_ — numerical vector representations — for both the images and text descriptions of your tracked objects. By comparing these embeddings, Frigate assesses their similarities to deliver relevant search results.
Frigate uses models from [Jina AI](https://huggingface.co/jinaai) to create and save embeddings to Frigate's database. All of this runs locally.
@@ -19,7 +23,17 @@ For best performance, 16GB or more of RAM and a dedicated GPU are recommended.
## Configuration
-Semantic Search is disabled by default, and must be enabled in your config file or in the UI's Enrichments Settings page before it can be used. Semantic Search is a global configuration setting.
+Semantic Search is disabled by default and must be enabled before it can be used. Semantic Search is a global configuration setting.
+
+
+
+
+Navigate to .
+
+- Set **Enable semantic search** to on
+
+
+
```yaml
semantic_search:
@@ -27,6 +41,9 @@ semantic_search:
reindex: False
```
+
+
+
:::tip
The embeddings database can be re-indexed from the existing tracked objects in your database by pressing the "Reindex" button in the Enrichments Settings in the UI or by adding `reindex: True` to your `semantic_search` configuration and restarting Frigate. Depending on the number of tracked objects you have, it can take a long while to complete and may max out your CPU while indexing.
@@ -41,7 +58,20 @@ The [V1 model from Jina](https://huggingface.co/jinaai/jina-clip-v1) has a visio
The V1 text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Explore page when clicking on thumbnail of a tracked object. See [the object description docs](/configuration/genai/objects.md) for more information on how to automatically generate tracked object descriptions.
-Differently weighted versions of the Jina models are available and can be selected by setting the `model_size` config option as `small` or `large`:
+Differently weighted versions of the Jina models are available and can be selected by setting the model size.
+
+
+
+
+Navigate to .
+
+| Field | Description |
+| ------------------------------------------------ | -------------------------------------------------------------------------- |
+| **Semantic search model or GenAI provider name** | Select `jinav1` to use the Jina AI CLIP V1 model |
+| **Model size** | `small` (quantized, CPU-friendly) or `large` (full model, GPU-accelerated) |
+
+
+
```yaml
semantic_search:
@@ -50,6 +80,9 @@ semantic_search:
model_size: small
```
+
+
+
- Configuring the `large` model employs the full Jina model and will automatically run on the GPU if applicable.
- Configuring the `small` model employs a quantized version of the Jina model that uses less RAM and runs on CPU with a very negligible difference in embedding quality.
@@ -59,7 +92,20 @@ Frigate also supports the [V2 model from Jina](https://huggingface.co/jinaai/jin
V2 offers only a 3% performance improvement over V1 in both text-image and text-text retrieval tasks, an upgrade that is unlikely to yield noticeable real-world benefits. Additionally, V2 has _significantly_ higher RAM and GPU requirements, leading to increased inference time and memory usage. If you plan to use V2, ensure your system has ample RAM and a discrete GPU. CPU inference (with the `small` model) using V2 is not recommended.
-To use the V2 model, update the `model` parameter in your config:
+To use the V2 model, set the model to `jinav2`.
+
+
+
+
+Navigate to .
+
+| Field | Description |
+| ------------------------------------------------ | ----------------------------------------------------- |
+| **Semantic search model or GenAI provider name** | Select `jinav2` to use the Jina AI CLIP V2 model |
+| **Model size** | `large` is recommended for V2 (requires discrete GPU) |
+
+
+
```yaml
semantic_search:
@@ -68,6 +114,9 @@ semantic_search:
model_size: large
```
+
+
+
For most users, especially native English speakers, the V1 model remains the recommended choice.
:::note
@@ -82,9 +131,23 @@ Frigate can use a GenAI provider for semantic search embeddings when that provid
To use llama.cpp for semantic search:
-1. Configure a GenAI provider in your config with `embeddings` in its `roles`.
-2. Set `semantic_search.model` to the GenAI config key (e.g. `default`).
-3. Start the llama.cpp server with `--embeddings` and `--mmproj` for image support:
+1. Configure a GenAI provider with `embeddings` in its `roles`.
+2. Set the semantic search model to the GenAI config key (e.g. `default`).
+3. Start the llama.cpp server with `--embeddings` and `--mmproj` for image support.
+
+
+
+
+Navigate to .
+
+| Field | Description |
+| ------------------------------------------------ | ---------------------------------------------------------------------------------------------- |
+| **Semantic search model or GenAI provider name** | Set to the GenAI config key (e.g. `default`) to use a configured GenAI provider for embeddings |
+
+The GenAI provider must also be configured with the `embeddings` role under .
+
+
+
```yaml
genai:
@@ -102,6 +165,9 @@ semantic_search:
model: default
```
+
+
+
The llama.cpp server must be started with `--embeddings` for the embeddings API, and a multi-modal embeddings model. See the [llama.cpp server documentation](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md) for details.
:::note
@@ -114,6 +180,19 @@ Switching between Jina models and a GenAI provider requires reindexing. Embeddin
The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU hardware, when available. This depends on the Docker build that is used. You can also target a specific device in a multi-GPU installation.
+
+
+
+Navigate to .
+
+| Field | Description |
+| -------------- | ---------------------------------------------------------------------- |
+| **Model size** | Set to `large` to enable GPU acceleration |
+| **Device** | (Optional) Specify a GPU device index in a multi-GPU system (e.g. `0`) |
+
+
+
+
```yaml
semantic_search:
enabled: True
@@ -122,6 +201,9 @@ semantic_search:
device: 0
```
+
+
+
:::info
If the correct build is used for your GPU / NPU and the `large` model is configured, then the GPU will be detected and used automatically.
@@ -153,16 +235,15 @@ Semantic Search must be enabled to use Triggers.
### Configuration
-Triggers are defined within the `semantic_search` configuration for each camera in your Frigate configuration file or through the UI. Each trigger consists of a `friendly_name`, a `type` (either `thumbnail` or `description`), a `data` field (the reference image event ID or text), a `threshold` for similarity matching, and a list of `actions` to perform when the trigger fires - `notification`, `sub_label`, and `attribute`.
+Triggers are defined within the `semantic_search` configuration for each camera. Each trigger consists of a `friendly_name`, a `type` (either `thumbnail` or `description`), a `data` field (the reference image event ID or text), a `threshold` for similarity matching, and a list of `actions` to perform when the trigger fires - `notification`, `sub_label`, and `attribute`.
Triggers are best configured through the Frigate UI.
#### Managing Triggers in the UI
-1. Navigate to the **Settings** page and select the **Triggers** tab.
-2. Choose a camera from the dropdown menu to view or manage its triggers.
-3. Click **Add Trigger** to create a new trigger or use the pencil icon to edit an existing one.
-4. In the **Create Trigger** wizard:
+1. Navigate to and select a camera from the dropdown menu.
+2. Click **Add Trigger** to create a new trigger or use the pencil icon to edit an existing one.
+3. In the **Create Trigger** wizard:
- Enter a **Name** for the trigger (e.g., "Red Car Alert").
- Enter a descriptive **Friendly Name** for the trigger (e.g., "Red car on the driveway camera").
- Select the **Type** (`Thumbnail` or `Description`).
@@ -173,14 +254,14 @@ Triggers are best configured through the Frigate UI.
If native webpush notifications are enabled, check the `Send Notification` box to send a notification.
Check the `Add Sub Label` box to add the trigger's friendly name as a sub label to any triggering tracked objects.
Check the `Add Attribute` box to add the trigger's internal ID (e.g., "red_car_alert") to a data attribute on the tracked object that can be processed via the API or MQTT.
-5. Save the trigger to update the configuration and store the embedding in the database.
+4. Save the trigger to update the configuration and store the embedding in the database.
When a trigger fires, the UI highlights the trigger with a blue dot for 3 seconds for easy identification. Additionally, the UI will show the last date/time and tracked object ID that activated your trigger. The last triggered timestamp is not saved to the database or persisted through restarts of Frigate.
### Usage and Best Practices
1. **Thumbnail Triggers**: Select a representative image (event ID) from the Explore page that closely matches the object you want to detect. For best results, choose images where the object is prominent and fills most of the frame.
-2. **Description Triggers**: Write concise, specific text descriptions (e.g., "Person in a red jacket") that align with the tracked object’s description. Avoid vague terms to improve matching accuracy.
+2. **Description Triggers**: Write concise, specific text descriptions (e.g., "Person in a red jacket") that align with the tracked object's description. Avoid vague terms to improve matching accuracy.
3. **Threshold Tuning**: Adjust the threshold to balance sensitivity and specificity. A higher threshold (e.g., 0.8) requires closer matches, reducing false positives but potentially missing similar objects. A lower threshold (e.g., 0.6) is more inclusive but may trigger more often.
4. **Using Explore**: Use the context menu or right-click / long-press on a tracked object in the Grid View in Explore to quickly add a trigger based on the tracked object's thumbnail.
5. **Editing triggers**: For the best experience, triggers should be edited via the UI. However, Frigate will ensure triggers edited in the config will be synced with triggers created and edited in the UI.
@@ -195,6 +276,6 @@ When a trigger fires, the UI highlights the trigger with a blue dot for 3 second
#### Why can't I create a trigger on thumbnails for some text, like "person with a blue shirt" and have it trigger when a person with a blue shirt is detected?
-TL;DR: Text-to-image triggers aren’t supported because CLIP can confuse similar images and give inconsistent scores, making automation unreliable. The same word–image pair can give different scores and the score ranges can be too close together to set a clear cutoff.
+TL;DR: Text-to-image triggers aren't supported because CLIP can confuse similar images and give inconsistent scores, making automation unreliable. The same word-image pair can give different scores and the score ranges can be too close together to set a clear cutoff.
-Text-to-image triggers are not supported due to fundamental limitations of CLIP-based similarity search. While CLIP works well for exploratory, manual queries, it is unreliable for automated triggers based on a threshold. Issues include embedding drift (the same text–image pair can yield different cosine distances over time), lack of true semantic grounding (visually similar but incorrect matches), and unstable thresholding (distance distributions are dataset-dependent and often too tightly clustered to separate relevant from irrelevant results). Instead, it is recommended to set up a workflow with thumbnail triggers: first use text search to manually select 3–5 representative reference tracked objects, then configure thumbnail triggers based on that visual similarity. This provides robust automation without the semantic ambiguity of text to image matching.
+Text-to-image triggers are not supported due to fundamental limitations of CLIP-based similarity search. While CLIP works well for exploratory, manual queries, it is unreliable for automated triggers based on a threshold. Issues include embedding drift (the same text-image pair can yield different cosine distances over time), lack of true semantic grounding (visually similar but incorrect matches), and unstable thresholding (distance distributions are dataset-dependent and often too tightly clustered to separate relevant from irrelevant results). Instead, it is recommended to set up a workflow with thumbnail triggers: first use text search to manually select 3-5 representative reference tracked objects, then configure thumbnail triggers based on that visual similarity. This provides robust automation without the semantic ambiguity of text to image matching.
diff --git a/docs/docs/configuration/snapshots.md b/docs/docs/configuration/snapshots.md
index 2f339b210..675e68a9c 100644
--- a/docs/docs/configuration/snapshots.md
+++ b/docs/docs/configuration/snapshots.md
@@ -3,19 +3,134 @@ id: snapshots
title: Snapshots
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
Frigate can save a snapshot image to `/media/frigate/clips` for each object that is detected named as `--clean.webp`. They are also accessible [via the api](../integrations/api/event-snapshot-events-event-id-snapshot-jpg-get.api.mdx)
Snapshots are accessible in the UI in the Explore pane. This allows for quick submission to the Frigate+ service.
To only save snapshots for objects that enter a specific zone, [see the zone docs](./zones.md#restricting-snapshots-to-specific-zones)
-Snapshots sent via MQTT are configured in the [config file](/configuration) under `cameras -> your_camera -> mqtt`
+Snapshots sent via MQTT are configured separately under the camera MQTT settings, not here.
+
+## Enabling Snapshots
+
+Enable snapshot saving and configure the default settings that apply to all cameras.
+
+
+
+
+Navigate to .
+
+- Set **Enable snapshots** to on
+
+
+
+
+```yaml
+snapshots:
+ enabled: True
+```
+
+
+
+
+To override snapshot settings for a specific camera:
+
+
+
+
+Navigate to and select your camera.
+
+- Set **Enable snapshots** to on
+
+
+
+
+```yaml
+cameras:
+ front_door:
+ snapshots:
+ enabled: True
+```
+
+
+
+
+## Snapshot Options
+
+Configure how snapshots are rendered and stored. These settings control the defaults applied when snapshots are requested via the API.
+
+
+
+
+Navigate to .
+
+| Field | Description |
+| ------------------------ | ------------------------------------------------------------------------------ |
+| **Enable snapshots** | Enable or disable saving snapshots for tracked objects |
+| **Timestamp overlay** | Overlay a timestamp on snapshots from API |
+| **Bounding box overlay** | Draw bounding boxes for tracked objects on snapshots from API |
+| **Crop snapshot** | Crop snapshots from API to the detected object's bounding box |
+| **Snapshot height** | Height in pixels to resize snapshots to; leave empty to preserve original size |
+| **Snapshot quality** | Encode quality for saved snapshots (0-100) |
+| **Required zones** | Zones an object must enter for a snapshot to be saved |
+
+
+
+
+```yaml
+snapshots:
+ enabled: True
+ timestamp: False
+ bounding_box: True
+ crop: False
+ height: 175
+ required_zones: []
+ quality: 60
+```
+
+
+
+
+## Snapshot Retention
+
+Configure how long snapshots are retained on disk. Per-object retention overrides allow different retention periods for specific object types.
+
+
+
+
+Navigate to .
+
+| Field | Description |
+| -------------------------------------------------- | ----------------------------------------------------------------------------------- |
+| **Snapshot retention > Default retention** | Number of days to retain snapshots (default: 10) |
+| **Snapshot retention > Retention mode** | Retention mode: `all`, `motion`, or `active_objects` |
+| **Snapshot retention > Object retention > Person** | Per-object overrides for retention days (e.g., keep `person` snapshots for 15 days) |
+
+
+
+
+```yaml
+snapshots:
+ enabled: True
+ retain:
+ default: 10
+ mode: motion
+ objects:
+ person: 15
+```
+
+
+
## Frame Selection
Frigate does not save every frame. It picks a single "best" frame for each tracked object based on detection confidence, object size, and the presence of key attributes like faces or license plates. Frames where the object touches the edge of the frame are deprioritized. That best frame is written to disk once tracking ends.
-MQTT snapshots are published more frequently — each time a better thumbnail frame is found during tracking, or when the current best image is older than `best_image_timeout` (default: 60s). These use their own annotation settings configured under `cameras -> your_camera -> mqtt`.
+MQTT snapshots are published more frequently — each time a better thumbnail frame is found during tracking, or when the current best image is older than `best_image_timeout` (default: 60s). These use their own annotation settings configured under the camera MQTT settings.
## Rendering
@@ -28,4 +143,4 @@ Frigate stores a single clean snapshot on disk:
| `/api/events//snapshot-clean.webp` | Returns the same stored snapshot without annotations |
| [Frigate+](/plus/first_model) submission | Uses the same stored clean snapshot |
-MQTT snapshots are configured separately under `cameras -> your_camera -> mqtt` and are unrelated to the stored event snapshot.
+MQTT snapshots are configured separately under the camera MQTT settings and are unrelated to the stored event snapshot.
diff --git a/docs/docs/configuration/stationary_objects.md b/docs/docs/configuration/stationary_objects.md
index 341d1ea57..63d03374c 100644
--- a/docs/docs/configuration/stationary_objects.md
+++ b/docs/docs/configuration/stationary_objects.md
@@ -1,14 +1,29 @@
# Stationary Objects
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
An object is considered stationary when it is being tracked and has been in a very similar position for a certain number of frames. This number is defined in the configuration under `detect -> stationary -> threshold`, and is 10x the frame rate (or 10 seconds) by default. Once an object is considered stationary, it will remain stationary until motion occurs within the object at which point object detection will start running again. If the object changes location, it will be considered active.
## Why does it matter if an object is stationary?
-Once an object becomes stationary, object detection will not be continually run on that object. This serves to reduce resource usage and redundant detections when there has been no motion near the tracked object. This also means that Frigate is contextually aware, and can for example [filter out recording segments](record.md#what-do-the-different-retain-modes-mean) to only when the object is considered active. Motion alone does not determine if an object is "active" for active_objects segment retention. Lighting changes for a parked car won't make an object active.
+Once an object becomes stationary, object detection will not be continually run on that object. This serves to reduce resource usage and redundant detections when there has been no motion near the tracked object. This also means that Frigate is contextually aware, and can for example [filter out recording segments](record.md#configuring-recording-retention) to include only periods when the object is considered active. Motion alone does not determine if an object is "active" for active_objects segment retention. Lighting changes for a parked car won't make an object active.
## Tuning stationary behavior
-The default config is:
+Configure how Frigate handles stationary objects.
+
+
+
+
+Navigate to .
+
+- Set **Stationary objects config > Stationary interval** to the frequency for running detection on stationary objects (default: 50). Once stationary, detection runs every nth frame to verify the object is still present. There is no way to disable stationary object tracking with this value.
+- Set **Stationary objects config > Stationary threshold** to the number of frames an object must remain relatively still before it is considered stationary (default: 50)
+
+
+
```yaml
detect:
@@ -17,11 +32,8 @@ detect:
threshold: 50
```
-`interval` is defined as the frequency for running detection on stationary objects. This means that by default once an object is considered stationary, detection will not be run on it until motion is detected or until the interval (every 50th frame by default). With `interval >= 1`, every nth frames detection will be run to make sure the object is still there.
-
-NOTE: There is no way to disable stationary object tracking with this value.
-
-`threshold` is the number of frames an object needs to remain relatively still before it is considered stationary.
+
+
## Why does Frigate track stationary objects?
diff --git a/docs/docs/configuration/tls.md b/docs/docs/configuration/tls.md
index b4bfc1842..9757a7816 100644
--- a/docs/docs/configuration/tls.md
+++ b/docs/docs/configuration/tls.md
@@ -3,19 +3,36 @@ id: tls
title: TLS
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
# TLS
Frigate's integrated NGINX server supports TLS certificates. By default Frigate will generate a self signed certificate that will be used for port 8971. Frigate is designed to make it easy to use whatever tool you prefer to manage certificates.
Frigate is often running behind a reverse proxy that manages TLS certificates for multiple services. You will likely need to set your reverse proxy to allow self signed certificates or you can disable TLS in Frigate's config. However, if you are running on a dedicated device that's separate from your proxy or if you expose Frigate directly to the internet, you may want to configure TLS with valid certificates.
-In many deployments, TLS will be unnecessary. It can be disabled in the config with the following yaml:
+In many deployments, TLS will be unnecessary. Disable it as follows:
+
+
+
+
+Navigate to .
+
+- Set **Enable TLS** to off if running behind a reverse proxy that handles TLS (default: on)
+
+
+
```yaml
tls:
enabled: False
```
+
+
+
## Certificates
TLS certificates can be mounted at `/etc/letsencrypt/live/frigate` using a bind mount or docker volume.
diff --git a/docs/docs/configuration/zones.md b/docs/docs/configuration/zones.md
index ba86b4a86..2cb3c8ebe 100644
--- a/docs/docs/configuration/zones.md
+++ b/docs/docs/configuration/zones.md
@@ -3,6 +3,10 @@ id: zones
title: Zones
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
Zones allow you to define a specific area of the frame and apply additional filters for object types so you can determine whether or not an object is within a particular area. Presence in a zone is evaluated based on the bottom center of the bounding box for the object. It does not matter how much of the bounding box overlaps with the zone.
For example, the cat in this image is currently in Zone 1, but **not** Zone 2.
@@ -16,11 +20,51 @@ Zones can be toggled on or off without removing them from the configuration. Dis
During testing, enable the Zones option for the Debug view of your camera (Settings --> Debug) so you can adjust as needed. The zone line will increase in thickness when any object enters the zone.
-To create a zone, follow [the steps for a "Motion mask"](masks.md), but use the section of the web UI for creating a zone instead.
+## Creating a Zone
+
+
+
+
+1. Navigate to and select the desired camera.
+2. Under the **Zones** section, click the plus icon to add a new zone.
+3. Click on the camera's latest image to create the points for the zone boundary. Click the first point again to close the polygon.
+4. Configure zone options such as **Friendly name**, **Objects**, **Loitering time**, and **Inertia** in the zone editor.
+5. Press **Save** when finished.
+
+
+
+
+Follow [the steps for creating a mask](masks.md), but use the zone section of the web UI instead. Alternatively, define zones directly in your configuration file:
+
+```yaml
+cameras:
+ name_of_your_camera:
+ zones:
+ entire_yard:
+ friendly_name: Entire yard
+ coordinates: 0.123,0.456,0.789,0.012,...
+```
+
+
+
### Restricting alerts and detections to specific zones
-Often you will only want alerts to be created when an object enters areas of interest. This is done using zones along with setting required_zones. Let's say you only want to have an alert created when an object enters your entire_yard zone, the config would be:
+Often you will only want alerts to be created when an object enters areas of interest. This is done by combining zones with required zones for review items.
+
+To create an alert only when an object enters the `entire_yard` zone:
+
+
+
+
+Navigate to .
+
+| Field | Description |
+| ---------------------------------- | ----------------------------------------------------------------------------------------- |
+| **Alerts config > Required zones** | Zones that an object must enter to be considered an alert; leave empty to allow any zone. |
+
+
+
```yaml {6,8}
cameras:
@@ -35,7 +79,23 @@ cameras:
coordinates: ...
```
-You may also want to filter detections to only be created when an object enters a secondary area of interest. This is done using zones along with setting required_zones. Let's say you want alerts when an object enters the inner area of the yard but detections when an object enters the edge of the yard, the config would be
+
+
+
+You may also want to filter detections to only be created when an object enters a secondary area of interest. For example, to trigger alerts when an object enters the inner area of the yard but detections when an object enters the edge of the yard:
+
+
+
+
+Navigate to .
+
+| Field | Description |
+| -------------------------------------- | -------------------------------------------------------------------------------------------- |
+| **Alerts config > Required zones** | Zones that an object must enter to be considered an alert; leave empty to allow any zone. |
+| **Detections config > Required zones** | Zones that an object must enter to be considered a detection; leave empty to allow any zone. |
+
+
+
```yaml
cameras:
@@ -56,8 +116,22 @@ cameras:
coordinates: ...
```
+
+
+
### Restricting snapshots to specific zones
+To only save snapshots when an object enters a specific zone:
+
+
+
+
+1. Navigate to and select your camera.
+ - Set **Required zones** to `entire_yard`
+
+
+
+
```yaml
cameras:
name_of_your_camera:
@@ -70,9 +144,24 @@ cameras:
coordinates: ...
```
+
+
+
### Restricting zones to specific objects
-Sometimes you want to limit a zone to specific object types to have more granular control of when alerts, detections, and snapshots are saved. The following example will limit one zone to person objects and the other to cars.
+Sometimes you want to limit a zone to specific object types to have more granular control of when alerts, detections, and snapshots are saved. The following example limits one zone to person objects and the other to cars.
+
+
+
+
+1. Navigate to and select the desired camera.
+2. Create a zone named `entire_yard` covering everywhere you want to track a person.
+ - Under **Objects**, add `person`
+3. Create a second zone named `front_yard_street` covering just the street.
+ - Under **Objects**, add `car`
+
+
+
```yaml
cameras:
@@ -88,6 +177,9 @@ cameras:
- car
```
+
+
+
Only car objects can trigger the `front_yard_street` zone and only person can trigger the `entire_yard`. Objects will be tracked for any `person` that enter anywhere in the yard, and for cars only if they enter the street.
### Zone Loitering
@@ -103,6 +195,17 @@ When using loitering zones, a review item will behave in the following way:
:::
+
+
+
+1. Navigate to and select the desired camera.
+2. Edit or create the zone (e.g., `sidewalk`).
+ - Set **Loitering time** to the desired number of seconds (e.g., `4`)
+ - Under **Objects**, add the relevant object types (e.g., `person`)
+
+
+
+
```yaml
cameras:
name_of_your_camera:
@@ -114,9 +217,22 @@ cameras:
- person
```
+
+
+
### Zone Inertia
-Sometimes an objects bounding box may be slightly incorrect and the bottom center of the bounding box is inside the zone while the object is not actually in the zone. Zone inertia helps guard against this by requiring an object's bounding box to be within the zone for multiple consecutive frames. This value can be configured:
+Sometimes an object's bounding box may be slightly incorrect and the bottom center of the bounding box is inside the zone while the object is not actually in the zone. Zone inertia helps guard against this by requiring an object's bounding box to be within the zone for multiple consecutive frames.
+
+
+
+
+1. Navigate to and select the desired camera.
+2. Edit or create the zone (e.g., `front_yard`).
+ - Set **Inertia** to the desired number of consecutive frames (e.g., `3`)
+
+
+
```yaml
cameras:
@@ -129,8 +245,21 @@ cameras:
- person
```
+
+
+
There may also be cases where you expect an object to quickly enter and exit a zone, like when a car is pulling into the driveway, and you may want to have the object be considered present in the zone immediately:
+
+
+
+1. Navigate to and select the desired camera.
+2. Edit or create the zone (e.g., `driveway_entrance`).
+ - Set **Inertia** to `1`
+
+
+
+
```yaml
cameras:
name_of_your_camera:
@@ -142,6 +271,9 @@ cameras:
- car
```
+
+
+
### Speed Estimation
Frigate can be configured to estimate the speed of objects moving through a zone. This works by combining data from Frigate's object tracker and "real world" distance measurements of the edges of the zone. The recommended use case for this feature is to track the speed of vehicles on a road as they move through the zone.
@@ -152,7 +284,19 @@ Your zone must be defined with exactly 4 points and should be aligned to the gro
Speed estimation requires a minimum number of frames for your object to be tracked before a valid estimate can be calculated, so create your zone away from places where objects enter and exit for the best results. The object's bounding box must be stable and remain a constant size as it enters and exits the zone. _Your zone should not take up the full frame, and the zone does **not** need to be the same size or larger than the objects passing through it._ An object's speed is tracked while it passes through the zone and then saved to Frigate's database.
-Accurate real-world distance measurements are required to estimate speeds. These distances can be specified in your zone config through the `distances` field.
+Accurate real-world distance measurements are required to estimate speeds. These distances can be specified through the `distances` field. Each number represents the real-world distance between consecutive points in the `coordinates` list. The fastest and most accurate way to configure this is through the Zone Editor in the Frigate UI.
+
+
+
+
+1. Navigate to and select the desired camera.
+2. Create or edit a zone with exactly 4 points aligned to the ground plane.
+3. In the zone editor, enter the real-world **Distances** between each pair of consecutive points.
+ - For example, if the distance between the first and second points is 10 meters, between the second and third is 12 meters, etc.
+4. Distances are measured in meters (metric) or feet (imperial), depending on the **Unit system** setting.
+
+
+
```yaml
cameras:
@@ -163,16 +307,34 @@ cameras:
distances: 10,12,11,13.5 # in meters or feet
```
-Each number in the `distance` field represents the real-world distance between the points in the `coordinates` list. So in the example above, the distance between the first two points ([0.033,0.306] and [0.324,0.138]) is 10. The distance between the second and third set of points ([0.324,0.138] and [0.439,0.185]) is 12, and so on. The fastest and most accurate way to configure this is through the Zone Editor in the Frigate UI.
+So in the example above, the distance between the first two points ([0.033,0.306] and [0.324,0.138]) is 10. The distance between the second and third set of points ([0.324,0.138] and [0.439,0.185]) is 12, and so on.
+
+
+
The `distance` values are measured in meters (metric) or feet (imperial), depending on how `unit_system` is configured in your `ui` config:
+
+
+
+Navigate to .
+
+| Field | Description |
+| --------------- | -------------------------------------------------------------------- |
+| **Unit system** | Set to `metric` (kilometers per hour) or `imperial` (miles per hour) |
+
+
+
+
```yaml
ui:
# can be "metric" or "imperial", default is metric
unit_system: metric
```
+
+
+
The average speed of your object as it moved through your zone is saved in Frigate's database and can be seen in the UI in the Tracked Object Details pane in Explore. Current estimated speed can also be seen on the debug view as the third value in the object label (see the caveats below). Current estimated speed, average estimated speed, and velocity angle (the angle of the direction the object is moving relative to the frame) of tracked objects is also sent through the `events` MQTT topic. See the [MQTT docs](../integrations/mqtt.md#frigateevents).
These speed values are output as a number in miles per hour (mph) or kilometers per hour (kph). For miles per hour, set `unit_system` to `imperial`. For kilometers per hour, set `unit_system` to `metric`.
@@ -191,6 +353,17 @@ These speed values are output as a number in miles per hour (mph) or kilometers
Zones can be configured with a minimum speed requirement, meaning an object must be moving at or above this speed to be considered inside the zone. Zone `distances` must be defined as described above.
+
+
+
+1. Navigate to and select the desired camera.
+2. Edit or create the zone with distances configured.
+ - Set **Speed threshold** to the desired minimum speed (e.g., `20`)
+ - The unit is kph or mph, depending on the **Unit system** setting
+
+
+
+
```yaml
cameras:
name_of_your_camera:
@@ -202,3 +375,6 @@ cameras:
# highlight-next-line
speed_threshold: 20 # unit is in kph or mph, depending on how unit_system is set (see above)
```
+
+
+
diff --git a/docs/docs/frigate/camera_setup.md b/docs/docs/frigate/camera_setup.md
index 64c650c13..4cb56dc50 100644
--- a/docs/docs/frigate/camera_setup.md
+++ b/docs/docs/frigate/camera_setup.md
@@ -34,7 +34,7 @@ For the Dahua/Loryta 5442 camera, I use the following settings:
- Encode Mode: H.264
- Resolution: 2688\*1520
- Frame Rate(FPS): 15
-- I Frame Interval: 30 (15 can also be used to prioritize streaming performance - see the [camera settings recommendations](/configuration/live#camera_settings_recommendations) for more info)
+- I Frame Interval: 30 (15 can also be used to prioritize streaming performance - see the [camera settings recommendations](/configuration/live#camera-settings-recommendations) for more info)
**Sub Stream (Detection)**
diff --git a/docs/docs/frigate/hardware.md b/docs/docs/frigate/hardware.md
index 86afbfa53..afbd95aaf 100644
--- a/docs/docs/frigate/hardware.md
+++ b/docs/docs/frigate/hardware.md
@@ -95,7 +95,7 @@ Frigate supports multiple different detectors that work on different types of ha
**Rockchip**
- [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs to provide efficient object detection.
- - [Supports limited model architectures](../../configuration/object_detectors#choosing-a-model)
+ - [Supports limited model architectures](../../configuration/object_detectors#rockchip-supported-models)
- Runs best with tiny or small size models
- Runs efficiently on low power hardware
@@ -263,7 +263,7 @@ Inference speeds may vary depending on the host platform. The above data was mea
### Nvidia Jetson
-Jetson devices are supported via the TensorRT or ONNX detectors when running Jetpack 6. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration_video#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector).
+Jetson devices are supported via the TensorRT or ONNX detectors when running Jetpack 6. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration_video#nvidia-jetson) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector).
Inference speed will vary depending on the YOLO model, jetson platform and jetson nvpmodel (GPU/DLA/EMC clock speed). It is typically 20-40 ms for most models. The DLA is more efficient than the GPU, but not faster, so using the DLA will reduce power consumption but will slightly increase inference time.
diff --git a/docs/docs/frigate/installation.md b/docs/docs/frigate/installation.md
index a115ecf97..2f2e55fa0 100644
--- a/docs/docs/frigate/installation.md
+++ b/docs/docs/frigate/installation.md
@@ -271,7 +271,7 @@ If you are using `docker run`, add this option to your command `--device /dev/ha
#### Configuration
-Finally, configure [hardware object detection](/configuration/object_detectors#hailo-8l) to complete the setup.
+Finally, configure [hardware object detection](/configuration/object_detectors#hailo-8) to complete the setup.
### MemryX MX3
diff --git a/docs/docs/frigate/updating.md b/docs/docs/frigate/updating.md
index 841a3e2d5..a4dfb7f0a 100644
--- a/docs/docs/frigate/updating.md
+++ b/docs/docs/frigate/updating.md
@@ -5,7 +5,7 @@ title: Updating
# Updating Frigate
-The current stable version of Frigate is **0.17.0**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.17.0).
+The current stable version of Frigate is **0.18.0**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.18.0).
Keeping Frigate up to date ensures you benefit from the latest features, performance improvements, and bug fixes. The update process varies slightly depending on your installation method (Docker, Home Assistant App, etc.). Below are instructions for the most common setups.
@@ -31,21 +31,21 @@ If you’re running Frigate via Docker (recommended method), follow these steps:
2. **Update and Pull the Latest Image**:
- If using Docker Compose:
- - Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.17.0` instead of `0.16.4`). For example:
+ - Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.18.0` instead of `0.17.1`). For example:
```yaml
services:
frigate:
- image: ghcr.io/blakeblackshear/frigate:0.17.0
+ image: ghcr.io/blakeblackshear/frigate:0.18.0
```
- Then pull the image:
```bash
- docker pull ghcr.io/blakeblackshear/frigate:0.17.0
+ docker pull ghcr.io/blakeblackshear/frigate:0.18.0
```
- **Note for `stable` Tag Users**: If your `docker-compose.yml` uses the `stable` tag (e.g., `ghcr.io/blakeblackshear/frigate:stable`), you don’t need to update the tag manually. The `stable` tag always points to the latest stable release after pulling.
- If using `docker run`:
- - Pull the image with the appropriate tag (e.g., `0.17.0`, `0.17.0-tensorrt`, or `stable`):
+ - Pull the image with the appropriate tag (e.g., `0.18.0`, `0.18.0-tensorrt`, or `stable`):
```bash
- docker pull ghcr.io/blakeblackshear/frigate:0.17.0
+ docker pull ghcr.io/blakeblackshear/frigate:0.18.0
```
3. **Start the Container**:
@@ -77,6 +77,7 @@ For users running Frigate as a Home Assistant App:
- If an update is available, you’ll see an "Update" button.
2. **Update the App**:
+ - Make a backup of the current version of the app.
- Click the "Update" button next to the Frigate app.
- Wait for the process to complete. Home Assistant will handle downloading and installing the new version.
@@ -99,7 +100,7 @@ If an update causes issues:
1. Stop Frigate.
2. Restore your backed-up config file and database.
3. Revert to the previous image version:
- - For Docker: Specify an older tag (e.g., `ghcr.io/blakeblackshear/frigate:0.16.4`) in your `docker run` command.
+ - For Docker: Specify an older tag (e.g., `ghcr.io/blakeblackshear/frigate:0.17.1`) in your `docker run` command.
- For Docker Compose: Edit your `docker-compose.yml`, specify the older version tag (e.g., `ghcr.io/blakeblackshear/frigate:0.16.4`), and re-run `docker compose up -d`.
- For Home Assistant: Restore from the app/addon backup you took before you updated.
4. Verify the old version is running again.
diff --git a/docs/docs/guides/configuring_go2rtc.md b/docs/docs/guides/configuring_go2rtc.md
index 4d632fdd6..26fb26644 100644
--- a/docs/docs/guides/configuring_go2rtc.md
+++ b/docs/docs/guides/configuring_go2rtc.md
@@ -17,7 +17,7 @@ First, you will want to configure go2rtc to connect to your camera stream by add
For the best experience, you should set the stream name under `go2rtc` to match the name of your camera so that Frigate will automatically map it and be able to use better live view options for the camera.
-See [the live view docs](../configuration/live.md#setting-stream-for-live-ui) for more information.
+See [the live view docs](../configuration/live.md#setting-streams-for-live-ui) for more information.
:::
diff --git a/docs/docs/guides/getting_started.md b/docs/docs/guides/getting_started.md
index 30f4ce016..cd456f201 100644
--- a/docs/docs/guides/getting_started.md
+++ b/docs/docs/guides/getting_started.md
@@ -3,6 +3,10 @@ id: getting_started
title: Getting started
---
+import ConfigTabs from "@site/src/components/ConfigTabs";
+import TabItem from "@theme/TabItem";
+import NavPath from "@site/src/components/NavPath";
+
# Getting Started
:::tip
@@ -85,7 +89,7 @@ This section shows how to create a minimal directory structure for a Docker inst
### Setup directories
-Frigate will create a config file if one does not exist on the initial startup. The following directory structure is the bare minimum to get started. Once Frigate is running, you can use the built-in config editor which supports config validation.
+Frigate will create a config file if one does not exist on the initial startup. The following directory structure is the bare minimum to get started.
```
.
@@ -128,7 +132,7 @@ services:
- "8554:8554" # RTSP feeds
```
-Now you should be able to start Frigate by running `docker compose up -d` from within the folder containing `docker-compose.yml`. On startup, an admin user and password will be created and outputted in the logs. You can see this by running `docker logs frigate`. Frigate should now be accessible at `https://server_ip:8971` where you can login with the `admin` user and finish the configuration using the built-in configuration editor.
+Now you should be able to start Frigate by running `docker compose up -d` from within the folder containing `docker-compose.yml`. On startup, an admin user and password will be created and output in the logs. You can see this by running `docker logs frigate`. Frigate should now be accessible at `https://server_ip:8971` where you can log in with the `admin` user and finish configuration using the Settings UI.
## Configuring Frigate
@@ -140,15 +144,15 @@ At this point you should be able to start Frigate and a basic config will be cre
### Step 2: Add a camera
-You can click the `Add Camera` button to use the camera setup wizard to get your first camera added into Frigate.
+Click the **Add Camera** button in to use the camera setup wizard to get your first camera added into Frigate.
### Step 3: Configure hardware acceleration (recommended)
-Now that you have a working camera configuration, you want to setup hardware acceleration to minimize the CPU required to decode your video streams. See the [hardware acceleration](../configuration/hardware_acceleration_video.md) config reference for examples applicable to your hardware.
+Now that you have a working camera configuration, set up hardware acceleration to minimize the CPU required to decode your video streams. See the [hardware acceleration](../configuration/hardware_acceleration_video.md) docs for examples applicable to your hardware.
-Here is an example configuration with hardware acceleration configured to work with most Intel processors with an integrated GPU using the [preset](../configuration/ffmpeg_presets.md):
+:::note
-`docker-compose.yml` (after modifying, you will need to run `docker compose up -d` to apply changes)
+Hardware acceleration requires passing the appropriate device to the Docker container. For Intel and AMD GPUs, add the device to your `docker-compose.yml`:
```yaml {4,5}
services:
@@ -159,7 +163,17 @@ services:
...
```
-`config.yml`
+After modifying, run `docker compose up -d` to apply changes.
+
+:::
+
+
+
+
+Navigate to and set **Hardware acceleration arguments** to the appropriate preset for your hardware (e.g., `VAAPI (Intel/AMD GPU)` for most Intel processors).
+
+
+
```yaml
mqtt: ...
@@ -173,6 +187,9 @@ cameras:
detect: ...
```
+
+
+
### Step 4: Configure detectors
By default, Frigate will use a single CPU detector.
@@ -184,6 +201,24 @@ In many cases, the integrated graphics on Intel CPUs provides sufficient perform
You need to refer to **Configure hardware acceleration** above to enable the container to use the GPU.
+
+
+
+1. Navigate to and add a detector with **Type** `OpenVINO` and **Device** `GPU`
+2. Navigate to and configure the model settings for OpenVINO:
+
+| Field | Value |
+| ---------------------------------------- | ------------------------------------------ |
+| **Object detection model input width** | `300` |
+| **Object detection model input height** | `300` |
+| **Model Input Tensor Shape** | `nhwc` |
+| **Model Input Pixel Color Format** | `bgr` |
+| **Custom object detector model path** | `/openvino-model/ssdlite_mobilenet_v2.xml` |
+| **Label map for custom object detector** | `/openvino-model/coco_91cl_bkgr.txt` |
+
+
+
+
```yaml {3-6,9-15,20-21}
mqtt: ...
@@ -209,6 +244,9 @@ cameras:
...
```
+
+
+
If you have a USB Coral, you will need to add a detectors section to your config.
@@ -216,7 +254,9 @@ If you have a USB Coral, you will need to add a detectors section to your config
Use USB Coral detector
-`docker-compose.yml` (after modifying, you will need to run `docker compose up -d` to apply changes)
+:::note
+
+You need to pass the USB Coral device to the Docker container. Add the following to your `docker-compose.yml` and run `docker compose up -d`:
```yaml {4-6}
services:
@@ -228,6 +268,16 @@ services:
...
```
+:::
+
+
+
+
+Navigate to and add a detector with **Type** `EdgeTPU` and **Device** `usb`.
+
+
+
+
```yaml {3-6,11-12}
mqtt: ...
@@ -244,17 +294,20 @@ cameras:
...
```
+
+
+
More details on available detectors can be found [here](../configuration/object_detectors.md).
-Restart Frigate and you should start seeing detections for `person`. If you want to track other objects, they will need to be added according to the [configuration file reference](../configuration/reference.md).
+Restart Frigate and you should start seeing detections for `person`. If you want to track other objects, they can be configured in or via the [configuration file reference](../configuration/reference.md).
### Step 5: Setup motion masks
-Now that you have optimized your configuration for decoding the video stream, you will want to check to see where to implement motion masks. To do this, navigate to the camera in the UI, select "Debug" at the top, and enable "Motion boxes" in the options below the video feed. Watch for areas that continuously trigger unwanted motion to be detected. Common areas to mask include camera timestamps and trees that frequently blow in the wind. The goal is to avoid wasting object detection cycles looking at these areas.
+Now that you have optimized your configuration for decoding the video stream, you will want to check to see where to implement motion masks. Click on the camera from the main dashboard, then select the gear icon in the top right, enable Debug View, and finally enable the switch for Motion Boxes. Watch for areas that continuously trigger unwanted motion to be detected. Common areas to mask include camera timestamps and trees that frequently blow in the wind. The goal is to avoid wasting object detection cycles looking at these areas.
-Now that you know where you need to mask, use the "Mask & Zone creator" in the options pane to generate the coordinates needed for your config file. More information about masks can be found [here](../configuration/masks.md).
+Use the mask editor to draw polygon masks directly on the camera feed. Navigate to and set up a motion mask over the area. More information about masks can be found [here](../configuration/masks.md).
:::warning
@@ -262,7 +315,7 @@ Note that motion masks should not be used to mark out areas where you do not wan
:::
-Your configuration should look similar to this now.
+If you are using YAML to configure Frigate instead of the UI, your configuration should look similar to this now:
```yaml {16-18}
mqtt:
@@ -292,7 +345,14 @@ cameras:
In order to review activity in the Frigate UI, recordings need to be enabled.
-To enable recording video, add the `record` role to a stream and enable it in the config. If record is disabled in the config, it won't be possible to enable it in the UI.
+
+
+
+1. If you have separate streams for detect and record, navigate to , select your camera, and add a second input with the `record` role pointing to your high-resolution stream
+2. Navigate to (or for a specific camera) and set **Enable recording** to on
+
+
+
```yaml {16-17}
mqtt: ...
@@ -315,6 +375,9 @@ cameras:
motion: ...
```
+
+
+
If you don't have separate streams for detect and record, you would just add the record role to the list on the first input.
:::note
diff --git a/docs/scripts/README.md b/docs/scripts/README.md
new file mode 100644
index 000000000..347536a07
--- /dev/null
+++ b/docs/scripts/README.md
@@ -0,0 +1,184 @@
+# Documentation Scripts
+
+## generate_ui_tabs.py
+
+Automatically generates "Frigate UI" tab content for documentation files based on the YAML config examples already in the docs.
+
+Instead of manually writing UI instructions for every YAML block, this script reads three data sources from the codebase and generates the UI tabs:
+
+1. **JSON Schema** (from Pydantic config models) -- field names, types, defaults
+2. **i18n translation files** -- the exact labels shown in the Settings UI
+3. **Section mappings** (from Settings.tsx) -- config key to UI navigation path
+
+### Prerequisites
+
+Run from the repository root. The script imports Frigate's Python config models directly, so the `frigate` package must be importable:
+
+```bash
+# From repo root -- no extra install needed if your environment can import frigate
+python3 docs/scripts/generate_ui_tabs.py --help
+```
+
+### Usage
+
+#### Preview (default)
+
+Shows what would be generated for each bare YAML block, without modifying any files:
+
+```bash
+# Single file
+python3 docs/scripts/generate_ui_tabs.py docs/docs/configuration/record.md
+
+# All config docs
+python3 docs/scripts/generate_ui_tabs.py docs/docs/configuration/
+```
+
+#### Inject
+
+Wraps bare YAML blocks with `<ConfigTabs>` and inserts the generated UI tab. Also adds the required imports (`ConfigTabs`, `TabItem`, `NavPath`) after the frontmatter if missing.
+
+Already-wrapped blocks are skipped (idempotent).
+
+```bash
+python3 docs/scripts/generate_ui_tabs.py --inject docs/docs/configuration/record.md
+```
+
+#### Check
+
+Compares existing UI tabs against what the script would generate from the current schema and i18n files. Prints a unified diff for each drifted block and exits with code 1 if any drift is found.
+
+Use this in CI to catch stale docs after schema or i18n changes.
+
+```bash
+python3 docs/scripts/generate_ui_tabs.py --check docs/docs/configuration/
+```
+
+#### Regenerate
+
+Replaces the UI tab content in existing `<ConfigTabs>` blocks with freshly generated content. The YAML tab is preserved exactly as-is. Only blocks that have actually changed are rewritten.
+
+```bash
+# Preview changes without writing
+python3 docs/scripts/generate_ui_tabs.py --regenerate --dry-run docs/docs/configuration/
+
+# Apply changes
+python3 docs/scripts/generate_ui_tabs.py --regenerate docs/docs/configuration/
+```
+
+#### Output to directory (`--outdir`)
+
+Write generated files to a separate directory instead of modifying the originals. The source directory structure is mirrored. Files without changes are copied as-is so the output is a complete snapshot suitable for diffing.
+
+Works with `--inject` and `--regenerate`.
+
+```bash
+# Generate into a named directory
+python3 docs/scripts/generate_ui_tabs.py --inject --outdir /tmp/generated docs/docs/configuration/
+
+# Then diff original vs generated
+diff -rq docs/docs/configuration/ /tmp/generated/
+
+# Or let an AI agent compare them
+diff -ru docs/docs/configuration/record.md /tmp/generated/record.md
+```
+
+This is useful for AI agents that need to review the generated output before applying it, or for previewing what `--inject` or `--regenerate` would do across an entire directory.
+
+#### Verbose mode
+
+Add `-v` to any mode for detailed diagnostics (skipped blocks, reasons, unchanged blocks):
+
+```bash
+python3 docs/scripts/generate_ui_tabs.py -v docs/docs/configuration/
+```
+
+### Typical workflow
+
+```bash
+# 1. Preview what would be generated (output to temp dir, originals untouched)
+python3 docs/scripts/generate_ui_tabs.py --inject --outdir /tmp/ui-preview docs/docs/configuration/
+# Compare: diff -ru docs/docs/configuration/ /tmp/ui-preview/
+
+# 2. Apply: inject UI tabs into the actual docs
+python3 docs/scripts/generate_ui_tabs.py --inject docs/docs/configuration/
+
+# 3. Review and hand-edit where needed (the script gets you 90% there)
+
+# 4. Later, after schema or i18n changes, check for drift
+python3 docs/scripts/generate_ui_tabs.py --check docs/docs/configuration/
+
+# 5. If drifted, preview then regenerate
+python3 docs/scripts/generate_ui_tabs.py --regenerate --outdir /tmp/ui-regen docs/docs/configuration/
+# Compare: diff -ru docs/docs/configuration/ /tmp/ui-regen/
+
+# 6. Apply regeneration
+python3 docs/scripts/generate_ui_tabs.py --regenerate docs/docs/configuration/
+```
+
+### How it decides what to generate
+
+The script detects two patterns from the YAML block content:
+
+**Pattern A -- Field table.** When the YAML has inline comments (e.g., `# <- description`), the script generates a markdown table with field names and descriptions:
+
+```markdown
+Navigate to .
+
+| Field | Description |
+|-------|-------------|
+| **Continuous retention > Retention days** | Days to retain recordings. |
+| **Motion retention > Retention days** | Days to retain recordings. |
+```
+
+**Pattern B -- Set instructions.** When the YAML has concrete values without comments, the script generates step-by-step instructions:
+
+```markdown
+Navigate to .
+
+- Set **Enable recording** to on
+- Set **Continuous retention > Retention days** to `3`
+- Set **Alert retention > Event retention > Retention days** to `30`
+- Set **Alert retention > Event retention > Retention mode** to `all`
+```
+
+**Camera-level config** is auto-detected when the YAML is nested under `cameras:`. The output uses a generic camera reference rather than the example camera name from the YAML:
+
+```markdown
+1. Navigate to and select your camera.
+ - Set **Enable recording** to on
+ - Set **Continuous retention > Retention days** to `5`
+```
+
+### What gets skipped
+
+- YAML blocks already inside `<ConfigTabs>` (for `--inject`)
+- YAML blocks whose top-level key is not a known config section (e.g., `go2rtc`, `docker-compose`, `scrape_configs`)
+- Fields listed in `hiddenFields` in the section configs (e.g., `enabled_in_config`)
+
+### File structure
+
+```
+docs/scripts/
+├── generate_ui_tabs.py # CLI entry point
+├── README.md # This file
+└── lib/
+ ├── __init__.py
+ ├── schema_loader.py # Loads JSON schema from Pydantic models
+ ├── i18n_loader.py # Loads i18n translation JSON files
+ ├── section_config_parser.py # Parses TS section configs (hiddenFields, etc.)
+ ├── yaml_extractor.py # Extracts YAML blocks and ConfigTabs from markdown
+ ├── ui_generator.py # Generates UI tab markdown content
+ └── nav_map.py # Maps config sections to Settings UI nav paths
+```
+
+### Data sources
+
+| Source | Path | What it provides |
+|--------|------|------------------|
+| Pydantic models | `frigate/config/` | Field names, types, defaults, nesting |
+| JSON schema | Generated from Pydantic at runtime | Full schema with `$defs` and `$ref` |
+| i18n (global) | `web/public/locales/en/config/global.json` | Field labels for global settings |
+| i18n (cameras) | `web/public/locales/en/config/cameras.json` | Field labels for camera settings |
+| i18n (menu) | `web/public/locales/en/views/settings.json` | Sidebar menu labels |
+| Section configs | `web/src/components/config-form/section-configs/*.ts` | Hidden fields, advanced fields, field order |
+| Navigation map | Hardcoded from `web/src/pages/Settings.tsx` | Config section to UI path mapping |
diff --git a/docs/scripts/generate_ui_tabs.py b/docs/scripts/generate_ui_tabs.py
new file mode 100644
index 000000000..fa468922c
--- /dev/null
+++ b/docs/scripts/generate_ui_tabs.py
@@ -0,0 +1,660 @@
+#!/usr/bin/env python3
+"""Generate Frigate UI tab content for documentation files.
+
+This script reads YAML code blocks from documentation markdown files and
+generates corresponding "Frigate UI" tab instructions based on:
+- JSON Schema (from Pydantic config models)
+- i18n translation files (for UI field labels)
+- Section configs (for hidden/advanced field info)
+- Navigation mappings (for Settings UI paths)
+
+Usage:
+ # Preview generated UI tabs for a single file
+ python docs/scripts/generate_ui_tabs.py docs/docs/configuration/record.md
+
+ # Preview all config docs
+ python docs/scripts/generate_ui_tabs.py docs/docs/configuration/
+
+ # Inject UI tabs into files (wraps bare YAML blocks with ConfigTabs)
+ python docs/scripts/generate_ui_tabs.py --inject docs/docs/configuration/record.md
+
+ # Regenerate existing UI tabs from current schema/i18n
+ python docs/scripts/generate_ui_tabs.py --regenerate docs/docs/configuration/
+
+ # Check for drift between existing UI tabs and what would be generated
+ python docs/scripts/generate_ui_tabs.py --check docs/docs/configuration/
+
+ # Write generated files to a temp directory for comparison (originals unchanged)
+ python docs/scripts/generate_ui_tabs.py --inject --outdir /tmp/generated docs/docs/configuration/
+
+ # Show detailed warnings and diagnostics
+ python docs/scripts/generate_ui_tabs.py --verbose docs/docs/configuration/
+"""
+
+import argparse
+import difflib
+import shutil
+import sys
+import tempfile
+from pathlib import Path
+
+# Ensure frigate package is importable
+sys.path.insert(0, str(Path(__file__).resolve().parents[1].parent))
+
+from lib.i18n_loader import load_i18n
+from lib.nav_map import ALL_CONFIG_SECTIONS
+from lib.schema_loader import load_schema
+from lib.section_config_parser import load_section_configs
+from lib.ui_generator import generate_ui_content, wrap_with_config_tabs
+from lib.yaml_extractor import (
+ extract_config_tabs_blocks,
+ extract_yaml_blocks,
+)
+
+
+def process_file(
+ filepath: Path,
+ schema: dict,
+ i18n: dict,
+ section_configs: dict,
+ inject: bool = False,
+ verbose: bool = False,
+ outpath: Path | None = None,
+) -> dict:
+ """Process a single markdown file for initial injection of bare YAML blocks.
+
+ Args:
+ outpath: If set, write the result here instead of modifying filepath.
+
+ Returns:
+ Stats dict with counts of blocks found, generated, skipped, etc.
+ """
+ content = filepath.read_text()
+ blocks = extract_yaml_blocks(content)
+
+ stats = {
+ "file": str(filepath),
+ "total_blocks": len(blocks),
+ "config_blocks": 0,
+ "already_wrapped": 0,
+ "generated": 0,
+ "skipped": 0,
+ "warnings": [],
+ }
+
+ if not blocks:
+ return stats
+
+ # For injection, we need to track replacements
+ replacements: list[tuple[int, int, str]] = []
+
+ for block in blocks:
+ # Skip non-config YAML blocks
+ if block.section_key is None or (
+ block.section_key not in ALL_CONFIG_SECTIONS
+ and not block.is_camera_level
+ ):
+ stats["skipped"] += 1
+ if verbose and block.config_keys:
+ stats["warnings"].append(
+ f" Line {block.line_start}: Skipped block with keys "
+ f"{block.config_keys} (not a known config section)"
+ )
+ continue
+
+ stats["config_blocks"] += 1
+
+ # Skip already-wrapped blocks
+ if block.inside_config_tabs:
+ stats["already_wrapped"] += 1
+ if verbose:
+ stats["warnings"].append(
+ f" Line {block.line_start}: Already inside ConfigTabs, skipping"
+ )
+ continue
+
+ # Generate UI content
+ ui_content = generate_ui_content(
+ block, schema, i18n, section_configs
+ )
+
+ if ui_content is None:
+ stats["skipped"] += 1
+ if verbose:
+ stats["warnings"].append(
+ f" Line {block.line_start}: Could not generate UI content "
+ f"for section '{block.section_key}'"
+ )
+ continue
+
+ stats["generated"] += 1
+
+ if inject:
+ full_block = wrap_with_config_tabs(
+ ui_content, block.raw, block.highlight
+ )
+ replacements.append((block.line_start, block.line_end, full_block))
+ else:
+ # Preview mode: print to stdout
+ print(f"\n{'='*60}")
+ print(f"File: {filepath}")
+ print(f"Line {block.line_start}: section={block.section_key}, "
+ f"camera={block.is_camera_level}")
+ print(f"{'='*60}")
+ print()
+ print("--- Generated UI tab ---")
+ print(ui_content)
+ print()
+ print("--- Would produce ---")
+ print(wrap_with_config_tabs(ui_content, block.raw, block.highlight))
+ print()
+
+ # Apply injections in reverse order (to preserve line numbers)
+ if inject and replacements:
+ lines = content.split("\n")
+ for start, end, replacement in reversed(replacements):
+ # start/end are 1-based line numbers
+ # The YAML block spans from the ``` line before start to the ``` line at end
+ # We need to replace from the opening ``` to the closing ```
+ block_start = start - 2 # 0-based index of ```yaml line
+ block_end = end - 1 # 0-based index of closing ``` line
+
+ replacement_lines = replacement.split("\n")
+ lines[block_start : block_end + 1] = replacement_lines
+
+ new_content = "\n".join(lines)
+
+ # Ensure imports are present
+ new_content = _ensure_imports(new_content)
+
+ target = outpath or filepath
+ target.parent.mkdir(parents=True, exist_ok=True)
+ target.write_text(new_content)
+ print(f" Injected {len(replacements)} ConfigTabs block(s) into {target}")
+ elif outpath is not None:
+ # No changes but outdir requested -- copy original so the output
+ # directory contains a complete set of files for diffing.
+ outpath.parent.mkdir(parents=True, exist_ok=True)
+ shutil.copy2(filepath, outpath)
+
+ return stats
+
+
+def regenerate_file(
+ filepath: Path,
+ schema: dict,
+ i18n: dict,
+ section_configs: dict,
+ dry_run: bool = False,
+ verbose: bool = False,
+ outpath: Path | None = None,
+) -> dict:
+ """Regenerate UI tabs in existing ConfigTabs blocks.
+
+ Strips the current UI tab content and regenerates it from the YAML tab
+ using the current schema and i18n data.
+
+ Args:
+ outpath: If set, write the result here instead of modifying filepath.
+
+ Returns:
+ Stats dict
+ """
+ content = filepath.read_text()
+ tab_blocks = extract_config_tabs_blocks(content)
+
+ stats = {
+ "file": str(filepath),
+ "total_blocks": len(tab_blocks),
+ "regenerated": 0,
+ "unchanged": 0,
+ "skipped": 0,
+ "warnings": [],
+ }
+
+ if not tab_blocks:
+ return stats
+
+ replacements: list[tuple[int, int, str]] = []
+
+ for tab_block in tab_blocks:
+ yaml_block = tab_block.yaml_block
+
+ # Skip non-config blocks
+ if yaml_block.section_key is None or (
+ yaml_block.section_key not in ALL_CONFIG_SECTIONS
+ and not yaml_block.is_camera_level
+ ):
+ stats["skipped"] += 1
+ if verbose:
+ stats["warnings"].append(
+ f" Line {tab_block.line_start}: Skipped (not a config section)"
+ )
+ continue
+
+ # Generate fresh UI content
+ new_ui = generate_ui_content(
+ yaml_block, schema, i18n, section_configs
+ )
+
+ if new_ui is None:
+ stats["skipped"] += 1
+ if verbose:
+ stats["warnings"].append(
+ f" Line {tab_block.line_start}: Could not regenerate "
+ f"for section '{yaml_block.section_key}'"
+ )
+ continue
+
+ # Compare with existing
+ existing_ui = tab_block.ui_content
+ if _normalize_whitespace(new_ui) == _normalize_whitespace(existing_ui):
+ stats["unchanged"] += 1
+ if verbose:
+ stats["warnings"].append(
+ f" Line {tab_block.line_start}: Unchanged"
+ )
+ continue
+
+ stats["regenerated"] += 1
+
+ new_full = wrap_with_config_tabs(
+ new_ui, yaml_block.raw, yaml_block.highlight
+ )
+ replacements.append(
+ (tab_block.line_start, tab_block.line_end, new_full)
+ )
+
+ if dry_run or verbose:
+ print(f"\n{'='*60}")
+ print(f"File: {filepath}, line {tab_block.line_start}")
+ print(f"Section: {yaml_block.section_key}")
+ print(f"{'='*60}")
+ _print_diff(existing_ui, new_ui, filepath, tab_block.line_start)
+
+ # Apply replacements
+ if not dry_run and replacements:
+ lines = content.split("\n")
+ for start, end, replacement in reversed(replacements):
+ block_start = start - 1 # 0-based index of line
+ block_end = end - 1 # 0-based index of line
+ replacement_lines = replacement.split("\n")
+ lines[block_start : block_end + 1] = replacement_lines
+
+ new_content = "\n".join(lines)
+ target = outpath or filepath
+ target.parent.mkdir(parents=True, exist_ok=True)
+ target.write_text(new_content)
+ print(
+ f" Regenerated {len(replacements)} ConfigTabs block(s) in {target}",
+ file=sys.stderr,
+ )
+ elif outpath is not None:
+ outpath.parent.mkdir(parents=True, exist_ok=True)
+ shutil.copy2(filepath, outpath)
+
+ return stats
+
+
+def check_file(
+ filepath: Path,
+ schema: dict,
+ i18n: dict,
+ section_configs: dict,
+ verbose: bool = False,
+) -> dict:
+ """Check for drift between existing UI tabs and what would be generated.
+
+ Returns:
+ Stats dict with drift info. Non-zero "drifted" means the file is stale.
+ """
+ content = filepath.read_text()
+ tab_blocks = extract_config_tabs_blocks(content)
+
+ stats = {
+ "file": str(filepath),
+ "total_blocks": len(tab_blocks),
+ "up_to_date": 0,
+ "drifted": 0,
+ "skipped": 0,
+ "warnings": [],
+ }
+
+ if not tab_blocks:
+ return stats
+
+ for tab_block in tab_blocks:
+ yaml_block = tab_block.yaml_block
+
+ if yaml_block.section_key is None or (
+ yaml_block.section_key not in ALL_CONFIG_SECTIONS
+ and not yaml_block.is_camera_level
+ ):
+ stats["skipped"] += 1
+ continue
+
+ new_ui = generate_ui_content(
+ yaml_block, schema, i18n, section_configs
+ )
+
+ if new_ui is None:
+ stats["skipped"] += 1
+ continue
+
+ existing_ui = tab_block.ui_content
+ if _normalize_whitespace(new_ui) == _normalize_whitespace(existing_ui):
+ stats["up_to_date"] += 1
+ else:
+ stats["drifted"] += 1
+ print(f"\n{'='*60}")
+ print(f"DRIFT: {filepath}, line {tab_block.line_start}")
+ print(f"Section: {yaml_block.section_key}")
+ print(f"{'='*60}")
+ _print_diff(existing_ui, new_ui, filepath, tab_block.line_start)
+
+ return stats
+
+
+def _normalize_whitespace(text: str) -> str:
+ """Normalize whitespace for comparison (strip lines, collapse blanks)."""
+ lines = [line.rstrip() for line in text.strip().splitlines()]
+ # Collapse multiple blank lines into one
+ result: list[str] = []
+ prev_blank = False
+ for line in lines:
+ if line == "":
+ if not prev_blank:
+ result.append(line)
+ prev_blank = True
+ else:
+ result.append(line)
+ prev_blank = False
+ return "\n".join(result)
+
+
+def _print_diff(existing: str, generated: str, filepath: Path, line: int):
+ """Print a unified diff between existing and generated UI content."""
+ existing_lines = existing.strip().splitlines(keepends=True)
+ generated_lines = generated.strip().splitlines(keepends=True)
+
+ diff = difflib.unified_diff(
+ existing_lines,
+ generated_lines,
+ fromfile=f"{filepath}:{line} (existing)",
+ tofile=f"{filepath}:{line} (generated)",
+ lineterm="",
+ )
+ diff_text = "\n".join(diff)
+ if diff_text:
+ print(diff_text)
+ else:
+ print(" (whitespace-only difference)")
+
+
+def _ensure_imports(content: str) -> str:
+ """Ensure ConfigTabs/TabItem/NavPath imports are present in the file."""
+ lines = content.split("\n")
+
+ needed_imports = []
+ if "" in content and 'import ConfigTabs' not in content:
+ needed_imports.append(
+ 'import ConfigTabs from "@site/src/components/ConfigTabs";'
+ )
+ if "outpath mapping
+ file_outpaths: dict[Path, Path | None] = {}
+ for f in files:
+ if outdir is not None:
+ try:
+ rel = f.resolve().relative_to(base_dir)
+ except ValueError:
+ rel = Path(f.name)
+ file_outpaths[f] = outdir / rel
+ else:
+ file_outpaths[f] = None
+
+ # Load data sources
+ print("Loading schema from Pydantic models...", file=sys.stderr)
+ schema = load_schema()
+ print("Loading i18n translations...", file=sys.stderr)
+ i18n = load_i18n()
+ print("Loading section configs...", file=sys.stderr)
+ section_configs = load_section_configs()
+ print(f"Processing {len(files)} file(s)...\n", file=sys.stderr)
+
+ if args.check:
+ _run_check(files, schema, i18n, section_configs, args.verbose)
+ elif args.regenerate:
+ _run_regenerate(
+ files, schema, i18n, section_configs,
+ args.dry_run, args.verbose, file_outpaths,
+ )
+ else:
+ _run_inject(
+ files, schema, i18n, section_configs,
+ args.inject, args.verbose, file_outpaths,
+ )
+
+ if outdir is not None:
+ print(f"\nOutput written to: {outdir}", file=sys.stderr)
+
+
+def _run_inject(files, schema, i18n, section_configs, inject, verbose, file_outpaths):
+ """Run default mode: preview or inject bare YAML blocks."""
+ total_stats = {
+ "files": 0,
+ "total_blocks": 0,
+ "config_blocks": 0,
+ "already_wrapped": 0,
+ "generated": 0,
+ "skipped": 0,
+ }
+
+ for filepath in files:
+ stats = process_file(
+ filepath, schema, i18n, section_configs,
+ inject=inject, verbose=verbose,
+ outpath=file_outpaths.get(filepath),
+ )
+
+ total_stats["files"] += 1
+ for key in ["total_blocks", "config_blocks", "already_wrapped",
+ "generated", "skipped"]:
+ total_stats[key] += stats[key]
+
+ if verbose and stats["warnings"]:
+ print(f"\n{filepath}:", file=sys.stderr)
+ for w in stats["warnings"]:
+ print(w, file=sys.stderr)
+
+ print("\n" + "=" * 60, file=sys.stderr)
+ print("Summary:", file=sys.stderr)
+ print(f" Files processed: {total_stats['files']}", file=sys.stderr)
+ print(f" Total YAML blocks: {total_stats['total_blocks']}", file=sys.stderr)
+ print(f" Config blocks: {total_stats['config_blocks']}", file=sys.stderr)
+ print(f" Already wrapped: {total_stats['already_wrapped']}", file=sys.stderr)
+ print(f" Generated: {total_stats['generated']}", file=sys.stderr)
+ print(f" Skipped: {total_stats['skipped']}", file=sys.stderr)
+ print("=" * 60, file=sys.stderr)
+
+
+def _run_regenerate(files, schema, i18n, section_configs, dry_run, verbose, file_outpaths):
+ """Run regenerate mode: update existing ConfigTabs blocks."""
+ total_stats = {
+ "files": 0,
+ "total_blocks": 0,
+ "regenerated": 0,
+ "unchanged": 0,
+ "skipped": 0,
+ }
+
+ for filepath in files:
+ stats = regenerate_file(
+ filepath, schema, i18n, section_configs,
+ dry_run=dry_run, verbose=verbose,
+ outpath=file_outpaths.get(filepath),
+ )
+
+ total_stats["files"] += 1
+ for key in ["total_blocks", "regenerated", "unchanged", "skipped"]:
+ total_stats[key] += stats[key]
+
+ if verbose and stats["warnings"]:
+ print(f"\n{filepath}:", file=sys.stderr)
+ for w in stats["warnings"]:
+ print(w, file=sys.stderr)
+
+ action = "Would regenerate" if dry_run else "Regenerated"
+ print("\n" + "=" * 60, file=sys.stderr)
+ print("Summary:", file=sys.stderr)
+ print(f" Files processed: {total_stats['files']}", file=sys.stderr)
+ print(f" ConfigTabs blocks: {total_stats['total_blocks']}", file=sys.stderr)
+ print(f" {action}: {total_stats['regenerated']}", file=sys.stderr)
+ print(f" Unchanged: {total_stats['unchanged']}", file=sys.stderr)
+ print(f" Skipped: {total_stats['skipped']}", file=sys.stderr)
+ print("=" * 60, file=sys.stderr)
+
+
+def _run_check(files, schema, i18n, section_configs, verbose):
+ """Run check mode: detect drift without modifying files."""
+ total_stats = {
+ "files": 0,
+ "total_blocks": 0,
+ "up_to_date": 0,
+ "drifted": 0,
+ "skipped": 0,
+ }
+
+ for filepath in files:
+ stats = check_file(
+ filepath, schema, i18n, section_configs, verbose=verbose,
+ )
+
+ total_stats["files"] += 1
+ for key in ["total_blocks", "up_to_date", "drifted", "skipped"]:
+ total_stats[key] += stats[key]
+
+ print("\n" + "=" * 60, file=sys.stderr)
+ print("Summary:", file=sys.stderr)
+ print(f" Files processed: {total_stats['files']}", file=sys.stderr)
+ print(f" ConfigTabs blocks: {total_stats['total_blocks']}", file=sys.stderr)
+ print(f" Up to date: {total_stats['up_to_date']}", file=sys.stderr)
+ print(f" Drifted: {total_stats['drifted']}", file=sys.stderr)
+ print(f" Skipped: {total_stats['skipped']}", file=sys.stderr)
+ print("=" * 60, file=sys.stderr)
+
+ if total_stats["drifted"] > 0:
+ print(
+ f"\n{total_stats['drifted']} block(s) have drifted from schema/i18n. "
+ "Run with --regenerate to update.",
+ file=sys.stderr,
+ )
+ sys.exit(1)
+ else:
+ print("\nAll UI tabs are up to date.", file=sys.stderr)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/scripts/lib/__init__.py b/docs/scripts/lib/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/docs/scripts/lib/i18n_loader.py b/docs/scripts/lib/i18n_loader.py
new file mode 100644
index 000000000..7416e86c7
--- /dev/null
+++ b/docs/scripts/lib/i18n_loader.py
@@ -0,0 +1,139 @@
+"""Load i18n translation files for Settings UI field labels."""
+
+import json
+from pathlib import Path
+from typing import Any
+
+# Base path for locale files
+WEB_LOCALES = Path(__file__).resolve().parents[3] / "web" / "public" / "locales" / "en"
+
+
+def load_i18n() -> dict[str, Any]:
+ """Load and merge all relevant i18n files.
+
+ Returns:
+ Dict with keys: "global", "cameras", "settings_menu"
+ """
+ global_path = WEB_LOCALES / "config" / "global.json"
+ cameras_path = WEB_LOCALES / "config" / "cameras.json"
+ settings_path = WEB_LOCALES / "views" / "settings.json"
+
+ result: dict[str, Any] = {}
+
+ with open(global_path) as f:
+ result["global"] = json.load(f)
+
+ with open(cameras_path) as f:
+ result["cameras"] = json.load(f)
+
+ with open(settings_path) as f:
+ settings = json.load(f)
+ result["settings_menu"] = settings.get("menu", {})
+
+ # Build a unified enum value → label lookup from all known sources.
+ # Merges multiple maps so callers don't need to know which file
+ # a particular enum lives in.
+ value_labels: dict[str, str] = {}
+
+ config_form = settings.get("configForm", {})
+
+ # FFmpeg preset labels (preset-vaapi → "VAAPI (Intel/AMD GPU)")
+ value_labels.update(
+ config_form.get("ffmpegArgs", {}).get("presetLabels", {})
+ )
+
+ # Timestamp position (tl → "Top left")
+ value_labels.update(settings.get("timestampPosition", {}))
+
+ # Input role options (detect → "Detect")
+ value_labels.update(
+ config_form.get("inputRoles", {}).get("options", {})
+ )
+
+ # GenAI role options (vision → "Vision")
+ value_labels.update(
+ config_form.get("genaiRoles", {}).get("options", {})
+ )
+
+ result["value_labels"] = value_labels
+
+ return result
+
+
+def get_field_label(
+ i18n: dict[str, Any],
+ section_key: str,
+ field_path: list[str],
+ level: str = "global",
+) -> str | None:
+ """Look up the UI label for a field.
+
+ Args:
+ i18n: Loaded i18n data from load_i18n()
+ section_key: Config section (e.g., "record")
+ field_path: Path within section (e.g., ["continuous", "days"])
+ level: "global" or "cameras"
+
+ Returns:
+ The label string, or None if not found.
+ """
+ source = i18n.get(level, {})
+ node = source.get(section_key, {})
+
+ for key in field_path:
+ if not isinstance(node, dict):
+ return None
+ node = node.get(key, {})
+
+ if isinstance(node, dict):
+ return node.get("label")
+ return None
+
+
+def get_field_description(
+ i18n: dict[str, Any],
+ section_key: str,
+ field_path: list[str],
+ level: str = "global",
+) -> str | None:
+ """Look up the UI description for a field."""
+ source = i18n.get(level, {})
+ node = source.get(section_key, {})
+
+ for key in field_path:
+ if not isinstance(node, dict):
+ return None
+ node = node.get(key, {})
+
+ if isinstance(node, dict):
+ return node.get("description")
+ return None
+
+
+def get_value_label(
+ i18n: dict[str, Any],
+ value: str,
+) -> str | None:
+ """Look up the display label for an enum/option value.
+
+ Args:
+ i18n: Loaded i18n data from load_i18n()
+ value: The raw config value (e.g., "preset-vaapi", "tl")
+
+ Returns:
+ The human-readable label (e.g., "VAAPI (Intel/AMD GPU)"), or None.
+ """
+ return i18n.get("value_labels", {}).get(value)
+
+
+def get_section_label(
+ i18n: dict[str, Any],
+ section_key: str,
+ level: str = "global",
+) -> str | None:
+ """Get the top-level label for a config section."""
+ source = i18n.get(level, {})
+ section = source.get(section_key, {})
+ if isinstance(section, dict):
+ return section.get("label")
+ return None
diff --git a/docs/scripts/lib/nav_map.py b/docs/scripts/lib/nav_map.py
new file mode 100644
index 000000000..80f13d65b
--- /dev/null
+++ b/docs/scripts/lib/nav_map.py
@@ -0,0 +1,120 @@
+"""Map config section keys to Settings UI navigation paths."""
+
+# Derived from web/src/pages/Settings.tsx section mappings
+# and web/public/locales/en/views/settings.json menu labels.
+#
+# Format: section_key -> (group_label, page_label)
+# Navigation path: "Settings > {group_label} > {page_label}"
+
+GLOBAL_NAV: dict[str, tuple[str, str]] = {
+ "detect": ("Global configuration", "Object detection"),
+ "ffmpeg": ("Global configuration", "FFmpeg"),
+ "record": ("Global configuration", "Recording"),
+ "snapshots": ("Global configuration", "Snapshots"),
+ "motion": ("Global configuration", "Motion detection"),
+ "objects": ("Global configuration", "Objects"),
+ "review": ("Global configuration", "Review"),
+ "audio": ("Global configuration", "Audio events"),
+ "live": ("Global configuration", "Live playback"),
+ "timestamp_style": ("Global configuration", "Timestamp style"),
+ "notifications": ("Notifications", "Notifications"),
+}
+
+CAMERA_NAV: dict[str, tuple[str, str]] = {
+ "detect": ("Camera configuration", "Object detection"),
+ "ffmpeg": ("Camera configuration", "FFmpeg"),
+ "record": ("Camera configuration", "Recording"),
+ "snapshots": ("Camera configuration", "Snapshots"),
+ "motion": ("Camera configuration", "Motion detection"),
+ "objects": ("Camera configuration", "Objects"),
+ "review": ("Camera configuration", "Review"),
+ "audio": ("Camera configuration", "Audio events"),
+ "audio_transcription": ("Camera configuration", "Audio transcription"),
+ "notifications": ("Camera configuration", "Notifications"),
+ "live": ("Camera configuration", "Live playback"),
+ "birdseye": ("Camera configuration", "Birdseye"),
+ "face_recognition": ("Camera configuration", "Face recognition"),
+ "lpr": ("Camera configuration", "License plate recognition"),
+ "mqtt": ("Camera configuration", "MQTT"),
+ "onvif": ("Camera configuration", "ONVIF"),
+ "ui": ("Camera configuration", "Camera UI"),
+ "timestamp_style": ("Camera configuration", "Timestamp style"),
+}
+
+ENRICHMENT_NAV: dict[str, tuple[str, str]] = {
+ "semantic_search": ("Enrichments", "Semantic search"),
+ "genai": ("Enrichments", "Generative AI"),
+ "face_recognition": ("Enrichments", "Face recognition"),
+ "lpr": ("Enrichments", "License plate recognition"),
+ "classification": ("Enrichments", "Object classification"),
+ "audio_transcription": ("Enrichments", "Audio transcription"),
+}
+
+SYSTEM_NAV: dict[str, tuple[str, str]] = {
+ "go2rtc_streams": ("System", "go2rtc streams"),
+ "database": ("System", "Database"),
+ "mqtt": ("System", "MQTT"),
+ "tls": ("System", "TLS"),
+ "auth": ("System", "Authentication"),
+ "networking": ("System", "Networking"),
+ "proxy": ("System", "Proxy"),
+ "ui": ("System", "UI"),
+ "logger": ("System", "Logging"),
+ "environment_vars": ("System", "Environment variables"),
+ "telemetry": ("System", "Telemetry"),
+ "birdseye": ("System", "Birdseye"),
+ "detectors": ("System", "Detector hardware"),
+ "model": ("System", "Detection model"),
+}
+
+# All known top-level config section keys
+ALL_CONFIG_SECTIONS = (
+ set(GLOBAL_NAV)
+ | set(CAMERA_NAV)
+ | set(ENRICHMENT_NAV)
+ | set(SYSTEM_NAV)
+ | {"cameras"}
+)
+
+
+def get_nav_path(section_key: str, level: str = "global") -> str | None:
+ """Get the full navigation path for a config section.
+
+ Args:
+ section_key: Config section key (e.g., "record")
+ level: "global", "camera", "enrichment", or "system"
+
+ Returns:
+ NavPath string like "Settings > Global configuration > Recording",
+ or None if not found.
+ """
+ nav_tables = {
+ "global": GLOBAL_NAV,
+ "camera": CAMERA_NAV,
+ "enrichment": ENRICHMENT_NAV,
+ "system": SYSTEM_NAV,
+ }
+
+ table = nav_tables.get(level)
+ if table is None:
+ return None
+
+ entry = table.get(section_key)
+ if entry is None:
+ return None
+
+ group, page = entry
+ return f"Settings > {group} > {page}"
+
+
+def detect_level(section_key: str) -> str:
+ """Detect whether a config section is global, camera, enrichment, or system."""
+ if section_key in SYSTEM_NAV:
+ return "system"
+ if section_key in ENRICHMENT_NAV:
+ return "enrichment"
+ if section_key in GLOBAL_NAV:
+ return "global"
+ if section_key in CAMERA_NAV:
+ return "camera"
+ return "global"
diff --git a/docs/scripts/lib/schema_loader.py b/docs/scripts/lib/schema_loader.py
new file mode 100644
index 000000000..a1e88a989
--- /dev/null
+++ b/docs/scripts/lib/schema_loader.py
@@ -0,0 +1,88 @@
+"""Load JSON schema from Frigate's Pydantic config models."""
+
+from typing import Any
+
+
+def load_schema() -> dict[str, Any]:
+ """Generate and return the full JSON schema for FrigateConfig."""
+ from frigate.config.config import FrigateConfig
+ from frigate.util.schema import get_config_schema
+
+ return get_config_schema(FrigateConfig)
+
+
+def resolve_ref(schema: dict[str, Any], ref: str) -> dict[str, Any]:
+ """Resolve a $ref pointer within the schema."""
+ # ref format: "#/$defs/RecordConfig"
+ parts = ref.lstrip("#/").split("/")
+ node = schema
+ for part in parts:
+ node = node[part]
+ return node
+
+
+def resolve_schema_node(
+ schema: dict[str, Any], node: dict[str, Any]
+) -> dict[str, Any]:
+ """Resolve a schema node, following $ref and allOf if present."""
+ if "$ref" in node:
+ node = resolve_ref(schema, node["$ref"])
+ if "allOf" in node:
+ merged: dict[str, Any] = {}
+ for item in node["allOf"]:
+ resolved = resolve_schema_node(schema, item)
+ merged.update(resolved)
+ return merged
+ return node
+
+
+def get_section_schema(
+ schema: dict[str, Any], section_key: str
+) -> dict[str, Any] | None:
+ """Get the resolved schema for a top-level config section."""
+ props = schema.get("properties", {})
+ if section_key not in props:
+ return None
+ return resolve_schema_node(schema, props[section_key])
+
+
+def get_field_info(
+ schema: dict[str, Any], section_key: str, field_path: list[str]
+) -> dict[str, Any] | None:
+ """Get schema info for a specific field path within a section.
+
+ Args:
+ schema: Full JSON schema
+ section_key: Top-level section (e.g., "record")
+ field_path: List of nested keys (e.g., ["continuous", "days"])
+
+ Returns:
+ Resolved schema node for the field, or None if not found.
+ """
+ section = get_section_schema(schema, section_key)
+ if section is None:
+ return None
+
+ node = section
+ for key in field_path:
+ props = node.get("properties", {})
+ if key not in props:
+ return None
+ node = resolve_schema_node(schema, props[key])
+
+ return node
+
+
+def is_boolean_field(field_schema: dict[str, Any]) -> bool:
+ """Check if a schema node represents a boolean field."""
+ return field_schema.get("type") == "boolean"
+
+
+def is_enum_field(field_schema: dict[str, Any]) -> bool:
+ """Check if a schema node is an enum."""
+ return "enum" in field_schema
+
+
+def is_object_field(field_schema: dict[str, Any]) -> bool:
+ """Check if a schema node is an object with properties."""
+ return field_schema.get("type") == "object" or "properties" in field_schema
diff --git a/docs/scripts/lib/section_config_parser.py b/docs/scripts/lib/section_config_parser.py
new file mode 100644
index 000000000..805ab2145
--- /dev/null
+++ b/docs/scripts/lib/section_config_parser.py
@@ -0,0 +1,130 @@
+"""Parse TypeScript section config files for hidden/advanced field info."""
+
+import json
+import re
+from pathlib import Path
+from typing import Any
+
+SECTION_CONFIGS_DIR = (
+ Path(__file__).resolve().parents[3]
+ / "web"
+ / "src"
+ / "components"
+ / "config-form"
+ / "section-configs"
+)
+
+
+def _extract_string_array(text: str, field_name: str) -> list[str]:
+ """Extract a string array value from TypeScript object literal text."""
+ pattern = rf"{field_name}\s*:\s*\[(.*?)\]"
+ match = re.search(pattern, text, re.DOTALL)
+ if not match:
+ return []
+ content = match.group(1)
+ return re.findall(r'"([^"]*)"', content)
+
+
+def _parse_section_file(filepath: Path) -> dict[str, Any]:
+ """Parse a single section config .ts file."""
+ text = filepath.read_text()
+
+ # Extract base block
+ base_match = re.search(r"base\s*:\s*\{(.*?)\n \}", text, re.DOTALL)
+ base_text = base_match.group(1) if base_match else ""
+
+ # Extract global block
+ global_match = re.search(r"global\s*:\s*\{(.*?)\n \}", text, re.DOTALL)
+ global_text = global_match.group(1) if global_match else ""
+
+ # Extract camera block
+ camera_match = re.search(r"camera\s*:\s*\{(.*?)\n \}", text, re.DOTALL)
+ camera_text = camera_match.group(1) if camera_match else ""
+
+ result: dict[str, Any] = {
+ "fieldOrder": _extract_string_array(base_text, "fieldOrder"),
+ "hiddenFields": _extract_string_array(base_text, "hiddenFields"),
+ "advancedFields": _extract_string_array(base_text, "advancedFields"),
+ }
+
+ # Merge global-level hidden fields
+ global_hidden = _extract_string_array(global_text, "hiddenFields")
+ if global_hidden:
+ result["globalHiddenFields"] = global_hidden
+
+ # Merge camera-level hidden fields
+ camera_hidden = _extract_string_array(camera_text, "hiddenFields")
+ if camera_hidden:
+ result["cameraHiddenFields"] = camera_hidden
+
+ return result
+
+
+def load_section_configs() -> dict[str, dict[str, Any]]:
+ """Load all section configs from TypeScript files.
+
+ Returns:
+ Dict mapping section name to parsed config.
+ """
+ # Read sectionConfigs.ts to get the mapping of section keys to filenames
+ registry_path = SECTION_CONFIGS_DIR.parent / "sectionConfigs.ts"
+ registry_text = registry_path.read_text()
+
+ configs: dict[str, dict[str, Any]] = {}
+
+ for ts_file in SECTION_CONFIGS_DIR.glob("*.ts"):
+ if ts_file.name == "types.ts":
+ continue
+
+ section_name = ts_file.stem
+ configs[section_name] = _parse_section_file(ts_file)
+
+ # Map section config keys from the registry (handles renames like
+ # "timestamp_style: timestampStyle")
+ key_map: dict[str, str] = {}
+ for match in re.finditer(
+ r"(\w+)(?:\s*:\s*\w+)?\s*,", registry_text[registry_text.find("{") :]
+ ):
+ key = match.group(1)
+ key_map[key] = key
+
+ # Handle explicit key mappings like `timestamp_style: timestampStyle`
+ for match in re.finditer(r"(\w+)\s*:\s*(\w+)\s*,", registry_text):
+ key_map[match.group(1)] = match.group(2)
+
+ return configs
+
+
+def get_hidden_fields(
+ configs: dict[str, dict[str, Any]],
+ section_key: str,
+ level: str = "global",
+) -> set[str]:
+ """Get the set of hidden fields for a section at a given level.
+
+ Args:
+ configs: Loaded section configs
+ section_key: Config section name (e.g., "record")
+ level: "global" or "camera"
+
+ Returns:
+ Set of hidden field paths (e.g., {"enabled_in_config", "sync_recordings"})
+ """
+ config = configs.get(section_key, {})
+ hidden = set(config.get("hiddenFields", []))
+
+ if level == "global":
+ hidden.update(config.get("globalHiddenFields", []))
+ elif level == "camera":
+ hidden.update(config.get("cameraHiddenFields", []))
+
+ return hidden
+
+
+def get_advanced_fields(
+ configs: dict[str, dict[str, Any]],
+ section_key: str,
+) -> set[str]:
+ """Get the set of advanced fields for a section."""
+ config = configs.get(section_key, {})
+ return set(config.get("advancedFields", []))
diff --git a/docs/scripts/lib/ui_generator.py b/docs/scripts/lib/ui_generator.py
new file mode 100644
index 000000000..7b9a59286
--- /dev/null
+++ b/docs/scripts/lib/ui_generator.py
@@ -0,0 +1,283 @@
+"""Generate UI tab markdown content from parsed YAML blocks."""
+
+from typing import Any
+
+from .i18n_loader import get_field_description, get_field_label, get_value_label
+from .nav_map import ALL_CONFIG_SECTIONS, detect_level, get_nav_path
+from .schema_loader import is_boolean_field, is_object_field
+from .section_config_parser import get_hidden_fields
+from .yaml_extractor import YamlBlock, get_leaf_paths
+
+
+def _format_value(
+ value: object,
+ field_schema: dict[str, Any] | None,
+ i18n: dict[str, Any] | None = None,
+) -> str:
+ """Format a YAML value for UI display.
+
+ Looks up i18n labels for enum/option values when available.
+ """
+ if field_schema and is_boolean_field(field_schema):
+ return "on" if value else "off"
+ if isinstance(value, bool):
+ return "on" if value else "off"
+ if isinstance(value, list):
+ if len(value) == 0:
+ return "an empty list"
+ items = []
+ for v in value:
+ label = get_value_label(i18n, str(v)) if i18n else None
+ items.append(f"`{label}`" if label else f"`{v}`")
+ return ", ".join(items)
+ if value is None:
+ return "empty"
+
+ # Try i18n label for the raw value (enum translations)
+ if i18n and isinstance(value, str):
+ label = get_value_label(i18n, value)
+ if label:
+ return f"`{label}`"
+
+ return f"`{value}`"
+
+
+def _build_field_label(
+ i18n: dict[str, Any],
+ section_key: str,
+ field_path: list[str],
+ level: str,
+) -> str:
+ """Build the display label for a field using i18n labels.
+
+ For a path like ["continuous", "days"], produces
+ "Continuous retention > Retention days" using the actual i18n labels.
+ """
+ parts: list[str] = []
+
+ for depth in range(len(field_path)):
+ sub_path = field_path[: depth + 1]
+ label = get_field_label(i18n, section_key, sub_path, level)
+
+ if label:
+ parts.append(label)
+ else:
+ # Fallback to title-cased field name
+ parts.append(field_path[depth].replace("_", " ").title())
+
+ return " > ".join(parts)
+
+
+def _is_hidden(
+ field_key: str,
+ full_path: list[str],
+ hidden_fields: set[str],
+) -> bool:
+ """Check if a field should be hidden from UI output."""
+ # Check exact match
+ if field_key in hidden_fields:
+ return True
+
+ # Check dotted path match (e.g., "alerts.enabled_in_config")
+ dotted = ".".join(str(p) for p in full_path)
+ if dotted in hidden_fields:
+ return True
+
+ # Check wildcard patterns (e.g., "filters.*.mask")
+ for pattern in hidden_fields:
+ if "*" in pattern:
+ parts = pattern.split(".")
+ if len(parts) == len(full_path):
+ match = all(
+ p == "*" or p == fp for p, fp in zip(parts, full_path)
+ )
+ if match:
+ return True
+
+ return False
+
+
+def generate_ui_content(
+ block: YamlBlock,
+ schema: dict[str, Any],
+ i18n: dict[str, Any],
+ section_configs: dict[str, dict[str, Any]],
+) -> str | None:
+ """Generate UI tab markdown content for a YAML block.
+
+ Args:
+ block: Parsed YAML block from a doc file
+ schema: Full JSON schema
+ i18n: Loaded i18n translations
+ section_configs: Parsed section config data
+
+ Returns:
+ Generated markdown string for the UI tab, or None if the block
+ can't be converted (not a config block, etc.)
+ """
+ if block.section_key is None:
+ return None
+
+ # Determine which config data to walk
+ if block.is_camera_level:
+ # Camera-level: unwrap cameras.{name}.{section}
+ cam_data = block.parsed.get("cameras", {})
+ cam_name = block.camera_name or next(iter(cam_data), None)
+ if not cam_name:
+ return None
+ inner = cam_data.get(cam_name, {})
+ if not isinstance(inner, dict):
+ return None
+ level = "camera"
+ else:
+ inner = block.parsed
+ # Determine level from section key
+ level = detect_level(block.section_key)
+
+ # Collect sections to process (may span multiple top-level keys)
+ sections_to_process: list[tuple[str, dict]] = []
+ for key in inner:
+ if key in ALL_CONFIG_SECTIONS or key == block.section_key:
+ val = inner[key]
+ if isinstance(val, dict):
+ sections_to_process.append((key, val))
+ else:
+ # Simple scalar at section level (e.g., record.enabled = True)
+ sections_to_process.append((key, {key: val}))
+
+ # If inner is the section itself (e.g., parsed = {"record": {...}})
+ if not sections_to_process and block.section_key in inner:
+ section_data = inner[block.section_key]
+ if isinstance(section_data, dict):
+ sections_to_process = [(block.section_key, section_data)]
+
+ if not sections_to_process:
+ # Try treating the whole inner dict as the section data
+ sections_to_process = [(block.section_key, inner)]
+
+ # Choose pattern based on whether YAML has comments (descriptive) or values
+ use_table = block.has_comments
+
+ lines: list[str] = []
+ step_num = 1
+
+ for section_key, section_data in sections_to_process:
+ # Get navigation path
+ i18n_level = "cameras" if level == "camera" else "global"
+ nav_path = get_nav_path(section_key, level)
+ if nav_path is None:
+ # Try global as fallback
+ nav_path = get_nav_path(section_key, "global")
+ if nav_path is None:
+ continue
+
+ # Get hidden fields for this section
+ hidden = get_hidden_fields(section_configs, section_key, level)
+
+ # Get leaf paths from the YAML data
+ leaves = get_leaf_paths(section_data)
+
+ # Filter out hidden fields
+ visible_leaves: list[tuple[tuple[str, ...], object]] = []
+ for path, value in leaves:
+ path_list = list(path)
+ if not _is_hidden(path_list[-1], path_list, hidden):
+ visible_leaves.append((path, value))
+
+ if not visible_leaves:
+ continue
+
+ if use_table:
+ # Pattern A: Field table with descriptions
+ lines.append(
+                f'Navigate to <NavPath path="{nav_path}" />.'
+ )
+ lines.append("")
+ lines.append("| Field | Description |")
+ lines.append("|-------|-------------|")
+
+ for path, _value in visible_leaves:
+ path_list = list(path)
+ label = _build_field_label(
+ i18n, section_key, path_list, i18n_level
+ )
+ desc = get_field_description(
+ i18n, section_key, path_list, i18n_level
+ )
+ if not desc:
+ desc = ""
+ lines.append(f"| **{label}** | {desc} |")
+ else:
+ # Pattern B: Set instructions
+ multi_section = len(sections_to_process) > 1
+
+ if multi_section:
+ camera_note = ""
+ if block.is_camera_level:
+ camera_note = (
+ " and select your camera"
+ )
+ lines.append(
+                    f'{step_num}. Navigate to <NavPath path="{nav_path}" />{camera_note}.'
+ )
+ else:
+ if block.is_camera_level:
+ lines.append(
+                        f'1. Navigate to <NavPath path="{nav_path}" /> and select your camera.'
+ )
+ else:
+ lines.append(
+                        f'Navigate to <NavPath path="{nav_path}" />.'
+ )
+ lines.append("")
+
+ from .schema_loader import get_field_info
+
+ for path, value in visible_leaves:
+ path_list = list(path)
+ label = _build_field_label(
+ i18n, section_key, path_list, i18n_level
+ )
+ field_info = get_field_info(schema, section_key, path_list)
+ formatted = _format_value(value, field_info, i18n)
+
+ if multi_section or block.is_camera_level:
+ lines.append(f" - Set **{label}** to {formatted}")
+ else:
+ lines.append(f"- Set **{label}** to {formatted}")
+
+ step_num += 1
+
+ if not lines:
+ return None
+
+ return "\n".join(lines)
+
+
+def wrap_with_config_tabs(ui_content: str, yaml_raw: str, highlight: str | None = None) -> str:
+ """Wrap UI content and YAML in ConfigTabs markup.
+
+ Args:
+ ui_content: Generated UI tab markdown
+ yaml_raw: Original YAML text
+ highlight: Optional highlight spec (e.g., "{3-4}")
+
+ Returns:
+ Full ConfigTabs MDX block
+ """
+ highlight_str = f" {highlight}" if highlight else ""
+
+    return f"""
+<ConfigTabs>
+<TabItem value="ui" label="UI">
+{ui_content}
+
+</TabItem>
+<TabItem value="yaml" label="YAML">
+
+```yaml{highlight_str}
+{yaml_raw}
+```
+</TabItem>
+</ConfigTabs>
+"""
diff --git a/docs/scripts/lib/yaml_extractor.py b/docs/scripts/lib/yaml_extractor.py
new file mode 100644
index 000000000..c01451cfc
--- /dev/null
+++ b/docs/scripts/lib/yaml_extractor.py
@@ -0,0 +1,283 @@
+"""Extract YAML code blocks from markdown documentation files."""
+
+import re
+from dataclasses import dataclass, field
+
+import yaml
+
+
+@dataclass
+class YamlBlock:
+ """A YAML code block extracted from a markdown file."""
+
+ raw: str # Original YAML text
+ parsed: dict # Parsed YAML content
+ line_start: int # Line number in the markdown file (1-based)
+ line_end: int # End line number
+ highlight: str | None = None # Highlight spec (e.g., "{3-4}")
+ has_comments: bool = False # Whether the YAML has inline comments
+ inside_config_tabs: bool = False # Already wrapped in ConfigTabs
+ section_key: str | None = None # Detected top-level config section
+ is_camera_level: bool = False # Whether this is camera-level config
+ camera_name: str | None = None # Camera name if camera-level
+ config_keys: list[str] = field(
+ default_factory=list
+ ) # Top-level keys in the YAML
+
+
+def extract_yaml_blocks(content: str) -> list[YamlBlock]:
+ """Extract all YAML fenced code blocks from markdown content.
+
+ Args:
+ content: Markdown file content
+
+ Returns:
+ List of YamlBlock instances
+ """
+ blocks: list[YamlBlock] = []
+ lines = content.split("\n")
+ i = 0
+ in_config_tabs = False
+
+ while i < len(lines):
+ line = lines[i]
+
+ # Track ConfigTabs context
+        if "<ConfigTabs" in line:
+ in_config_tabs = True
+        elif "</ConfigTabs>" in line:
+ in_config_tabs = False
+
+ # Look for YAML fence opening
+ fence_match = re.match(r"^```yaml\s*(\{[^}]*\})?\s*$", line)
+ if fence_match:
+ highlight = fence_match.group(1)
+ start_line = i + 1 # 1-based
+ yaml_lines: list[str] = []
+ i += 1
+
+ # Collect until closing fence
+ while i < len(lines) and not lines[i].startswith("```"):
+ yaml_lines.append(lines[i])
+ i += 1
+
+ end_line = i + 1 # 1-based, inclusive of closing fence
+ raw = "\n".join(yaml_lines)
+
+ # Check for inline comments
+ has_comments = any(
+ re.search(r"#\s*(<-|[A-Za-z])", yl) for yl in yaml_lines
+ )
+
+ # Parse YAML
+ try:
+ parsed = yaml.safe_load(raw)
+ except yaml.YAMLError:
+ i += 1
+ continue
+
+ if not isinstance(parsed, dict):
+ i += 1
+ continue
+
+ # Detect config section and level
+ config_keys = list(parsed.keys())
+ section_key = None
+ is_camera = False
+ camera_name = None
+
+ if "cameras" in parsed and isinstance(parsed["cameras"], dict):
+ is_camera = True
+ cam_entries = parsed["cameras"]
+ if len(cam_entries) == 1:
+ camera_name = list(cam_entries.keys())[0]
+ inner = cam_entries[camera_name]
+ if isinstance(inner, dict):
+ inner_keys = list(inner.keys())
+ if len(inner_keys) >= 1:
+ section_key = inner_keys[0]
+ elif len(config_keys) >= 1:
+ section_key = config_keys[0]
+
+ blocks.append(
+ YamlBlock(
+ raw=raw,
+ parsed=parsed,
+ line_start=start_line,
+ line_end=end_line,
+ highlight=highlight,
+ has_comments=has_comments,
+ inside_config_tabs=in_config_tabs,
+ section_key=section_key,
+ is_camera_level=is_camera,
+ camera_name=camera_name,
+ config_keys=config_keys,
+ )
+ )
+
+ i += 1
+
+ return blocks
+
+
+@dataclass
+class ConfigTabsBlock:
+ """An existing ConfigTabs block in a markdown file."""
+
+    line_start: int  # 1-based line of <ConfigTabs>
+    line_end: int  # 1-based line of </ConfigTabs>
+ ui_content: str # Content inside the UI TabItem
+ yaml_block: YamlBlock # The YAML block inside the YAML TabItem
+ raw_text: str # Full raw text of the ConfigTabs block
+
+
+def extract_config_tabs_blocks(content: str) -> list[ConfigTabsBlock]:
+ """Extract existing ConfigTabs blocks from markdown content.
+
+ Parses the structure:
+        <ConfigTabs>
+        <TabItem value="ui" label="UI">
+        ...ui content...
+        </TabItem>
+        <TabItem value="yaml" label="YAML">
+        ```yaml
+        ...yaml...
+        ```
+        </TabItem>
+        </ConfigTabs>
+
+ Returns:
+ List of ConfigTabsBlock instances
+ """
+ blocks: list[ConfigTabsBlock] = []
+ lines = content.split("\n")
+ i = 0
+
+ while i < len(lines):
+        if "<ConfigTabs" not in lines[i]:
+ i += 1
+ continue
+
+ block_start = i # 0-based
+
+        # Find </ConfigTabs>
+ j = i + 1
+        while j < len(lines) and "</ConfigTabs>" not in lines[j]:
+ j += 1
+
+ if j >= len(lines):
+ i += 1
+ continue
+
+        block_end = j  # 0-based, line with </ConfigTabs>
+ block_text = "\n".join(lines[block_start : block_end + 1])
+
+        # Extract UI content (between <TabItem value="ui"> and </TabItem>)
+ ui_match = re.search(
+            r'<TabItem value="ui"[^>]*>\s*\n(.*?)\n\s*</TabItem>',
+ block_text,
+ re.DOTALL,
+ )
+ ui_content = ui_match.group(1).strip() if ui_match else ""
+
+ # Extract YAML block from inside the yaml TabItem
+ yaml_tab_match = re.search(
+            r'<TabItem value="yaml"[^>]*>\s*\n(.*?)\n\s*</TabItem>',
+ block_text,
+ re.DOTALL,
+ )
+
+ yaml_block = None
+ if yaml_tab_match:
+ yaml_tab_text = yaml_tab_match.group(1)
+ fence_match = re.search(
+ r"```yaml\s*(\{[^}]*\})?\s*\n(.*?)\n```",
+ yaml_tab_text,
+ re.DOTALL,
+ )
+ if fence_match:
+ highlight = fence_match.group(1)
+ yaml_raw = fence_match.group(2)
+ has_comments = bool(
+ re.search(r"#\s*(<-|[A-Za-z])", yaml_raw)
+ )
+
+ try:
+ parsed = yaml.safe_load(yaml_raw)
+ except yaml.YAMLError:
+ parsed = {}
+
+ if isinstance(parsed, dict):
+ config_keys = list(parsed.keys())
+ section_key = None
+ is_camera = False
+ camera_name = None
+
+ if "cameras" in parsed and isinstance(
+ parsed["cameras"], dict
+ ):
+ is_camera = True
+ cam_entries = parsed["cameras"]
+ if len(cam_entries) == 1:
+ camera_name = list(cam_entries.keys())[0]
+ inner = cam_entries[camera_name]
+ if isinstance(inner, dict):
+ inner_keys = list(inner.keys())
+ if len(inner_keys) >= 1:
+ section_key = inner_keys[0]
+ elif len(config_keys) >= 1:
+ section_key = config_keys[0]
+
+ yaml_block = YamlBlock(
+ raw=yaml_raw,
+ parsed=parsed,
+ line_start=block_start + 1,
+ line_end=block_end + 1,
+ highlight=highlight,
+ has_comments=has_comments,
+ inside_config_tabs=True,
+ section_key=section_key,
+ is_camera_level=is_camera,
+ camera_name=camera_name,
+ config_keys=config_keys,
+ )
+
+ if yaml_block:
+ blocks.append(
+ ConfigTabsBlock(
+ line_start=block_start + 1, # 1-based
+ line_end=block_end + 1, # 1-based
+ ui_content=ui_content,
+ yaml_block=yaml_block,
+ raw_text=block_text,
+ )
+ )
+
+ i = j + 1
+
+ return blocks
+
+
+def get_leaf_paths(
+ data: dict, prefix: tuple[str, ...] = ()
+) -> list[tuple[tuple[str, ...], object]]:
+ """Walk a parsed YAML dict and return all leaf key paths with values.
+
+ Args:
+ data: Parsed YAML dict
+ prefix: Current key path prefix
+
+ Returns:
+ List of (key_path_tuple, value) pairs.
+ e.g., [( ("record", "continuous", "days"), 3 ), ...]
+ """
+ results: list[tuple[tuple[str, ...], object]] = []
+
+ for key, value in data.items():
+ path = prefix + (str(key),)
+ if isinstance(value, dict):
+ results.extend(get_leaf_paths(value, path))
+ else:
+ results.append((path, value))
+
+ return results
diff --git a/docs/src/components/ConfigTabs/index.jsx b/docs/src/components/ConfigTabs/index.jsx
new file mode 100644
index 000000000..0fbc51897
--- /dev/null
+++ b/docs/src/components/ConfigTabs/index.jsx
@@ -0,0 +1,34 @@
+import React, { Children, cloneElement } from "react";
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+export default function ConfigTabs({ children }) {
+ const wrapped = Children.map(children, (child) => {
+ if (child?.props?.value === "ui") {
+ return cloneElement(child, {
+ className: "config-tab-ui",
+ });
+ }
+ if (child?.props?.value === "yaml") {
+ return cloneElement(child, {
+ className: "config-tab-yaml",
+ });
+ }
+ return child;
+ });
+
+  return (
+    <div className="config-tabs-wrapper">
+      <Tabs
+        groupId="config-method"
+        queryString
+        values={[
+          { label: "UI", value: "ui" },
+          { label: "YAML", value: "yaml" },
+        ]}
+      >
+        {wrapped}
+      </Tabs>
+    </div>
+  );
+}
diff --git a/docs/src/components/NavPath/index.jsx b/docs/src/components/NavPath/index.jsx
new file mode 100644
index 000000000..e5ec86bdc
--- /dev/null
+++ b/docs/src/components/NavPath/index.jsx
@@ -0,0 +1,30 @@
+import React from "react";
+
+export default function NavPath({ path }) {
+  const segments = path.split(" > ");
+  return (
+    <span className="navpath">
+      {segments.map((seg, i) => (
+        <React.Fragment key={i}>
+          {i > 0 && (
+            <span className="navpath-separator" aria-hidden="true">
+              →
+            </span>
+          )}
+          <span className="navpath-step">{seg}</span>
+        </React.Fragment>
+      ))}
+    </span>
+  );
+}
diff --git a/docs/src/css/custom.css b/docs/src/css/custom.css
index 5d8fc5055..6d9b7c82f 100644
--- a/docs/src/css/custom.css
+++ b/docs/src/css/custom.css
@@ -241,4 +241,50 @@
margin: 0 calc(-1 * var(--ifm-pre-padding));
padding: 0 var(--ifm-pre-padding);
border-left: 3px solid #ff000080;
+}
+
+/* ConfigTabs wrapper */
+.config-tabs-wrapper {
+ border: 1px solid var(--ifm-color-emphasis-300);
+ border-radius: 8px;
+ overflow: hidden;
+ margin-bottom: 16px;
+}
+
+.config-tabs-wrapper .tabs-container {
+ margin-bottom: 0 !important;
+}
+
+.config-tabs-wrapper .tabs {
+ background: var(--ifm-color-emphasis-100);
+ border-bottom: 1px solid var(--ifm-color-emphasis-300);
+ margin-bottom: 0;
+ padding: 0 12px;
+}
+
+.config-tabs-wrapper .tabs__item {
+ padding: 8px 16px;
+ border-radius: 0;
+}
+
+.config-tabs-wrapper .tabs__item--active {
+ border-bottom-color: var(--ifm-color-primary);
+}
+
+.config-tabs-wrapper .config-tab-ui {
+ padding: 4px 16px 16px;
+}
+
+.config-tabs-wrapper .config-tab-ui > :last-child {
+ margin-bottom: 0;
+}
+
+.config-tabs-wrapper div[class*="codeBlockContainer"] {
+ border-top-left-radius: 0;
+ border-top-right-radius: 0;
+ margin: 0;
+}
+
+.config-tabs-wrapper .tabs-container > .margin-top--md:has(.config-tab-yaml:not([hidden])) {
+ margin-top: 0 !important;
}
\ No newline at end of file