diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 05a75ca5f..81d448f25 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,6 +1,7 @@ _Please read the [contributing guidelines](https://github.com/blakeblackshear/frigate/blob/dev/CONTRIBUTING.md) before submitting a PR._ ## Proposed change + \s*)?([\s\S]*?)(?=\n## )/ + ); + const proposedContent = proposedChangeMatch + ? proposedChangeMatch[1].trim() + : ''; + if (!proposedContent) { + errors.push( + 'The **Proposed change** section is empty. Please describe what this PR does.' + ); + } + + // Check that at least one "Type of change" checkbox is checked + const typeSection = body.match( + /## Type of change\s*([\s\S]*?)(?=\n## )/ + ); + if (typeSection && !/- \[x\]/i.test(typeSection[1])) { + errors.push( + 'No **Type of change** selected. Please check at least one option.' + ); + } + + // Check that at least one AI disclosure checkbox is checked + const aiSection = body.match( + /## AI disclosure\s*([\s\S]*?)(?=\n## )/ + ); + if (aiSection && !/- \[x\]/i.test(aiSection[1])) { + errors.push( + 'No **AI disclosure** option selected. Please indicate whether AI tools were used.' + ); + } + + // Check that at least one checklist item is checked + const checklistSection = body.match( + /## Checklist\s*([\s\S]*?)$/ + ); + if (checklistSection && !/- \[x\]/i.test(checklistSection[1])) { + errors.push( + 'No **Checklist** items checked. Please review and check the items that apply.' 
+ ); + } + + if (errors.length === 0) { + console.log('PR description passes template validation.'); + return; + } + + const prNumber = context.payload.pull_request.number; + const message = [ + '## PR template validation failed', + '', + 'This PR was automatically closed because the description does not follow the [pull request template](https://github.com/blakeblackshear/frigate/blob/dev/.github/pull_request_template.md).', + '', + '**Issues found:**', + ...errors.map((e) => `- ${e}`), + '', + 'Please update your PR description to include all required sections from the template, then reopen this PR.', + '', + '> If you used an AI tool to generate this PR, please see our [contributing guidelines](https://github.com/blakeblackshear/frigate/blob/dev/CONTRIBUTING.md) for details.', + ].join('\n'); + + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + body: message, + }); + + await github.rest.pulls.update({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: prNumber, + state: 'closed', + }); + + core.setFailed('PR description does not follow the template.'); diff --git a/docker/main/rootfs/usr/local/nginx/conf/nginx.conf b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf index bcd3c6bda..d954bdcd5 100644 --- a/docker/main/rootfs/usr/local/nginx/conf/nginx.conf +++ b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf @@ -227,16 +227,6 @@ http { include proxy.conf; } - # frontend uses this to fetch the version - location /api/go2rtc/api { - include auth_request.conf; - limit_except GET { - deny all; - } - proxy_pass http://go2rtc/api; - include proxy.conf; - } - # integration uses this to add webrtc candidate location /api/go2rtc/webrtc { include auth_request.conf; diff --git a/docs/.gitignore b/docs/.gitignore index b2d6de306..6e46bafc0 100644 --- a/docs/.gitignore +++ b/docs/.gitignore @@ -7,6 +7,7 @@ # Generated files .docusaurus .cache-loader +docs/integrations/api/ # 
Misc .DS_Store diff --git a/docs/docs/configuration/advanced.md b/docs/docs/configuration/advanced.md index 526207823..e6de72593 100644 --- a/docs/docs/configuration/advanced.md +++ b/docs/docs/configuration/advanced.md @@ -4,12 +4,29 @@ title: Advanced Options sidebar_label: Advanced Options --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + ### Logging #### Frigate `logger` Change the default log level for troubleshooting purposes. + + + +Navigate to . + +| Field | Description | +| ------------------------- | ------------------------------------------------------- | +| **Logging level** | The default log level for all modules (default: `info`) | +| **Per-process log level** | Override the log level for specific modules | + + + + ```yaml logger: # Optional: default log level (default: shown below) @@ -19,6 +36,9 @@ logger: frigate.mqtt: error ``` + + + Available log levels are: `debug`, `info`, `warning`, `error`, `critical` Examples of available modules are: @@ -48,7 +68,20 @@ This section can be used to set environment variables for those unable to modify Variables prefixed with `FRIGATE_` can be referenced in config fields that support environment variable substitution (such as MQTT host and credentials, camera stream URLs, and ONVIF host and credentials) using the `{FRIGATE_VARIABLE_NAME}` syntax. -Example: + + + +Navigate to to add or edit environment variables. + +| Field | Description | +| --------- | --------------------------------------------------------- | +| **Key** | The environment variable name (e.g., `FRIGATE_MQTT_USER`) | +| **Value** | The value for the variable | + +Variables defined here can be referenced elsewhere in your configuration using the `{FRIGATE_VARIABLE_NAME}` syntax. 
+ + + ```yaml environment_vars: @@ -61,10 +94,27 @@ mqtt: password: "{FRIGATE_MQTT_PASSWORD}" ``` + + + #### TensorFlow Thread Configuration If you encounter thread creation errors during classification model training, you can limit TensorFlow's thread usage: + + + +Navigate to and add the following variables: + +| Variable | Description | +| --------------------------------- | ---------------------------------------------- | +| `TF_INTRA_OP_PARALLELISM_THREADS` | Threads within operations (`0` = use default) | +| `TF_INTER_OP_PARALLELISM_THREADS` | Threads between operations (`0` = use default) | +| `TF_DATASET_THREAD_POOL_SIZE` | Data pipeline threads (`0` = use default) | + + + + ```yaml environment_vars: TF_INTRA_OP_PARALLELISM_THREADS: "2" # Threads within operations (0 = use default) @@ -72,19 +122,35 @@ environment_vars: TF_DATASET_THREAD_POOL_SIZE: "2" # Data pipeline threads (0 = use default) ``` + + + ### `database` Tracked object and recording information is managed in a sqlite database at `/config/frigate.db`. If that database is deleted, recordings will be orphaned and will need to be cleaned up manually. They also won't show up in the Media Browser within Home Assistant. -If you are storing your database on a network share (SMB, NFS, etc), you may get a `database is locked` error message on startup. You can customize the location of the database in the config if necessary. +If you are storing your database on a network share (SMB, NFS, etc), you may get a `database is locked` error message on startup. You can customize the location of the database if necessary. This may need to be in a custom location if network storage is used for the media folder. + + + +Navigate to . + +- Set **Database path** to the custom path for the Frigate database file (default: `/config/frigate.db`) + + + + ```yaml database: path: /path/to/frigate.db ``` + + + ### `model` If using a custom model, the width and height will need to be specified. 
@@ -103,6 +169,22 @@ Custom models may also require different input tensor formats. The colorspace co | "nhwc" | | "nchw" | + + + +Navigate to to configure the model path, dimensions, and input format. + +| Field | Description | +| --------------------------------------------- | ------------------------------------ | +| **Custom object detector model path** | Path to the custom model file | +| **Object detection model input width** | Model input width (default: 320) | +| **Object detection model input height** | Model input height (default: 320) | +| **Advanced > Model Input Tensor Shape** | Input tensor shape: `nhwc` or `nchw` | +| **Advanced > Model Input Pixel Color Format** | Pixel format: `rgb`, `bgr`, or `yuv` | + + + + ```yaml # Optional: model config model: @@ -113,6 +195,9 @@ model: input_pixel_format: "bgr" ``` + + + #### `labelmap` :::warning @@ -163,7 +248,15 @@ services: ### Enabling IPv6 -IPv6 is disabled by default, to enable IPv6 modify your Frigate configuration as follows: +IPv6 is disabled by default. Enable it in the Frigate configuration. + + + + +Navigate to and expand **IPv6 configuration**, then enable **Enable IPv6**. + + + ```yaml networking: @@ -171,11 +264,25 @@ networking: enabled: True ``` + + + ### Listen on different ports -You can change the ports Nginx uses for listening using Frigate's configuration file. The internal port (unauthenticated) and external port (authenticated) can be changed independently. You can also specify an IP address using the format `ip:port` if you wish to bind the port to a specific interface. This may be useful for example to prevent exposing the internal port outside the container. +You can change the ports Nginx uses for listening. The internal port (unauthenticated) and external port (authenticated) can be changed independently. You can also specify an IP address using the format `ip:port` if you wish to bind the port to a specific interface. 
This may be useful for example to prevent exposing the internal port outside the container. -For example: + + + +Navigate to to configure the listen ports. + +| Field | Description | +| ----------------- | --------------------------------------------------------- | +| **Internal port** | The unauthenticated listen address/port (default: `5000`) | +| **External port** | The authenticated listen address/port (default: `8971`) | + + + ```yaml networking: @@ -184,6 +291,9 @@ networking: external: 8971 ``` + + + :::warning This setting is for advanced users. For the majority of use cases it's recommended to change the `ports` section of your Docker compose file or use the Docker `run` `--publish` option instead, e.g. `-p 443:8971`. Changing Frigate's ports may break some integrations. diff --git a/docs/docs/configuration/audio_detectors.md b/docs/docs/configuration/audio_detectors.md index 957667914..bb646e677 100644 --- a/docs/docs/configuration/audio_detectors.md +++ b/docs/docs/configuration/audio_detectors.md @@ -3,6 +3,10 @@ id: audio_detectors title: Audio Detectors --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + Frigate provides a builtin audio detector which runs on the CPU. Compared to object detection in images, audio detection is a relatively lightweight operation so the only option is to run the detection on a CPU. ## Configuration @@ -11,7 +15,17 @@ Audio events work by detecting a type of audio and creating an event, the event ### Enabling Audio Events -Audio events can be enabled for all cameras or only for specific cameras. +Audio events can be enabled globally or for specific cameras. + + + + +**Global:** Navigate to and set **Enable audio detection** to on. + +**Per-camera:** Navigate to and set **Enable audio detection** to on for the desired camera. 
+ + + ```yaml @@ -26,6 +40,9 @@ cameras: enabled: True # <- enable audio events for the front_camera ``` + + + If you are using multiple streams then you must set the `audio` role on the stream that is going to be used for audio detection, this can be any stream but the stream must have audio included. :::note @@ -34,6 +51,14 @@ The ffmpeg process for capturing audio will be a separate connection to the came ::: + + + +Navigate to and add an input with the `audio` role pointing to a stream that includes audio. + + + + ```yaml cameras: front_camera: @@ -48,6 +73,9 @@ cameras: - detect ``` + + + ### Configuring Minimum Volume The audio detector uses volume levels in the same way that motion in a camera feed is used for object detection. This means that Frigate will not run audio detection unless the audio volume is above the configured level in order to reduce resource usage. Audio levels can vary widely between camera models so it is important to run tests to see what volume levels are. The Debug view in the Frigate UI has an Audio tab for cameras that have the `audio` role assigned where a graph and the current levels are is displayed. The `min_volume` parameter should be set to the minimum the `RMS` level required to run audio detection. @@ -62,6 +90,17 @@ Volume is considered motion for recordings, this means when the `record -> retai The included audio model has over [500 different types](https://github.com/blakeblackshear/frigate/blob/dev/audio-labelmap.txt) of audio that can be detected, many of which are not practical. By default `bark`, `fire_alarm`, `scream`, `speech`, and `yell` are enabled but these can be customized. + + + +Navigate to . 
+ +- Set **Enable audio detection** to on +- Set **Listen types** to include the audio types you want to detect + + + + ```yaml audio: enabled: True @@ -73,15 +112,32 @@ audio: - yell ``` + + + ### Audio Transcription -Frigate supports fully local audio transcription using either `sherpa-onnx` or OpenAI’s open-source Whisper models via `faster-whisper`. The goal of this feature is to support Semantic Search for `speech` audio events. Frigate is not intended to act as a continuous, fully-automatic speech transcription service — automatically transcribing all speech (or queuing many audio events for transcription) requires substantial CPU (or GPU) resources and is impractical on most systems. For this reason, transcriptions for events are initiated manually from the UI or the API rather than being run continuously in the background. +Frigate supports fully local audio transcription using either `sherpa-onnx` or OpenAI's open-source Whisper models via `faster-whisper`. The goal of this feature is to support Semantic Search for `speech` audio events. Frigate is not intended to act as a continuous, fully-automatic speech transcription service — automatically transcribing all speech (or queuing many audio events for transcription) requires substantial CPU (or GPU) resources and is impractical on most systems. For this reason, transcriptions for events are initiated manually from the UI or the API rather than being run continuously in the background. Transcription accuracy also depends heavily on the quality of your camera's microphone and recording conditions. Many cameras use inexpensive microphones, and distance to the speaker, low audio bitrate, or background noise can significantly reduce transcription quality. If you need higher accuracy, more robust long-running queues, or large-scale automatic transcription, consider using the HTTP API in combination with an automation platform and a cloud transcription service. 
#### Configuration -To enable transcription, enable it in your config. Note that audio detection must also be enabled as described above in order to use audio transcription features. +To enable transcription, configure it globally and optionally disable for specific cameras. Audio detection must also be enabled as described above. + + + + +**Global:** Navigate to . + +- Set **Enable audio transcription** to on +- Set **Transcription device** to the desired device +- Set **Model size** to the desired size + +**Per-camera:** Navigate to to enable or disable transcription for a specific camera. + + + ```yaml audio_transcription: @@ -100,6 +156,9 @@ cameras: enabled: False ``` + + + :::note Audio detection must be enabled and configured as described above in order to use audio transcription features. @@ -146,7 +205,7 @@ If you have CUDA hardware, you can experiment with the `large` `whisper` model o Any `speech` events in Explore can be transcribed and/or translated through the Transcribe button in the Tracked Object Details pane. -In order to use transcription and translation for past events, you must enable audio detection and define `speech` as an audio type to listen for in your config. To have `speech` events translated into the language of your choice, set the `language` config parameter with the correct [language code](https://github.com/openai/whisper/blob/main/whisper/tokenizer.py#L10). +In order to use transcription and translation for past events, you must enable audio detection and define `speech` as an audio type to listen for. To have `speech` events translated into the language of your choice, set the `language` config parameter with the correct [language code](https://github.com/openai/whisper/blob/main/whisper/tokenizer.py#L10). The transcribed/translated speech will appear in the description box in the Tracked Object Details pane. 
If Semantic Search is enabled, embeddings are generated for the transcription text and are fully searchable using the description search type. @@ -162,16 +221,16 @@ Recorded `speech` events will always use a `whisper` model, regardless of the `m 1. Why doesn't Frigate automatically transcribe all `speech` events? - Frigate does not implement a queue mechanism for speech transcription, and adding one is not trivial. A proper queue would need backpressure, prioritization, memory/disk buffering, retry logic, crash recovery, and safeguards to prevent unbounded growth when events outpace processing. That’s a significant amount of complexity for a feature that, in most real-world environments, would mostly just churn through low-value noise. + Frigate does not implement a queue mechanism for speech transcription, and adding one is not trivial. A proper queue would need backpressure, prioritization, memory/disk buffering, retry logic, crash recovery, and safeguards to prevent unbounded growth when events outpace processing. That's a significant amount of complexity for a feature that, in most real-world environments, would mostly just churn through low-value noise. Because transcription is **serialized (one event at a time)** and speech events can be generated far faster than they can be processed, an auto-transcribe toggle would very quickly create an ever-growing backlog and degrade core functionality. For the amount of engineering and risk involved, it adds **very little practical value** for the majority of deployments, which are often on low-powered, edge hardware. - If you hear speech that’s actually important and worth saving/indexing for the future, **just press the transcribe button in Explore** on that specific `speech` event - that keeps things explicit, reliable, and under your control. 
+ If you hear speech that's actually important and worth saving/indexing for the future, **just press the transcribe button in Explore** on that specific `speech` event - that keeps things explicit, reliable, and under your control. Other options are being considered for future versions of Frigate to add transcription options that support external `whisper` Docker containers. A single transcription service could then be shared by Frigate and other applications (for example, Home Assistant Voice), and run on more powerful machines when available. 2. Why don't you save live transcription text and use that for `speech` events? - There’s no guarantee that a `speech` event is even created from the exact audio that went through the transcription model. Live transcription and `speech` event creation are **separate, asynchronous processes**. Even when both are correctly configured, trying to align the **precise start and end time of a speech event** with whatever audio the model happened to be processing at that moment is unreliable. + There's no guarantee that a `speech` event is even created from the exact audio that went through the transcription model. Live transcription and `speech` event creation are **separate, asynchronous processes**. Even when both are correctly configured, trying to align the **precise start and end time of a speech event** with whatever audio the model happened to be processing at that moment is unreliable. - Automatically persisting that data would often result in **misaligned, partial, or irrelevant transcripts**, while still incurring all of the CPU, storage, and privacy costs of transcription. That’s why Frigate treats transcription as an **explicit, user-initiated action** rather than an automatic side-effect of every `speech` event. + Automatically persisting that data would often result in **misaligned, partial, or irrelevant transcripts**, while still incurring all of the CPU, storage, and privacy costs of transcription. 
That's why Frigate treats transcription as an **explicit, user-initiated action** rather than an automatic side-effect of every `speech` event. diff --git a/docs/docs/configuration/authentication.md b/docs/docs/configuration/authentication.md index 694c4bada..0d80d80ce 100644 --- a/docs/docs/configuration/authentication.md +++ b/docs/docs/configuration/authentication.md @@ -3,6 +3,10 @@ id: authentication title: Authentication --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + # Authentication Frigate stores user information in its database. Password hashes are generated using industry standard PBKDF2-SHA256 with 600,000 iterations. Upon successful login, a JWT token is issued with an expiration date and set as a cookie. The cookie is refreshed as needed automatically. This JWT token can also be passed in the Authorization header as a bearer token. @@ -22,13 +26,26 @@ On startup, an admin user and password are generated and printed in the logs. It ## Resetting admin password -In the event that you are locked out of your instance, you can tell Frigate to reset the admin password and print it in the logs on next startup using the `reset_admin_password` setting in your config file. +In the event that you are locked out of your instance, you can tell Frigate to reset the admin password and print it in the logs on next startup. + + + + +Navigate to . + +- Set **Reset admin password** to on to reset the admin password and print it in the logs on next startup + + + ```yaml auth: reset_admin_password: true ``` + + + ## Password guidance Constructing secure passwords and managing them properly is important. Frigate requires a minimum length of 12 characters. For guidance on password standards see [NIST SP 800-63B](https://pages.nist.gov/800-63-3/sp800-63b.html). 
To learn what makes a password truly secure, read this [article](https://medium.com/peerio/how-to-build-a-billion-dollar-password-3d92568d9277). @@ -47,7 +64,20 @@ Restarting Frigate will reset the rate limits. If you are running Frigate behind a proxy, you will want to set `trusted_proxies` or these rate limits will apply to the upstream proxy IP address. This means that a brute force attack will rate limit login attempts from other devices and could temporarily lock you out of your instance. In order to ensure rate limits only apply to the actual IP address where the requests are coming from, you will need to list the upstream networks that you want to trust. These trusted proxies are checked against the `X-Forwarded-For` header when looking for the IP address where the request originated. -If you are running a reverse proxy in the same Docker Compose file as Frigate, here is an example of how your auth config might look: +If you are running a reverse proxy in the same Docker Compose file as Frigate, configure rate limiting and trusted proxies as follows: + + + + +Navigate to . + +| Field | Description | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------- | +| **Failed login limits** | Rate limit string for login failures (e.g., `1/second;5/minute;20/hour`) | +| **Trusted proxies** | List of upstream network CIDRs to trust for `X-Forwarded-For` (e.g., `172.18.0.0/16` for internal Docker Compose network) | + + + ```yaml auth: @@ -56,6 +86,9 @@ auth: - 172.18.0.0/16 # <---- this is the subnet for the internal Docker Compose network ``` + + + ## Session Length The default session length for user authentication in Frigate is 24 hours. This setting determines how long a user's authenticated session remains active before a token refresh is required — otherwise, the user will need to log in again. 
@@ -67,11 +100,24 @@ The default value of `86400` will expire the authentication session after 24 hou - `0`: Setting the session length to 0 will require a user to log in every time they access the application or after a very short, immediate timeout. - `604800`: Setting the session length to 604800 will require a user to log in if the token is not refreshed for 7 days. + + + +Navigate to . + +- Set **Session length** to the duration in seconds before the authentication session expires (default: 86400 / 24 hours) + + + + ```yaml auth: session_length: 86400 ``` + + + ## JWT Token Secret The JWT token secret needs to be kept secure. Anyone with this secret can generate valid JWT tokens to authenticate with Frigate. This should be a cryptographically random string of at least 64 characters. @@ -99,7 +145,18 @@ Frigate can be configured to leverage features of common upstream authentication If you are leveraging the authentication of an upstream proxy, you likely want to disable Frigate's authentication as there is no correspondence between users in Frigate's database and users authenticated via the proxy. Optionally, if communication between the reverse proxy and Frigate is over an untrusted network, you should set an `auth_secret` in the `proxy` config and configure the proxy to send the secret value as a header named `X-Proxy-Secret`. Assuming this is an untrusted network, you will also want to [configure a real TLS certificate](tls.md) to ensure the traffic can't simply be sniffed to steal the secret. -Here is an example of how to disable Frigate's authentication and also ensure the requests come only from your known proxy. +To disable Frigate's authentication and ensure requests come only from your known proxy: + + + + +1. Navigate to . + - Set **Enable authentication** to off +2. Navigate to . + - Set **Proxy secret** to `` + + + ```yaml auth: @@ -109,6 +166,9 @@ proxy: auth_secret: ``` + + + You can use the following code to generate a random secret. 
```shell @@ -119,6 +179,20 @@ python3 -c 'import secrets; print(secrets.token_hex(64))' If you have disabled Frigate's authentication and your proxy supports passing a header with authenticated usernames and/or roles, you can use the `header_map` config to specify the header name so it is passed to Frigate. For example, the following will map the `X-Forwarded-User` and `X-Forwarded-Groups` values. Header names are not case sensitive. Multiple values can be included in the role header. Frigate expects that the character separating the roles is a comma, but this can be specified using the `separator` config entry. + + + +Navigate to and configure the header mapping and separator settings. + +| Field | Description | +| -------------------------------- | ---------------------------------------------------------------------------------------------------- | +| **Separator character** | Character separating multiple roles in the role header (default: comma). Authentik uses a pipe `\|`. | +| **Header mapping > User header** | Header name for the authenticated username (e.g., `x-forwarded-user`) | +| **Header mapping > Role header** | Header name for the authenticated role/groups (e.g., `x-forwarded-groups`) | + + + + ```yaml proxy: ... @@ -128,19 +202,37 @@ proxy: role: x-forwarded-groups ``` + + + Frigate supports `admin`, `viewer`, and custom roles (see below). When using port `8971`, Frigate validates these headers and subsequent requests use the headers `remote-user` and `remote-role` for authorization. A default role can be provided. Any value in the mapped `role` header will override the default. + + + +Navigate to and set the default role. + +| Field | Description | +| ---------------- | ------------------------------------------------------------- | +| **Default role** | Fallback role when no role header is present (e.g., `viewer`) | + + + + ```yaml proxy: ... 
default_role: viewer ``` + + + ## Role mapping -In some environments, upstream identity providers (OIDC, SAML, LDAP, etc.) do not pass a Frigate-compatible role directly, but instead pass one or more group claims. To handle this, Frigate supports a `role_map` that translates upstream group names into Frigate’s internal roles (`admin`, `viewer`, or custom). +In some environments, upstream identity providers (OIDC, SAML, LDAP, etc.) do not pass a Frigate-compatible role directly, but instead pass one or more group claims. To handle this, Frigate supports a `role_map` that translates upstream group names into Frigate's internal roles (`admin`, `viewer`, or custom). This is configurable via YAML in the configuration file: ```yaml proxy: @@ -175,7 +267,7 @@ In this example: **Authenticated Port (8971)** - Header mapping is **fully supported**. -- The `remote-role` header determines the user’s privileges: +- The `remote-role` header determines the user's privileges: - **admin** → Full access (user management, configuration changes). - **viewer** → Read-only access. - **Custom roles** → Read-only access limited to the cameras defined in `auth.roles[role]`. @@ -232,6 +324,14 @@ The viewer role provides read-only access to all cameras in the UI and API. Cust ### Role Configuration Example + + + +Navigate to to define custom roles and assign which cameras each role can access. + + + + ```yaml {11-16} cameras: front_door: @@ -251,13 +351,16 @@ auth: - side_yard ``` + + + If you want to provide access to all cameras to a specific user, just use the **viewer** role. ### Managing User Roles 1. Log in as an **admin** user via port `8971` (preferred), or unauthenticated via port `5000`. 2. Navigate to **Settings**. -3. In the **Users** section, edit a user’s role by selecting from available roles (admin, viewer, or custom). +3. In the **Users** section, edit a user's role by selecting from available roles (admin, viewer, or custom). 4. 
In the **Roles** section, add/edit/delete custom roles (select cameras via switches). Deleting a role auto-reassigns users to "viewer". ### Role Enforcement @@ -277,7 +380,7 @@ To use role-based access control, you must connect to Frigate via the **authenti 1. Log in as an **admin** user via port `8971`. 2. Navigate to **Settings > Users**. -3. Edit a user’s role by selecting **admin** or **viewer**. +3. Edit a user's role by selecting **admin** or **viewer**. ## API Authentication Guide diff --git a/docs/docs/configuration/autotracking.md b/docs/docs/configuration/autotracking.md index 86179a264..27312eaa9 100644 --- a/docs/docs/configuration/autotracking.md +++ b/docs/docs/configuration/autotracking.md @@ -3,6 +3,10 @@ id: autotracking title: Camera Autotracking --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + An ONVIF-capable, PTZ (pan-tilt-zoom) camera that supports relative movement within the field of view (FOV) can be configured to automatically track moving objects and keep them in the center of the frame. ![Autotracking example with zooming](/img/frigate-autotracking-example.gif) @@ -29,12 +33,44 @@ A growing list of cameras and brands that have been reported by users to work wi First, set up a PTZ preset in your camera's firmware and give it a name. If you're unsure how to do this, consult the documentation for your camera manufacturer's firmware. Some tutorials for common brands: [Amcrest](https://www.youtube.com/watch?v=lJlE9-krmrM), [Reolink](https://www.youtube.com/watch?v=VAnxHUY5i5w), [Dahua](https://www.youtube.com/watch?v=7sNbc5U-k54). -Edit your Frigate configuration file and enter the ONVIF parameters for your camera. Specify the object types to track, a required zone the object must enter to begin autotracking, and the camera preset name you configured in your camera's firmware to return to when tracking has ended. 
Optionally, specify a delay in seconds before Frigate returns the camera to the preset. +Configure the ONVIF connection and autotracking parameters for your camera. Specify the object types to track, a required zone the object must enter to begin autotracking, and the camera preset name you configured in your camera's firmware to return to when tracking has ended. Optionally, specify a delay in seconds before Frigate returns the camera to the preset. An [ONVIF connection](cameras.md) is required for autotracking to function. Also, a [motion mask](masks.md) over your camera's timestamp and any overlay text is recommended to ensure they are completely excluded from scene change calculations when the camera is moving. Note that `autotracking` is disabled by default but can be enabled in the configuration or by MQTT. + + + +Navigate to for the desired camera. + +**ONVIF Connection** + +| Field | Description | +| ---------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **ONVIF host** | Host of the camera being connected to. HTTP is assumed by default; prefix with `https://` for HTTPS. | +| **ONVIF port** | ONVIF port for device (default: 8000) | +| **ONVIF username** | Username for login. Some devices require admin to access ONVIF. | +| **ONVIF password** | Password for login | +| **Disable TLS verify** | Skip TLS verification and disable digest auth for ONVIF (default: false) | +| **ONVIF profile** | ONVIF media profile to use for PTZ control, matched by token or name. If not set, the first profile with valid PTZ configuration is selected automatically. 
| + +**Autotracking** + +| Field | Description | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| **Enable Autotracking** | Enable or disable object autotracking (default: false) | +| **Calibrate on start** | Calibrate the camera on startup by measuring PTZ motor speed (default: false) | +| **Zoom mode** | Zoom mode during autotracking: `disabled`, `absolute`, or `relative` (default: disabled) | +| **Zoom Factor** | Controls zoom behavior on tracked objects, between 0.1 and 0.75. Lower keeps more scene visible; higher zooms in more (default: 0.3) | +| **Tracked objects** | List of object types to track (default: person) | +| **Required Zones** | Zones an object must enter to begin autotracking | +| **Return Preset** | Name of ONVIF preset in camera firmware to return to when tracking ends (default: home) | +| **Return timeout** | Seconds to delay before returning to preset (default: 10) | + + + + ```yaml cameras: ptzcamera: @@ -52,6 +88,10 @@ cameras: password: admin # Optional: Skip TLS verification from the ONVIF server (default: shown below) tls_insecure: False + # Optional: ONVIF media profile to use for PTZ control, matched by token or name. (default: shown below) + # If not set, the first profile with valid PTZ configuration is selected automatically. + # Use this when your camera has multiple ONVIF profiles and you need to select a specific one. + profile: None # Optional: PTZ camera object autotracking. Keeps a moving object in # the center of the frame by automatically moving the PTZ camera. autotracking: @@ -88,13 +128,16 @@ cameras: movement_weights: [] ``` + + + ## Calibration PTZ motors operate at different speeds. 
Performing a calibration will direct Frigate to measure this speed over a variety of movements and use those measurements to better predict the amount of movement necessary to keep autotracked objects in the center of the frame. Calibration is optional, but will greatly assist Frigate in autotracking objects that move across the camera's field of view more quickly. -To begin calibration, set the `calibrate_on_startup` for your camera to `True` and restart Frigate. Frigate will then make a series of small and large movements with your camera. Don't move the PTZ manually while calibration is in progress. Once complete, camera motion will stop and your config file will be automatically updated with a `movement_weights` parameter to be used in movement calculations. You should not modify this parameter manually. +To begin calibration, set `calibrate_on_startup` for your camera to `True` and restart Frigate. Frigate will then make a series of small and large movements with your camera. Don't move the PTZ manually while calibration is in progress. Once complete, camera motion will stop and your config file will be automatically updated with a `movement_weights` parameter to be used in movement calculations. You should not modify this parameter manually. After calibration has ended, your PTZ will be moved to the preset specified by `return_preset`. diff --git a/docs/docs/configuration/bird_classification.md b/docs/docs/configuration/bird_classification.md index 398729290..75c0b8306 100644 --- a/docs/docs/configuration/bird_classification.md +++ b/docs/docs/configuration/bird_classification.md @@ -3,6 +3,10 @@ id: bird_classification title: Bird Classification --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + Bird classification identifies known birds using a quantized Tensorflow model. When a known bird is recognized, its common name will be added as a `sub_label`. 
This information is included in the UI, filters, as well as in notifications. ## Minimum System Requirements @@ -15,7 +19,18 @@ The classification model used is the MobileNet INat Bird Classification, [availa ## Configuration -Bird classification is disabled by default, it must be enabled in your config file before it can be used. Bird classification is a global configuration setting. +Bird classification is disabled by default and must be enabled before it can be used. Bird classification is a global configuration setting. + + + + +Navigate to . + +- Set **Bird classification config > Bird classification** to on +- Set **Bird classification config > Minimum score** to the desired confidence score (default: 0.9) + + + ```yaml classification: @@ -23,6 +38,9 @@ classification: enabled: true ``` + + + ## Advanced Configuration Fine-tune bird classification with these optional parameters: diff --git a/docs/docs/configuration/birdseye.md b/docs/docs/configuration/birdseye.md index f48299aec..810449478 100644 --- a/docs/docs/configuration/birdseye.md +++ b/docs/docs/configuration/birdseye.md @@ -1,5 +1,9 @@ # Birdseye +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + In addition to Frigate's Live camera dashboard, Birdseye allows a portable heads-up view of your cameras to see what is going on around your property / space without having to watch all cameras that may have nothing happening. Birdseye allows specific modes that intelligently show and disappear based on what you care about. Birdseye can be viewed by adding the "Birdseye" camera to a Camera Group in the Web UI. Add a Camera Group by pressing the "+" icon on the Live page, and choose "Birdseye" as one of the cameras. 
@@ -22,7 +26,22 @@ A custom icon can be added to the birdseye background by providing a 180x180 ima ### Birdseye view override at camera level -If you want to include a camera in Birdseye view only for specific circumstances, or just don't include it at all, the Birdseye setting can be set at the camera level. +To include a camera in Birdseye view only for specific circumstances, or exclude it entirely, configure Birdseye at the camera level. + + + + +**Global settings:** Navigate to to configure the default Birdseye behavior for all cameras. + +**Per-camera overrides:** Navigate to to override the mode or disable Birdseye for a specific camera. + +| Field | Description | +|-------|-------------| +| **Enable Birdseye** | Whether this camera appears in Birdseye view | +| **Tracking mode** | When to show the camera: `continuous`, `motion`, or `objects` | + + + ```yaml {8-10,12-14} # Include all cameras by default in Birdseye view @@ -41,9 +60,24 @@ cameras: enabled: False ``` + + + ### Birdseye Inactivity -By default birdseye shows all cameras that have had the configured activity in the last 30 seconds, this can be configured: +By default birdseye shows all cameras that have had the configured activity in the last 30 seconds. This threshold can be configured. + + + + +Navigate to . + +| Field | Description | +|-------|-------------| +| **Inactivity threshold** | Seconds of inactivity before a camera is hidden from Birdseye (default: 30) | + + + ```yaml birdseye: @@ -52,12 +86,28 @@ birdseye: inactivity_threshold: 15 ``` + + + ## Birdseye Layout ### Birdseye Dimensions The resolution and aspect ratio of birdseye can be configured. Resolution will increase the quality but does not affect the layout. Changing the aspect ratio of birdseye does affect how cameras are laid out. + + + +Navigate to . 
+ +| Field | Description | +|-------|-------------| +| **Width** | Birdseye output width in pixels (default: 1280) | +| **Height** | Birdseye output height in pixels (default: 720) | + + + + ```yaml birdseye: enabled: True @@ -65,10 +115,20 @@ birdseye: height: 720 ``` + + + ### Sorting cameras in the Birdseye view -It is possible to override the order of cameras that are being shown in the Birdseye view. -The order needs to be set at the camera level. +It is possible to override the order of cameras that are being shown in the Birdseye view. The order is set at the camera level. + + + + +Navigate to for each camera and set the **Position** field to control the display order. + + + ```yaml # Include all cameras by default in Birdseye view @@ -87,13 +147,26 @@ cameras: order: 2 ``` + + + _Note_: Cameras are sorted by default using their name to ensure a constant view inside Birdseye. ### Birdseye Cameras It is possible to limit the number of cameras shown on birdseye at one time. When this is enabled, birdseye will show the cameras with most recent activity. There is a cooldown to ensure that cameras do not switch too frequently. -For example, this can be configured to only show the most recently active camera. + + + +Navigate to . + +| Field | Description | +|-------|-------------| +| **Layout > Max cameras** | Maximum number of cameras shown at once (e.g., `1` for only the most active camera) | + + + ```yaml {3-4} birdseye: @@ -102,13 +175,31 @@ birdseye: max_cameras: 1 ``` + + + ### Birdseye Scaling By default birdseye tries to fit 2 cameras in each row and then double in size until a suitable layout is found. The scaling can be configured with a value between 1.0 and 5.0 depending on use case. + + + +Navigate to . 
+ +| Field | Description | +|-------|-------------| +| **Layout > Scaling factor** | Camera scaling factor between 1.0 and 5.0 (default: 2.0) | + + + + ```yaml {3-4} birdseye: enabled: True layout: scaling_factor: 3.0 ``` + + + diff --git a/docs/docs/configuration/cameras.md b/docs/docs/configuration/cameras.md index eed430b52..8094c9f1c 100644 --- a/docs/docs/configuration/cameras.md +++ b/docs/docs/configuration/cameras.md @@ -3,6 +3,10 @@ id: cameras title: Camera Configuration --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + ## Setting Up Camera Inputs Several inputs can be configured for each camera and the role of each input can be mixed and matched based on your needs. This allows you to use a lower resolution stream for object detection, but create recordings from a higher resolution stream, or vice versa. @@ -17,6 +21,25 @@ Each role can only be assigned to one input per camera. The options for roles ar | `record` | Saves segments of the video feed based on configuration settings. [docs](record.md) | | `audio` | Feed for audio based detection. [docs](audio_detectors.md) | + + + +Navigate to . + +| Field | Description | +| ----------------- | ------------------------------------------------------------------- | +| **Camera inputs** | List of input stream definitions (paths and roles) for this camera. | + +Navigate to . + +| Field | Description | +| ----------------- | ------------------------------------------------------------------------------------------------------ | +| **Detect width** | Width (pixels) of frames used for the detect stream; leave empty to use the native stream resolution. | +| **Detect height** | Height (pixels) of frames used for the detect stream; leave empty to use the native stream resolution. 
| + + + + ```yaml mqtt: host: mqtt.server.com @@ -36,7 +59,18 @@ cameras: height: 720 # <- optional, by default Frigate tries to automatically detect resolution ``` -Additional cameras are simply added to the config under the `cameras` entry. + + + +Additional cameras are simply added under the camera configuration section. + + + + +Navigate to and use the add camera button to configure each additional camera. + + + ```yaml mqtt: ... @@ -46,6 +80,9 @@ cameras: side: ... ``` + + + :::note If you only define one stream in your `inputs` and do not assign a `detect` role to it, Frigate will automatically assign it the `detect` role. Frigate will always decode a stream to support motion detection, Birdseye, the API image endpoints, and other features, even if you have disabled object detection with `enabled: False` in your config's `detect` section. @@ -64,7 +101,19 @@ Not every PTZ supports ONVIF, which is the standard protocol Frigate uses to com ::: -Add the onvif section to your camera in your configuration file: +Configure the ONVIF connection for your camera to enable PTZ controls. + + + + +1. Navigate to and select your camera. + - Set **ONVIF host** to your camera's IP address, e.g.: `10.0.10.10` + - Set **ONVIF port** to your camera's ONVIF port, e.g.: `8000` + - Set **ONVIF username** to your camera's ONVIF username, e.g.: `admin` + - Set **ONVIF password** to your camera's ONVIF password, e.g.: `password` + + + ```yaml {4-8} cameras: @@ -77,6 +126,9 @@ cameras: password: password ``` + + + If the ONVIF connection is successful, PTZ controls will be available in the camera's WebUI. :::note @@ -91,6 +143,8 @@ If your ONVIF camera does not require authentication credentials, you may still ::: +If your camera has multiple ONVIF profiles, you can specify which one to use for PTZ control with the `profile` option, matched by token or name. When not set, Frigate selects the first profile with a valid PTZ configuration. 
Check the Frigate debug logs (`frigate.ptz.onvif: debug`) to see available profile names and tokens for your camera. + An ONVIF-capable camera that supports relative movement within the field of view (FOV) can also be configured to automatically track moving objects and keep them in the center of the frame. For autotracking setup, see the [autotracking](autotracking.md) docs. ## ONVIF PTZ camera recommendations @@ -128,13 +182,15 @@ The FeatureList on the [ONVIF Conformant Products Database](https://www.onvif.or ## Setting up camera groups -:::tip +Camera groups let you organize cameras together with a shared name and icon, making it easier to review and filter them. A default group for all cameras is always available. -It is recommended to set up camera groups using the UI. + + -::: +On the Live dashboard, press the **+** icon in the main navigation to add a new camera group. Configure the group name, select which cameras to include, choose an icon, and set the display order. -Cameras can be grouped together and assigned a name and icon, this allows them to be reviewed and filtered together. There will always be the default group for all cameras. 
+ + ```yaml camera_groups: @@ -146,6 +202,9 @@ camera_groups: order: 0 ``` + + + ## Two-Way Audio See the guide [here](/configuration/live/#two-way-talk) diff --git a/docs/docs/configuration/custom_classification/object_classification.md b/docs/docs/configuration/custom_classification/object_classification.md index caf05d8f3..6e68d4ba9 100644 --- a/docs/docs/configuration/custom_classification/object_classification.md +++ b/docs/docs/configuration/custom_classification/object_classification.md @@ -3,13 +3,17 @@ id: object_classification title: Object Classification --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + Object classification allows you to train a custom MobileNetV2 classification model to run on tracked objects (persons, cars, animals, etc.) to identify a finer category or attribute for that object. Classification results are visible in the Tracked Object Details pane in Explore, through the `frigate/tracked_object_details` MQTT topic, in Home Assistant sensors via the official Frigate integration, or through the event endpoints in the HTTP API. ## Minimum System Requirements Object classification models are lightweight and run very fast on CPU. -Training the model does briefly use a high amount of system resources for about 1–3 minutes per training run. On lower-power devices, training may take longer. +Training the model does briefly use a high amount of system resources for about 1-3 minutes per training run. On lower-power devices, training may take longer. A CPU with AVX + AVX2 instructions is required for training and inference. @@ -27,7 +31,7 @@ For object classification: ### Classification Type - **Sub label**: - - Applied to the object’s `sub_label` field. + - Applied to the object's `sub_label` field. - Ideal for a single, more specific identity or type. - Example: `cat` → `Leo`, `Charlie`, `None`. 
@@ -55,7 +59,7 @@ This two-step verification prevents false positives by requiring consistent pred ### Sub label -- **Known pet vs unknown**: For `dog` objects, set sub label to your pet’s name (e.g., `buddy`) or `none` for others. +- **Known pet vs unknown**: For `dog` objects, set sub label to your pet's name (e.g., `buddy`) or `none` for others. - **Mail truck vs normal car**: For `car`, classify as `mail_truck` vs `car` to filter important arrivals. - **Delivery vs non-delivery person**: For `person`, classify `delivery` vs `visitor` based on uniform/props. @@ -68,7 +72,27 @@ This two-step verification prevents false positives by requiring consistent pred ## Configuration -Object classification is configured as a custom classification model. Each model has its own name and settings. You must list which object labels should be classified. +Object classification is configured as a custom classification model. Each model has its own name and settings. Specify which object labels should be classified. + + + + +Navigate to the **Classification** page from the main navigation sidebar, then click **Add Classification**. + +In the **Create New Classification** dialog: + +| Field | Description | +| ----------------------- | ------------------------------------------------------------- | +| **Name** | A name for your classification model (e.g., `dog`) | +| **Type** | Select **Object** for object classification | +| **Object Label** | The object label to classify (e.g., `dog`, `person`, `car`) | +| **Classification Type** | Whether to assign results as a **Sub Label** or **Attribute** | +| **Classes** | The class names the model will learn to distinguish between | + +The `threshold` (default: `0.8`) can be adjusted in the YAML configuration. + + + ```yaml classification: @@ -82,6 +106,9 @@ classification: An optional config, `save_attempts`, can be set as a key under the model name. 
This defines the number of classification attempts to save in the Recent Classifications tab. For object classification models, the default is 200. + + + ## Training the model Creating and training the model is done within the Frigate UI using the `Classification` page. The process consists of two steps: @@ -102,9 +129,20 @@ If examples for some of your classes do not appear in the grid, you can continue ### Improving the Model +:::tip Diversity matters far more than volume + +Selecting dozens of nearly identical images is one of the fastest ways to degrade model performance. MobileNetV2 can overfit quickly when trained on homogeneous data — the model learns what _that exact moment_ looked like rather than what actually defines the class. **This is why Frigate does not implement bulk training in the UI.** + +For more detail, see [Frigate Tip: Best Practices for Training Face and Custom Classification Models](https://github.com/blakeblackshear/frigate/discussions/21374). + +::: + +- **Start small and iterate**: Begin with a small, representative set of images per class. Models often begin working well with surprisingly few examples and improve naturally over time. +- **Favor hard examples**: When images appear in the Recent Classifications tab, prioritize images scoring below 90-100% or those captured under new lighting, weather, or distance conditions. +- **Avoid bulk training similar images**: Training large batches of images that already score 100% (or close) adds little new information and increases the risk of overfitting. +- **The wizard is just the starting point**: You don't need to find and label every class upfront. Missing classes will naturally appear in Recent Classifications, and those images tend to be more valuable because they represent new conditions and edge cases. - **Problem framing**: Keep classes visually distinct and relevant to the chosen object types. 
-- **Data collection**: Use the model’s Recent Classification tab to gather balanced examples across times of day, weather, and distances. -- **Preprocessing**: Ensure examples reflect object crops similar to Frigate’s boxes; keep the subject centered. +- **Preprocessing**: Ensure examples reflect object crops similar to Frigate's boxes; keep the subject centered. - **Labels**: Keep label names short and consistent; include a `none` class if you plan to ignore uncertain predictions for sub labels. - **Threshold**: Tune `threshold` per model to reduce false assignments. Start at `0.8` and adjust based on validation. @@ -114,6 +152,17 @@ To troubleshoot issues with object classification models, enable debug logging t Enable debug logs for classification models by adding `frigate.data_processing.real_time.custom_classification: debug` to your `logger` configuration. These logs are verbose, so only keep this enabled when necessary. Restart Frigate after this change. + + + +Navigate to . + +- Set **Logging level** to `debug` +- Set **Per-process log level > Frigate.Data Processing.Real Time.Custom Classification** to `debug` for verbose classification logging + + + + ```yaml logger: default: info @@ -122,6 +171,9 @@ logger: frigate.data_processing.real_time.custom_classification: debug ``` + + + The debug logs will show: - Classification probabilities for each attempt diff --git a/docs/docs/configuration/custom_classification/state_classification.md b/docs/docs/configuration/custom_classification/state_classification.md index c41d05439..688b8bb0d 100644 --- a/docs/docs/configuration/custom_classification/state_classification.md +++ b/docs/docs/configuration/custom_classification/state_classification.md @@ -3,13 +3,17 @@ id: state_classification title: State Classification --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + State classification allows you to train a 
custom MobileNetV2 classification model on a fixed region of your camera frame(s) to determine a current state. The model can be configured to run on a schedule and/or when motion is detected in that region. Classification results are available through the `frigate/<camera_name>/classification/<model_name>` MQTT topic and in Home Assistant sensors via the official Frigate integration. ## Minimum System Requirements State classification models are lightweight and run very fast on CPU. -Training the model does briefly use a high amount of system resources for about 1–3 minutes per training run. On lower-power devices, training may take longer. +Training the model does briefly use a high amount of system resources for about 1-3 minutes per training run. On lower-power devices, training may take longer. A CPU with AVX + AVX2 instructions is required for training and inference. @@ -33,7 +37,25 @@ For state classification: ## Configuration -State classification is configured as a custom classification model. Each model has its own name and settings. You must provide at least one camera crop under `state_config.cameras`. +State classification is configured as a custom classification model. Each model has its own name and settings. Provide at least one camera crop under `state_config.cameras`. + + + + +Navigate to the **Classification** page from the main navigation sidebar, select the **States** tab, then click **Add Classification**. + +In the **Create New Classification** dialog: + +| Field | Description | +| ----------- | ------------------------------------------------------------------------------------ | +| **Name** | A name for your state classification model (e.g., `front_door`) | +| **Type** | Select **State** for state classification | +| **Classes** | The state names the model will learn to distinguish between (e.g., `open`, `closed`) | + +After creating the model, the wizard will guide you through selecting the camera crop area and assigning training examples. 
The `threshold` (default: `0.8`), `motion`, and `interval` settings can be adjusted in the YAML configuration. + + + ```yaml classification: @@ -50,6 +72,9 @@ classification: An optional config, `save_attempts`, can be set as a key under the model name. This defines the number of classification attempts to save in the Recent Classifications tab. For state classification models, the default is 100. + + + ## Training the model Creating and training the model is done within the Frigate UI using the `Classification` page. The process consists of three steps: @@ -70,10 +95,21 @@ Once some images are assigned, training will begin automatically. ### Improving the Model +:::tip Diversity matters far more than volume + +Selecting dozens of nearly identical images is one of the fastest ways to degrade model performance. MobileNetV2 can overfit quickly when trained on homogeneous data — the model learns what _that exact moment_ looked like rather than what actually defines the state. This often leads to models that work perfectly under the original conditions but become unstable when day turns to night, weather changes, or seasonal lighting shifts. **This is why Frigate does not implement bulk training in the UI.** + +For more detail, see [Frigate Tip: Best Practices for Training Face and Custom Classification Models](https://github.com/blakeblackshear/frigate/discussions/21374). + +::: + +- **Start small and iterate**: Begin with a small, representative set of images per class. Models often begin working well with surprisingly few examples and improve naturally over time. - **Problem framing**: Keep classes visually distinct and state-focused (e.g., `open`, `closed`, `unknown`). Avoid combining object identity with state in a single model unless necessary. - **Data collection**: Use the model's Recent Classifications tab to gather balanced examples across times of day and weather. 
- **When to train**: Focus on cases where the model is entirely incorrect or flips between states when it should not. There's no need to train additional images when the model is already working consistently. -- **Selecting training images**: Images scoring below 100% due to new conditions (e.g., first snow of the year, seasonal changes) or variations (e.g., objects temporarily in view, insects at night) are good candidates for training, as they represent scenarios different from the default state. Training these lower-scoring images that differ from existing training data helps prevent overfitting. Avoid training large quantities of images that look very similar, especially if they already score 100% as this can lead to overfitting. +- **Favor hard examples**: When images appear in the Recent Classifications tab, prioritize images scoring below 90-100% or those captured under new conditions (e.g., first snow of the year, seasonal changes, objects temporarily in view, insects at night). These represent scenarios different from the default state and help prevent overfitting. +- **Avoid bulk training similar images**: Training large batches of images that already score 100% (or close) adds little new information and increases the risk of overfitting. +- **The wizard is just the starting point**: You don't need to find and label every state upfront. Missing states will naturally appear in Recent Classifications, and those images tend to be more valuable because they represent new conditions and edge cases. ## Debugging Classification Models @@ -81,6 +117,17 @@ To troubleshoot issues with state classification models, enable debug logging to Enable debug logs for classification models by adding `frigate.data_processing.real_time.custom_classification: debug` to your `logger` configuration. These logs are verbose, so only keep this enabled when necessary. Restart Frigate after this change. + + + +Navigate to . 
+ +- Set **Logging level** to `debug` +- Set **Per-process log level > `frigate.data_processing.real_time.custom_classification`** to `debug` for verbose classification logging + + + + ```yaml logger: default: info @@ -89,6 +136,9 @@ logger: frigate.data_processing.real_time.custom_classification: debug ``` + + + The debug logs will show: - Classification probabilities for each attempt diff --git a/docs/docs/configuration/face_recognition.md b/docs/docs/configuration/face_recognition.md index c44f76dea..7c23884cc 100644 --- a/docs/docs/configuration/face_recognition.md +++ b/docs/docs/configuration/face_recognition.md @@ -3,6 +3,10 @@ id: face_recognition title: Face Recognition --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + Face recognition identifies known individuals by matching detected faces with previously learned facial data. When a known `person` is recognized, their name will be added as a `sub_label`. This information is included in the UI, filters, as well as in notifications. ## Model Requirements @@ -40,50 +44,101 @@ The `large` model is optimized for accuracy, an integrated or discrete GPU / NPU ## Configuration -Face recognition is disabled by default, face recognition must be enabled in the UI or in your config file before it can be used. Face recognition is a global configuration setting. +Face recognition is disabled by default and must be enabled before it can be used. Face recognition is a global configuration setting. + + + + +Navigate to . + +- Set **Enable face recognition** to on + + + ```yaml face_recognition: enabled: true ``` + + + Like the other real-time processors in Frigate, face recognition runs on the camera stream defined by the `detect` role in your config. To ensure optimal performance, select a suitable resolution for this stream in your camera's firmware that fits your specific scene and requirements. 
## Advanced Configuration -Fine-tune face recognition with these optional parameters at the global level of your config. The only optional parameters that can be set at the camera level are `enabled` and `min_area`. +Fine-tune face recognition with these optional parameters. The only optional parameters that can be set at the camera level are `enabled` and `min_area`. ### Detection -- `detection_threshold`: Face detection confidence score required before recognition runs: + + + +Navigate to . + +- **Detection threshold**: Face detection confidence score required before recognition runs. This field only applies to the standalone face detection model; `min_score` should be used to filter for models that have face detection built in. - Default: `0.7` - - Note: This is field only applies to the standalone face detection model, `min_score` should be used to filter for models that have face detection built in. -- `min_area`: Defines the minimum size (in pixels) a face must be before recognition runs. - - Default: `500` pixels. - - Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant faces. +- **Minimum face area**: Minimum size (in pixels) a face must be before recognition runs. Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant faces. + - Default: `500` pixels + + + + +```yaml +face_recognition: + enabled: true + detection_threshold: 0.7 + min_area: 500 +``` + + + ### Recognition -- `model_size`: Which model size to use, options are `small` or `large` -- `unknown_score`: Min score to mark a person as a potential match, matches at or below this will be marked as unknown. - - Default: `0.8`. -- `recognition_threshold`: Recognition confidence score required to add the face to the object as a sub label. - - Default: `0.9`. -- `min_faces`: Min face recognitions for the sub label to be applied to the person object. + + + +Navigate to . 
+ +- **Model size**: Which model size to use, options are `small` or `large`. +- **Unknown score threshold**: Min score to mark a person as a potential match; matches at or below this will be marked as unknown. + - Default: `0.8` +- **Recognition threshold**: Recognition confidence score required to add the face to the object as a sub label. + - Default: `0.9` +- **Minimum faces**: Min face recognitions for the sub label to be applied to the person object. - Default: `1` -- `save_attempts`: Number of images of recognized faces to save for training. - - Default: `200`. -- `blur_confidence_filter`: Enables a filter that calculates how blurry the face is and adjusts the confidence based on this. - - Default: `True`. -- `device`: Target a specific device to run the face recognition model on (multi-GPU installation). - - Default: `None`. - - Note: This setting is only applicable when using the `large` model. See [onnxruntime's provider options](https://onnxruntime.ai/docs/execution-providers/) +- **Save attempts**: Number of images of recognized faces to save for training. + - Default: `200` +- **Blur confidence filter**: Enables a filter that calculates how blurry the face is and adjusts the confidence based on this. + - Default: `True` +- **Device**: Target a specific device to run the face recognition model on (multi-GPU installation). This setting is only applicable when using the `large` model. See [onnxruntime's provider options](https://onnxruntime.ai/docs/execution-providers/). + - Default: `None` + + + + +```yaml +face_recognition: + enabled: true + model_size: small + unknown_score: 0.8 + recognition_threshold: 0.9 + min_faces: 1 + save_attempts: 200 + blur_confidence_filter: true + device: None +``` + + + ## Usage Follow these steps to begin: -1. **Enable face recognition** in your configuration file and restart Frigate. +1. **Enable face recognition** in your configuration and restart Frigate. 2. 
**Upload one face** using the **Add Face** button's wizard in the Face Library section of the Frigate UI. Read below for the best practices on expanding your training set. 3. When Frigate detects and attempts to recognize a face, it will appear in the **Train** tab of the Face Library, along with its associated recognition confidence. 4. From the **Train** tab, you can **assign the face** to a new or existing person to improve recognition accuracy for the future. diff --git a/docs/docs/configuration/ffmpeg_presets.md b/docs/docs/configuration/ffmpeg_presets.md index 8bba62e36..333388280 100644 --- a/docs/docs/configuration/ffmpeg_presets.md +++ b/docs/docs/configuration/ffmpeg_presets.md @@ -3,6 +3,10 @@ id: ffmpeg_presets title: FFmpeg presets --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + Some presets of FFmpeg args are provided by default to make the configuration easier. All presets can be seen in [this file](https://github.com/blakeblackshear/frigate/blob/master/frigate/ffmpeg_presets.py). ### Hwaccel Presets @@ -21,7 +25,31 @@ See [the hwaccel docs](/configuration/hardware_acceleration_video.md) for more i | preset-nvidia | Nvidia GPU | | | preset-jetson-h264 | Nvidia Jetson with h264 stream | | | preset-jetson-h265 | Nvidia Jetson with h265 stream | | -| preset-rkmpp | Rockchip MPP | Use image with \*-rk suffix and privileged mode | +| preset-rkmpp | Rockchip MPP | Use image with \*-rk suffix and privileged mode | + +Select the appropriate hwaccel preset for your hardware. + + + + +1. Navigate to and set **Hardware acceleration arguments** to the appropriate preset for your hardware. +2. To override for a specific camera, navigate to and set **Hardware acceleration arguments** for that camera. 
+ + + + +```yaml +ffmpeg: + hwaccel_args: preset-vaapi + +cameras: + front_door: + ffmpeg: + hwaccel_args: preset-nvidia +``` + + + ### Input Args Presets @@ -72,7 +100,7 @@ Output args presets help make the config more readable and handle use cases for | Preset | Usage | Other Notes | | -------------------------------- | --------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| preset-record-generic | Record WITHOUT audio | If your camera doesn’t have audio, or if you don’t want to record audio, use this option | +| preset-record-generic | Record WITHOUT audio | If your camera doesn't have audio, or if you don't want to record audio, use this option | | preset-record-generic-audio-copy | Record WITH original audio | Use this to enable audio in recordings | | preset-record-generic-audio-aac | Record WITH transcoded aac audio | This is the default when no option is specified. Use it to transcode audio to AAC. If the source is already in AAC format, use preset-record-generic-audio-copy instead to avoid unnecessary re-encoding | | preset-record-mjpeg | Record an mjpeg stream | Recommend restreaming mjpeg stream instead | diff --git a/docs/docs/configuration/genai/config.md b/docs/docs/configuration/genai/config.md index 4026158b7..e39f88a02 100644 --- a/docs/docs/configuration/genai/config.md +++ b/docs/docs/configuration/genai/config.md @@ -3,6 +3,10 @@ id: genai_config title: Configuring Generative AI --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + ## Configuration A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 4 native providers available to integrate with Frigate. 
Other providers that support the OpenAI standard API can also be used. See the OpenAI-Compatible section below.
@@ -25,11 +29,11 @@ You must use a vision-capable model with Frigate. The following models are recom
| Model | Notes |
| ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `qwen3-vl` | Strong visual and situational understanding, strong ability to identify smaller objects and interactions with object. |
+| `qwen3-vl` | Strong visual and situational understanding, enhanced ability to identify smaller objects and interactions with objects. |
| `qwen3.5` | Strong situational understanding, but missing DeepStack from qwen3-vl leading to worse performance for identifying objects in people's hand and other small details. |
+| `gemma4` | Strong situational understanding, sometimes resorts to more vague terms like 'interacts' instead of assigning a specific action. |
| `Intern3.5VL` | Relatively fast with good vision comprehension |
| `gemma3` | Slower model with good vision and temporal understanding |
-| `qwen2.5-vl` | Fast but capable model with good vision comprehension |
:::info
@@ -69,6 +73,18 @@ You must use a vision capable model with Frigate. The llama.cpp server supports
All llama.cpp native options can be passed through `provider_options`, including `temperature`, `top_k`, `top_p`, `min_p`, `repeat_penalty`, `repeat_last_n`, `seed`, `grammar`, and more. See the [llama.cpp server documentation](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md) for a complete list of available parameters.
+
+
+
+1. Navigate to .
+ - Set **Provider** to `llamacpp` + - Set **Base URL** to your llama.cpp server address (e.g., `http://localhost:8080`) + - Set **Model** to the name of your model + - Under **Provider Options**, set `context_size` to tell Frigate your context size so it can send the appropriate amount of information + + + + ```yaml genai: provider: llamacpp @@ -78,6 +94,9 @@ genai: context_size: 16000 # Tell Frigate your context size so it can send the appropriate amount of information. ``` + + + ### Ollama [Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It is highly recommended to host this server on a machine with an Nvidia graphics card, or on a Apple silicon Mac for best performance. @@ -96,6 +115,18 @@ Note that Frigate will not automatically download the model you specify in your #### Configuration + + + +1. Navigate to . + - Set **Provider** to `ollama` + - Set **Base URL** to your Ollama server address (e.g., `http://localhost:11434`) + - Set **Model** to the model tag (e.g., `qwen3-vl:4b`) + - Under **Provider Options**, set `keep_alive` (e.g., `-1`) and `options.num_ctx` to match your desired context size + + + + ```yaml genai: provider: ollama @@ -107,6 +138,9 @@ genai: num_ctx: 8192 # make sure the context matches other services that are using ollama ``` + + + ### OpenAI-Compatible Frigate supports any provider that implements the OpenAI API standard. This includes self-hosted solutions like [vLLM](https://docs.vllm.ai/), [LocalAI](https://localai.io/), and other OpenAI-compatible servers. @@ -130,6 +164,18 @@ This ensures Frigate uses the correct context window size when generating prompt #### Configuration + + + +1. Navigate to . 
+ - Set **Provider** to `openai` + - Set **Base URL** to your server address (e.g., `http://your-server:port`) + - Set **API key** if required by your server + - Set **Model** to the model name + + + + ```yaml genai: provider: openai @@ -138,6 +184,9 @@ genai: model: your-model-name ``` + + + To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` environment variable to your provider's API URL. ## Cloud Providers @@ -150,6 +199,17 @@ Ollama also supports [cloud models](https://ollama.com/cloud), where your local #### Configuration + + + +1. Navigate to . + - Set **Provider** to `ollama` + - Set **Base URL** to your local Ollama address (e.g., `http://localhost:11434`) + - Set **Model** to the cloud model name + + + + ```yaml genai: provider: ollama @@ -157,6 +217,9 @@ genai: model: cloud-model-name ``` + + + ### Google Gemini Google Gemini has a [free tier](https://ai.google.dev/pricing) for the API, however the limits may not be sufficient for standard Frigate usage. Choose a plan appropriate for your installation. @@ -176,6 +239,17 @@ To start using Gemini, you must first get an API key from [Google AI Studio](htt #### Configuration + + + +1. Navigate to . + - Set **Provider** to `gemini` + - Set **API key** to your Gemini API key (or use an environment variable such as `{FRIGATE_GEMINI_API_KEY}`) + - Set **Model** to the desired model (e.g., `gemini-2.5-flash`) + + + + ```yaml genai: provider: gemini @@ -183,6 +257,9 @@ genai: model: gemini-2.5-flash ``` + + + :::note To use a different Gemini-compatible API endpoint, set the `provider_options` with the `base_url` key to your provider's API URL. For example: @@ -213,6 +290,17 @@ To start using OpenAI, you must first [create an API key](https://platform.opena #### Configuration + + + +1. Navigate to . 
+ - Set **Provider** to `openai` + - Set **API key** to your OpenAI API key (or use an environment variable such as `{FRIGATE_OPENAI_API_KEY}`) + - Set **Model** to the desired model (e.g., `gpt-4o`) + + + + ```yaml genai: provider: openai @@ -220,6 +308,9 @@ genai: model: gpt-4o ``` + + + :::note To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` environment variable to your provider's API URL. @@ -257,6 +348,18 @@ To start using Azure OpenAI, you must first [create a resource](https://learn.mi #### Configuration + + + +1. Navigate to . + - Set **Provider** to `azure_openai` + - Set **Base URL** to your Azure resource URL including the `api-version` parameter (e.g., `https://instance.cognitiveservices.azure.com/openai/responses?api-version=2025-04-01-preview`) + - Set **Model** to your deployed model name (e.g., `gpt-5-mini`) + - Set **API key** to your Azure OpenAI API key (or use an environment variable such as `{FRIGATE_OPENAI_API_KEY}`) + + + + ```yaml genai: provider: azure_openai @@ -264,3 +367,6 @@ genai: model: gpt-5-mini api_key: "{FRIGATE_OPENAI_API_KEY}" ``` + + + diff --git a/docs/docs/configuration/genai/objects.md b/docs/docs/configuration/genai/objects.md index 3ed826d21..eb8dadef5 100644 --- a/docs/docs/configuration/genai/objects.md +++ b/docs/docs/configuration/genai/objects.md @@ -3,6 +3,10 @@ id: genai_objects title: Object Descriptions --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + Generative AI can be used to automatically generate descriptive text based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate to provide more context about your tracked objects. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail. 
Requests for a description are sent off automatically to your AI provider at the end of the tracked object's lifecycle, or can optionally be sent earlier after a number of significantly changed frames, for example in use in more real-time notifications. Descriptions can also be regenerated manually via the Frigate UI. Note that if you are manually entering a description for tracked objects prior to its end, this will be overwritten by the generated response. @@ -15,9 +19,9 @@ Generative AI object descriptions can also be toggled dynamically for a camera v ## Usage and Best Practices -Frigate's thumbnail search excels at identifying specific details about tracked objects – for example, using an "image caption" approach to find a "person wearing a yellow vest," "a white dog running across the lawn," or "a red car on a residential street." To enhance this further, Frigate’s default prompts are designed to ask your AI provider about the intent behind the object's actions, rather than just describing its appearance. +Frigate's thumbnail search excels at identifying specific details about tracked objects -- for example, using an "image caption" approach to find a "person wearing a yellow vest," "a white dog running across the lawn," or "a red car on a residential street." To enhance this further, Frigate's default prompts are designed to ask your AI provider about the intent behind the object's actions, rather than just describing its appearance. -While generating simple descriptions of detected objects is useful, understanding intent provides a deeper layer of insight. Instead of just recognizing "what" is in a scene, Frigate’s default prompts aim to infer "why" it might be there or "what" it could do next. Descriptions tell you what’s happening, but intent gives context. For instance, a person walking toward a door might seem like a visitor, but if they’re moving quickly after hours, you can infer a potential break-in attempt. 
Detecting a person loitering near a door at night can trigger an alert sooner than simply noting "a person standing by the door," helping you respond based on the situation’s context. +While generating simple descriptions of detected objects is useful, understanding intent provides a deeper layer of insight. Instead of just recognizing "what" is in a scene, Frigate's default prompts aim to infer "why" it might be there or "what" it could do next. Descriptions tell you what's happening, but intent gives context. For instance, a person walking toward a door might seem like a visitor, but if they're moving quickly after hours, you can infer a potential break-in attempt. Detecting a person loitering near a door at night can trigger an alert sooner than simply noting "a person standing by the door," helping you respond based on the situation's context. ## Custom Prompts @@ -33,7 +37,18 @@ Prompts can use variable replacements `{label}`, `{sub_label}`, and `{camera}` t ::: -You are also able to define custom prompts in your configuration. +You can define custom prompts at the global level and per-object type. To configure custom prompts: + + + + +1. Navigate to . + - Expand the **GenAI object config** section + - Set **Caption prompt** to your custom prompt text + - Under **Object prompts**, add entries keyed by object type (e.g., `person`, `car`) with custom prompts for each + + + ```yaml genai: @@ -49,7 +64,25 @@ objects: car: "Observe the primary vehicle in these images. Focus on its movement, direction, or purpose (e.g., parking, approaching, circling). If it's a delivery vehicle, mention the company." ``` -Prompts can also be overridden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire. + + + +Prompts can also be overridden at the camera level to provide a more detailed prompt to the model about your specific camera. To configure camera-level overrides: + + + + +1. Navigate to for the desired camera. 
+ - Expand the **GenAI object config** section + - Set **Enable GenAI** to on + - Set **Use snapshots** to on if desired + - Set **Caption prompt** to a camera-specific prompt + - Under **Object prompts**, add entries keyed by object type with camera-specific prompts + - Set **GenAI objects** to the list of object types that should receive descriptions (e.g., `person`, `cat`) + - Set **Required zones** to limit descriptions to objects in specific zones (e.g., `steps`) + + + ```yaml cameras: @@ -69,6 +102,9 @@ cameras: - steps ``` + + + ### Experiment with prompts Many providers also have a public facing chat interface for their models. Download a couple of different thumbnails or snapshots from Frigate and try new things in the playground to get descriptions to your liking before updating the prompt in Frigate. diff --git a/docs/docs/configuration/genai/review_summaries.md b/docs/docs/configuration/genai/review_summaries.md index 8045f5aa3..e492a4893 100644 --- a/docs/docs/configuration/genai/review_summaries.md +++ b/docs/docs/configuration/genai/review_summaries.md @@ -3,6 +3,10 @@ id: genai_review title: Review Summaries --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + Generative AI can be used to automatically generate structured summaries of review items. These summaries will show up in Frigate's native notifications as well as in the UI. Generative AI can also be used to take a collection of summaries over a period of time and provide a report, which may be useful to get a quick report of everything that happened while out for some amount of time. Requests for a summary are requested automatically to your AI provider for alert review items when the activity has ended, they can also be optionally enabled for detections as well. 
@@ -28,6 +32,30 @@ This will show in multiple places in the UI to give additional context about eac Each installation and even camera can have different parameters for what is considered suspicious activity. Frigate allows the `activity_context_prompt` to be defined globally and at the camera level, which allows you to define more specifically what should be considered normal activity. It is important that this is not overly specific as it can sway the output of the response. +To configure the activity context prompt: + + + + +Navigate to . + +- Set **GenAI config > Activity context prompt** to your custom activity context text + + + + +```yaml +review: + genai: + activity_context_prompt: | + ### Normal Activity Indicators (Level 0) + - Known/verified people in any zone at any time + ... +``` + + + +
Default Activity Context Prompt @@ -74,7 +102,18 @@ review: ### Image Source -By default, review summaries use preview images (cached preview frames) which have a lower resolution but use fewer tokens per image. For better image quality and more detailed analysis, you can configure Frigate to extract frames directly from recordings at a higher resolution: +By default, review summaries use preview images (cached preview frames) which have a lower resolution but use fewer tokens per image. For better image quality and more detailed analysis, configure Frigate to extract frames directly from recordings at a higher resolution. + + + + +Navigate to . + +- Set **GenAI config > Enable GenAI descriptions** to on +- Set **GenAI config > Review image source** to `recordings` (default is `preview`) + + + ```yaml review: @@ -84,6 +123,9 @@ review: image_source: recordings # Options: "preview" (default) or "recordings" ``` + + + When using `recordings`, frames are extracted at 480px height while maintaining the camera's original aspect ratio, providing better detail for the LLM while being mindful of context window size. This is particularly useful for scenarios where fine details matter, such as identifying license plates, reading text, or analyzing distant objects. The number of frames sent to the LLM is dynamically calculated based on: @@ -103,7 +145,17 @@ If recordings are not available for a given time period, the system will automat ### Additional Concerns -Along with the concern of suspicious activity or immediate threat, you may have concerns such as animals in your garden or a gate being left open. These concerns can be configured so that the review summaries will make note of them if the activity requires additional review. For example: +Along with the concern of suspicious activity or immediate threat, you may have concerns such as animals in your garden or a gate being left open. 
Configure these concerns so that review summaries will make note of them if the activity requires additional review. + + + + +Navigate to . + +- Set **GenAI config > Additional concerns** to a list of your concerns (e.g., `animals in the garden`) + + + ```yaml {4,5} review: @@ -113,9 +165,22 @@ review: - animals in the garden ``` + + + ### Preferred Language -By default, review summaries are generated in English. You can configure Frigate to generate summaries in your preferred language by setting the `preferred_language` option: +By default, review summaries are generated in English. Configure Frigate to generate summaries in your preferred language by setting the `preferred_language` option. + + + + +Navigate to . + +- Set **GenAI config > Preferred language** to the desired language (e.g., `Spanish`) + + + ```yaml {4} review: @@ -124,6 +189,9 @@ review: preferred_language: Spanish ``` + + + ## Review Reports Along with individual review item summaries, Generative AI can also produce a single report of review items from all cameras marked "suspicious" over a specified time period (for example, a daily summary of suspicious activity while you're on vacation). diff --git a/docs/docs/configuration/hardware_acceleration_video.md b/docs/docs/configuration/hardware_acceleration_video.md index 318e1b23e..918c23e67 100644 --- a/docs/docs/configuration/hardware_acceleration_video.md +++ b/docs/docs/configuration/hardware_acceleration_video.md @@ -4,6 +4,9 @@ title: Video Decoding --- import CommunityBadge from '@site/src/components/CommunityBadge'; +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; # Video Decoding @@ -78,27 +81,60 @@ See [The Intel Docs](https://www.intel.com/content/www/us/en/support/articles/00 VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams. 
+ + + +Navigate to and set **Hardware acceleration arguments** to `VAAPI (Intel/AMD GPU)`. For per-camera overrides, navigate to . + + + + ```yaml ffmpeg: hwaccel_args: preset-vaapi ``` + + + ### Via Quicksync #### H.264 streams + + + +Navigate to and set **Hardware acceleration arguments** to `Intel QuickSync (H.264)`. For per-camera overrides, navigate to . + + + + ```yaml ffmpeg: hwaccel_args: preset-intel-qsv-h264 ``` + + + #### H.265 streams + + + +Navigate to and set **Hardware acceleration arguments** to `Intel QuickSync (H.265)`. For per-camera overrides, navigate to . + + + + ```yaml ffmpeg: hwaccel_args: preset-intel-qsv-h265 ``` + + + ### Configuring Intel GPU Stats in Docker Additional configuration is needed for the Docker container to be able to access the `intel_gpu_top` command for GPU stats. There are two options: @@ -196,11 +232,22 @@ You need to change the driver to `radeonsi` by adding the following environment VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams. + + + +Navigate to and set **Hardware acceleration arguments** to `VAAPI (Intel/AMD GPU)`. For per-camera overrides, navigate to . + + + + ```yaml ffmpeg: hwaccel_args: preset-vaapi ``` + + + ## NVIDIA GPUs While older GPUs may work, it is recommended to use modern, supported GPUs. NVIDIA provides a [matrix of supported GPUs and features](https://developer.nvidia.com/video-encode-and-decode-gpu-support-matrix-new). If your card is on the list and supports CUVID/NVDEC, it will most likely work with Frigate for decoding. However, you must also use [a driver version that will work with FFmpeg](https://github.com/FFmpeg/nv-codec-headers/blob/master/README). Older driver versions may be missing symbols and fail to work, and older cards are not supported by newer driver versions. 
The only way around this is to [provide your own FFmpeg](/configuration/advanced#custom-ffmpeg-build) that will work with your driver version, but this is unsupported and may not work well if at all. @@ -244,11 +291,22 @@ docker run -d \ Using `preset-nvidia` ffmpeg will automatically select the necessary profile for the incoming video, and will log an error if the profile is not supported by your GPU. + + + +Navigate to and set **Hardware acceleration arguments** to `NVIDIA GPU`. For per-camera overrides, navigate to . + + + + ```yaml ffmpeg: hwaccel_args: preset-nvidia ``` + + + If everything is working correctly, you should see a significant improvement in performance. Verify that hardware decoding is working by running `nvidia-smi`, which should show `ffmpeg` processes: @@ -296,6 +354,14 @@ These instructions were originally based on the [Jellyfin documentation](https:/ Ensure you increase the allocated RAM for your GPU to at least 128 (`raspi-config` > Performance Options > GPU Memory). If you are using the HA App, you may need to use the full access variant and turn off _Protection mode_ for hardware acceleration. + + + +Navigate to and set **Hardware acceleration arguments** to `Raspberry Pi (H.264)` (for H.264 streams) or `Raspberry Pi (H.265)` (for H.265/HEVC streams). For per-camera overrides, navigate to . + + + + ```yaml # if you want to decode a h264 stream ffmpeg: @@ -306,6 +372,9 @@ ffmpeg: hwaccel_args: preset-rpi-64-h265 ``` + + + :::note If running Frigate through Docker, you either need to run in privileged mode or @@ -405,11 +474,22 @@ A list of supported codecs (you can use `ffmpeg -decoders | grep nvmpi` in the c For example, for H264 video, you'll select `preset-jetson-h264`. + + + +Navigate to and set **Hardware acceleration arguments** to `NVIDIA Jetson (H.264)` (or `NVIDIA Jetson (H.265)` for HEVC streams). For per-camera overrides, navigate to . 
+ + + + ```yaml ffmpeg: hwaccel_args: preset-jetson-h264 ``` + + + If everything is working correctly, you should see a significant reduction in ffmpeg CPU load and power consumption. Verify that hardware decoding is working by running `jtop` (`sudo pip3 install -U jetson-stats`), which should show that NVDEC/NVDEC1 are in use. @@ -424,13 +504,24 @@ Make sure to follow the [Rockchip specific installation instructions](/frigate/i ### Configuration -Add one of the following FFmpeg presets to your `config.yml` to enable hardware video processing: +Set the FFmpeg hwaccel preset to enable hardware video processing. + + + + +Navigate to and set **Hardware acceleration arguments** to `Rockchip RKMPP`. For per-camera overrides, navigate to . + + + ```yaml ffmpeg: hwaccel_args: preset-rkmpp ``` + + + :::note Make sure that your SoC supports hardware acceleration for your input stream. For example, if your camera streams with h265 encoding and a 4k resolution, your SoC must be able to de- and encode h265 with a 4k resolution or higher. If you are unsure whether your SoC meets the requirements, take a look at the datasheet. @@ -480,7 +571,15 @@ Make sure to follow the [Synaptics specific installation instructions](/frigate/ ### Configuration -Add one of the following FFmpeg presets to your `config.yml` to enable hardware video processing: +Set the FFmpeg hwaccel args to enable hardware video processing. + + + + +Navigate to and configure the hardware acceleration args and input args manually for Synaptics hardware. For per-camera overrides, navigate to . + + + ```yaml {2} ffmpeg: @@ -490,6 +589,9 @@ output_args: record: preset-record-generic-audio-aac ``` + + + :::warning Make sure that your SoC supports hardware acceleration for your input stream and your input stream is h264 encoding. For example, if your camera streams with h264 encoding, your SoC must be able to de- and encode with it. 
If you are unsure whether your SoC meets the requirements, take a look at the datasheet. diff --git a/docs/docs/configuration/index.md b/docs/docs/configuration/index.md index be546ca30..84f978078 100644 --- a/docs/docs/configuration/index.md +++ b/docs/docs/configuration/index.md @@ -3,13 +3,24 @@ id: index title: Frigate Configuration --- -For Home Assistant App installations, the config file should be at `/addon_configs//config.yml`, where `` is specific to the variant of the Frigate App you are running. See the list of directories [here](#accessing-app-config-dir). +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; -For all other installation types, the config file should be mapped to `/config/config.yml` inside the container. +Frigate can be configured through the **Settings UI** or by editing the YAML configuration file directly. The Settings UI is the recommended approach — it provides validation and a guided experience for all configuration options. + +It is recommended to start with a minimal configuration and add to it as described in [the getting started guide](../guides/getting_started.md). + +## Configuration File Location + +For users who prefer to edit the YAML configuration file directly: + +- **Home Assistant App:** `/addon_configs//config.yml` — see [directory list](#accessing-app-config-dir) +- **All other installations:** Map to `/config/config.yml` inside the container It can be named `config.yml` or `config.yaml`, but if both files exist `config.yml` will be preferred and `config.yaml` will be ignored. -It is recommended to start with a minimal configuration and add to it as described in [this guide](../guides/getting_started.md) and use the built in configuration editor in Frigate's UI which supports validation. 
+A minimal starting configuration: ```yaml mqtt: @@ -38,7 +49,7 @@ When running Frigate through the HA App, the Frigate `/config` directory is mapp **Whenever you see `/config` in the documentation, it refers to this directory.** -If for example you are running the standard App variant and use the [VS Code App](https://github.com/hassio-addons/addon-vscode) to browse your files, you can click _File_ > _Open folder..._ and navigate to `/addon_configs/ccab4aaf_frigate` to access the Frigate `/config` directory and edit the `config.yaml` file. You can also use the built-in file editor in the Frigate UI to edit the configuration file. +If for example you are running the standard App variant and use the [VS Code App](https://github.com/hassio-addons/addon-vscode) to browse your files, you can click _File_ > _Open folder..._ and navigate to `/addon_configs/ccab4aaf_frigate` to access the Frigate `/config` directory and edit the `config.yaml` file. You can also use the built-in config editor in the Frigate UI. ## VS Code Configuration Schema @@ -81,7 +92,7 @@ genai: ## Common configuration examples -Here are some common starter configuration examples. Refer to the [reference config](./reference.md) for detailed information about all the config values. +Here are some common starter configuration examples. These can be configured through the Settings UI or via YAML. Refer to the [reference config](./reference.md) for detailed information about all config values. ### Raspberry Pi Home Assistant App with USB Coral @@ -94,6 +105,20 @@ Here are some common starter configuration examples. Refer to the [reference con - Save snapshots for 30 days - Motion mask for the camera timestamp + + + +1. Navigate to and configure the MQTT connection to your Home Assistant Mosquitto broker +2. Navigate to and set **Hardware acceleration arguments** to `Raspberry Pi (H.264)` +3. Navigate to and add a detector with **Type** `EdgeTPU` and **Device** `usb` +4. 
Navigate to and set **Enable recording** to on, **Motion retention > Retention days** to `7`, **Alert retention > Event retention > Retention days** to `30`, **Alert retention > Event retention > Retention mode** to `motion`, **Detection retention > Event retention > Retention days** to `30`, **Detection retention > Event retention > Retention mode** to `motion` +5. Navigate to and set **Enable snapshots** to on, **Snapshot retention > Default retention** to `30` +6. Navigate to and add your camera with the appropriate RTSP stream URL +7. Navigate to to add a motion mask for the camera timestamp + + + + ```yaml mqtt: host: core-mosquitto @@ -145,10 +170,13 @@ cameras: coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400" ``` + + + ### Standalone Intel Mini PC with USB Coral - Single camera with 720p, 5fps stream for detect -- MQTT disabled (not integrated with home assistant) +- MQTT disabled (not integrated with Home Assistant) - VAAPI hardware acceleration for decoding video - USB Coral detector - Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not @@ -156,6 +184,20 @@ cameras: - Save snapshots for 30 days - Motion mask for the camera timestamp + + + +1. Navigate to and set **Enable MQTT** to off +2. Navigate to and set **Hardware acceleration arguments** to `VAAPI (Intel/AMD GPU)` +3. Navigate to and add a detector with **Type** `EdgeTPU` and **Device** `usb` +4. Navigate to and set **Enable recording** to on, **Motion retention > Retention days** to `7`, **Alert retention > Event retention > Retention days** to `30`, **Alert retention > Event retention > Retention mode** to `motion`, **Detection retention > Event retention > Retention days** to `30`, **Detection retention > Event retention > Retention mode** to `motion` +5. Navigate to and set **Enable snapshots** to on, **Snapshot retention > Default retention** to `30` +6. 
Navigate to and add your camera with the appropriate RTSP stream URL +7. Navigate to to add a motion mask for the camera timestamp + + + + ```yaml mqtt: enabled: False @@ -205,17 +247,35 @@ cameras: coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400" ``` -### Home Assistant integrated Intel Mini PC with OpenVino + + + +### Home Assistant integrated Intel Mini PC with OpenVINO - Single camera with 720p, 5fps stream for detect -- MQTT connected to same mqtt server as home assistant +- MQTT connected to same MQTT server as Home Assistant - VAAPI hardware acceleration for decoding video -- OpenVino detector +- OpenVINO detector - Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not - Continue to keep all video if it qualified as an alert or detection for 30 days - Save snapshots for 30 days - Motion mask for the camera timestamp + + + +1. Navigate to and configure the connection to your MQTT broker +2. Navigate to and set **Hardware acceleration arguments** to `VAAPI (Intel/AMD GPU)` +3. Navigate to and add a detector with **Type** `openvino` and **Device** `AUTO` +4. Navigate to and configure the OpenVINO model path and settings +5. Navigate to and set **Enable recording** to on, **Motion retention > Retention days** to `7`, **Alert retention > Event retention > Retention days** to `30`, **Alert retention > Event retention > Retention mode** to `motion`, **Detection retention > Event retention > Retention days** to `30`, **Detection retention > Event retention > Retention mode** to `motion` +6. Navigate to and set **Enable snapshots** to on, **Snapshot retention > Default retention** to `30` +7. Navigate to and add your camera with the appropriate RTSP stream URL +8. 
Navigate to to add a motion mask for the camera timestamp + + + + ```yaml mqtt: host: 192.168.X.X # <---- same mqtt broker that home assistant uses @@ -274,3 +334,6 @@ cameras: enabled: true coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400" ``` + + + diff --git a/docs/docs/configuration/license_plate_recognition.md b/docs/docs/configuration/license_plate_recognition.md index a44006b63..017cc5e16 100644 --- a/docs/docs/configuration/license_plate_recognition.md +++ b/docs/docs/configuration/license_plate_recognition.md @@ -3,6 +3,10 @@ id: license_plate_recognition title: License Plate Recognition (LPR) --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + Frigate can recognize license plates on vehicles and automatically add the detected characters to the `recognized_license_plate` field or a [known](#matching) name as a `sub_label` to tracked objects of type `car` or `motorcycle`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street. LPR works best when the license plate is clearly visible to the camera. For moving vehicles, Frigate continuously refines the recognition process, keeping the most confident result. When a vehicle becomes stationary, LPR continues to run for a short time after to attempt recognition. @@ -34,14 +38,35 @@ License plate recognition works by running AI models locally on your system. The ## Configuration -License plate recognition is disabled by default. Enable it in your config file: +License plate recognition is disabled by default and must be enabled before it can be used. + + + + +Navigate to . + +- Set **Enable LPR** to on + + + ```yaml lpr: enabled: True ``` -Like other enrichments in Frigate, LPR **must be enabled globally** to use the feature. 
You should disable it for specific cameras at the camera level if you don't want to run LPR on cars on those cameras: + + + +Like other enrichments in Frigate, LPR **must be enabled globally** to use the feature. Disable it for specific cameras at the camera level if you don't want to run LPR on cars on those cameras. + + + + +Navigate to for the desired camera and disable the **Enable LPR** toggle. + + + ```yaml {4,5} cameras: @@ -51,65 +76,144 @@ cameras: enabled: False ``` + + + For non-dedicated LPR cameras, ensure that your camera is configured to detect objects of type `car` or `motorcycle`, and that a car or motorcycle is actually being detected by Frigate. Otherwise, LPR will not run. Like the other real-time processors in Frigate, license plate recognition runs on the camera stream defined by the `detect` role in your config. To ensure optimal performance, select a suitable resolution for this stream in your camera's firmware that fits your specific scene and requirements. ## Advanced Configuration -Fine-tune the LPR feature using these optional parameters at the global level of your config. The only optional parameters that can be set at the camera level are `enabled`, `min_area`, and `enhancement`. +Fine-tune the LPR feature using these optional parameters. The only optional parameters that can be set at the camera level are `enabled`, `min_area`, and `enhancement`. ### Detection -- **`detection_threshold`**: License plate object detection confidence score required before recognition runs. + + + +Navigate to . + +- **Detection threshold**: License plate object detection confidence score required before recognition runs. This field only applies to the standalone license plate detection model; `threshold` and `min_score` object filters should be used for models like Frigate+ that have license plate detection built in. 
- Default: `0.7` - - Note: This is field only applies to the standalone license plate detection model, `threshold` and `min_score` object filters should be used for models like Frigate+ that have license plate detection built in. -- **`min_area`**: Defines the minimum area (in pixels) a license plate must be before recognition runs. - - Default: `1000` pixels. Note: this is intentionally set very low as it is an _area_ measurement (length x width). For reference, 1000 pixels represents a ~32x32 pixel square in your camera image. - - Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant plates. -- **`device`**: Device to use to run license plate detection _and_ recognition models. +- **Minimum plate area**: Minimum area (in pixels) a license plate must be before recognition runs. This is an _area_ measurement (length x width). For reference, 1000 pixels represents a ~32x32 pixel square in your camera image. Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant plates. + - Default: `1000` pixels +- **Device**: Device to use to run license plate detection _and_ recognition models. Auto-selected by Frigate and can be `CPU`, `GPU`, or the GPU's device number. For users without a model that detects license plates natively, using a GPU may increase performance of the YOLOv9 license plate detector model. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation. - Default: `None` - - This is auto-selected by Frigate and can be `CPU`, `GPU`, or the GPU's device number. For users without a model that detects license plates natively, using a GPU may increase performance of the YOLOv9 license plate detector model. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation. 
However, for users who run a model that detects `license_plate` natively, there is little to no performance gain reported with running LPR on GPU compared to the CPU. -- **`model_size`**: The size of the model used to identify regions of text on plates. +- **Model size**: The size of the model used to identify regions of text on plates. The `small` model is fast and identifies groups of Latin and Chinese characters. The `large` model identifies Latin characters only, and uses an enhanced text detector to find characters on multi-line plates. If your country or region does not use multi-line plates, you should use the `small` model. - Default: `small` - - This can be `small` or `large`. - - The `small` model is fast and identifies groups of Latin and Chinese characters. - - The `large` model identifies Latin characters only, and uses an enhanced text detector to find characters on multi-line plates. It is significantly slower than the `small` model. - - If your country or region does not use multi-line plates, you should use the `small` model as performance is much better for single-line plates. + + + + +```yaml +lpr: + enabled: True + detection_threshold: 0.7 + min_area: 1000 + device: CPU + model_size: small +``` + + + ### Recognition -- **`recognition_threshold`**: Recognition confidence score required to add the plate to the object as a `recognized_license_plate` and/or `sub_label`. - - Default: `0.9`. -- **`min_plate_length`**: Specifies the minimum number of characters a detected license plate must have to be added as a `recognized_license_plate` and/or `sub_label` to an object. - - Use this to filter out short, incomplete, or incorrect detections. -- **`format`**: A regular expression defining the expected format of detected plates. Plates that do not match this format will be discarded. 
- - `"^[A-Z]{1,3} [A-Z]{1,2} [0-9]{1,4}$"` matches plates like "B AB 1234" or "M X 7" - - `"^[A-Z]{2}[0-9]{2} [A-Z]{3}$"` matches plates like "AB12 XYZ" or "XY68 ABC" - - Websites like https://regex101.com/ can help test regular expressions for your plates. + + + +Navigate to . + +- **Recognition threshold**: Recognition confidence score required to add the plate to the object as a `recognized_license_plate` and/or `sub_label`. + - Default: `0.9` +- **Min plate length**: Minimum number of characters a detected license plate must have to be added as a `recognized_license_plate` and/or `sub_label`. Use this to filter out short, incomplete, or incorrect detections. +- **Plate format regex**: A regular expression defining the expected format of detected plates. Plates that do not match this format will be discarded. Websites like https://regex101.com/ can help test regular expressions for your plates. + + + + +```yaml +lpr: + enabled: True + recognition_threshold: 0.9 + min_plate_length: 4 + format: "^[A-Z]{2}[0-9]{2} [A-Z]{3}$" +``` + + + ### Matching -- **`known_plates`**: List of strings or regular expressions that assign custom a `sub_label` to `car` and `motorcycle` objects when a recognized plate matches a known value. - - These labels appear in the UI, filters, and notifications. - - Unknown plates are still saved but are added to the `recognized_license_plate` field rather than the `sub_label`. -- **`match_distance`**: Allows for minor variations (missing/incorrect characters) when matching a detected plate to a known plate. - - For example, setting `match_distance: 1` allows a plate `ABCDE` to match `ABCBE` or `ABCD`. - - This parameter will _not_ operate on known plates that are defined as regular expressions. You should define the full string of your plate in `known_plates` in order to use `match_distance`. + + + +Navigate to . 
+ +- **Known plates**: Assign custom `sub_label` values to `car` and `motorcycle` objects when a recognized plate matches a known value. These labels appear in the UI, filters, and notifications. Unknown plates are still saved but are added to the `recognized_license_plate` field rather than the `sub_label`. +- **Match distance**: Allows for minor variations (missing/incorrect characters) when matching a detected plate to a known plate. For example, setting to `1` allows a plate `ABCDE` to match `ABCBE` or `ABCD`. This parameter will _not_ operate on known plates that are defined as regular expressions. + + + + +```yaml +lpr: + enabled: True + match_distance: 1 + known_plates: + Wife's Car: + - "ABC-1234" + Johnny: + - "J*N-*234" +``` + + + ### Image Enhancement -- **`enhancement`**: A value between 0 and 10 that adjusts the level of image enhancement applied to captured license plates before they are processed for recognition. This preprocessing step can sometimes improve accuracy but may also have the opposite effect. + + + +Navigate to . + +- **Enhancement level**: A value between 0 and 10 that adjusts the level of image enhancement applied to captured license plates before they are processed for recognition. Higher values increase contrast, sharpen details, and reduce noise, but excessive enhancement can blur or distort characters. This setting is best adjusted at the camera level if running LPR on multiple cameras. - Default: `0` (no enhancement) - - Higher values increase contrast, sharpen details, and reduce noise, but excessive enhancement can blur or distort characters, actually making them much harder for Frigate to recognize. - - This setting is best adjusted at the camera level if running LPR on multiple cameras. - - If Frigate is already recognizing plates correctly, leave this setting at the default of `0`. 
However, if you're experiencing frequent character issues or incomplete plates and you can already easily read the plates yourself, try increasing the value gradually, starting at 5 and adjusting as needed. You should see how different enhancement levels affect your plates. Use the `debug_save_plates` configuration option (see below). + + + + +```yaml +lpr: + enabled: True + enhancement: 1 +``` + + + + +If Frigate is already recognizing plates correctly, leave enhancement at the default of `0`. However, if you're experiencing frequent character issues or incomplete plates and you can already easily read the plates yourself, try increasing the value gradually, starting at 3 and adjusting as needed. Use the `debug_save_plates` configuration option (see below) to see how different enhancement levels affect your plates. ### Normalization Rules -- **`replace_rules`**: List of regex replacement rules to normalize detected plates. These rules are applied sequentially and are applied _before_ the `format` regex, if specified. Each rule must have a `pattern` (which can be a string or a regex) and `replacement` (a string, which also supports [backrefs](https://docs.python.org/3/library/re.html#re.sub) like `\1`). These rules are useful for dealing with common OCR issues like noise characters, separators, or confusions (e.g., 'O'→'0'). + + -These rules must be defined at the global level of your `lpr` config. +Navigate to . + +Under **Replacement rules**, add regex rules to normalize detected plate strings before matching. Rules fire in order. 
For example: + +| Pattern | Replacement | Description | +| ---------------- | ----------- | -------------------------------------------------- | +| `[%#*?]` | _(empty)_ | Remove noise symbols | +| `[= ]` | `-` | Normalize `=` or space to dash | +| `O` | `0` | Swap `O` to `0` (common OCR error) | +| `I` | `1` | Swap `I` to `1` | +| `(\w{3})(\w{3})` | `\1-\2` | Split 6 chars into groups (e.g., ABC123 → ABC-123) | + + + ```yaml lpr: @@ -126,6 +230,11 @@ lpr: replacement: '\1-\2' ``` + + + +These rules must be defined at the global level of your `lpr` config. + - Rules fire in order: In the example above: clean noise first, then separators, then swaps, then splits. - Backrefs (`\1`, `\2`) allow dynamic replacements (e.g., capture groups). - Any changes made by the rules are printed to the LPR debug log. @@ -133,13 +242,50 @@ lpr: ### Debugging -- **`debug_save_plates`**: Set to `True` to save captured text on plates for debugging. These images are stored in `/media/frigate/clips/lpr`, organized into subdirectories by `/`, and named based on the capture timestamp. - - These saved images are not full plates but rather the specific areas of text detected on the plates. It is normal for the text detection model to sometimes find multiple areas of text on the plate. Use them to analyze what text Frigate recognized and how image enhancement affects detection. - - **Note:** Frigate does **not** automatically delete these debug images. Once LPR is functioning correctly, you should disable this option and manually remove the saved files to free up storage. + + + +Navigate to . + +- **Save debug plates**: Set to on to save captured text on plates for debugging. These images are stored in `/media/frigate/clips/lpr`, organized into subdirectories by `/`, and named based on the capture timestamp. + + + + +```yaml +lpr: + enabled: True + debug_save_plates: True +``` + + + + +The saved images are not full plates but rather the specific areas of text detected on the plates. 
It is normal for the text detection model to sometimes find multiple areas of text on the plate. Use them to analyze what text Frigate recognized and how image enhancement affects detection. + +**Note:** Frigate does **not** automatically delete these debug images. Once LPR is functioning correctly, you should disable this option and manually remove the saved files to free up storage. ## Configuration Examples -These configuration parameters are available at the global level of your config. The only optional parameters that should be set at the camera level are `enabled`, `min_area`, and `enhancement`. +These configuration parameters are available at the global level. The only optional parameters that should be set at the camera level are `enabled`, `min_area`, and `enhancement`. + + + + +Navigate to . + +| Field | Description | +| ------------------------------ | ----------------------------------------------------------------------------------------------------- | +| **Enable LPR** | Set to on | +| **Minimum plate area** | Set to `1500` — ignore plates with an area (length x width) smaller than 1500 pixels | +| **Min plate length** | Set to `4` — only recognize plates with 4 or more characters | +| **Known plates > Wife's Car** | `ABC-1234`, `ABC-I234` (accounts for potential confusion between the number one and capital letter I) | +| **Known plates > Johnny** | `J*N-*234` (matches JHN-1234 and JMN-I234; `*` matches any number of characters) | +| **Known plates > Sally** | `[S5]LL 1234` (matches both SLL 1234 and 5LL 1234) | +| **Known plates > Work Trucks** | `EMP-[0-9]{3}[A-Z]` (matches plates like EMP-123A, EMP-456Z) | + + + ```yaml lpr: @@ -158,28 +304,21 @@ lpr: - "EMP-[0-9]{3}[A-Z]" # Matches plates like EMP-123A, EMP-456Z ``` -```yaml -lpr: - enabled: True - min_area: 4000 # Run recognition on larger plates only (4000 pixels represents a 63x63 pixel square in your image) - recognition_threshold: 0.85 - format: "^[A-Z]{2} [A-Z][0-9]{4}$" # Only recognize 
plates that are two letters, followed by a space, followed by a single letter and 4 numbers - match_distance: 1 # Allow one character variation in plate matching - replace_rules: - - pattern: "O" - replacement: "0" # Replace the letter O with the number 0 in every plate - known_plates: - Delivery Van: - - "RJ K5678" - - "UP A1234" - Supervisor: - - "MN D3163" -``` + + :::note If a camera is configured to detect `car` or `motorcycle` but you don't want Frigate to run LPR for that camera, disable LPR at the camera level: + + + +Navigate to for the desired camera and disable the **Enable LPR** toggle. + + + + ```yaml cameras: side_yard: @@ -188,13 +327,16 @@ cameras: ... ``` + + + ::: ## Dedicated LPR Cameras Dedicated LPR cameras are single-purpose cameras with powerful optical zoom to capture license plates on distant vehicles, often with fine-tuned settings to capture plates at night. -To mark a camera as a dedicated LPR camera, add `type: "lpr"` the camera configuration. +To mark a camera as a dedicated LPR camera, set `type: "lpr"` in the camera configuration. :::note @@ -210,6 +352,55 @@ Users running a Frigate+ model (or any model that natively detects `license_plat An example configuration for a dedicated LPR camera using a `license_plate`-detecting model: + + + +Navigate to and set **Enable LPR** to on. Set **Device** to `CPU` (can also be `GPU` if available). + +Navigate to and add your camera streams. + +Navigate to . + +| Field | Description | +| --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | +| **Enable object detection** | Set to on | +| **Detect FPS** | Set to `5`. Increase to `10` if vehicles move quickly across your frame. Higher than 10 is unnecessary and is not recommended. | +| **Minimum initialization frames** | Set to `2` | +| **Detect width** | Set to `1920` | +| **Detect height** | Set to `1080` | + +Navigate to . 
+ +| Field | Description | +| ---------------------------------------------- | ------------------- | +| **Objects to track** | Add `license_plate` | +| **Object filters > License Plate > Threshold** | Set to `0.7` | + +Navigate to . + +| Field | Description | +| -------------------- | --------------------------------------------------------------------- | +| **Motion threshold** | Set to `30` | +| **Contour area** | Set to `60`. Use an increased value to tune out small motion changes. | +| **Improve contrast** | Set to off | + +Also add a motion mask over your camera's timestamp so it is not incorrectly detected as a license plate. + +Navigate to . + +| Field | Description | +| -------------------- | -------------------------------------------------------- | +| **Enable recording** | Set to on. Disable recording if you only want snapshots. | + +Navigate to . + +| Field | Description | +| -------------------- | ----------- | +| **Enable snapshots** | Set to on | + + + + ```yaml # LPR global configuration lpr: @@ -248,6 +439,9 @@ cameras: - license_plate ``` + + + With this setup: - License plates are treated as normal objects in Frigate. @@ -259,10 +453,65 @@ With this setup: ### Using the Secondary LPR Pipeline (Without Frigate+) -If you are not running a Frigate+ model, you can use Frigate’s built-in secondary dedicated LPR pipeline. In this mode, Frigate bypasses the standard object detection pipeline and runs a local license plate detector model on the full frame whenever motion activity occurs. +If you are not running a Frigate+ model, you can use Frigate's built-in secondary dedicated LPR pipeline. In this mode, Frigate bypasses the standard object detection pipeline and runs a local license plate detector model on the full frame whenever motion activity occurs. An example configuration for a dedicated LPR camera using the secondary pipeline: + + + +Navigate to and set **Enable LPR** to on. 
Set **Device** to `CPU` (can also be `GPU` if available and the correct Docker image is used). Set **Detection threshold** to `0.7` (change if necessary). + +Navigate to for your dedicated LPR camera. + +| Field | Description | +| --------------------- | -------------------------------------------------------------------------------- | +| **Enable LPR** | Set to on | +| **Enhancement level** | Set to `3` (optional — enhances the image before trying to recognize characters) | + +Navigate to and add your camera streams. + +Navigate to . + +| Field | Description | +| --------------------------- | ---------------------------------------------------------------------------------------------------------------------------- | +| **Enable object detection** | Set to off — disables Frigate's standard object detection pipeline | +| **Detect FPS** | Set to `5`. Increase if necessary, though high values may slow down Frigate's enrichments pipeline and use considerable CPU. | +| **Detect width** | Set to `1920` (recommended value, but depends on your camera) | +| **Detect height** | Set to `1080` (recommended value, but depends on your camera) | + +Navigate to . + +| Field | Description | +| -------------------- | -------------------------------------------------------------------------------------- | +| **Objects to track** | Set to an empty list — required when not using a Frigate+ model for dedicated LPR mode | + +Navigate to . + +| Field | Description | +| -------------------- | --------------------------------------------------------------------- | +| **Motion threshold** | Set to `30` | +| **Contour area** | Set to `60`. Use an increased value to tune out small motion changes. | +| **Improve contrast** | Set to off | + +Navigate to and add a motion mask over your camera's timestamp so it is not incorrectly detected as a license plate. + +Navigate to . 
+ +| Field | Description | +| -------------------- | -------------------------------------------------------- | +| **Enable recording** | Set to on. Disable recording if you only want snapshots. | + +Navigate to . + +| Field | Description | +| ----------------------------------------- | --------------- | +| **Detections config > Enable detections** | Set to on | +| **Detections config > Retain > Default** | Set to `7` days | + + + + ```yaml # LPR global configuration lpr: @@ -299,6 +548,9 @@ cameras: default: 7 ``` + + + With this setup: - The standard object detection pipeline is bypassed. Any detected license plates on dedicated LPR cameras are treated similarly to manual events in Frigate. You must **not** specify `license_plate` as an object to track. @@ -377,12 +629,27 @@ Start with ["Why isn't my license plate being detected and recognized?"](#why-is 1. Start with a simplified LPR config. - Remove or comment out everything in your LPR config, including `min_area`, `min_plate_length`, `format`, `known_plates`, or `enhancement` values so that the only values left are `enabled` and `debug_save_plates`. This will run LPR with Frigate's default values. - ```yaml - lpr: - enabled: true - device: CPU - debug_save_plates: true - ``` + + + +Navigate to . + +- Set **Enable LPR** to on +- Set **Device** to `CPU` +- Set **Save debug plates** to on + + + + +```yaml +lpr: + enabled: true + device: CPU + debug_save_plates: true +``` + + + 2. Enable debug logs to see exactly what Frigate is doing. - Enable debug logs for LPR by adding `frigate.data_processing.common.license_plate: debug` to your `logger` configuration. These logs are _very_ verbose, so only keep this enabled when necessary. Restart Frigate after this change. 
@@ -391,7 +658,7 @@ Start with ["Why isn't my license plate being detected and recognized?"](#why-is logger: default: info logs: - # highlight-next-line + # highlight-next-line frigate.data_processing.common.license_plate: debug ``` diff --git a/docs/docs/configuration/live.md b/docs/docs/configuration/live.md index 8e7eff163..18e2054c4 100644 --- a/docs/docs/configuration/live.md +++ b/docs/docs/configuration/live.md @@ -3,6 +3,10 @@ id: live title: Live View --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + Frigate intelligently displays your camera streams on the Live view dashboard. By default, Frigate employs "smart streaming" where camera images update once per minute when no detectable activity is occurring to conserve bandwidth and resources. As soon as any motion or active objects are detected, cameras seamlessly switch to a live stream. ### Live View technologies @@ -63,19 +67,26 @@ go2rtc: ### Setting Streams For Live UI -You can configure Frigate to allow manual selection of the stream you want to view in the Live UI. For example, you may want to view your camera's substream on mobile devices, but the full resolution stream on desktop devices. Setting the `live -> streams` list will populate a dropdown in the UI's Live view that allows you to choose between the streams. This stream setting is _per device_ and is saved in your browser's local storage. +You can configure Frigate to allow manual selection of the stream you want to view in the Live UI. For example, you may want to view your camera's substream on mobile devices, but the full resolution stream on desktop devices. Setting the streams list will populate a dropdown in the UI's Live view that allows you to choose between the streams. This stream setting is _per device_ and is saved in your browser's local storage. 
Additionally, when creating and editing camera groups in the UI, you can choose the stream you want to use for your camera group's Live dashboard. :::note -Frigate's default dashboard ("All Cameras") will always use the first entry you've defined in `streams:` when playing live streams from your cameras. +Frigate's default dashboard ("All Cameras") will always use the first entry you've defined in streams when playing live streams from your cameras. ::: -Configure the `streams` option with a "friendly name" for your stream followed by the go2rtc stream name. +Configure a "friendly name" for your stream followed by the go2rtc stream name. Using Frigate's internal version of go2rtc is required to use this feature. You cannot specify paths in the streams configuration, only go2rtc stream names. -Using Frigate's internal version of go2rtc is required to use this feature. You cannot specify paths in the `streams` configuration, only go2rtc stream names. + + + +1. Navigate to , then select your camera. + - Under **Live stream names**, add entries mapping a friendly name to each go2rtc stream name (e.g., `Main Stream` mapped to `test_cam`, `Sub Stream` mapped to `test_cam_sub`). + + + ```yaml {3,6,8,25-29} go2rtc: @@ -109,6 +120,9 @@ cameras: Special Stream: test_cam_another_sub ``` + + + ### WebRTC extra configuration: WebRTC works by creating a TCP or UDP connection on port `8555`. However, it requires additional configuration: @@ -185,7 +199,7 @@ To prevent go2rtc from blocking other applications from accessing your camera's Frigate provides a dialog in the Camera Group Edit pane with several options for streaming on a camera group's dashboard. These settings are _per device_ and are saved in your device's local storage. 
-- Stream selection using the `live -> streams` configuration option (see _Setting Streams For Live UI_ above) +- Stream selection using the streams configuration option (see _Setting Streams For Live UI_ above) - Streaming type: - _No streaming_: Camera images will only update once per minute and no live streaming will occur. - _Smart Streaming_ (default, recommended setting): Smart streaming will update your camera image once per minute when no detectable activity is occurring to conserve bandwidth and resources, since a static picture is the same as a streaming image with no motion or objects. When motion or objects are detected, the image seamlessly switches to a live stream. @@ -203,6 +217,40 @@ Use a camera group if you want to change any of these settings from the defaults ::: +### jsmpeg Stream Quality + +The jsmpeg live view resolution and encoding quality can be adjusted globally or per camera. These settings only affect the jsmpeg player and do not apply when go2rtc is used for live view. + + + + +Navigate to for global defaults, or and select a camera for per-camera overrides. + +| Field | Description | +| ---------------- | --------------------------------------------------------------------------------------------------- | +| **Live height** | Height in pixels for the jsmpeg live stream; must be less than or equal to the detect stream height | +| **Live quality** | Encoding quality for the jsmpeg stream (1 = highest, 31 = lowest) | + + + + +```yaml +# Global defaults +live: + height: 720 + quality: 8 + +# Per-camera override +cameras: + front_door: + live: + height: 480 + quality: 4 +``` + + + + ### Disabling cameras Cameras can be temporarily disabled through the Frigate UI and through [MQTT](/integrations/mqtt#frigatecamera_nameenabledset) to conserve system resources. When disabled, Frigate's ffmpeg processes are terminated — recording stops, object detection is paused, and the Live dashboard displays a blank image with a disabled message. 
Review items, tracked objects, and historical footage for disabled cameras can still be accessed via the UI. @@ -276,7 +324,7 @@ When your browser runs into problems playing back your camera streams, it will l 4. Look for messages prefixed with the camera name. These logs help identify if the issue is player-specific (MSE vs. WebRTC) or related to camera configuration (e.g., go2rtc streams, codecs). If you see frequent errors: - - Verify your camera's H.264/AAC settings (see [Frigate's camera settings recommendations](#camera_settings_recommendations)). + - Verify your camera's H.264/AAC settings (see [Frigate's camera settings recommendations](#camera-settings-recommendations)). - Check go2rtc configuration for transcoding (e.g., audio to AAC/OPUS). - Test with a different stream via the UI dropdown (if `live -> streams` is configured). - For WebRTC-specific issues, ensure port 8555 is forwarded and candidates are set (see [WebRTC Extra Configuration](#webrtc-extra-configuration)). diff --git a/docs/docs/configuration/masks.md b/docs/docs/configuration/masks.md index 32280531d..e497de2c1 100644 --- a/docs/docs/configuration/masks.md +++ b/docs/docs/configuration/masks.md @@ -3,6 +3,10 @@ id: masks title: Masks --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + ## Motion masks Motion masks are used to prevent unwanted types of motion from triggering detection. Try watching the Debug feed (Settings --> Debug) with `Motion Boxes` enabled to see what may be regularly detected as motion. For example, you want to mask out your timestamp, the sky, rooftops, etc. Keep in mind that this mask only prevents motion from being detected and does not prevent objects from being detected if object detection was started due to motion in unmasked areas. Motion is also used during object tracking to refine the object detection area in the next frame. 
_Over-masking will make it more difficult for objects to be tracked._ @@ -17,17 +21,15 @@ Object filter masks can be used to filter out stubborn false positives in fixed ![object mask](/img/bottom-center-mask.jpg) -## Using the mask creator +## Creating masks -To create a poly mask: + + -1. Visit the Web UI -2. Click/tap the gear icon and open "Settings" -3. Select "Mask / zone editor" -4. At the top right, select the camera you wish to create a mask or zone for -5. Click the plus icon under the type of mask or zone you would like to create -6. Click on the camera's latest image to create the points for a masked area. Click the first point again to close the polygon. -7. When you've finished creating your mask, press Save. +Navigate to and select a camera. Use the mask editor to draw motion masks and object filter masks directly on the camera feed. Each mask can be given a friendly name and toggled on or off. + + + Your config file will be updated with the relative coordinates of the mask/zone: @@ -59,7 +61,7 @@ motion: coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456" ``` -Object filter masks can also be created through the UI or manually in the config. They are configured under the object filters section for each object type: +Object filter masks are configured under the object filters section for each object type: ```yaml objects: @@ -78,6 +80,9 @@ objects: coordinates: "0.000,0.700,1.000,0.700,1.000,1.000,0.000,1.000" ``` + + + ## Enabling/Disabling Masks Both motion masks and object filter masks can be toggled on or off without removing them from the configuration. Disabled masks are completely ignored at runtime - they will not affect motion detection or object filtering. This is useful for temporarily disabling a mask during certain seasons or times of day without modifying the configuration. 
diff --git a/docs/docs/configuration/metrics.md b/docs/docs/configuration/metrics.md index 662404205..d857d5eee 100644 --- a/docs/docs/configuration/metrics.md +++ b/docs/docs/configuration/metrics.md @@ -3,19 +3,42 @@ id: metrics title: Metrics --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + # Metrics Frigate exposes Prometheus metrics at the `/api/metrics` endpoint that can be used to monitor the performance and health of your Frigate instance. +## Enabling Telemetry + +Prometheus metrics are exposed via the telemetry configuration. Enable or configure telemetry to control metric availability. + + + + +Navigate to to configure metrics and telemetry settings. + + + + +Metrics are available at `/api/metrics` by default. No additional Frigate configuration is required to expose them. + + + + ## Available Metrics ### System Metrics + - `frigate_cpu_usage_percent{pid="", name="", process="", type="", cmdline=""}` - Process CPU usage percentage - `frigate_mem_usage_percent{pid="", name="", process="", type="", cmdline=""}` - Process memory usage percentage - `frigate_gpu_usage_percent{gpu_name=""}` - GPU utilization percentage - `frigate_gpu_mem_usage_percent{gpu_name=""}` - GPU memory usage percentage ### Camera Metrics + - `frigate_camera_fps{camera_name=""}` - Frames per second being consumed from your camera - `frigate_detection_fps{camera_name=""}` - Number of times detection is run per second - `frigate_process_fps{camera_name=""}` - Frames per second being processed @@ -25,21 +48,25 @@ Frigate exposes Prometheus metrics at the `/api/metrics` endpoint that can be us - `frigate_audio_rms{camera_name=""}` - Audio RMS for camera ### Detector Metrics + - `frigate_detector_inference_speed_seconds{name=""}` - Time spent running object detection in seconds - `frigate_detection_start{name=""}` - Detector start time (unix timestamp) ### Storage Metrics + - 
`frigate_storage_free_bytes{storage=""}` - Storage free bytes - `frigate_storage_total_bytes{storage=""}` - Storage total bytes - `frigate_storage_used_bytes{storage=""}` - Storage used bytes - `frigate_storage_mount_type{mount_type="", storage=""}` - Storage mount type info ### Service Metrics + - `frigate_service_uptime_seconds` - Uptime in seconds - `frigate_service_last_updated_timestamp` - Stats recorded time (unix timestamp) - `frigate_device_temperature{device=""}` - Device Temperature ### Event Metrics + - `frigate_camera_events{camera="", label=""}` - Count of camera events since exporter started ## Configuring Prometheus @@ -48,10 +75,10 @@ To scrape metrics from Frigate, add the following to your Prometheus configurati ```yaml scrape_configs: - - job_name: 'frigate' - metrics_path: '/api/metrics' + - job_name: "frigate" + metrics_path: "/api/metrics" static_configs: - - targets: ['frigate:5000'] + - targets: ["frigate:5000"] scrape_interval: 15s ``` diff --git a/docs/docs/configuration/motion_detection.md b/docs/docs/configuration/motion_detection.md index 53e63272a..3f31d27db 100644 --- a/docs/docs/configuration/motion_detection.md +++ b/docs/docs/configuration/motion_detection.md @@ -3,6 +3,10 @@ id: motion_detection title: Motion Detection --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + # Tuning Motion Detection Frigate uses motion detection as a first line check to see if there is anything happening in the frame worth checking with object detection. @@ -21,7 +25,7 @@ First, mask areas with regular motion not caused by the objects you want to dete ## Prepare For Testing -The easiest way to tune motion detection is to use the Frigate UI under Settings > Motion Tuner. This screen allows the changing of motion detection values live to easily see the immediate effect on what is detected as motion. 
+The recommended way to tune motion detection is to use the built-in Motion Tuner. Navigate to and select the camera you want to tune. This screen lets you adjust motion detection values live and immediately see the effect on what is detected as motion, making it the fastest way to find optimal settings for each camera. ## Tuning Motion Detection During The Day @@ -37,6 +41,20 @@ Remember that motion detection is just used to determine when object detection s The threshold value dictates how much of a change in a pixels luminance is required to be considered motion. + + + +Navigate to to set the threshold globally. + +To override for a specific camera, navigate to and select the camera, or use the to adjust it live. + +| Field | Description | +| -------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **Motion threshold** | The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive. The value should be between 1 and 255. (default: 30) | + + + + ```yaml motion: # Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below) @@ -45,12 +63,29 @@ motion: threshold: 30 ``` + + + Lower values mean motion detection is more sensitive to changes in color, making it more likely for example to detect motion when a brown dogs blends in with a brown fence or a person wearing a red shirt blends in with a red car. If the threshold is too low however, it may detect things like grass blowing in the wind, shadows, etc. to be detected as motion. 
Watching the motion boxes in the debug view, increase the threshold until you only see motion that is visible to the eye. Once this is done, it is important to test and ensure that desired motion is still detected. ### Contour Area + + + +Navigate to to set the contour area globally. + +To override for a specific camera, navigate to and select the camera, or use the to adjust it live. + +| Field | Description | +| ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **Contour area** | Minimum size in pixels in the resized motion image that counts as motion. Increasing this value will prevent smaller areas of motion from being detected. Decreasing will make motion detection more sensitive to smaller moving objects. As a rule of thumb: 10 = high sensitivity, 30 = medium sensitivity, 50 = low sensitivity. (default: 10) | + + + + ```yaml motion: # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: shown below) @@ -63,6 +98,9 @@ motion: contour_area: 10 ``` + + + Once the threshold calculation is run, the pixels that have changed are grouped together. The contour area value is used to decide which groups of changed pixels qualify as motion. Smaller values are more sensitive meaning people that are far away, small animals, etc. are more likely to be detected as motion, but it also means that small changes in shadows, leaves, etc. are detected as motion. Higher values are less sensitive meaning these things won't be detected as motion but with the risk that desired motion won't be detected until closer to the camera. 
Watching the motion boxes in the debug view, adjust the contour area until there are no motion boxes smaller than the smallest you'd expect frigate to detect something moving. @@ -81,6 +119,20 @@ However, if the preferred day settings do not work well at night it is recommend ### Lightning Threshold + + + +Navigate to and expand the advanced fields to find the lightning threshold setting. + +To override for a specific camera, navigate to and select the camera. + +| Field | Description | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **Lightning threshold** | The percentage of the image used to detect lightning or other substantial changes where motion detection needs to recalibrate. Increasing this value will make motion detection more likely to consider lightning or IR mode changes as valid motion. Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching a doorbell camera. (default: 0.8) | + + + + ```yaml motion: # Optional: The percentage of the image used to detect lightning or @@ -94,6 +146,9 @@ motion: lightning_threshold: 0.8 ``` + + + Large changes in motion like PTZ moves and camera switches between Color and IR mode should result in a pause in object detection. `lightning_threshold` defines the percentage of the image used to detect these substantial changes. Increasing this value makes motion detection more likely to treat large changes (like IR mode switches) as valid motion. Decreasing it makes motion detection more likely to ignore large amounts of motion, such as a person approaching a doorbell camera. 
Note that `lightning_threshold` does **not** stop motion-based recordings from being saved — it only prevents additional motion analysis after the threshold is exceeded, reducing false positive object detections during high-motion periods (e.g. storms or PTZ sweeps) without interfering with recordings. @@ -106,6 +161,20 @@ Some cameras, like doorbell cameras, may have missed detections when someone wal ### Skip Motion On Large Scene Changes + + + +Navigate to and expand the advanced fields to find the skip motion threshold setting. + +To override for a specific camera, navigate to and select the camera. + +| Field | Description | +| ------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **Skip motion threshold** | Fraction of the frame that must change in a single update before Frigate will completely ignore any motion in that frame. Values range between 0.0 and 1.0; leave unset (null) to disable. For example, setting this to 0.7 causes Frigate to skip reporting motion boxes when more than 70% of the image appears to change (e.g. during lightning storms, IR/color mode switches, or other sudden lighting events). | + + + + ```yaml motion: # Optional: Fraction of the frame that must change in a single update @@ -118,6 +187,9 @@ motion: skip_motion_threshold: 0.7 ``` + + + This option is handy when you want to prevent large transient changes from triggering recordings or object detection. It differs from `lightning_threshold` because it completely suppresses motion instead of just forcing a recalibration. 
:::warning diff --git a/docs/docs/configuration/notifications.md b/docs/docs/configuration/notifications.md index b5e1600e4..0ba84b8aa 100644 --- a/docs/docs/configuration/notifications.md +++ b/docs/docs/configuration/notifications.md @@ -3,6 +3,10 @@ id: notifications title: Notifications --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + # Notifications Frigate offers native notifications using the [WebPush Protocol](https://web.dev/articles/push-notifications-web-push-protocol) which uses the [VAPID spec](https://tools.ietf.org/html/draft-thomson-webpush-vapid) to deliver notifications to web apps using encryption. @@ -18,15 +22,27 @@ In order to use notifications the following requirements must be met: ### Configuration -To configure notifications, go to the Frigate WebUI -> Settings -> Notifications and enable, then fill out the fields and save. +Enable notifications and fill out the required fields. -Optionally, you can change the default cooldown period for notifications through the `cooldown` parameter in your config file. This parameter can also be overridden at the camera level. +Optionally, change the default cooldown period for notifications. The cooldown can also be overridden at the camera level. Notifications will be prevented if either: - The global cooldown period hasn't elapsed since any camera's last notification - The camera-specific cooldown period hasn't elapsed for the specific camera +#### Global notifications + + + + +1. Navigate to . + - Set **Email** to your email address + - Enable notifications for the desired cameras + + + + ```yaml notifications: enabled: True @@ -34,6 +50,21 @@ notifications: cooldown: 10 # wait 10 seconds before sending another notification from any camera ``` + + + +#### Per-camera notifications + + + + +1. Navigate to and select the desired camera. 
+ - Set **Enable notifications** to on + - Set **Cooldown period** to the desired number of seconds to wait before sending another notification from this camera (e.g. `30`) + + + + ```yaml cameras: doorbell: @@ -43,6 +74,9 @@ cameras: cooldown: 30 # wait 30 seconds before sending another notification from the doorbell camera ``` + + + ### Registration Once notifications are enabled, press the `Register for Notifications` button on all devices that you would like to receive notifications on. This will register the background worker. After this Frigate must be restarted and then notifications will begin to be sent. diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md index c16d3f5dc..53aee4747 100644 --- a/docs/docs/configuration/object_detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -4,6 +4,9 @@ title: Object Detectors --- import CommunityBadge from '@site/src/components/CommunityBadge'; +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; # Supported Hardware @@ -53,7 +56,6 @@ Frigate supports multiple different detectors that work on different types of ha - [AXEngine](#axera): axmodels can run on AXERA AI acceleration. - **For Testing** - [CPU Detector (not recommended for actual use](#cpu-detector-not-recommended): Use a CPU to run tflite model, this is not recommended and in most cases OpenVINO can be used in CPU mode with better results. @@ -86,6 +88,14 @@ See [common Edge TPU troubleshooting steps](/troubleshooting/edgetpu) if the Edg ### Single USB Coral + + + +Navigate to and select **EdgeTPU** from the detector type dropdown and click **Add**, then set device to `usb`. 
+ + + + ```yaml detectors: coral: @@ -93,8 +103,19 @@ detectors: device: usb ``` + + + ### Multiple USB Corals + + + +Navigate to and select **EdgeTPU** from the detector type dropdown and click **Add** to add multiple detectors, specifying `usb:0` and `usb:1` as the device for each. + + + + ```yaml detectors: coral1: @@ -105,10 +126,21 @@ detectors: device: usb:1 ``` + + + ### Native Coral (Dev Board) _warning: may have [compatibility issues](https://github.com/blakeblackshear/frigate/issues/1706) after `v0.9.x`_ + + + +Navigate to and select **EdgeTPU** from the detector type dropdown and click **Add**, then leave the device field empty. + + + + ```yaml detectors: coral: @@ -116,8 +148,19 @@ detectors: device: "" ``` + + + ### Single PCIE/M.2 Coral + + + +Navigate to and select **EdgeTPU** from the detector type dropdown and click **Add**, then set device to `pci`. + + + + ```yaml detectors: coral: @@ -125,8 +168,19 @@ detectors: device: pci ``` + + + ### Multiple PCIE/M.2 Corals + + + +Navigate to and select **EdgeTPU** from the detector type dropdown and click **Add** to add multiple detectors, specifying `pci:0` and `pci:1` as the device for each. + + + + ```yaml detectors: coral1: @@ -137,8 +191,19 @@ detectors: device: pci:1 ``` + + + ### Mixing Corals + + + +Navigate to and select **EdgeTPU** from the detector type dropdown and click **Add** to add multiple detectors with different device types (e.g., `usb` and `pci`). + + + + ```yaml detectors: coral_usb: @@ -149,6 +214,9 @@ detectors: device: pci ``` + + + ### EdgeTPU Supported Models | Model | Notes | @@ -173,7 +241,23 @@ YOLOv9 models that are compiled for TensorFlow Lite and properly quantized are s
YOLOv9 Setup & Config -After placing the downloaded files for the tflite model and labels in your config folder, you can use the following configuration: +After placing the downloaded files for the tflite model and labels in your config folder, use the following configuration: + + + + +Navigate to and select **EdgeTPU** from the detector type dropdown and click **Add**, then set device to `usb`. Then navigate to and configure the model settings: + +| Field | Value | +| ---------------------------------------- | ----------------------------------------------------------------- | +| **Object Detection Model Type** | `yolo-generic` | +| **Object detection model input width** | `320` (should match the imgsize of the model) | +| **Object detection model input height** | `320` (should match the imgsize of the model) | +| **Custom object detector model path** | `/config/model_cache/yolov9-s-relu6-best_320_int8_edgetpu.tflite` | +| **Label map for custom object detector** | `/config/labels-coco17.txt` | + + + ```yaml detectors: @@ -189,6 +273,9 @@ model: labelmap_path: /config/labels-coco17.txt ``` + + + Note that due to hardware limitations of the Coral, the labelmap is a subset of the COCO labels and includes only 17 object classes.
@@ -199,7 +286,7 @@ Note that due to hardware limitations of the Coral, the labelmap is a subset of This detector is available for use with both Hailo-8 and Hailo-8L AI Acceleration Modules. The integration automatically detects your hardware architecture via the Hailo CLI and selects the appropriate default model if no custom model is specified. -See the [installation docs](../frigate/installation.md#hailo-8l) for information on configuring the Hailo hardware. +See the [installation docs](../frigate/installation.md#hailo-8) for information on configuring the Hailo hardware. ### Configuration @@ -213,6 +300,26 @@ Use this configuration for YOLO-based models. When no custom model path or URL i - **Hailo-8 hardware:** Uses **YOLOv6n** (default: `yolov6n.hef`) - **Hailo-8L hardware:** Uses **YOLOv6n** (default: `yolov6n.hef`) + + + +Navigate to and select **Hailo-8/Hailo-8L** from the detector type dropdown and click **Add**, then set device to `PCIe`. Then navigate to and configure the model settings: + +| Field | Value | +| ---------------------------------------- | ----------------------- | +| **Object detection model input width** | `320` | +| **Object detection model input height** | `320` | +| **Model Input Tensor Shape** | `nhwc` | +| **Model Input Pixel Color Format** | `rgb` | +| **Model Input D Type** | `int` | +| **Object Detection Model Type** | `yolo-generic` | +| **Label map for custom object detector** | `/labelmap/coco-80.txt` | + +The detector automatically selects the default model based on your hardware. Optionally, specify a local model path or URL to override. + + + + ```yaml detectors: hailo: @@ -242,10 +349,31 @@ model: # just make sure to give it the write configuration based on the model ``` + + + #### SSD For SSD-based models, provide either a model path or URL to your compiled SSD model. The integration will first check the local path before downloading if necessary. 
+ + + +Navigate to and select **Hailo-8/Hailo-8L** from the detector type dropdown and click **Add**, then set device to `PCIe`. Then navigate to and configure the model settings: + +| Field | Value | +| --------------------------------------- | ------ | +| **Object detection model input width** | `300` | +| **Object detection model input height** | `300` | +| **Model Input Tensor Shape** | `nhwc` | +| **Model Input Pixel Color Format** | `rgb` | +| **Object Detection Model Type** | `ssd` | + +Specify the local model path or URL for SSD MobileNet v1. + + + + ```yaml detectors: hailo: @@ -266,10 +394,21 @@ model: # path: https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/ModelZoo/Compiled/v2.14.0/hailo8l/ssd_mobilenet_v1.hef ``` + + + #### Custom Models The Hailo detector supports all YOLO models compiled for Hailo hardware that include post-processing. You can specify a custom URL or a local path to download or use your model directly. If both are provided, the detector checks the local path first. + + + +Navigate to and select **Hailo-8/Hailo-8L** from the detector type dropdown and click **Add**, then set device to `PCIe`. Then navigate to and configure the model settings to match your custom model dimensions and format. + + + + ```yaml detectors: hailo: @@ -291,6 +430,9 @@ model: # path: https://custom-model-url.com/path/to/model.hef ``` + + + For additional ready-to-use models, please visit: https://github.com/hailo-ai/hailo_model_zoo Hailo8 supports all models in the Hailo Model Zoo that include HailoRT post-processing. You're welcome to choose any of these pre-configured models for your implementation. @@ -314,6 +456,14 @@ OpenVINO is supported on 6th Gen Intel platforms (Skylake) and newer. It will al When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming GPU resources are available. 
An example configuration would be: + + + +Navigate to and select **OpenVINO** from the detector type dropdown and click **Add** to add multiple detectors, each targeting `GPU` or `NPU`. + + + + ```yaml detectors: ov_0: @@ -324,6 +474,9 @@ detectors: device: GPU # or NPU ``` + + + ::: ### OpenVINO Supported Models @@ -346,6 +499,23 @@ An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobil Use the model configuration shown below when using the OpenVINO detector with the default OpenVINO model: + + + +Navigate to and select **OpenVINO** from the detector type dropdown and click **Add**, then set device to `GPU` (or `NPU`). Then navigate to and configure: + +| Field | Value | +| ---------------------------------------- | ------------------------------------------ | +| **Object detection model input width** | `300` | +| **Object detection model input height** | `300` | +| **Model Input Tensor Shape** | `nhwc` | +| **Model Input Pixel Color Format** | `bgr` | +| **Custom object detector model path** | `/openvino-model/ssdlite_mobilenet_v2.xml` | +| **Label map for custom object detector** | `/openvino-model/coco_91cl_bkgr.txt` | + + + + ```yaml detectors: ov: @@ -361,6 +531,9 @@ model: labelmap_path: /openvino-model/coco_91cl_bkgr.txt ``` + + +
#### YOLOX @@ -374,7 +547,25 @@ This detector also supports YOLOX. Frigate does not come with any YOLOX models p
YOLO-NAS Setup & Config -After placing the downloaded onnx model in your config folder, you can use the following configuration: +After placing the downloaded onnx model in your config folder, use the following configuration: + + + + +Navigate to and select **OpenVINO** from the detector type dropdown and click **Add**, then set device to `GPU`. Then navigate to and configure: + +| Field | Value | +| ---------------------------------------- | ------------------------------------------------- | +| **Object Detection Model Type** | `yolonas` | +| **Object detection model input width** | `320` (should match whatever was set in notebook) | +| **Object detection model input height** | `320` (should match whatever was set in notebook) | +| **Model Input Tensor Shape** | `nchw` | +| **Model Input Pixel Color Format** | `bgr` | +| **Custom object detector model path** | `/config/yolo_nas_s.onnx` | +| **Label map for custom object detector** | `/labelmap/coco-80.txt` | + + + ```yaml detectors: @@ -392,6 +583,9 @@ model: labelmap_path: /labelmap/coco-80.txt ``` + + + Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
@@ -415,7 +609,25 @@ If you are using a Frigate+ model, you should not define any of the below `model ::: -After placing the downloaded onnx model in your config folder, you can use the following configuration: +After placing the downloaded onnx model in your config folder, use the following configuration: + + + + +Navigate to and select **OpenVINO** from the detector type dropdown and click **Add**, then set device to `GPU` (or `NPU`). Then navigate to and configure: + +| Field | Value | +| ---------------------------------------- | -------------------------------------------------------- | +| **Object Detection Model Type** | `yolo-generic` | +| **Object detection model input width** | `320` (should match the imgsize set during model export) | +| **Object detection model input height** | `320` (should match the imgsize set during model export) | +| **Model Input Tensor Shape** | `nchw` | +| **Model Input D Type** | `float` | +| **Custom object detector model path** | `/config/model_cache/yolo.onnx` | +| **Label map for custom object detector** | `/labelmap/coco-80.txt` | + + + ```yaml detectors: @@ -433,6 +645,9 @@ model: labelmap_path: /labelmap/coco-80.txt ``` + + + Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects. @@ -450,7 +665,24 @@ Due to the size and complexity of the RF-DETR model, it is only recommended to b
RF-DETR Setup & Config -After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration: +After placing the downloaded onnx model in your `config/model_cache` folder, use the following configuration: + + + + +Navigate to and select **OpenVINO** from the detector type dropdown and click **Add**, then set device to `GPU`. Then navigate to and configure: + +| Field | Value | +| --------------------------------------- | --------------------------------- | +| **Object Detection Model Type** | `rfdetr` | +| **Object detection model input width** | `320` | +| **Object detection model input height** | `320` | +| **Model Input Tensor Shape** | `nchw` | +| **Model Input D Type** | `float` | +| **Custom object detector model path** | `/config/model_cache/rfdetr.onnx` | + + + ```yaml detectors: @@ -467,6 +699,9 @@ model: path: /config/model_cache/rfdetr.onnx ``` + + +
#### D-FINE @@ -482,7 +717,25 @@ Currently D-FINE models only run on OpenVINO in CPU mode, GPUs currently fail to
D-FINE Setup & Config -After placing the downloaded onnx model in your config/model_cache folder, you can use the following configuration: +After placing the downloaded onnx model in your config/model_cache folder, use the following configuration: + + + + +Navigate to and select **OpenVINO** from the detector type dropdown and click **Add**, then set device to `CPU`. Then navigate to and configure: + +| Field | Value | +| ---------------------------------------- | ---------------------------------- | +| **Object Detection Model Type** | `dfine` | +| **Object detection model input width** | `640` | +| **Object detection model input height** | `640` | +| **Model Input Tensor Shape** | `nchw` | +| **Model Input D Type** | `float` | +| **Custom object detector model path** | `/config/model_cache/dfine-s.onnx` | +| **Label map for custom object detector** | `/labelmap/coco-80.txt` | + + + ```yaml detectors: @@ -500,6 +753,9 @@ model: labelmap_path: /labelmap/coco-80.txt ``` + + + Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
@@ -517,6 +773,14 @@ The NPU in Apple Silicon can't be accessed from within a container, so the [Appl Using the detector config below will connect to the client: + + + +Navigate to and select **ZMQ IPC** from the detector type dropdown and click **Add**, then set the endpoint to `tcp://host.docker.internal:5555`. + + + + ```yaml detectors: apple-silicon: @@ -524,6 +788,9 @@ detectors: endpoint: tcp://host.docker.internal:5555 ``` + + + ### Apple Silicon Supported Models There is no default model provided, the following formats are supported: @@ -540,6 +807,24 @@ The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv When Frigate is started with the following config it will connect to the detector client and transfer the model automatically: + + + +Navigate to and select **ZMQ IPC** from the detector type dropdown and click **Add**, then set the endpoint to `tcp://host.docker.internal:5555`. Then navigate to and configure: + +| Field | Value | +| ---------------------------------------- | -------------------------------------------------------- | +| **Object Detection Model Type** | `yolo-generic` | +| **Object detection model input width** | `320` (should match the imgsize set during model export) | +| **Object detection model input height** | `320` (should match the imgsize set during model export) | +| **Model Input Tensor Shape** | `nchw` | +| **Model Input D Type** | `float` | +| **Custom object detector model path** | `/config/model_cache/yolo.onnx` | +| **Label map for custom object detector** | `/labelmap/coco-80.txt` | + + + + ```yaml detectors: apple-silicon: @@ -556,13 +841,16 @@ model: labelmap_path: /labelmap/coco-80.txt ``` + + + Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects. ## AMD/ROCm GPU detector ### Setup -Support for AMD GPUs is provided using the [ONNX detector](#ONNX). 
In order to utilize the AMD GPU for object detection use a frigate docker image with `-rocm` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-rocm`. +Support for AMD GPUs is provided using the [ONNX detector](#onnx). In order to utilize the AMD GPU for object detection use a frigate docker image with `-rocm` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-rocm`. ### Docker settings for GPU access @@ -680,6 +968,14 @@ If the correct build is used for your GPU then the GPU will be detected and used When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming GPU resources are available. An example configuration would be: + + + +Navigate to and select **ONNX** from the detector type dropdown and click **Add** to add multiple detectors. + + + + ```yaml detectors: onnx_0: @@ -688,6 +984,9 @@ detectors: type: onnx ``` + + + ::: ### ONNX Supported Models @@ -715,7 +1014,25 @@ If you are using a Frigate+ YOLO-NAS model, you should not define any of the bel ::: -After placing the downloaded onnx model in your config folder, you can use the following configuration: +After placing the downloaded onnx model in your config folder, use the following configuration: + + + + +Navigate to and select **ONNX** from the detector type dropdown and click **Add**. 
Then navigate to and configure: + +| Field | Value | +| ---------------------------------------- | ------------------------------------------------- | +| **Object Detection Model Type** | `yolonas` | +| **Object detection model input width** | `320` (should match whatever was set in notebook) | +| **Object detection model input height** | `320` (should match whatever was set in notebook) | +| **Model Input Pixel Color Format** | `bgr` | +| **Model Input Tensor Shape** | `nchw` | +| **Custom object detector model path** | `/config/yolo_nas_s.onnx` | +| **Label map for custom object detector** | `/labelmap/coco-80.txt` | + + + ```yaml detectors: @@ -732,6 +1049,9 @@ model: labelmap_path: /labelmap/coco-80.txt ``` + + + #### YOLO (v3, v4, v7, v9) @@ -753,7 +1073,25 @@ If you are using a Frigate+ model, you should not define any of the below `model ::: -After placing the downloaded onnx model in your config folder, you can use the following configuration: +After placing the downloaded onnx model in your config folder, use the following configuration: + + + + +Navigate to and select **ONNX** from the detector type dropdown and click **Add**. Then navigate to and configure: + +| Field | Value | +| ---------------------------------------- | -------------------------------------------------------- | +| **Object Detection Model Type** | `yolo-generic` | +| **Object detection model input width** | `320` (should match the imgsize set during model export) | +| **Object detection model input height** | `320` (should match the imgsize set during model export) | +| **Model Input Tensor Shape** | `nchw` | +| **Model Input D Type** | `float` | +| **Custom object detector model path** | `/config/model_cache/yolo.onnx` | +| **Label map for custom object detector** | `/labelmap/coco-80.txt` | + + + ```yaml detectors: @@ -770,6 +1108,9 @@ model: labelmap_path: /labelmap/coco-80.txt ``` + + + Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects. 
@@ -781,7 +1122,25 @@ Note that the labelmap uses a subset of the complete COCO label set that has onl
YOLOx Setup & Config -After placing the downloaded onnx model in your config folder, you can use the following configuration: +After placing the downloaded onnx model in your config folder, use the following configuration: + + + + +Navigate to and select **ONNX** from the detector type dropdown and click **Add**. Then navigate to and configure: + +| Field | Value | +| ---------------------------------------- | -------------------------------------------------------- | +| **Object Detection Model Type** | `yolox` | +| **Object detection model input width** | `416` (should match the imgsize set during model export) | +| **Object detection model input height** | `416` (should match the imgsize set during model export) | +| **Model Input Tensor Shape** | `nchw` | +| **Model Input D Type** | `float_denorm` | +| **Custom object detector model path** | `/config/model_cache/yolox_tiny.onnx` | +| **Label map for custom object detector** | `/labelmap/coco-80.txt` | + + + ```yaml detectors: @@ -798,6 +1157,9 @@ model: labelmap_path: /labelmap/coco-80.txt ``` + + + Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
@@ -809,7 +1171,24 @@ Note that the labelmap uses a subset of the complete COCO label set that has onl
RF-DETR Setup & Config -After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration: +After placing the downloaded onnx model in your `config/model_cache` folder, use the following configuration: + + + + +Navigate to and select **ONNX** from the detector type dropdown and click **Add**. Then navigate to and configure: + +| Field | Value | +| --------------------------------------- | --------------------------------- | +| **Object Detection Model Type** | `rfdetr` | +| **Object detection model input width** | `320` | +| **Object detection model input height** | `320` | +| **Model Input Tensor Shape** | `nchw` | +| **Model Input D Type** | `float` | +| **Custom object detector model path** | `/config/model_cache/rfdetr.onnx` | + + + ```yaml detectors: @@ -825,6 +1204,9 @@ model: path: /config/model_cache/rfdetr.onnx ``` + + +
#### D-FINE @@ -834,7 +1216,25 @@ model:
D-FINE Setup & Config -After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration: +After placing the downloaded onnx model in your `config/model_cache` folder, use the following configuration: + + + + +Navigate to and select **ONNX** from the detector type dropdown and click **Add**. Then navigate to and configure: + +| Field | Value | +| ---------------------------------------- | ------------------------------------------- | +| **Object Detection Model Type** | `dfine` | +| **Object detection model input width** | `640` | +| **Object detection model input height** | `640` | +| **Model Input Tensor Shape** | `nchw` | +| **Model Input D Type** | `float` | +| **Custom object detector model path** | `/config/model_cache/dfine_m_obj2coco.onnx` | +| **Label map for custom object detector** | `/labelmap/coco-80.txt` | + + + ```yaml detectors: @@ -851,6 +1251,9 @@ model: labelmap_path: /labelmap/coco-80.txt ``` + + +
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects. @@ -869,6 +1272,14 @@ The number of threads used by the interpreter can be specified using the `"num_t A TensorFlow Lite model is provided in the container at `/cpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`. + + + +Navigate to and select **CPU** from the detector type dropdown and click **Add**. Configure the number of threads and click **Add** again to add additional CPU detectors as needed (one per camera is recommended). + + + + ```yaml detectors: cpu1: @@ -882,6 +1293,9 @@ model: path: "/custom_model.tflite" ``` + + + When using CPU detectors, you can add one CPU detector per camera. Adding more detectors than the number of cameras should not improve performance. ## Deepstack / CodeProject.AI Server Detector @@ -892,7 +1306,15 @@ The Deepstack / CodeProject.AI Server detector for Frigate allows you to integra To get started with CodeProject.AI, visit their [official website](https://www.codeproject.com/Articles/5322557/CodeProject-AI-Server-AI-the-easy-way) to follow the instructions to download and install the AI server on your preferred device. Detailed setup instructions for CodeProject.AI are outside the scope of the Frigate documentation. -To integrate CodeProject.AI into Frigate, you'll need to make the following changes to your Frigate configuration file: +To integrate CodeProject.AI into Frigate, configure the detector as follows: + + + + +Navigate to and select **DeepStack** from the detector type dropdown and click **Add**. Set the API URL to point to your CodeProject.AI server (e.g., `http://:/v1/vision/detection`). + + + ```yaml detectors: @@ -902,6 +1324,9 @@ detectors: api_timeout: 0.1 # seconds ``` + + + Replace `` and `` with the IP address and port of your CodeProject.AI server. 
To verify that the integration is working correctly, start Frigate and observe the logs for any error messages related to CodeProject.AI. Additionally, you can check the Frigate web interface to see if the objects detected by CodeProject.AI are being displayed and tracked properly. @@ -922,6 +1347,14 @@ To configure the MemryX detector, use the following example configuration: #### Single PCIe MemryX MX3 + + + +Navigate to and select **MemryX** from the detector type dropdown and click **Add**, then set device to `PCIe:0`. + + + + ```yaml detectors: memx0: @@ -929,8 +1362,19 @@ detectors: device: PCIe:0 ``` + + + #### Multiple PCIe MemryX MX3 Modules + + + +Navigate to and select **MemryX** from the detector type dropdown and click **Add** to add multiple detectors, specifying `PCIe:0`, `PCIe:1`, `PCIe:2`, etc. as the device for each. + + + + ```yaml detectors: memx0: @@ -946,6 +1390,9 @@ detectors: device: PCIe:2 ``` + + + ### Supported Models MemryX `.dfp` models are automatically downloaded at runtime, if enabled, to the container at `/memryx_models/model_folder/`. @@ -964,6 +1411,23 @@ The input size for **YOLO-NAS** can be set to either **320x320** (default) or ** Below is the recommended configuration for using the **YOLO-NAS** (small) model with the MemryX detector: + + + +Navigate to and select **MemryX** from the detector type dropdown and click **Add**, then set device to `PCIe:0`. 
Then navigate to and configure: + +| Field | Value | +| ---------------------------------------- | ------------------------------------------------- | +| **Object Detection Model Type** | `yolonas` | +| **Object detection model input width** | `320` (can be set to `640` for higher resolution) | +| **Object detection model input height** | `320` (can be set to `640` for higher resolution) | +| **Model Input Tensor Shape** | `nchw` | +| **Model Input D Type** | `float` | +| **Label map for custom object detector** | `/labelmap/coco-80.txt` | + + + + ```yaml detectors: memx0: @@ -984,6 +1448,9 @@ model: # └── yolonas_post.onnx (optional; only if the model includes a cropped post-processing network) ``` + + + #### YOLOv9 The YOLOv9s model included in this detector is downloaded from [the original GitHub](https://github.com/WongKinYiu/yolov9) like in the [Models Section](#yolov9-1) and compiled to DFP with [mx_nc](https://developer.memryx.com/tools/neural_compiler.html#usage). @@ -992,6 +1459,23 @@ The YOLOv9s model included in this detector is downloaded from [the original Git Below is the recommended configuration for using the **YOLOv9** (small) model with the MemryX detector: + + + +Navigate to and select **MemryX** from the detector type dropdown and click **Add**, then set device to `PCIe:0`. 
Then navigate to and configure: + +| Field | Value | +| ---------------------------------------- | ------------------------------------------------- | +| **Object Detection Model Type** | `yolo-generic` | +| **Object detection model input width** | `320` (can be set to `640` for higher resolution) | +| **Object detection model input height** | `320` (can be set to `640` for higher resolution) | +| **Model Input Tensor Shape** | `nchw` | +| **Model Input D Type** | `float` | +| **Label map for custom object detector** | `/labelmap/coco-80.txt` | + + + + ```yaml detectors: memx0: @@ -1011,6 +1495,9 @@ model: # ├── yolov9.dfp (a file ending with .dfp) ``` + + + #### YOLOX The model is sourced from the [OpenCV Model Zoo](https://github.com/opencv/opencv_zoo) and precompiled to DFP. @@ -1019,6 +1506,23 @@ The model is sourced from the [OpenCV Model Zoo](https://github.com/opencv/openc Below is the recommended configuration for using the **YOLOX** (small) model with the MemryX detector: + + + +Navigate to and select **MemryX** from the detector type dropdown and click **Add**, then set device to `PCIe:0`. Then navigate to and configure: + +| Field | Value | +| ---------------------------------------- | ----------------------- | +| **Object Detection Model Type** | `yolox` | +| **Object detection model input width** | `640` | +| **Object detection model input height** | `640` | +| **Model Input Tensor Shape** | `nchw` | +| **Model Input D Type** | `float_denorm` | +| **Label map for custom object detector** | `/labelmap/coco-80.txt` | + + + + ```yaml detectors: memx0: @@ -1038,6 +1542,9 @@ model: # ├── yolox.dfp (a file ending with .dfp) ``` + + + #### SSDLite MobileNet v2 The model is sourced from the [OpenMMLab Model Zoo](https://mmdeploy-oss.openmmlab.com/model/mmdet-det/ssdlite-e8679f.onnx) and has been converted to DFP. 
@@ -1046,6 +1553,23 @@ The model is sourced from the [OpenMMLab Model Zoo](https://mmdeploy-oss.openmml Below is the recommended configuration for using the **SSDLite MobileNet v2** model with the MemryX detector: + + + +Navigate to and select **MemryX** from the detector type dropdown and click **Add**, then set device to `PCIe:0`. Then navigate to and configure: + +| Field | Value | +| ---------------------------------------- | ----------------------- | +| **Object Detection Model Type** | `ssd` | +| **Object detection model input width** | `320` | +| **Object detection model input height** | `320` | +| **Model Input Tensor Shape** | `nchw` | +| **Model Input D Type** | `float` | +| **Label map for custom object detector** | `/labelmap/coco-80.txt` | + + + + ```yaml detectors: memx0: @@ -1066,6 +1590,9 @@ model: # └── ssdlite_mobilenet_post.onnx (optional; only if the model includes a cropped post-processing network) ``` + + + #### Using a Custom Model To use your own model: @@ -1165,6 +1692,23 @@ The TensorRT detector uses `.trt` model files that are located in `/config/model Use the config below to work with generated TRT models: + + + +Navigate to and select **TensorRT** from the detector type dropdown and click **Add**, then set the device to `0` (the default GPU index). 
Then navigate to and configure: + +| Field | Value | +| ---------------------------------------- | ------------------------------------------------------------ | +| **Custom object detector model path** | `/config/model_cache/tensorrt/yolov7-320.trt` | +| **Label map for custom object detector** | `/labelmap/coco-80.txt` | +| **Model Input Tensor Shape** | `nchw` | +| **Model Input Pixel Color Format** | `rgb` | +| **Object detection model input width** | `320` (MUST match the chosen model, e.g., yolov7-320 -> 320) | +| **Object detection model input height** | `320` (MUST match the chosen model, e.g., yolov7-320 -> 320) | + + + + ```yaml detectors: tensorrt: @@ -1180,6 +1724,9 @@ model: height: 320 # MUST match the chosen model i.e yolov7-320 -> 320 yolov4-416 -> 416 ``` + + + ## Synaptics Hardware accelerated object detection is supported on the following SoCs: @@ -1202,6 +1749,22 @@ A synap model is provided in the container at /mobilenet.synap and is used by th Use the model configuration shown below when using the synaptics detector with the default synap model: + + + +Navigate to and select **Synaptics** from the detector type dropdown and click **Add**. Then navigate to and configure: + +| Field | Value | +| ---------------------------------------- | ---------------------------- | +| **Custom object detector model path** | `/synaptics/mobilenet.synap` | +| **Object detection model input width** | `224` | +| **Object detection model input height** | `224` | +| **Model Input Tensor Shape** | `nhwc` | +| **Label map for custom object detector** | `/labelmap/coco-80.txt` | + + + + ```yaml detectors: # required synap_npu: # required @@ -1211,10 +1774,13 @@ model: # required path: /synaptics/mobilenet.synap # required width: 224 # required height: 224 # required - tensor_format: nhwc # default value (optional. If you change the model, it is required) + input_tensor: nhwc # default value (optional. 
If you change the model, it is required) labelmap_path: /labelmap/coco-80.txt # required ``` + + + ## Rockchip platform Hardware accelerated object detection is supported on the following SoCs: @@ -1231,6 +1797,14 @@ This implementation uses the [Rockchip's RKNN-Toolkit2](https://github.com/airoc When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming NPU resources are available. An example configuration would be: + + + +Navigate to and select **RKNN** from the detector type dropdown and click **Add** to add multiple detectors, each with `num_cores` set to `0` for automatic selection. + + + + ```yaml detectors: rknn_0: @@ -1241,6 +1815,9 @@ detectors: num_cores: 0 ``` + + + ::: ### Prerequisites @@ -1262,6 +1839,14 @@ $ cat /sys/kernel/debug/rknpu/load This `config.yml` shows all relevant options to configure the detector and explains them. All values shown are the default values (except for two). Lines that are required at least to use the detector are labeled as required, all other lines are optional. + + + +Navigate to and select **RKNN** from the detector type dropdown and click **Add**. Set `num_cores` to `0` for automatic selection (increase for better performance on multicore NPUs, e.g., set to `3` on rk3588). + + + + ```yaml detectors: # required rknn: # required @@ -1272,6 +1857,9 @@ detectors: # required num_cores: 0 ``` + + + The inference time was determined on a rk3588 with 3 NPU cores. | Model | Size in mb | Inference time in ms | @@ -1288,6 +1876,24 @@ The inference time was determined on a rk3588 with 3 NPU cores. 
#### YOLO-NAS + + + +Navigate to and configure: + +| Field | Value | +| ---------------------------------------- | ----------------------------------------------------------------------- | +| **Custom object detector model path** | `deci-fp16-yolonas_s` (or `deci-fp16-yolonas_m`, `deci-fp16-yolonas_l`) | +| **Object Detection Model Type** | `yolonas` | +| **Object detection model input width** | `320` | +| **Object detection model input height** | `320` | +| **Model Input Pixel Color Format** | `bgr` | +| **Model Input Tensor Shape** | `nhwc` | +| **Label map for custom object detector** | `/labelmap/coco-80.txt` | + + + + ```yaml model: # required # name of model (will be automatically downloaded) or path to your own .rknn model file @@ -1305,6 +1911,9 @@ model: # required labelmap_path: /labelmap/coco-80.txt ``` + + + :::warning The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html @@ -1313,6 +1922,23 @@ The pre-trained YOLO-NAS weights from DeciAI are subject to their license and ca #### YOLO (v9) + + + +Navigate to and configure: + +| Field | Value | +| ---------------------------------------- | -------------------------------------------------- | +| **Custom object detector model path** | `frigate-fp16-yolov9-t` (or other yolov9 variants) | +| **Object Detection Model Type** | `yolo-generic` | +| **Object detection model input width** | `320` | +| **Object detection model input height** | `320` | +| **Model Input Tensor Shape** | `nhwc` | +| **Label map for custom object detector** | `/labelmap/coco-80.txt` | + + + + ```yaml model: # required # name of model (will be automatically downloaded) or path to your own .rknn model file @@ -1331,8 +1957,28 @@ model: # required labelmap_path: /labelmap/coco-80.txt ``` + + + #### YOLOx + + + +Navigate to and configure: + +| Field | Value | +| 
---------------------------------------- | ---------------------------------------------- | +| **Custom object detector model path** | `rock-i8-yolox_nano` (or other yolox variants) | +| **Object Detection Model Type** | `yolox` | +| **Object detection model input width** | `416` | +| **Object detection model input height** | `416` | +| **Model Input Tensor Shape** | `nhwc` | +| **Label map for custom object detector** | `/labelmap/coco-80.txt` | + + + + ```yaml model: # required # name of model (will be automatically downloaded) or path to your own .rknn model file @@ -1350,6 +1996,9 @@ model: # required labelmap_path: /labelmap/coco-80.txt ``` + + + ### Converting your own onnx model to rknn format To convert a onnx model to the rknn format using the [rknn-toolkit2](https://github.com/airockchip/rknn-toolkit2/) you have to: @@ -1405,7 +2054,15 @@ degirum_detector: All supported hardware will automatically be found on your AI server host as long as relevant runtimes and drivers are properly installed on your machine. Refer to [DeGirum's docs site](https://docs.degirum.com/pysdk/runtimes-and-drivers) if you have any trouble. -Once completed, changing the `config.yml` file is simple. +Once completed, configure the detector as follows: + + + + +Navigate to and select **DeGirum** from the detector type dropdown and click **Add**. Set the location to your AI server (e.g., service name, container name, or `host:port`), the zoo to `degirum/public`, and provide your authentication token if needed. + + + ```yaml degirum_detector: @@ -1415,6 +2072,9 @@ degirum_detector: token: dg_example_token # For authentication with the AI Hub. Get this token through the "tokens" section on the main page of the [AI Hub](https://hub.degirum.com). This can be left blank if you're pulling a model from the public zoo and running inferences on your local hardware using @local or a local DeGirum AI Server ``` + + + Setting up a model in the `config.yml` is similar to setting up an AI server. 
You can set it to: @@ -1437,7 +2097,15 @@ It is also possible to eliminate the need for an AI server and run the hardware 1. Ensuring that the frigate docker container has the runtime you want to use. So for instance, running `@local` for Hailo means making sure the container you're using has the Hailo runtime installed. 2. To double check the runtime is detected by the DeGirum detector, make sure the `degirum sys-info` command properly shows whatever runtimes you mean to install. -3. Create a DeGirum detector in your `config.yml` file. +3. Create a DeGirum detector in your configuration. + + + + +Navigate to and select **DeGirum** from the detector type dropdown and click **Add**. Set the location to `@local`, the zoo to `degirum/public`, and provide your authentication token. + + + ```yaml degirum_detector: @@ -1447,6 +2115,9 @@ degirum_detector: token: dg_example_token # For authentication with the AI Hub. Get this token through the "tokens" section on the main page of the [AI Hub](https://hub.degirum.com). This can be left blank if you're pulling a model from the public zoo and running inferences on your local hardware using @local or a local DeGirum AI Server ``` + + + Once `degirum_detector` is setup, you can choose a model through 'model' section in the `config.yml` file. ```yaml @@ -1463,7 +2134,15 @@ If you do not possess whatever hardware you want to run, there's also the option 1. Sign up at [DeGirum's AI Hub](https://hub.degirum.com). 2. Get an access token. -3. Create a DeGirum detector in your `config.yml` file. +3. Create a DeGirum detector in your configuration. + + + + +Navigate to and select **DeGirum** from the detector type dropdown and click **Add**. Set the location to `@cloud`, the zoo to `degirum/public`, and provide your authentication token. + + + ```yaml degirum_detector: @@ -1473,6 +2152,9 @@ degirum_detector: token: dg_example_token # For authentication with the AI Hub. 
Get this token through the "tokens" section on the main page of the [AI Hub](https://hub.degirum.com). ``` + + + Once `degirum_detector` is setup, you can choose a model through 'model' section in the `config.yml` file. ```yaml @@ -1504,6 +2186,24 @@ A yolov9 model is provided in the container at `/axmodels` and is used by this d Use the model configuration shown below when using the axengine detector with the default axmodel: + + + +Navigate to and select **AXEngine NPU** from the detector type dropdown and click **Add**. Then navigate to and configure: + +| Field | Value | +| ---------------------------------------- | ----------------------- | +| **Custom object detector model path** | `frigate-yolov9-tiny` | +| **Object Detection Model Type** | `yolo-generic` | +| **Object detection model input width** | `320` | +| **Object detection model input height** | `320` | +| **Model Input D Type** | `int` | +| **Model Input Pixel Color Format** | `bgr` | +| **Label map for custom object detector** | `/labelmap/coco-80.txt` | + + + + ```yaml detectors: axengine: @@ -1519,6 +2219,9 @@ model: labelmap_path: /labelmap/coco-80.txt ``` + + + # Models Some model types are not included in Frigate by default. diff --git a/docs/docs/configuration/object_filters.md b/docs/docs/configuration/object_filters.md index 3f36086c0..dfea51804 100644 --- a/docs/docs/configuration/object_filters.md +++ b/docs/docs/configuration/object_filters.md @@ -3,11 +3,15 @@ id: object_filters title: Filters --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + There are several types of object filters that can be used to reduce false positive rates. ## Object Scores -For object filters in your configuration, any single detection below `min_score` will be ignored as a false positive. `threshold` is based on the median of the history of scores (padded to 3 values) for a tracked object. 
Consider the following frames when `min_score` is set to 0.6 and threshold is set to 0.85: +For object filters, any single detection below `min_score` will be ignored as a false positive. `threshold` is based on the median of the history of scores (padded to 3 values) for a tracked object. Consider the following frames when `min_score` is set to 0.6 and threshold is set to 0.85: | Frame | Current Score | Score History | Computed Score | Detected Object | | ----- | ------------- | --------------------------------- | -------------- | --------------- | @@ -28,6 +32,46 @@ Any detection below `min_score` will be immediately thrown out and never tracked `threshold` is used to determine that the object is a true positive. Once an object is detected with a score >= `threshold`, the object is considered a true positive. If `threshold` is too low then some higher scoring false positives may create a tracked object. If `threshold` is too high then true positive tracked objects may be missed due to the object never scoring high enough. +## Configuring Object Scores + + + + +Navigate to to set score filters globally. + +| Field | Description | +| --------------------------------------- | ---------------------------------------------------------------- | +| **Object filters > Person > Min Score** | Minimum score for a single detection to initiate tracking | +| **Object filters > Person > Threshold** | Minimum computed (median) score to be considered a true positive | + +To override score filters for a specific camera, navigate to and select the camera. + + + + +```yaml +objects: + filters: + person: + min_score: 0.5 + threshold: 0.7 +``` + +To override at the camera level: + +```yaml +cameras: + front_door: + objects: + filters: + person: + min_score: 0.5 + threshold: 0.7 +``` + + + + ## Object Shape False positives can also be reduced by filtering a detection based on its shape. 
@@ -46,6 +90,50 @@ Conceptually, a ratio of 1 is a square, 0.5 is a "tall skinny" box, and 2 is a " ::: +### Configuring Shape Filters + + + + +Navigate to to set shape filters globally. + +| Field | Description | +| --------------------------------------- | ------------------------------------------------------------------------ | +| **Object filters > Person > Min Area** | Minimum bounding box area in pixels (or decimal for percentage of frame) | +| **Object filters > Person > Max Area** | Maximum bounding box area in pixels (or decimal for percentage of frame) | +| **Object filters > Person > Min Ratio** | Minimum width/height ratio of the bounding box | +| **Object filters > Person > Max Ratio** | Maximum width/height ratio of the bounding box | + +To override shape filters for a specific camera, navigate to and select the camera. + + + + +```yaml +objects: + filters: + person: + min_area: 5000 + max_area: 100000 + min_ratio: 0.5 + max_ratio: 2.0 +``` + +To override at the camera level: + +```yaml +cameras: + front_door: + objects: + filters: + person: + min_area: 5000 + max_area: 100000 +``` + + + + ## Other Tools ### Zones @@ -54,4 +142,4 @@ Conceptually, a ratio of 1 is a square, 0.5 is a "tall skinny" box, and 2 is a " ### Object Masks -[Object Filter Masks](/configuration/masks) are a last resort but can be useful when false positives are in the relatively same place but can not be filtered due to their size or shape. +[Object Filter Masks](/configuration/masks) are a last resort but can be useful when false positives are in the relatively same place but can not be filtered due to their size or shape. Object filter masks can be configured in . 
diff --git a/docs/docs/configuration/objects.md b/docs/docs/configuration/objects.md index 796d31258..9925ae8fe 100644 --- a/docs/docs/configuration/objects.md +++ b/docs/docs/configuration/objects.md @@ -3,6 +3,9 @@ id: objects title: Available Objects --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; import labels from "../../../labelmap.txt"; Frigate includes the object labels listed below from the Google Coral test data. @@ -10,7 +13,7 @@ Frigate includes the object labels listed below from the Google Coral test data. Please note: - `car` is listed twice because `truck` has been renamed to `car` by default. These object types are frequently confused. -- `person` is the only tracked object by default. See the [full configuration reference](reference.md) for an example of expanding the list of tracked objects. +- `person` is the only tracked object by default. To track additional objects, configure them in the objects settings.
    {labels.split("\n").map((label) => ( @@ -18,6 +21,135 @@ Please note: ))}
+## Configuring Tracked Objects + +By default, Frigate only tracks `person`. To track additional object types, add them to the tracked objects list. + + + + +1. Navigate to . + - Add the desired object types to the **Objects to track** list (e.g., `person`, `car`, `dog`) + +To override the tracked objects list for a specific camera: + +1. Navigate to . + - Add the desired object types to the **Objects to track** list + + + + +```yaml +objects: + track: + - person + - car + - dog +``` + +To override at the camera level: + +```yaml +cameras: + front_door: + objects: + track: + - person + - car +``` + + + + +## Filtering Objects + +Object filters help reduce false positives by constraining the size, shape, and confidence thresholds for each object type. Filters can be configured globally or per camera. + + + + +Navigate to . + +| Field | Description | +| --------------------------------------- | ------------------------------------------------------------------------ | +| **Object filters > Person > Min Area** | Minimum bounding box area in pixels (or decimal for percentage of frame) | +| **Object filters > Person > Max Area** | Maximum bounding box area in pixels (or decimal for percentage of frame) | +| **Object filters > Person > Min Ratio** | Minimum width/height ratio of the bounding box | +| **Object filters > Person > Max Ratio** | Maximum width/height ratio of the bounding box | +| **Object filters > Person > Min Score** | Minimum score for the object to initiate tracking | +| **Object filters > Person > Threshold** | Minimum computed score to be considered a true positive | + +To override filters for a specific camera, navigate to . 
+ + + + +```yaml +objects: + filters: + person: + min_area: 5000 + max_area: 100000 + min_ratio: 0.5 + max_ratio: 2.0 + min_score: 0.5 + threshold: 0.7 +``` + +To override at the camera level: + +```yaml +cameras: + front_door: + objects: + filters: + person: + min_area: 5000 + threshold: 0.7 +``` + + + + +## Object Filter Masks + +Object filter masks prevent specific object types from being detected in certain areas of the camera frame. These masks check the bottom center of the bounding box. A global mask applies to all object types, while per-object masks apply only to the specified type. + + + + +Navigate to and select a camera. Use the mask editor to draw object filter masks directly on the camera feed. Global object masks and per-object masks can both be configured from this view. + + + + +```yaml +objects: + # Global mask applied to all object types + mask: + mask1: + friendly_name: "Object filter mask area" + enabled: true + coordinates: "0.000,0.000,0.781,0.000,0.781,0.278,0.000,0.278" + # Per-object mask + filters: + person: + mask: + mask1: + friendly_name: "Person filter mask" + enabled: true + coordinates: "0.000,0.000,0.781,0.000,0.781,0.278,0.000,0.278" +``` + + + + +:::note + +The global mask is combined with any object-specific mask. Both are checked based on the bottom center of the bounding box. + +::: + ## Custom Models Models for both CPU and EdgeTPU (Coral) are bundled in the image. 
You can use your own models with volume mounts: diff --git a/docs/docs/configuration/profiles.md b/docs/docs/configuration/profiles.md index ef0778e18..b290d30f7 100644 --- a/docs/docs/configuration/profiles.md +++ b/docs/docs/configuration/profiles.md @@ -3,6 +3,10 @@ id: profiles title: Profiles --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + Profiles allow you to define named sets of camera configuration overrides that can be activated and deactivated at runtime without restarting Frigate. This is useful for scenarios like switching between "Home" and "Away" modes, daytime and nighttime configurations, or any situation where you want to quickly change how multiple cameras behave. ## How Profiles Work @@ -24,16 +28,18 @@ Profile changes are applied in-memory and take effect immediately — no restart The easiest way to define profiles is to use the Frigate UI. Profiles can also be configured manually in your configuration file. -### Using the UI +### Creating and Managing Profiles -To create and manage profiles from the UI, open **Settings**. From there you can: + + -1. **Create a profile** — Navigate to **Profiles**. Click the **Add Profile** button, enter a name (and optionally a profile ID). -2. **Configure overrides** — Navigate to a camera configuration section (e.g. Motion detection, Record, Notifications). In the top right, two buttons will appear - choose a camera and a profile from the profile selector to edit overrides for that camera and section. Only the fields you change will be stored as overrides — fields that require a restart are hidden since profiles are applied at runtime. You can click the **Remove Profile Override** button -3. **Activate a profile** — Use the **Profiles** option in Frigate's main menu to choose a profile. 
Alternatively, in Settings, navigate to **Profiles**, then choose a profile in the Active Profile dropdown to activate it. The active profile is also shown in the status bar at the bottom of the screen on desktop browsers. -4. **Delete a profile** — Navigate to **Profiles**, then click the trash icon for a profile. This removes the profile definition and all camera overrides associated with it. +1. **Create a profile** — Navigate to . Click the **Add Profile** button, enter a name (and optionally a profile ID). +2. **Configure overrides** — Navigate to a camera configuration section (e.g. Motion detection, Record, Notifications). In the top right, two buttons will appear - choose a camera and a profile from the profile selector to edit overrides for that camera and section. Only the fields you change will be stored as overrides — fields that require a restart are hidden since profiles are applied at runtime. You can click the **Remove Profile Override** button to clear overrides. +3. **Activate a profile** — Use the **Profiles** option in Frigate's main menu to choose a profile. Alternatively, in Settings, navigate to , then choose a profile in the Active Profile dropdown to activate it. The active profile is also shown in the status bar at the bottom of the screen on desktop browsers. +4. **Delete a profile** — Navigate to , then click the trash icon for a profile. This removes the profile definition and all camera overrides associated with it. -### Defining Profiles in YAML + + First, define your profiles at the top level of your Frigate config. Every profile name referenced by a camera must be defined here. @@ -47,8 +53,6 @@ profiles: friendly_name: Night Mode ``` -### Camera Profile Overrides - Under each camera, add a `profiles` section with overrides for each profile. You only need to include the settings you want to change. 
```yaml @@ -91,6 +95,9 @@ cameras: - person ``` + + + ### Supported Override Sections The following camera configuration sections can be overridden in a profile: @@ -113,7 +120,7 @@ The following camera configuration sections can be overridden in a profile: :::note -Only the fields you explicitly set in a profile override are applied. All other fields retain their base configuration values. For zones, profile zones are merged with the camera's base zones — any zone defined in the profile will override or add to the base zones. +Only the fields you explicitly set in a profile override are applied. All other fields retain their base configuration values. For masks and zones, profile masks and zones **override** the camera's base masks and zones. If configuring profiles via YAML, you should not define masks or zones in profiles that are not defined in the base config. ::: @@ -125,6 +132,17 @@ Profiles can be activated and deactivated from the Frigate UI. Open the Settings A common use case is having different detection and notification settings based on whether you are home or away. + + + +1. Navigate to and create two profiles: **Home** and **Away**. +2. For the **front_door** camera, configure the **Away** profile to enable notifications and set alert labels to `person` and `car`. Configure the **Home** profile to disable notifications. +3. For the **indoor_cam** camera, configure the **Away** profile to enable the camera, detection, and recording. Configure the **Home** profile to disable the camera entirely for privacy. +4. Activate the desired profile from or from the **Profiles** option in Frigate's main menu. + + + + ```yaml profiles: home: @@ -181,6 +199,9 @@ cameras: enabled: false ``` + + + In this example: - **Away profile**: The front door camera enables notifications and tracks specific alert labels. The indoor camera is fully enabled with detection and recording.
diff --git a/docs/docs/configuration/record.md b/docs/docs/configuration/record.md index afd26c641..d98f51491 100644 --- a/docs/docs/configuration/record.md +++ b/docs/docs/configuration/record.md @@ -3,7 +3,11 @@ id: record title: Recording --- -Recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM-DD/HH//MM.SS.mp4` in **UTC time**. These recordings are written directly from your camera stream without re-encoding. Each camera supports a configurable retention policy in the config. Frigate chooses the largest matching retention value between the recording retention and the tracked object retention when determining if a recording should be removed. +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + +Recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM-DD/HH//MM.SS.mp4` in **UTC time**. These recordings are written directly from your camera stream without re-encoding. Each camera supports a configurable retention policy. Frigate chooses the largest matching retention value between the recording retention and the tracked object retention when determining if a recording should be removed. New recording segments are written from the camera stream to cache, they are only moved to disk if they match the setup recording retention policy. @@ -13,7 +17,23 @@ H265 recordings can be viewed in Chrome 108+, Edge and Safari only. All other br ### Most conservative: Ensure all video is saved -For users deploying Frigate in environments where it is important to have contiguous video stored even if there was no detectable motion, the following config will store all video for 3 days. After 3 days, only video containing motion will be saved for 7 days. 
After 7 days, only video containing motion and overlapping with alerts or detections will be retained until 30 days have passed. +For users deploying Frigate in environments where it is important to have contiguous video stored even if there was no detectable motion, the following configuration will store all video for 3 days. After 3 days, only video containing motion will be saved for 7 days. After 7 days, only video containing motion and overlapping with alerts or detections will be retained until 30 days have passed. + + + + +Navigate to . + +- Set **Enable recording** to on +- Set **Continuous retention > Retention days** to `3` +- Set **Motion retention > Retention days** to `7` +- Set **Alert retention > Event retention > Retention days** to `30` +- Set **Alert retention > Event retention > Retention mode** to `all` +- Set **Detection retention > Event retention > Retention days** to `30` +- Set **Detection retention > Event retention > Retention mode** to `all` + + + ```yaml record: @@ -32,9 +52,27 @@ record: mode: all ``` + + + ### Reduced storage: Only saving video when motion is detected -In order to reduce storage requirements, you can adjust your config to only retain video where motion / activity was detected. +To reduce storage requirements, configure recording to only retain video where motion or activity was detected. + + + + +Navigate to . 
+ +- Set **Enable recording** to on +- Set **Motion retention > Retention days** to `3` +- Set **Alert retention > Event retention > Retention days** to `30` +- Set **Alert retention > Event retention > Retention mode** to `motion` +- Set **Detection retention > Event retention > Retention days** to `30` +- Set **Detection retention > Event retention > Retention mode** to `motion` + + + ```yaml record: @@ -51,9 +89,25 @@ record: mode: motion ``` + + + ### Minimum: Alerts only -If you only want to retain video that occurs during activity caused by tracked object(s), this config will discard video unless an alert is ongoing. +If you only want to retain video that occurs during activity caused by tracked object(s), this configuration will discard video unless an alert is ongoing. + + + + +Navigate to . + +- Set **Enable recording** to on +- Set **Continuous retention > Retention days** to `0` +- Set **Alert retention > Event retention > Retention days** to `30` +- Set **Alert retention > Event retention > Retention mode** to `motion` + + + ```yaml record: @@ -66,6 +120,9 @@ record: mode: motion ``` + + + ## Will Frigate delete old recordings if my storage runs out? As of Frigate 0.12 if there is less than an hour left of storage, the oldest 2 hours of recordings will be deleted. @@ -82,7 +139,21 @@ Retention configs support decimals meaning they can be configured to retain `0.5 ### Continuous and Motion Recording -The number of days to retain continuous and motion recordings can be set via the following config where X is a number, by default continuous recording is disabled. +The number of days to retain continuous and motion recordings can be configured. By default, continuous recording is disabled. + + + + +Navigate to . 
+ +| Field | Description | +| ----------------------------------------- | -------------------------------------------- | +| **Enable recording** | Enable or disable recording for all cameras | +| **Continuous retention > Retention days** | Number of days to keep continuous recordings | +| **Motion retention > Retention days** | Number of days to keep motion recordings | + + + ```yaml record: @@ -93,11 +164,28 @@ record: days: 2 # <- number of days to keep motion recordings ``` -Continuous recording supports different retention modes [which are described below](#what-do-the-different-retain-modes-mean) + + + +Continuous recording supports different retention modes [which are described below](#configuring-recording-retention). ### Object Recording -The number of days to record review items can be specified for review items classified as alerts as well as tracked objects. +The number of days to retain recordings for review items can be specified for items classified as alerts as well as tracked objects. + + + + +Navigate to . + +| Field | Description | +| ---------------------------------------------------------- | ------------------------------------------- | +| **Enable recording** | Enable or disable recording for all cameras | +| **Alert retention > Event retention > Retention days** | Number of days to keep alert recordings | +| **Detection retention > Event retention > Retention days** | Number of days to keep detection recordings | + + + ```yaml record: @@ -110,9 +198,10 @@ record: days: 10 # <- number of days to keep detections recordings ``` -This configuration will retain recording segments that overlap with alerts and detections for 10 days. Because multiple tracked objects can reference the same recording segments, this avoids storing duplicate footage for overlapping tracked objects and reduces overall storage needs. + + -**WARNING**: Recordings still must be enabled in the config. 
If a camera has recordings disabled in the config, enabling via the methods listed above will have no effect. +This configuration will retain recording segments that overlap with alerts and detections for 10 days. Because multiple tracked objects can reference the same recording segments, this avoids storing duplicate footage for overlapping tracked objects and reduces overall storage needs. ## Can I have "continuous" recordings, but only at certain times? @@ -128,7 +217,7 @@ Time lapse exporting is available only via the [HTTP API](../integrations/api/ex When exporting a time-lapse the default speed-up is 25x with 30 FPS. This means that every 25 seconds of (real-time) recording is condensed into 1 second of time-lapse video (always without audio) with a smoothness of 30 FPS. -To configure the speed-up factor, the frame rate and further custom settings, the configuration parameter `timelapse_args` can be used. The below configuration example would change the time-lapse speed to 60x (for fitting 1 hour of recording into 1 minute of time-lapse) with 25 FPS: +To configure the speed-up factor, the frame rate and further custom settings, use the `timelapse_args` parameter. The below configuration example would change the time-lapse speed to 60x (for fitting 1 hour of recording into 1 minute of time-lapse) with 25 FPS: ```yaml {3-4} record: @@ -139,7 +228,7 @@ record: :::tip -When using `hwaccel_args`, hardware encoding is used for timelapse generation. This setting can be overridden for a specific camera (e.g., when camera resolution exceeds hardware encoder limits); set `cameras..record.export.hwaccel_args` with the appropriate settings. Using an unrecognized value or empty string will fall back to software encoding (libx264). +When using `hwaccel_args`, hardware encoding is used for timelapse generation. 
This setting can be overridden for a specific camera (e.g., when camera resolution exceeds hardware encoder limits); set the camera-level export hwaccel_args with the appropriate settings. Using an unrecognized value or empty string will fall back to software encoding (libx264). ::: diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md index c6ac207aa..e5eb16138 100644 --- a/docs/docs/configuration/reference.md +++ b/docs/docs/configuration/reference.md @@ -951,7 +951,7 @@ cameras: onvif: # Required: host of the camera being connected to. # NOTE: HTTP is assumed by default; HTTPS is supported if you specify the scheme, ex: "https://0.0.0.0". - # NOTE: ONVIF user, and password can be specified with environment variables or docker secrets + # NOTE: ONVIF host, user, and password can be specified with environment variables or docker secrets # that must begin with 'FRIGATE_'. e.g. host: '{FRIGATE_ONVIF_USERNAME}' host: 0.0.0.0 # Optional: ONVIF port for device (default: shown below). @@ -966,6 +966,10 @@ cameras: # Optional: Ignores time synchronization mismatches between the camera and the server during authentication. # Using NTP on both ends is recommended and this should only be set to True in a "safe" environment due to the security risk it represents. ignore_time_mismatch: False + # Optional: ONVIF media profile to use for PTZ control, matched by token or name. (default: shown below) + # If not set, the first profile with valid PTZ configuration is selected automatically. + # Use this when your camera has multiple ONVIF profiles and you need to select a specific one. + profile: None # Optional: PTZ camera object autotracking. Keeps a moving object in # the center of the frame by automatically moving the PTZ camera. 
autotracking: diff --git a/docs/docs/configuration/restream.md b/docs/docs/configuration/restream.md index ac3bcc503..af4d635c6 100644 --- a/docs/docs/configuration/restream.md +++ b/docs/docs/configuration/restream.md @@ -3,6 +3,10 @@ id: restream title: Restream --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + ## RTSP Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://:8554/`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate. @@ -52,6 +56,16 @@ Some cameras only support one active connection or you may just want to have a s One connection is made to the camera. One for the restream, `detect` and `record` connect to the restream. +Configure the go2rtc stream and point the camera inputs at the local restream. + + + + +Navigate to and add stream entries for each camera. Then navigate to for each camera and set the input paths to use the local restream URL (`rtsp://127.0.0.1:8554/`). + + + + ```yaml go2rtc: streams: @@ -87,10 +101,21 @@ cameras: - audio # <- only necessary if audio detection is enabled ``` + + + ### With Sub Stream Two connections are made to the camera. One for the sub stream, one for the restream, `record` connects to the restream. + + + +Navigate to and add stream entries for each camera and its sub stream. Then navigate to for each camera and configure separate inputs for the main and sub streams using the local restream URLs. 
+ + + + ```yaml go2rtc: streams: @@ -138,6 +163,9 @@ cameras: - detect ``` + + + ## Handling Complex Passwords go2rtc expects URL-encoded passwords in the config, [urlencoder.org](https://urlencoder.org) can be used for this purpose. diff --git a/docs/docs/configuration/review.md b/docs/docs/configuration/review.md index d8769749b..4f39611db 100644 --- a/docs/docs/configuration/review.md +++ b/docs/docs/configuration/review.md @@ -3,6 +3,10 @@ id: review title: Review --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + The Review page of the Frigate UI is for quickly reviewing historical footage of interest from your cameras. _Review items_ are indicated on a vertical timeline and displayed as a grid of previews - bandwidth-optimized, low frame rate, low resolution videos. Hovering over or swiping a preview plays the video and marks it as reviewed. If more in-depth analysis is required, the preview can be clicked/tapped and the full frame rate, full resolution recording is displayed. Review items are filterable by date, object type, and camera. @@ -23,7 +27,7 @@ Not every segment of video captured by Frigate may be of the same level of inter :::note -Alerts and detections categorize the tracked objects in review items, but Frigate must first detect those objects with your configured object detector (Coral, OpenVINO, etc). By default, the object tracker only detects `person`. Setting `labels` for `alerts` and `detections` does not automatically enable detection of new objects. To detect more than `person`, you should add the following to your config: +Alerts and detections categorize the tracked objects in review items, but Frigate must first detect those objects with your configured object detector (Coral, OpenVINO, etc). By default, the object tracker only detects `person`. 
Setting `labels` for `alerts` and `detections` does not automatically enable detection of new objects. To detect more than `person`, you should add more labels via or and select your camera. Alternatively, add the following to your config: ```yaml objects: @@ -38,7 +42,17 @@ See the [objects documentation](objects.md) for the list of objects that Frigate ## Restricting alerts to specific labels -By default a review item will only be marked as an alert if a person or car is detected. This can be configured to include any object or audio label using the following config: +By default a review item will only be marked as an alert if a person or car is detected. Configure the alert labels to include any object or audio label. + + + + +Navigate to or and select your camera. + +Expand **Alerts config** and configure which labels and zones should generate alerts. + + + ```yaml # can be overridden at the camera level @@ -52,10 +66,23 @@ review: - speech ``` + + + ## Restricting detections to specific labels By default all detections that do not qualify as an alert qualify as a detection. However, detections can further be filtered to only include certain labels or certain zones. + + + +Navigate to or and select your camera. + +Expand **Detections config** and configure which labels should qualify as detections. + + + + ```yaml # can be overridden at the camera level review: @@ -65,11 +92,23 @@ review: - dog ``` + + + ## Excluding a camera from alerts or detections -To exclude a specific camera from alerts or detections, simply provide an empty list to the alerts or detections field _at the camera level_. +To exclude a specific camera from alerts or detections, provide an empty list to the alerts or detections labels field at the camera level. -For example, to exclude objects on the camera _gatecamera_ from any detections, include this in your config: +For example, to exclude objects on the camera _gatecamera_ from any detections: + + + + +1. 
Navigate to and select the **gatecamera** camera. + - Expand **Detections config** and turn off all of the object label switches. + + + ```yaml {3-5} cameras: @@ -79,6 +118,9 @@ cameras: labels: [] ``` + + + ## Restricting review items to specific zones By default a review item will be created if any `review -> alerts -> labels` and `review -> detections -> labels` are detected anywhere in the camera frame. You will likely want to configure review items to only be created when the object enters an area of interest, [see the zone docs for more information](./zones.md#restricting-alerts-and-detections-to-specific-zones) diff --git a/docs/docs/configuration/semantic_search.md b/docs/docs/configuration/semantic_search.md index 4c646f79a..49e0db88a 100644 --- a/docs/docs/configuration/semantic_search.md +++ b/docs/docs/configuration/semantic_search.md @@ -3,6 +3,10 @@ id: semantic_search title: Semantic Search --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + Semantic Search in Frigate allows you to find tracked objects within your review items using either the image itself, a user-defined text description, or an automatically generated one. This feature works by creating _embeddings_ — numerical vector representations — for both the images and text descriptions of your tracked objects. By comparing these embeddings, Frigate assesses their similarities to deliver relevant search results. Frigate uses models from [Jina AI](https://huggingface.co/jinaai) to create and save embeddings to Frigate's database. All of this runs locally. @@ -19,7 +23,17 @@ For best performance, 16GB or more of RAM and a dedicated GPU are recommended. ## Configuration -Semantic Search is disabled by default, and must be enabled in your config file or in the UI's Enrichments Settings page before it can be used. Semantic Search is a global configuration setting. 
+Semantic Search is disabled by default and must be enabled before it can be used. Semantic Search is a global configuration setting. + + + + +Navigate to . + +- Set **Enable semantic search** to on + + + ```yaml semantic_search: @@ -27,6 +41,9 @@ semantic_search: reindex: False ``` + + + :::tip The embeddings database can be re-indexed from the existing tracked objects in your database by pressing the "Reindex" button in the Enrichments Settings in the UI or by adding `reindex: True` to your `semantic_search` configuration and restarting Frigate. Depending on the number of tracked objects you have, it can take a long while to complete and may max out your CPU while indexing. @@ -41,7 +58,20 @@ The [V1 model from Jina](https://huggingface.co/jinaai/jina-clip-v1) has a visio The V1 text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Explore page when clicking on thumbnail of a tracked object. See [the object description docs](/configuration/genai/objects.md) for more information on how to automatically generate tracked object descriptions. -Differently weighted versions of the Jina models are available and can be selected by setting the `model_size` config option as `small` or `large`: +Differently weighted versions of the Jina models are available and can be selected by setting the model size. + + + + +Navigate to . 
+ +| Field | Description | +| ------------------------------------------------ | -------------------------------------------------------------------------- | +| **Semantic search model or GenAI provider name** | Select `jinav1` to use the Jina AI CLIP V1 model | +| **Model size** | `small` (quantized, CPU-friendly) or `large` (full model, GPU-accelerated) | + + + ```yaml semantic_search: @@ -50,6 +80,9 @@ semantic_search: model_size: small ``` + + + - Configuring the `large` model employs the full Jina model and will automatically run on the GPU if applicable. - Configuring the `small` model employs a quantized version of the Jina model that uses less RAM and runs on CPU with a very negligible difference in embedding quality. @@ -59,7 +92,20 @@ Frigate also supports the [V2 model from Jina](https://huggingface.co/jinaai/jin V2 offers only a 3% performance improvement over V1 in both text-image and text-text retrieval tasks, an upgrade that is unlikely to yield noticeable real-world benefits. Additionally, V2 has _significantly_ higher RAM and GPU requirements, leading to increased inference time and memory usage. If you plan to use V2, ensure your system has ample RAM and a discrete GPU. CPU inference (with the `small` model) using V2 is not recommended. -To use the V2 model, update the `model` parameter in your config: +To use the V2 model, set the model to `jinav2`. + + + + +Navigate to . + +| Field | Description | +| ------------------------------------------------ | ----------------------------------------------------- | +| **Semantic search model or GenAI provider name** | Select `jinav2` to use the Jina AI CLIP V2 model | +| **Model size** | `large` is recommended for V2 (requires discrete GPU) | + + + ```yaml semantic_search: @@ -68,6 +114,9 @@ semantic_search: model_size: large ``` + + + For most users, especially native English speakers, the V1 model remains the recommended choice. 
:::note @@ -82,9 +131,23 @@ Frigate can use a GenAI provider for semantic search embeddings when that provid To use llama.cpp for semantic search: -1. Configure a GenAI provider in your config with `embeddings` in its `roles`. -2. Set `semantic_search.model` to the GenAI config key (e.g. `default`). -3. Start the llama.cpp server with `--embeddings` and `--mmproj` for image support: +1. Configure a GenAI provider with `embeddings` in its `roles`. +2. Set the semantic search model to the GenAI config key (e.g. `default`). +3. Start the llama.cpp server with `--embeddings` and `--mmproj` for image support. + + + + +Navigate to . + +| Field | Description | +| ------------------------------------------------ | ---------------------------------------------------------------------------------------------- | +| **Semantic search model or GenAI provider name** | Set to the GenAI config key (e.g. `default`) to use a configured GenAI provider for embeddings | + +The GenAI provider must also be configured with the `embeddings` role under . + + + ```yaml genai: @@ -102,6 +165,9 @@ semantic_search: model: default ``` + + + The llama.cpp server must be started with `--embeddings` for the embeddings API, and a multi-modal embeddings model. See the [llama.cpp server documentation](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md) for details. :::note @@ -114,6 +180,19 @@ Switching between Jina models and a GenAI provider requires reindexing. Embeddin The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU hardware, when available. This depends on the Docker build that is used. You can also target a specific device in a multi-GPU installation. + + + +Navigate to . 
+ +| Field | Description | +| -------------- | ---------------------------------------------------------------------- | +| **Model size** | Set to `large` to enable GPU acceleration | +| **Device** | (Optional) Specify a GPU device index in a multi-GPU system (e.g. `0`) | + + + + ```yaml semantic_search: enabled: True @@ -122,6 +201,9 @@ semantic_search: device: 0 ``` + + + :::info If the correct build is used for your GPU / NPU and the `large` model is configured, then the GPU will be detected and used automatically. @@ -153,16 +235,15 @@ Semantic Search must be enabled to use Triggers. ### Configuration -Triggers are defined within the `semantic_search` configuration for each camera in your Frigate configuration file or through the UI. Each trigger consists of a `friendly_name`, a `type` (either `thumbnail` or `description`), a `data` field (the reference image event ID or text), a `threshold` for similarity matching, and a list of `actions` to perform when the trigger fires - `notification`, `sub_label`, and `attribute`. +Triggers are defined within the `semantic_search` configuration for each camera. Each trigger consists of a `friendly_name`, a `type` (either `thumbnail` or `description`), a `data` field (the reference image event ID or text), a `threshold` for similarity matching, and a list of `actions` to perform when the trigger fires - `notification`, `sub_label`, and `attribute`. Triggers are best configured through the Frigate UI. #### Managing Triggers in the UI -1. Navigate to the **Settings** page and select the **Triggers** tab. -2. Choose a camera from the dropdown menu to view or manage its triggers. -3. Click **Add Trigger** to create a new trigger or use the pencil icon to edit an existing one. -4. In the **Create Trigger** wizard: +1. Navigate to and select a camera from the dropdown menu. +2. Click **Add Trigger** to create a new trigger or use the pencil icon to edit an existing one. +3. 
In the **Create Trigger** wizard: - Enter a **Name** for the trigger (e.g., "Red Car Alert"). - Enter a descriptive **Friendly Name** for the trigger (e.g., "Red car on the driveway camera"). - Select the **Type** (`Thumbnail` or `Description`). @@ -173,14 +254,14 @@ Triggers are best configured through the Frigate UI. If native webpush notifications are enabled, check the `Send Notification` box to send a notification. Check the `Add Sub Label` box to add the trigger's friendly name as a sub label to any triggering tracked objects. Check the `Add Attribute` box to add the trigger's internal ID (e.g., "red_car_alert") to a data attribute on the tracked object that can be processed via the API or MQTT. -5. Save the trigger to update the configuration and store the embedding in the database. +4. Save the trigger to update the configuration and store the embedding in the database. When a trigger fires, the UI highlights the trigger with a blue dot for 3 seconds for easy identification. Additionally, the UI will show the last date/time and tracked object ID that activated your trigger. The last triggered timestamp is not saved to the database or persisted through restarts of Frigate. ### Usage and Best Practices 1. **Thumbnail Triggers**: Select a representative image (event ID) from the Explore page that closely matches the object you want to detect. For best results, choose images where the object is prominent and fills most of the frame. -2. **Description Triggers**: Write concise, specific text descriptions (e.g., "Person in a red jacket") that align with the tracked object’s description. Avoid vague terms to improve matching accuracy. +2. **Description Triggers**: Write concise, specific text descriptions (e.g., "Person in a red jacket") that align with the tracked object's description. Avoid vague terms to improve matching accuracy. 3. **Threshold Tuning**: Adjust the threshold to balance sensitivity and specificity. 
A higher threshold (e.g., 0.8) requires closer matches, reducing false positives but potentially missing similar objects. A lower threshold (e.g., 0.6) is more inclusive but may trigger more often. 4. **Using Explore**: Use the context menu or right-click / long-press on a tracked object in the Grid View in Explore to quickly add a trigger based on the tracked object's thumbnail. 5. **Editing triggers**: For the best experience, triggers should be edited via the UI. However, Frigate will ensure triggers edited in the config will be synced with triggers created and edited in the UI. @@ -195,6 +276,6 @@ When a trigger fires, the UI highlights the trigger with a blue dot for 3 second #### Why can't I create a trigger on thumbnails for some text, like "person with a blue shirt" and have it trigger when a person with a blue shirt is detected? -TL;DR: Text-to-image triggers aren’t supported because CLIP can confuse similar images and give inconsistent scores, making automation unreliable. The same word–image pair can give different scores and the score ranges can be too close together to set a clear cutoff. +TL;DR: Text-to-image triggers aren't supported because CLIP can confuse similar images and give inconsistent scores, making automation unreliable. The same word-image pair can give different scores and the score ranges can be too close together to set a clear cutoff. -Text-to-image triggers are not supported due to fundamental limitations of CLIP-based similarity search. While CLIP works well for exploratory, manual queries, it is unreliable for automated triggers based on a threshold. Issues include embedding drift (the same text–image pair can yield different cosine distances over time), lack of true semantic grounding (visually similar but incorrect matches), and unstable thresholding (distance distributions are dataset-dependent and often too tightly clustered to separate relevant from irrelevant results). 
Instead, it is recommended to set up a workflow with thumbnail triggers: first use text search to manually select 3–5 representative reference tracked objects, then configure thumbnail triggers based on that visual similarity. This provides robust automation without the semantic ambiguity of text to image matching. +Text-to-image triggers are not supported due to fundamental limitations of CLIP-based similarity search. While CLIP works well for exploratory, manual queries, it is unreliable for automated triggers based on a threshold. Issues include embedding drift (the same text-image pair can yield different cosine distances over time), lack of true semantic grounding (visually similar but incorrect matches), and unstable thresholding (distance distributions are dataset-dependent and often too tightly clustered to separate relevant from irrelevant results). Instead, it is recommended to set up a workflow with thumbnail triggers: first use text search to manually select 3-5 representative reference tracked objects, then configure thumbnail triggers based on that visual similarity. This provides robust automation without the semantic ambiguity of text to image matching. diff --git a/docs/docs/configuration/snapshots.md b/docs/docs/configuration/snapshots.md index 2f339b210..675e68a9c 100644 --- a/docs/docs/configuration/snapshots.md +++ b/docs/docs/configuration/snapshots.md @@ -3,19 +3,134 @@ id: snapshots title: Snapshots --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + Frigate can save a snapshot image to `/media/frigate/clips` for each object that is detected named as `--clean.webp`. They are also accessible [via the api](../integrations/api/event-snapshot-events-event-id-snapshot-jpg-get.api.mdx) Snapshots are accessible in the UI in the Explore pane. This allows for quick submission to the Frigate+ service. 
To only save snapshots for objects that enter a specific zone, [see the zone docs](./zones.md#restricting-snapshots-to-specific-zones) -Snapshots sent via MQTT are configured in the [config file](/configuration) under `cameras -> your_camera -> mqtt` +Snapshots sent via MQTT are configured separately under the camera MQTT settings, not here. + +## Enabling Snapshots + +Enable snapshot saving and configure the default settings that apply to all cameras. + + + + +Navigate to . + +- Set **Enable snapshots** to on + + + + +```yaml +snapshots: + enabled: True +``` + + + + +To override snapshot settings for a specific camera: + + + + +Navigate to and select your camera. + +- Set **Enable snapshots** to on + + + + +```yaml +cameras: + front_door: + snapshots: + enabled: True +``` + + + + +## Snapshot Options + +Configure how snapshots are rendered and stored. These settings control the defaults applied when snapshots are requested via the API. + + + + +Navigate to . + +| Field | Description | +| ------------------------ | ------------------------------------------------------------------------------ | +| **Enable snapshots** | Enable or disable saving snapshots for tracked objects | +| **Timestamp overlay** | Overlay a timestamp on snapshots from API | +| **Bounding box overlay** | Draw bounding boxes for tracked objects on snapshots from API | +| **Crop snapshot** | Crop snapshots from API to the detected object's bounding box | +| **Snapshot height** | Height in pixels to resize snapshots to; leave empty to preserve original size | +| **Snapshot quality** | Encode quality for saved snapshots (0-100) | +| **Required zones** | Zones an object must enter for a snapshot to be saved | + + + + +```yaml +snapshots: + enabled: True + timestamp: False + bounding_box: True + crop: False + height: 175 + required_zones: [] + quality: 60 +``` + + + + +## Snapshot Retention + +Configure how long snapshots are retained on disk. 
Per-object retention overrides allow different retention periods for specific object types. + + + + +Navigate to . + +| Field | Description | +| -------------------------------------------------- | ----------------------------------------------------------------------------------- | +| **Snapshot retention > Default retention** | Number of days to retain snapshots (default: 10) | +| **Snapshot retention > Retention mode** | Retention mode: `all`, `motion`, or `active_objects` | +| **Snapshot retention > Object retention > Person** | Per-object overrides for retention days (e.g., keep `person` snapshots for 15 days) | + + + + +```yaml +snapshots: + enabled: True + retain: + default: 10 + mode: motion + objects: + person: 15 +``` + + + ## Frame Selection Frigate does not save every frame. It picks a single "best" frame for each tracked object based on detection confidence, object size, and the presence of key attributes like faces or license plates. Frames where the object touches the edge of the frame are deprioritized. That best frame is written to disk once tracking ends. -MQTT snapshots are published more frequently — each time a better thumbnail frame is found during tracking, or when the current best image is older than `best_image_timeout` (default: 60s). These use their own annotation settings configured under `cameras -> your_camera -> mqtt`. +MQTT snapshots are published more frequently — each time a better thumbnail frame is found during tracking, or when the current best image is older than `best_image_timeout` (default: 60s). These use their own annotation settings configured under the camera MQTT settings. 
## Rendering @@ -28,4 +143,4 @@ Frigate stores a single clean snapshot on disk: | `/api/events//snapshot-clean.webp` | Returns the same stored snapshot without annotations | | [Frigate+](/plus/first_model) submission | Uses the same stored clean snapshot | -MQTT snapshots are configured separately under `cameras -> your_camera -> mqtt` and are unrelated to the stored event snapshot. +MQTT snapshots are configured separately under the camera MQTT settings and are unrelated to the stored event snapshot. diff --git a/docs/docs/configuration/stationary_objects.md b/docs/docs/configuration/stationary_objects.md index 341d1ea57..63d03374c 100644 --- a/docs/docs/configuration/stationary_objects.md +++ b/docs/docs/configuration/stationary_objects.md @@ -1,14 +1,29 @@ # Stationary Objects +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + An object is considered stationary when it is being tracked and has been in a very similar position for a certain number of frames. This number is defined in the configuration under `detect -> stationary -> threshold`, and is 10x the frame rate (or 10 seconds) by default. Once an object is considered stationary, it will remain stationary until motion occurs within the object at which point object detection will start running again. If the object changes location, it will be considered active. ## Why does it matter if an object is stationary? -Once an object becomes stationary, object detection will not be continually run on that object. This serves to reduce resource usage and redundant detections when there has been no motion near the tracked object. This also means that Frigate is contextually aware, and can for example [filter out recording segments](record.md#what-do-the-different-retain-modes-mean) to only when the object is considered active. 
Motion alone does not determine if an object is "active" for active_objects segment retention. Lighting changes for a parked car won't make an object active. +Once an object becomes stationary, object detection will not be continually run on that object. This serves to reduce resource usage and redundant detections when there has been no motion near the tracked object. This also means that Frigate is contextually aware, and can for example [filter out recording segments](record.md#configuring-recording-retention) to only when the object is considered active. Motion alone does not determine if an object is "active" for active_objects segment retention. Lighting changes for a parked car won't make an object active. ## Tuning stationary behavior -The default config is: +Configure how Frigate handles stationary objects. + + + + +Navigate to . + +- Set **Stationary objects config > Stationary interval** to the frequency for running detection on stationary objects (default: 50). Once stationary, detection runs every nth frame to verify the object is still present. There is no way to disable stationary object tracking with this value. +- Set **Stationary objects config > Stationary threshold** to the number of frames an object must remain relatively still before it is considered stationary (default: 50) + + + ```yaml detect: @@ -17,11 +32,8 @@ detect: threshold: 50 ``` -`interval` is defined as the frequency for running detection on stationary objects. This means that by default once an object is considered stationary, detection will not be run on it until motion is detected or until the interval (every 50th frame by default). With `interval >= 1`, every nth frames detection will be run to make sure the object is still there. - -NOTE: There is no way to disable stationary object tracking with this value. - -`threshold` is the number of frames an object needs to remain relatively still before it is considered stationary. + + ## Why does Frigate track stationary objects? 
diff --git a/docs/docs/configuration/tls.md b/docs/docs/configuration/tls.md index b4bfc1842..9757a7816 100644 --- a/docs/docs/configuration/tls.md +++ b/docs/docs/configuration/tls.md @@ -3,19 +3,36 @@ id: tls title: TLS --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + # TLS Frigate's integrated NGINX server supports TLS certificates. By default Frigate will generate a self signed certificate that will be used for port 8971. Frigate is designed to make it easy to use whatever tool you prefer to manage certificates. Frigate is often running behind a reverse proxy that manages TLS certificates for multiple services. You will likely need to set your reverse proxy to allow self signed certificates or you can disable TLS in Frigate's config. However, if you are running on a dedicated device that's separate from your proxy or if you expose Frigate directly to the internet, you may want to configure TLS with valid certificates. -In many deployments, TLS will be unnecessary. It can be disabled in the config with the following yaml: +In many deployments, TLS will be unnecessary. Disable it as follows: + + + + +Navigate to . + +- Set **Enable TLS** to off if running behind a reverse proxy that handles TLS (default: on) + + + ```yaml tls: enabled: False ``` + + + ## Certificates TLS certificates can be mounted at `/etc/letsencrypt/live/frigate` using a bind mount or docker volume. 
diff --git a/docs/docs/configuration/zones.md b/docs/docs/configuration/zones.md index ba86b4a86..2cb3c8ebe 100644 --- a/docs/docs/configuration/zones.md +++ b/docs/docs/configuration/zones.md @@ -3,6 +3,10 @@ id: zones title: Zones --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + Zones allow you to define a specific area of the frame and apply additional filters for object types so you can determine whether or not an object is within a particular area. Presence in a zone is evaluated based on the bottom center of the bounding box for the object. It does not matter how much of the bounding box overlaps with the zone. For example, the cat in this image is currently in Zone 1, but **not** Zone 2. @@ -16,11 +20,51 @@ Zones can be toggled on or off without removing them from the configuration. Dis During testing, enable the Zones option for the Debug view of your camera (Settings --> Debug) so you can adjust as needed. The zone line will increase in thickness when any object enters the zone. -To create a zone, follow [the steps for a "Motion mask"](masks.md), but use the section of the web UI for creating a zone instead. +## Creating a Zone + + + + +1. Navigate to and select the desired camera. +2. Under the **Zones** section, click the plus icon to add a new zone. +3. Click on the camera's latest image to create the points for the zone boundary. Click the first point again to close the polygon. +4. Configure zone options such as **Friendly name**, **Objects**, **Loitering time**, and **Inertia** in the zone editor. +5. Press **Save** when finished. + + + + +Follow [the steps for creating a mask](masks.md), but use the zone section of the web UI instead. Alternatively, define zones directly in your configuration file: + +```yaml +cameras: + name_of_your_camera: + zones: + entire_yard: + friendly_name: Entire yard + coordinates: 0.123,0.456,0.789,0.012,... 
+``` + + + ### Restricting alerts and detections to specific zones -Often you will only want alerts to be created when an object enters areas of interest. This is done using zones along with setting required_zones. Let's say you only want to have an alert created when an object enters your entire_yard zone, the config would be: +Often you will only want alerts to be created when an object enters areas of interest. This is done by combining zones with required zones for review items. + +To create an alert only when an object enters the `entire_yard` zone: + + + + +Navigate to . + +| Field | Description | +| ---------------------------------- | ----------------------------------------------------------------------------------------- | +| **Alerts config > Required zones** | Zones that an object must enter to be considered an alert; leave empty to allow any zone. | + + + ```yaml {6,8} cameras: @@ -35,7 +79,23 @@ cameras: coordinates: ... ``` -You may also want to filter detections to only be created when an object enters a secondary area of interest. This is done using zones along with setting required_zones. Let's say you want alerts when an object enters the inner area of the yard but detections when an object enters the edge of the yard, the config would be + + + +You may also want to filter detections to only be created when an object enters a secondary area of interest. For example, to trigger alerts when an object enters the inner area of the yard but detections when an object enters the edge of the yard: + + + + +Navigate to . + +| Field | Description | +| -------------------------------------- | -------------------------------------------------------------------------------------------- | +| **Alerts config > Required zones** | Zones that an object must enter to be considered an alert; leave empty to allow any zone. | +| **Detections config > Required zones** | Zones that an object must enter to be considered a detection; leave empty to allow any zone. 
| + + + ```yaml cameras: @@ -56,8 +116,22 @@ cameras: coordinates: ... ``` + + + ### Restricting snapshots to specific zones +To only save snapshots when an object enters a specific zone: + + + + +1. Navigate to and select your camera. + - Set **Required zones** to `entire_yard` + + + + ```yaml cameras: name_of_your_camera: @@ -70,9 +144,24 @@ cameras: coordinates: ... ``` + + + ### Restricting zones to specific objects -Sometimes you want to limit a zone to specific object types to have more granular control of when alerts, detections, and snapshots are saved. The following example will limit one zone to person objects and the other to cars. +Sometimes you want to limit a zone to specific object types to have more granular control of when alerts, detections, and snapshots are saved. The following example limits one zone to person objects and the other to cars. + + + + +1. Navigate to and select the desired camera. +2. Create a zone named `entire_yard` covering everywhere you want to track a person. + - Under **Objects**, add `person` +3. Create a second zone named `front_yard_street` covering just the street. + - Under **Objects**, add `car` + + + ```yaml cameras: @@ -88,6 +177,9 @@ cameras: - car ``` + + + Only car objects can trigger the `front_yard_street` zone and only person can trigger the `entire_yard`. Objects will be tracked for any `person` that enter anywhere in the yard, and for cars only if they enter the street. ### Zone Loitering @@ -103,6 +195,17 @@ When using loitering zones, a review item will behave in the following way: ::: + + + +1. Navigate to and select the desired camera. +2. Edit or create the zone (e.g., `sidewalk`). 
+ - Set **Loitering time** to the desired number of seconds (e.g., `4`) + - Under **Objects**, add the relevant object types (e.g., `person`) + + + + ```yaml cameras: name_of_your_camera: @@ -114,9 +217,22 @@ - person ``` + + + ### Zone Inertia -Sometimes an objects bounding box may be slightly incorrect and the bottom center of the bounding box is inside the zone while the object is not actually in the zone. Zone inertia helps guard against this by requiring an object's bounding box to be within the zone for multiple consecutive frames. This value can be configured: +Sometimes an object's bounding box may be slightly incorrect and the bottom center of the bounding box is inside the zone while the object is not actually in the zone. Zone inertia helps guard against this by requiring an object's bounding box to be within the zone for multiple consecutive frames. + + + + +1. Navigate to and select the desired camera. +2. Edit or create the zone (e.g., `front_yard`). + - Set **Inertia** to the desired number of consecutive frames (e.g., `3`) + + + ```yaml cameras: @@ -129,8 +245,21 @@ - person ``` + + + There may also be cases where you expect an object to quickly enter and exit a zone, like when a car is pulling into the driveway, and you may want to have the object be considered present in the zone immediately: + + + +1. Navigate to and select the desired camera. +2. Edit or create the zone (e.g., `driveway_entrance`). + - Set **Inertia** to `1` + + + + ```yaml cameras: name_of_your_camera: @@ -142,6 +271,9 @@ - car ``` + + + ### Speed Estimation Frigate can be configured to estimate the speed of objects moving through a zone. This works by combining data from Frigate's object tracker and "real world" distance measurements of the edges of the zone. The recommended use case for this feature is to track the speed of vehicles on a road as they move through the zone.
@@ -152,7 +284,19 @@ Your zone must be defined with exactly 4 points and should be aligned to the gro Speed estimation requires a minimum number of frames for your object to be tracked before a valid estimate can be calculated, so create your zone away from places where objects enter and exit for the best results. The object's bounding box must be stable and remain a constant size as it enters and exits the zone. _Your zone should not take up the full frame, and the zone does **not** need to be the same size or larger than the objects passing through it._ An object's speed is tracked while it passes through the zone and then saved to Frigate's database. -Accurate real-world distance measurements are required to estimate speeds. These distances can be specified in your zone config through the `distances` field. +Accurate real-world distance measurements are required to estimate speeds. These distances can be specified through the `distances` field. Each number represents the real-world distance between consecutive points in the `coordinates` list. The fastest and most accurate way to configure this is through the Zone Editor in the Frigate UI. + + + + +1. Navigate to and select the desired camera. +2. Create or edit a zone with exactly 4 points aligned to the ground plane. +3. In the zone editor, enter the real-world **Distances** between each pair of consecutive points. + - For example, if the distance between the first and second points is 10 meters, between the second and third is 12 meters, etc. +4. Distances are measured in meters (metric) or feet (imperial), depending on the **Unit system** setting. + + + ```yaml cameras: @@ -163,16 +307,34 @@ cameras: distances: 10,12,11,13.5 # in meters or feet ``` -Each number in the `distance` field represents the real-world distance between the points in the `coordinates` list. So in the example above, the distance between the first two points ([0.033,0.306] and [0.324,0.138]) is 10. 
The distance between the second and third set of points ([0.324,0.138] and [0.439,0.185]) is 12, and so on. The fastest and most accurate way to configure this is through the Zone Editor in the Frigate UI. +So in the example above, the distance between the first two points ([0.033,0.306] and [0.324,0.138]) is 10. The distance between the second and third set of points ([0.324,0.138] and [0.439,0.185]) is 12, and so on. + + + The `distance` values are measured in meters (metric) or feet (imperial), depending on how `unit_system` is configured in your `ui` config: + + + +Navigate to . + +| Field | Description | +| --------------- | -------------------------------------------------------------------- | +| **Unit system** | Set to `metric` (kilometers per hour) or `imperial` (miles per hour) | + + + + ```yaml ui: # can be "metric" or "imperial", default is metric unit_system: metric ``` + + + The average speed of your object as it moved through your zone is saved in Frigate's database and can be seen in the UI in the Tracked Object Details pane in Explore. Current estimated speed can also be seen on the debug view as the third value in the object label (see the caveats below). Current estimated speed, average estimated speed, and velocity angle (the angle of the direction the object is moving relative to the frame) of tracked objects is also sent through the `events` MQTT topic. See the [MQTT docs](../integrations/mqtt.md#frigateevents). These speed values are output as a number in miles per hour (mph) or kilometers per hour (kph). For miles per hour, set `unit_system` to `imperial`. For kilometers per hour, set `unit_system` to `metric`. @@ -191,6 +353,17 @@ These speed values are output as a number in miles per hour (mph) or kilometers Zones can be configured with a minimum speed requirement, meaning an object must be moving at or above this speed to be considered inside the zone. Zone `distances` must be defined as described above. + + + +1. 
Navigate to and select the desired camera. +2. Edit or create the zone with distances configured. + - Set **Speed threshold** to the desired minimum speed (e.g., `20`) + - The unit is kph or mph, depending on the **Unit system** setting + + + + ```yaml cameras: name_of_your_camera: @@ -202,3 +375,6 @@ cameras: # highlight-next-line speed_threshold: 20 # unit is in kph or mph, depending on how unit_system is set (see above) ``` + + + diff --git a/docs/docs/frigate/camera_setup.md b/docs/docs/frigate/camera_setup.md index 64c650c13..4cb56dc50 100644 --- a/docs/docs/frigate/camera_setup.md +++ b/docs/docs/frigate/camera_setup.md @@ -34,7 +34,7 @@ For the Dahua/Loryta 5442 camera, I use the following settings: - Encode Mode: H.264 - Resolution: 2688\*1520 - Frame Rate(FPS): 15 -- I Frame Interval: 30 (15 can also be used to prioritize streaming performance - see the [camera settings recommendations](/configuration/live#camera_settings_recommendations) for more info) +- I Frame Interval: 30 (15 can also be used to prioritize streaming performance - see the [camera settings recommendations](/configuration/live#camera-settings-recommendations) for more info) **Sub Stream (Detection)** diff --git a/docs/docs/frigate/hardware.md b/docs/docs/frigate/hardware.md index 86afbfa53..afbd95aaf 100644 --- a/docs/docs/frigate/hardware.md +++ b/docs/docs/frigate/hardware.md @@ -95,7 +95,7 @@ Frigate supports multiple different detectors that work on different types of ha **Rockchip** - [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs to provide efficient object detection. - - [Supports limited model architectures](../../configuration/object_detectors#choosing-a-model) + - [Supports limited model architectures](../../configuration/object_detectors#rockchip-supported-models) - Runs best with tiny or small size models - Runs efficiently on low power hardware @@ -263,7 +263,7 @@ Inference speeds may vary depending on the host platform. 
The above data was mea ### Nvidia Jetson -Jetson devices are supported via the TensorRT or ONNX detectors when running Jetpack 6. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration_video#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector). +Jetson devices are supported via the TensorRT or ONNX detectors when running Jetpack 6. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration_video#nvidia-jetson) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector). Inference speed will vary depending on the YOLO model, jetson platform and jetson nvpmodel (GPU/DLA/EMC clock speed). It is typically 20-40 ms for most models. The DLA is more efficient than the GPU, but not faster, so using the DLA will reduce power consumption but will slightly increase inference time. diff --git a/docs/docs/frigate/installation.md b/docs/docs/frigate/installation.md index a115ecf97..2f2e55fa0 100644 --- a/docs/docs/frigate/installation.md +++ b/docs/docs/frigate/installation.md @@ -271,7 +271,7 @@ If you are using `docker run`, add this option to your command `--device /dev/ha #### Configuration -Finally, configure [hardware object detection](/configuration/object_detectors#hailo-8l) to complete the setup. +Finally, configure [hardware object detection](/configuration/object_detectors#hailo-8) to complete the setup. 
### MemryX MX3 diff --git a/docs/docs/frigate/updating.md b/docs/docs/frigate/updating.md index 841a3e2d5..a4dfb7f0a 100644 --- a/docs/docs/frigate/updating.md +++ b/docs/docs/frigate/updating.md @@ -5,7 +5,7 @@ title: Updating # Updating Frigate -The current stable version of Frigate is **0.17.0**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.17.0). +The current stable version of Frigate is **0.18.0**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.18.0). Keeping Frigate up to date ensures you benefit from the latest features, performance improvements, and bug fixes. The update process varies slightly depending on your installation method (Docker, Home Assistant App, etc.). Below are instructions for the most common setups. @@ -31,21 +31,21 @@ If you’re running Frigate via Docker (recommended method), follow these steps: 2. **Update and Pull the Latest Image**: - If using Docker Compose: - - Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.17.0` instead of `0.16.4`). For example: + - Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.18.0` instead of `0.17.1`). For example: ```yaml services: frigate: - image: ghcr.io/blakeblackshear/frigate:0.17.0 + image: ghcr.io/blakeblackshear/frigate:0.18.0 ``` - Then pull the image: ```bash - docker pull ghcr.io/blakeblackshear/frigate:0.17.0 + docker pull ghcr.io/blakeblackshear/frigate:0.18.0 ``` - **Note for `stable` Tag Users**: If your `docker-compose.yml` uses the `stable` tag (e.g., `ghcr.io/blakeblackshear/frigate:stable`), you don’t need to update the tag manually. The `stable` tag always points to the latest stable release after pulling. 
- If using `docker run`: - - Pull the image with the appropriate tag (e.g., `0.17.0`, `0.17.0-tensorrt`, or `stable`): + - Pull the image with the appropriate tag (e.g., `0.18.0`, `0.18.0-tensorrt`, or `stable`): ```bash - docker pull ghcr.io/blakeblackshear/frigate:0.17.0 + docker pull ghcr.io/blakeblackshear/frigate:0.18.0 ``` 3. **Start the Container**: @@ -77,6 +77,7 @@ For users running Frigate as a Home Assistant App: - If an update is available, you’ll see an "Update" button. 2. **Update the App**: + - Make a backup of the current version of the app. - Click the "Update" button next to the Frigate app. - Wait for the process to complete. Home Assistant will handle downloading and installing the new version. @@ -99,7 +100,7 @@ If an update causes issues: 1. Stop Frigate. 2. Restore your backed-up config file and database. 3. Revert to the previous image version: - - For Docker: Specify an older tag (e.g., `ghcr.io/blakeblackshear/frigate:0.16.4`) in your `docker run` command. + - For Docker: Specify an older tag (e.g., `ghcr.io/blakeblackshear/frigate:0.17.1`) in your `docker run` command. - For Docker Compose: Edit your `docker-compose.yml`, specify the older version tag (e.g., `ghcr.io/blakeblackshear/frigate:0.16.4`), and re-run `docker compose up -d`. - For Home Assistant: Restore from the app/addon backup you took before you updated. 4. Verify the old version is running again. diff --git a/docs/docs/guides/configuring_go2rtc.md b/docs/docs/guides/configuring_go2rtc.md index 4d632fdd6..26fb26644 100644 --- a/docs/docs/guides/configuring_go2rtc.md +++ b/docs/docs/guides/configuring_go2rtc.md @@ -17,7 +17,7 @@ First, you will want to configure go2rtc to connect to your camera stream by add For the best experience, you should set the stream name under `go2rtc` to match the name of your camera so that Frigate will automatically map it and be able to use better live view options for the camera. 
-See [the live view docs](../configuration/live.md#setting-stream-for-live-ui) for more information. +See [the live view docs](../configuration/live.md#setting-streams-for-live-ui) for more information. ::: diff --git a/docs/docs/guides/getting_started.md b/docs/docs/guides/getting_started.md index 30f4ce016..cd456f201 100644 --- a/docs/docs/guides/getting_started.md +++ b/docs/docs/guides/getting_started.md @@ -3,6 +3,10 @@ id: getting_started title: Getting started --- +import ConfigTabs from "@site/src/components/ConfigTabs"; +import TabItem from "@theme/TabItem"; +import NavPath from "@site/src/components/NavPath"; + # Getting Started :::tip @@ -85,7 +89,7 @@ This section shows how to create a minimal directory structure for a Docker inst ### Setup directories -Frigate will create a config file if one does not exist on the initial startup. The following directory structure is the bare minimum to get started. Once Frigate is running, you can use the built-in config editor which supports config validation. +Frigate will create a config file if one does not exist on the initial startup. The following directory structure is the bare minimum to get started. ``` . @@ -128,7 +132,7 @@ services: - "8554:8554" # RTSP feeds ``` -Now you should be able to start Frigate by running `docker compose up -d` from within the folder containing `docker-compose.yml`. On startup, an admin user and password will be created and outputted in the logs. You can see this by running `docker logs frigate`. Frigate should now be accessible at `https://server_ip:8971` where you can login with the `admin` user and finish the configuration using the built-in configuration editor. +Now you should be able to start Frigate by running `docker compose up -d` from within the folder containing `docker-compose.yml`. On startup, an admin user and password will be created and outputted in the logs. You can see this by running `docker logs frigate`. 
Frigate should now be accessible at `https://server_ip:8971` where you can login with the `admin` user and finish configuration using the Settings UI. ## Configuring Frigate @@ -140,15 +144,15 @@ At this point you should be able to start Frigate and a basic config will be cre ### Step 2: Add a camera -You can click the `Add Camera` button to use the camera setup wizard to get your first camera added into Frigate. +Click the **Add Camera** button in to use the camera setup wizard to get your first camera added into Frigate. ### Step 3: Configure hardware acceleration (recommended) -Now that you have a working camera configuration, you want to setup hardware acceleration to minimize the CPU required to decode your video streams. See the [hardware acceleration](../configuration/hardware_acceleration_video.md) config reference for examples applicable to your hardware. +Now that you have a working camera configuration, set up hardware acceleration to minimize the CPU required to decode your video streams. See the [hardware acceleration](../configuration/hardware_acceleration_video.md) docs for examples applicable to your hardware. -Here is an example configuration with hardware acceleration configured to work with most Intel processors with an integrated GPU using the [preset](../configuration/ffmpeg_presets.md): +:::note -`docker-compose.yml` (after modifying, you will need to run `docker compose up -d` to apply changes) +Hardware acceleration requires passing the appropriate device to the Docker container. For Intel and AMD GPUs, add the device to your `docker-compose.yml`: ```yaml {4,5} services: @@ -159,7 +163,17 @@ services: ... ``` -`config.yml` +After modifying, run `docker compose up -d` to apply changes. + +::: + + + + +Navigate to and set **Hardware acceleration arguments** to the appropriate preset for your hardware (e.g., `VAAPI (Intel/AMD GPU)` for most Intel processors). + + + ```yaml mqtt: ... @@ -173,6 +187,9 @@ cameras: detect: ... 
``` + + + ### Step 4: Configure detectors By default, Frigate will use a single CPU detector. @@ -184,6 +201,24 @@ In many cases, the integrated graphics on Intel CPUs provides sufficient perform You need to refer to **Configure hardware acceleration** above to enable the container to use the GPU. + + + +1. Navigate to and add a detector with **Type** `OpenVINO` and **Device** `GPU` +2. Navigate to and configure the model settings for OpenVINO: + +| Field | Value | +| ---------------------------------------- | ------------------------------------------ | +| **Object detection model input width** | `300` | +| **Object detection model input height** | `300` | +| **Model Input Tensor Shape** | `nhwc` | +| **Model Input Pixel Color Format** | `bgr` | +| **Custom object detector model path** | `/openvino-model/ssdlite_mobilenet_v2.xml` | +| **Label map for custom object detector** | `/openvino-model/coco_91cl_bkgr.txt` | + + + + ```yaml {3-6,9-15,20-21} mqtt: ... @@ -209,6 +244,9 @@ cameras: ... ``` + + + If you have a USB Coral, you will need to add a detectors section to your config. @@ -216,7 +254,9 @@ If you have a USB Coral, you will need to add a detectors section to your config
Use USB Coral detector -`docker-compose.yml` (after modifying, you will need to run `docker compose up -d` to apply changes) +:::note + +You need to pass the USB Coral device to the Docker container. Add the following to your `docker-compose.yml` and run `docker compose up -d`: ```yaml {4-6} services: @@ -228,6 +268,16 @@ services: ... ``` +::: + + + + +Navigate to and add a detector with **Type** `EdgeTPU` and **Device** `usb`. + + + + ```yaml {3-6,11-12} mqtt: ... @@ -244,17 +294,20 @@ cameras: ... ``` + + +
More details on available detectors can be found [here](../configuration/object_detectors.md). -Restart Frigate and you should start seeing detections for `person`. If you want to track other objects, they will need to be added according to the [configuration file reference](../configuration/reference.md). +Restart Frigate and you should start seeing detections for `person`. If you want to track other objects, they can be configured in or via the [configuration file reference](../configuration/reference.md). ### Step 5: Setup motion masks -Now that you have optimized your configuration for decoding the video stream, you will want to check to see where to implement motion masks. To do this, navigate to the camera in the UI, select "Debug" at the top, and enable "Motion boxes" in the options below the video feed. Watch for areas that continuously trigger unwanted motion to be detected. Common areas to mask include camera timestamps and trees that frequently blow in the wind. The goal is to avoid wasting object detection cycles looking at these areas. +Now that you have optimized your configuration for decoding the video stream, you will want to check to see where to implement motion masks. Click on the camera from the main dashboard, then select the gear icon in the top right, enable Debug View, and finally enable the switch for Motion Boxes. Watch for areas that continuously trigger unwanted motion to be detected. Common areas to mask include camera timestamps and trees that frequently blow in the wind. The goal is to avoid wasting object detection cycles looking at these areas. -Now that you know where you need to mask, use the "Mask & Zone creator" in the options pane to generate the coordinates needed for your config file. More information about masks can be found [here](../configuration/masks.md). +Use the mask editor to draw polygon masks directly on the camera feed. Navigate to and set up a motion mask over the area. 
More information about masks can be found [here](../configuration/masks.md). :::warning @@ -262,7 +315,7 @@ Note that motion masks should not be used to mark out areas where you do not wan ::: -Your configuration should look similar to this now. +If you are using YAML to configure Frigate instead of the UI, your configuration should look similar to this now: ```yaml {16-18} mqtt: @@ -292,7 +345,14 @@ cameras: In order to review activity in the Frigate UI, recordings need to be enabled. -To enable recording video, add the `record` role to a stream and enable it in the config. If record is disabled in the config, it won't be possible to enable it in the UI. + + + +1. If you have separate streams for detect and record, navigate to , select your camera, and add a second input with the `record` role pointing to your high-resolution stream +2. Navigate to (or for a specific camera) and set **Enable recording** to on + + + ```yaml {16-17} mqtt: ... @@ -315,6 +375,9 @@ cameras: motion: ... ``` + + + If you don't have separate streams for detect and record, you would just add the record role to the list on the first input. :::note diff --git a/docs/scripts/README.md b/docs/scripts/README.md new file mode 100644 index 000000000..347536a07 --- /dev/null +++ b/docs/scripts/README.md @@ -0,0 +1,184 @@ +# Documentation Scripts + +## generate_ui_tabs.py + +Automatically generates "Frigate UI" tab content for documentation files based on the YAML config examples already in the docs. + +Instead of manually writing UI instructions for every YAML block, this script reads three data sources from the codebase and generates the UI tabs: + +1. **JSON Schema** (from Pydantic config models) -- field names, types, defaults +2. **i18n translation files** -- the exact labels shown in the Settings UI +3. **Section mappings** (from Settings.tsx) -- config key to UI navigation path + +### Prerequisites + +Run from the repository root. 
The script imports Frigate's Python config models directly, so the `frigate` package must be importable: + +```bash +# From repo root -- no extra install needed if your environment can import frigate +python3 docs/scripts/generate_ui_tabs.py --help +``` + +### Usage + +#### Preview (default) + +Shows what would be generated for each bare YAML block, without modifying any files: + +```bash +# Single file +python3 docs/scripts/generate_ui_tabs.py docs/docs/configuration/record.md + +# All config docs +python3 docs/scripts/generate_ui_tabs.py docs/docs/configuration/ +``` + +#### Inject + +Wraps bare YAML blocks with `` and inserts the generated UI tab. Also adds the required imports (`ConfigTabs`, `TabItem`, `NavPath`) after the frontmatter if missing. + +Already-wrapped blocks are skipped (idempotent). + +```bash +python3 docs/scripts/generate_ui_tabs.py --inject docs/docs/configuration/record.md +``` + +#### Check + +Compares existing UI tabs against what the script would generate from the current schema and i18n files. Prints a unified diff for each drifted block and exits with code 1 if any drift is found. + +Use this in CI to catch stale docs after schema or i18n changes. + +```bash +python3 docs/scripts/generate_ui_tabs.py --check docs/docs/configuration/ +``` + +#### Regenerate + +Replaces the UI tab content in existing `` blocks with freshly generated content. The YAML tab is preserved exactly as-is. Only blocks that have actually changed are rewritten. + +```bash +# Preview changes without writing +python3 docs/scripts/generate_ui_tabs.py --regenerate --dry-run docs/docs/configuration/ + +# Apply changes +python3 docs/scripts/generate_ui_tabs.py --regenerate docs/docs/configuration/ +``` + +#### Output to directory (`--outdir`) + +Write generated files to a separate directory instead of modifying the originals. The source directory structure is mirrored. Files without changes are copied as-is so the output is a complete snapshot suitable for diffing. 
+ +Works with `--inject` and `--regenerate`. + +```bash +# Generate into a named directory +python3 docs/scripts/generate_ui_tabs.py --inject --outdir /tmp/generated docs/docs/configuration/ + +# Then diff original vs generated +diff -rq docs/docs/configuration/ /tmp/generated/ + +# Or let an AI agent compare them +diff -ru docs/docs/configuration/record.md /tmp/generated/record.md +``` + +This is useful for AI agents that need to review the generated output before applying it, or for previewing what `--inject` or `--regenerate` would do across an entire directory. + +#### Verbose mode + +Add `-v` to any mode for detailed diagnostics (skipped blocks, reasons, unchanged blocks): + +```bash +python3 docs/scripts/generate_ui_tabs.py -v docs/docs/configuration/ +``` + +### Typical workflow + +```bash +# 1. Preview what would be generated (output to temp dir, originals untouched) +python3 docs/scripts/generate_ui_tabs.py --inject --outdir /tmp/ui-preview docs/docs/configuration/ +# Compare: diff -ru docs/docs/configuration/ /tmp/ui-preview/ + +# 2. Apply: inject UI tabs into the actual docs +python3 docs/scripts/generate_ui_tabs.py --inject docs/docs/configuration/ + +# 3. Review and hand-edit where needed (the script gets you 90% there) + +# 4. Later, after schema or i18n changes, check for drift +python3 docs/scripts/generate_ui_tabs.py --check docs/docs/configuration/ + +# 5. If drifted, preview then regenerate +python3 docs/scripts/generate_ui_tabs.py --regenerate --outdir /tmp/ui-regen docs/docs/configuration/ +# Compare: diff -ru docs/docs/configuration/ /tmp/ui-regen/ + +# 6. 
Apply regeneration +python3 docs/scripts/generate_ui_tabs.py --regenerate docs/docs/configuration/ +``` + +### How it decides what to generate + +The script detects two patterns from the YAML block content: + +**Pattern A -- Field table.** When the YAML has inline comments (e.g., `# <- description`), the script generates a markdown table with field names and descriptions: + +```markdown +Navigate to . + +| Field | Description | +|-------|-------------| +| **Continuous retention > Retention days** | Days to retain recordings. | +| **Motion retention > Retention days** | Days to retain recordings. | +``` + +**Pattern B -- Set instructions.** When the YAML has concrete values without comments, the script generates step-by-step instructions: + +```markdown +Navigate to . + +- Set **Enable recording** to on +- Set **Continuous retention > Retention days** to `3` +- Set **Alert retention > Event retention > Retention days** to `30` +- Set **Alert retention > Event retention > Retention mode** to `all` +``` + +**Camera-level config** is auto-detected when the YAML is nested under `cameras:`. The output uses a generic camera reference rather than the example camera name from the YAML: + +```markdown +1. Navigate to and select your camera. + - Set **Enable recording** to on + - Set **Continuous retention > Retention days** to `5` +``` + +### What gets skipped + +- YAML blocks already inside `` (for `--inject`) +- YAML blocks whose top-level key is not a known config section (e.g., `go2rtc`, `docker-compose`, `scrape_configs`) +- Fields listed in `hiddenFields` in the section configs (e.g., `enabled_in_config`) + +### File structure + +``` +docs/scripts/ +├── generate_ui_tabs.py # CLI entry point +├── README.md # This file +└── lib/ + ├── __init__.py + ├── schema_loader.py # Loads JSON schema from Pydantic models + ├── i18n_loader.py # Loads i18n translation JSON files + ├── section_config_parser.py # Parses TS section configs (hiddenFields, etc.) 
+ ├── yaml_extractor.py # Extracts YAML blocks and ConfigTabs from markdown + ├── ui_generator.py # Generates UI tab markdown content + └── nav_map.py # Maps config sections to Settings UI nav paths +``` + +### Data sources + +| Source | Path | What it provides | +|--------|------|------------------| +| Pydantic models | `frigate/config/` | Field names, types, defaults, nesting | +| JSON schema | Generated from Pydantic at runtime | Full schema with `$defs` and `$ref` | +| i18n (global) | `web/public/locales/en/config/global.json` | Field labels for global settings | +| i18n (cameras) | `web/public/locales/en/config/cameras.json` | Field labels for camera settings | +| i18n (menu) | `web/public/locales/en/views/settings.json` | Sidebar menu labels | +| Section configs | `web/src/components/config-form/section-configs/*.ts` | Hidden fields, advanced fields, field order | +| Navigation map | Hardcoded from `web/src/pages/Settings.tsx` | Config section to UI path mapping | diff --git a/docs/scripts/generate_ui_tabs.py b/docs/scripts/generate_ui_tabs.py new file mode 100644 index 000000000..fa468922c --- /dev/null +++ b/docs/scripts/generate_ui_tabs.py @@ -0,0 +1,660 @@ +#!/usr/bin/env python3 +"""Generate Frigate UI tab content for documentation files. 
+ +This script reads YAML code blocks from documentation markdown files and +generates corresponding "Frigate UI" tab instructions based on: +- JSON Schema (from Pydantic config models) +- i18n translation files (for UI field labels) +- Section configs (for hidden/advanced field info) +- Navigation mappings (for Settings UI paths) + +Usage: + # Preview generated UI tabs for a single file + python docs/scripts/generate_ui_tabs.py docs/docs/configuration/record.md + + # Preview all config docs + python docs/scripts/generate_ui_tabs.py docs/docs/configuration/ + + # Inject UI tabs into files (wraps bare YAML blocks with ConfigTabs) + python docs/scripts/generate_ui_tabs.py --inject docs/docs/configuration/record.md + + # Regenerate existing UI tabs from current schema/i18n + python docs/scripts/generate_ui_tabs.py --regenerate docs/docs/configuration/ + + # Check for drift between existing UI tabs and what would be generated + python docs/scripts/generate_ui_tabs.py --check docs/docs/configuration/ + + # Write generated files to a temp directory for comparison (originals unchanged) + python docs/scripts/generate_ui_tabs.py --inject --outdir /tmp/generated docs/docs/configuration/ + + # Show detailed warnings and diagnostics + python docs/scripts/generate_ui_tabs.py --verbose docs/docs/configuration/ +""" + +import argparse +import difflib +import shutil +import sys +import tempfile +from pathlib import Path + +# Ensure frigate package is importable +sys.path.insert(0, str(Path(__file__).resolve().parents[1].parent)) + +from lib.i18n_loader import load_i18n +from lib.nav_map import ALL_CONFIG_SECTIONS +from lib.schema_loader import load_schema +from lib.section_config_parser import load_section_configs +from lib.ui_generator import generate_ui_content, wrap_with_config_tabs +from lib.yaml_extractor import ( + extract_config_tabs_blocks, + extract_yaml_blocks, +) + + +def process_file( + filepath: Path, + schema: dict, + i18n: dict, + section_configs: dict, + inject: 
bool = False, + verbose: bool = False, + outpath: Path | None = None, +) -> dict: + """Process a single markdown file for initial injection of bare YAML blocks. + + Args: + outpath: If set, write the result here instead of modifying filepath. + + Returns: + Stats dict with counts of blocks found, generated, skipped, etc. + """ + content = filepath.read_text() + blocks = extract_yaml_blocks(content) + + stats = { + "file": str(filepath), + "total_blocks": len(blocks), + "config_blocks": 0, + "already_wrapped": 0, + "generated": 0, + "skipped": 0, + "warnings": [], + } + + if not blocks: + return stats + + # For injection, we need to track replacements + replacements: list[tuple[int, int, str]] = [] + + for block in blocks: + # Skip non-config YAML blocks + if block.section_key is None or ( + block.section_key not in ALL_CONFIG_SECTIONS + and not block.is_camera_level + ): + stats["skipped"] += 1 + if verbose and block.config_keys: + stats["warnings"].append( + f" Line {block.line_start}: Skipped block with keys " + f"{block.config_keys} (not a known config section)" + ) + continue + + stats["config_blocks"] += 1 + + # Skip already-wrapped blocks + if block.inside_config_tabs: + stats["already_wrapped"] += 1 + if verbose: + stats["warnings"].append( + f" Line {block.line_start}: Already inside ConfigTabs, skipping" + ) + continue + + # Generate UI content + ui_content = generate_ui_content( + block, schema, i18n, section_configs + ) + + if ui_content is None: + stats["skipped"] += 1 + if verbose: + stats["warnings"].append( + f" Line {block.line_start}: Could not generate UI content " + f"for section '{block.section_key}'" + ) + continue + + stats["generated"] += 1 + + if inject: + full_block = wrap_with_config_tabs( + ui_content, block.raw, block.highlight + ) + replacements.append((block.line_start, block.line_end, full_block)) + else: + # Preview mode: print to stdout + print(f"\n{'='*60}") + print(f"File: {filepath}") + print(f"Line {block.line_start}: 
section={block.section_key}, " + f"camera={block.is_camera_level}") + print(f"{'='*60}") + print() + print("--- Generated UI tab ---") + print(ui_content) + print() + print("--- Would produce ---") + print(wrap_with_config_tabs(ui_content, block.raw, block.highlight)) + print() + + # Apply injections in reverse order (to preserve line numbers) + if inject and replacements: + lines = content.split("\n") + for start, end, replacement in reversed(replacements): + # start/end are 1-based line numbers + # The YAML block spans from the ``` line before start to the ``` line at end + # We need to replace from the opening ``` to the closing ``` + block_start = start - 2 # 0-based index of ```yaml line + block_end = end - 1 # 0-based index of closing ``` line + + replacement_lines = replacement.split("\n") + lines[block_start : block_end + 1] = replacement_lines + + new_content = "\n".join(lines) + + # Ensure imports are present + new_content = _ensure_imports(new_content) + + target = outpath or filepath + target.parent.mkdir(parents=True, exist_ok=True) + target.write_text(new_content) + print(f" Injected {len(replacements)} ConfigTabs block(s) into {target}") + elif outpath is not None: + # No changes but outdir requested -- copy original so the output + # directory contains a complete set of files for diffing. + outpath.parent.mkdir(parents=True, exist_ok=True) + shutil.copy2(filepath, outpath) + + return stats + + +def regenerate_file( + filepath: Path, + schema: dict, + i18n: dict, + section_configs: dict, + dry_run: bool = False, + verbose: bool = False, + outpath: Path | None = None, +) -> dict: + """Regenerate UI tabs in existing ConfigTabs blocks. + + Strips the current UI tab content and regenerates it from the YAML tab + using the current schema and i18n data. + + Args: + outpath: If set, write the result here instead of modifying filepath. 
+ + Returns: + Stats dict + """ + content = filepath.read_text() + tab_blocks = extract_config_tabs_blocks(content) + + stats = { + "file": str(filepath), + "total_blocks": len(tab_blocks), + "regenerated": 0, + "unchanged": 0, + "skipped": 0, + "warnings": [], + } + + if not tab_blocks: + return stats + + replacements: list[tuple[int, int, str]] = [] + + for tab_block in tab_blocks: + yaml_block = tab_block.yaml_block + + # Skip non-config blocks + if yaml_block.section_key is None or ( + yaml_block.section_key not in ALL_CONFIG_SECTIONS + and not yaml_block.is_camera_level + ): + stats["skipped"] += 1 + if verbose: + stats["warnings"].append( + f" Line {tab_block.line_start}: Skipped (not a config section)" + ) + continue + + # Generate fresh UI content + new_ui = generate_ui_content( + yaml_block, schema, i18n, section_configs + ) + + if new_ui is None: + stats["skipped"] += 1 + if verbose: + stats["warnings"].append( + f" Line {tab_block.line_start}: Could not regenerate " + f"for section '{yaml_block.section_key}'" + ) + continue + + # Compare with existing + existing_ui = tab_block.ui_content + if _normalize_whitespace(new_ui) == _normalize_whitespace(existing_ui): + stats["unchanged"] += 1 + if verbose: + stats["warnings"].append( + f" Line {tab_block.line_start}: Unchanged" + ) + continue + + stats["regenerated"] += 1 + + new_full = wrap_with_config_tabs( + new_ui, yaml_block.raw, yaml_block.highlight + ) + replacements.append( + (tab_block.line_start, tab_block.line_end, new_full) + ) + + if dry_run or verbose: + print(f"\n{'='*60}") + print(f"File: {filepath}, line {tab_block.line_start}") + print(f"Section: {yaml_block.section_key}") + print(f"{'='*60}") + _print_diff(existing_ui, new_ui, filepath, tab_block.line_start) + + # Apply replacements + if not dry_run and replacements: + lines = content.split("\n") + for start, end, replacement in reversed(replacements): + block_start = start - 1 # 0-based index of line + block_end = end - 1 # 0-based index of 
line + replacement_lines = replacement.split("\n") + lines[block_start : block_end + 1] = replacement_lines + + new_content = "\n".join(lines) + target = outpath or filepath + target.parent.mkdir(parents=True, exist_ok=True) + target.write_text(new_content) + print( + f" Regenerated {len(replacements)} ConfigTabs block(s) in {target}", + file=sys.stderr, + ) + elif outpath is not None: + outpath.parent.mkdir(parents=True, exist_ok=True) + shutil.copy2(filepath, outpath) + + return stats + + +def check_file( + filepath: Path, + schema: dict, + i18n: dict, + section_configs: dict, + verbose: bool = False, +) -> dict: + """Check for drift between existing UI tabs and what would be generated. + + Returns: + Stats dict with drift info. Non-zero "drifted" means the file is stale. + """ + content = filepath.read_text() + tab_blocks = extract_config_tabs_blocks(content) + + stats = { + "file": str(filepath), + "total_blocks": len(tab_blocks), + "up_to_date": 0, + "drifted": 0, + "skipped": 0, + "warnings": [], + } + + if not tab_blocks: + return stats + + for tab_block in tab_blocks: + yaml_block = tab_block.yaml_block + + if yaml_block.section_key is None or ( + yaml_block.section_key not in ALL_CONFIG_SECTIONS + and not yaml_block.is_camera_level + ): + stats["skipped"] += 1 + continue + + new_ui = generate_ui_content( + yaml_block, schema, i18n, section_configs + ) + + if new_ui is None: + stats["skipped"] += 1 + continue + + existing_ui = tab_block.ui_content + if _normalize_whitespace(new_ui) == _normalize_whitespace(existing_ui): + stats["up_to_date"] += 1 + else: + stats["drifted"] += 1 + print(f"\n{'='*60}") + print(f"DRIFT: {filepath}, line {tab_block.line_start}") + print(f"Section: {yaml_block.section_key}") + print(f"{'='*60}") + _print_diff(existing_ui, new_ui, filepath, tab_block.line_start) + + return stats + + +def _normalize_whitespace(text: str) -> str: + """Normalize whitespace for comparison (strip lines, collapse blanks).""" + lines = [line.rstrip() 
for line in text.strip().splitlines()] + # Collapse multiple blank lines into one + result: list[str] = [] + prev_blank = False + for line in lines: + if line == "": + if not prev_blank: + result.append(line) + prev_blank = True + else: + result.append(line) + prev_blank = False + return "\n".join(result) + + +def _print_diff(existing: str, generated: str, filepath: Path, line: int): + """Print a unified diff between existing and generated UI content.""" + existing_lines = existing.strip().splitlines(keepends=True) + generated_lines = generated.strip().splitlines(keepends=True) + + diff = difflib.unified_diff( + existing_lines, + generated_lines, + fromfile=f"{filepath}:{line} (existing)", + tofile=f"{filepath}:{line} (generated)", + lineterm="", + ) + diff_text = "\n".join(diff) + if diff_text: + print(diff_text) + else: + print(" (whitespace-only difference)") + + +def _ensure_imports(content: str) -> str: + """Ensure ConfigTabs/TabItem/NavPath imports are present in the file.""" + lines = content.split("\n") + + needed_imports = [] + if "" in content and 'import ConfigTabs' not in content: + needed_imports.append( + 'import ConfigTabs from "@site/src/components/ConfigTabs";' + ) + if "outpath mapping + file_outpaths: dict[Path, Path | None] = {} + for f in files: + if outdir is not None: + try: + rel = f.resolve().relative_to(base_dir) + except ValueError: + rel = Path(f.name) + file_outpaths[f] = outdir / rel + else: + file_outpaths[f] = None + + # Load data sources + print("Loading schema from Pydantic models...", file=sys.stderr) + schema = load_schema() + print("Loading i18n translations...", file=sys.stderr) + i18n = load_i18n() + print("Loading section configs...", file=sys.stderr) + section_configs = load_section_configs() + print(f"Processing {len(files)} file(s)...\n", file=sys.stderr) + + if args.check: + _run_check(files, schema, i18n, section_configs, args.verbose) + elif args.regenerate: + _run_regenerate( + files, schema, i18n, section_configs, + 
args.dry_run, args.verbose, file_outpaths, + ) + else: + _run_inject( + files, schema, i18n, section_configs, + args.inject, args.verbose, file_outpaths, + ) + + if outdir is not None: + print(f"\nOutput written to: {outdir}", file=sys.stderr) + + +def _run_inject(files, schema, i18n, section_configs, inject, verbose, file_outpaths): + """Run default mode: preview or inject bare YAML blocks.""" + total_stats = { + "files": 0, + "total_blocks": 0, + "config_blocks": 0, + "already_wrapped": 0, + "generated": 0, + "skipped": 0, + } + + for filepath in files: + stats = process_file( + filepath, schema, i18n, section_configs, + inject=inject, verbose=verbose, + outpath=file_outpaths.get(filepath), + ) + + total_stats["files"] += 1 + for key in ["total_blocks", "config_blocks", "already_wrapped", + "generated", "skipped"]: + total_stats[key] += stats[key] + + if verbose and stats["warnings"]: + print(f"\n{filepath}:", file=sys.stderr) + for w in stats["warnings"]: + print(w, file=sys.stderr) + + print("\n" + "=" * 60, file=sys.stderr) + print("Summary:", file=sys.stderr) + print(f" Files processed: {total_stats['files']}", file=sys.stderr) + print(f" Total YAML blocks: {total_stats['total_blocks']}", file=sys.stderr) + print(f" Config blocks: {total_stats['config_blocks']}", file=sys.stderr) + print(f" Already wrapped: {total_stats['already_wrapped']}", file=sys.stderr) + print(f" Generated: {total_stats['generated']}", file=sys.stderr) + print(f" Skipped: {total_stats['skipped']}", file=sys.stderr) + print("=" * 60, file=sys.stderr) + + +def _run_regenerate(files, schema, i18n, section_configs, dry_run, verbose, file_outpaths): + """Run regenerate mode: update existing ConfigTabs blocks.""" + total_stats = { + "files": 0, + "total_blocks": 0, + "regenerated": 0, + "unchanged": 0, + "skipped": 0, + } + + for filepath in files: + stats = regenerate_file( + filepath, schema, i18n, section_configs, + dry_run=dry_run, verbose=verbose, + outpath=file_outpaths.get(filepath), + 
) + + total_stats["files"] += 1 + for key in ["total_blocks", "regenerated", "unchanged", "skipped"]: + total_stats[key] += stats[key] + + if verbose and stats["warnings"]: + print(f"\n{filepath}:", file=sys.stderr) + for w in stats["warnings"]: + print(w, file=sys.stderr) + + action = "Would regenerate" if dry_run else "Regenerated" + print("\n" + "=" * 60, file=sys.stderr) + print("Summary:", file=sys.stderr) + print(f" Files processed: {total_stats['files']}", file=sys.stderr) + print(f" ConfigTabs blocks: {total_stats['total_blocks']}", file=sys.stderr) + print(f" {action}: {total_stats['regenerated']}", file=sys.stderr) + print(f" Unchanged: {total_stats['unchanged']}", file=sys.stderr) + print(f" Skipped: {total_stats['skipped']}", file=sys.stderr) + print("=" * 60, file=sys.stderr) + + +def _run_check(files, schema, i18n, section_configs, verbose): + """Run check mode: detect drift without modifying files.""" + total_stats = { + "files": 0, + "total_blocks": 0, + "up_to_date": 0, + "drifted": 0, + "skipped": 0, + } + + for filepath in files: + stats = check_file( + filepath, schema, i18n, section_configs, verbose=verbose, + ) + + total_stats["files"] += 1 + for key in ["total_blocks", "up_to_date", "drifted", "skipped"]: + total_stats[key] += stats[key] + + print("\n" + "=" * 60, file=sys.stderr) + print("Summary:", file=sys.stderr) + print(f" Files processed: {total_stats['files']}", file=sys.stderr) + print(f" ConfigTabs blocks: {total_stats['total_blocks']}", file=sys.stderr) + print(f" Up to date: {total_stats['up_to_date']}", file=sys.stderr) + print(f" Drifted: {total_stats['drifted']}", file=sys.stderr) + print(f" Skipped: {total_stats['skipped']}", file=sys.stderr) + print("=" * 60, file=sys.stderr) + + if total_stats["drifted"] > 0: + print( + f"\n{total_stats['drifted']} block(s) have drifted from schema/i18n. 
" + "Run with --regenerate to update.", + file=sys.stderr, + ) + sys.exit(1) + else: + print("\nAll UI tabs are up to date.", file=sys.stderr) + + +if __name__ == "__main__": + main() diff --git a/docs/scripts/lib/__init__.py b/docs/scripts/lib/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/docs/scripts/lib/i18n_loader.py b/docs/scripts/lib/i18n_loader.py new file mode 100644 index 000000000..7416e86c7 --- /dev/null +++ b/docs/scripts/lib/i18n_loader.py @@ -0,0 +1,139 @@ +"""Load i18n translation files for Settings UI field labels.""" + +import json +from pathlib import Path +from typing import Any + +# Base path for locale files +WEB_LOCALES = Path(__file__).resolve().parents[3] / "web" / "public" / "locales" / "en" + + +def load_i18n() -> dict[str, Any]: + """Load and merge all relevant i18n files. + + Returns: + Dict with keys: "global", "cameras", "settings_menu" + """ + global_path = WEB_LOCALES / "config" / "global.json" + cameras_path = WEB_LOCALES / "config" / "cameras.json" + settings_path = WEB_LOCALES / "views" / "settings.json" + + result: dict[str, Any] = {} + + with open(global_path) as f: + result["global"] = json.load(f) + + with open(cameras_path) as f: + result["cameras"] = json.load(f) + + with open(settings_path) as f: + settings = json.load(f) + result["settings_menu"] = settings.get("menu", {}) + + # Build a unified enum value → label lookup from all known sources. + # Merges multiple maps so callers don't need to know which file + # a particular enum lives in. 
+ value_labels: dict[str, str] = {} + + config_form = settings.get("configForm", {}) + + # FFmpeg preset labels (preset-vaapi → "VAAPI (Intel/AMD GPU)") + value_labels.update( + config_form.get("ffmpegArgs", {}).get("presetLabels", {}) + ) + + # Timestamp position (tl → "Top left") + value_labels.update(settings.get("timestampPosition", {})) + + # Input role options (detect → "Detect") + value_labels.update( + config_form.get("inputRoles", {}).get("options", {}) + ) + + # GenAI role options (vision → "Vision") + value_labels.update( + config_form.get("genaiRoles", {}).get("options", {}) + ) + + result["value_labels"] = value_labels + + return result + + +def get_field_label( + i18n: dict[str, Any], + section_key: str, + field_path: list[str], + level: str = "global", +) -> str | None: + """Look up the UI label for a field. + + Args: + i18n: Loaded i18n data from load_i18n() + section_key: Config section (e.g., "record") + field_path: Path within section (e.g., ["continuous", "days"]) + level: "global" or "cameras" + + Returns: + The label string, or None if not found. + """ + source = i18n.get(level, {}) + node = source.get(section_key, {}) + + for key in field_path: + if not isinstance(node, dict): + return None + node = node.get(key, {}) + + if isinstance(node, dict): + return node.get("label") + return None + + +def get_field_description( + i18n: dict[str, Any], + section_key: str, + field_path: list[str], + level: str = "global", +) -> str | None: + """Look up the UI description for a field.""" + source = i18n.get(level, {}) + node = source.get(section_key, {}) + + for key in field_path: + if not isinstance(node, dict): + return None + node = node.get(key, {}) + + if isinstance(node, dict): + return node.get("description") + return None + + +def get_value_label( + i18n: dict[str, Any], + value: str, +) -> str | None: + """Look up the display label for an enum/option value. 
+ + Args: + i18n: Loaded i18n data from load_i18n() + value: The raw config value (e.g., "preset-vaapi", "tl") + + Returns: + The human-readable label (e.g., "VAAPI (Intel/AMD GPU)"), or None. + """ + return i18n.get("value_labels", {}).get(value) + + +def get_section_label( + i18n: dict[str, Any], + section_key: str, + level: str = "global", +) -> str | None: + """Get the top-level label for a config section.""" + source = i18n.get(level, {}) + section = source.get(section_key, {}) + if isinstance(section, dict): + return section.get("label") + return None diff --git a/docs/scripts/lib/nav_map.py b/docs/scripts/lib/nav_map.py new file mode 100644 index 000000000..80f13d65b --- /dev/null +++ b/docs/scripts/lib/nav_map.py @@ -0,0 +1,120 @@ +"""Map config section keys to Settings UI navigation paths.""" + +# Derived from web/src/pages/Settings.tsx section mappings +# and web/public/locales/en/views/settings.json menu labels. +# +# Format: section_key -> (group_label, page_label) +# Navigation path: "Settings > {group_label} > {page_label}" + +GLOBAL_NAV: dict[str, tuple[str, str]] = { + "detect": ("Global configuration", "Object detection"), + "ffmpeg": ("Global configuration", "FFmpeg"), + "record": ("Global configuration", "Recording"), + "snapshots": ("Global configuration", "Snapshots"), + "motion": ("Global configuration", "Motion detection"), + "objects": ("Global configuration", "Objects"), + "review": ("Global configuration", "Review"), + "audio": ("Global configuration", "Audio events"), + "live": ("Global configuration", "Live playback"), + "timestamp_style": ("Global configuration", "Timestamp style"), + "notifications": ("Notifications", "Notifications"), +} + +CAMERA_NAV: dict[str, tuple[str, str]] = { + "detect": ("Camera configuration", "Object detection"), + "ffmpeg": ("Camera configuration", "FFmpeg"), + "record": ("Camera configuration", "Recording"), + "snapshots": ("Camera configuration", "Snapshots"), + "motion": ("Camera configuration", "Motion 
detection"), + "objects": ("Camera configuration", "Objects"), + "review": ("Camera configuration", "Review"), + "audio": ("Camera configuration", "Audio events"), + "audio_transcription": ("Camera configuration", "Audio transcription"), + "notifications": ("Camera configuration", "Notifications"), + "live": ("Camera configuration", "Live playback"), + "birdseye": ("Camera configuration", "Birdseye"), + "face_recognition": ("Camera configuration", "Face recognition"), + "lpr": ("Camera configuration", "License plate recognition"), + "mqtt": ("Camera configuration", "MQTT"), + "onvif": ("Camera configuration", "ONVIF"), + "ui": ("Camera configuration", "Camera UI"), + "timestamp_style": ("Camera configuration", "Timestamp style"), +} + +ENRICHMENT_NAV: dict[str, tuple[str, str]] = { + "semantic_search": ("Enrichments", "Semantic search"), + "genai": ("Enrichments", "Generative AI"), + "face_recognition": ("Enrichments", "Face recognition"), + "lpr": ("Enrichments", "License plate recognition"), + "classification": ("Enrichments", "Object classification"), + "audio_transcription": ("Enrichments", "Audio transcription"), +} + +SYSTEM_NAV: dict[str, tuple[str, str]] = { + "go2rtc_streams": ("System", "go2rtc streams"), + "database": ("System", "Database"), + "mqtt": ("System", "MQTT"), + "tls": ("System", "TLS"), + "auth": ("System", "Authentication"), + "networking": ("System", "Networking"), + "proxy": ("System", "Proxy"), + "ui": ("System", "UI"), + "logger": ("System", "Logging"), + "environment_vars": ("System", "Environment variables"), + "telemetry": ("System", "Telemetry"), + "birdseye": ("System", "Birdseye"), + "detectors": ("System", "Detector hardware"), + "model": ("System", "Detection model"), +} + +# All known top-level config section keys +ALL_CONFIG_SECTIONS = ( + set(GLOBAL_NAV) + | set(CAMERA_NAV) + | set(ENRICHMENT_NAV) + | set(SYSTEM_NAV) + | {"cameras"} +) + + +def get_nav_path(section_key: str, level: str = "global") -> str | None: + """Get the 
full navigation path for a config section. + + Args: + section_key: Config section key (e.g., "record") + level: "global", "camera", "enrichment", or "system" + + Returns: + NavPath string like "Settings > Global configuration > Recording", + or None if not found. + """ + nav_tables = { + "global": GLOBAL_NAV, + "camera": CAMERA_NAV, + "enrichment": ENRICHMENT_NAV, + "system": SYSTEM_NAV, + } + + table = nav_tables.get(level) + if table is None: + return None + + entry = table.get(section_key) + if entry is None: + return None + + group, page = entry + return f"Settings > {group} > {page}" + + +def detect_level(section_key: str) -> str: + """Detect whether a config section is global, camera, enrichment, or system.""" + if section_key in SYSTEM_NAV: + return "system" + if section_key in ENRICHMENT_NAV: + return "enrichment" + if section_key in GLOBAL_NAV: + return "global" + if section_key in CAMERA_NAV: + return "camera" + return "global" diff --git a/docs/scripts/lib/schema_loader.py b/docs/scripts/lib/schema_loader.py new file mode 100644 index 000000000..a1e88a989 --- /dev/null +++ b/docs/scripts/lib/schema_loader.py @@ -0,0 +1,88 @@ +"""Load JSON schema from Frigate's Pydantic config models.""" + +from typing import Any + + +def load_schema() -> dict[str, Any]: + """Generate and return the full JSON schema for FrigateConfig.""" + from frigate.config.config import FrigateConfig + from frigate.util.schema import get_config_schema + + return get_config_schema(FrigateConfig) + + +def resolve_ref(schema: dict[str, Any], ref: str) -> dict[str, Any]: + """Resolve a $ref pointer within the schema.""" + # ref format: "#/$defs/RecordConfig" + parts = ref.lstrip("#/").split("/") + node = schema + for part in parts: + node = node[part] + return node + + +def resolve_schema_node( + schema: dict[str, Any], node: dict[str, Any] +) -> dict[str, Any]: + """Resolve a schema node, following $ref and allOf if present.""" + if "$ref" in node: + node = resolve_ref(schema, 
node["$ref"]) + if "allOf" in node: + merged: dict[str, Any] = {} + for item in node["allOf"]: + resolved = resolve_schema_node(schema, item) + merged.update(resolved) + return merged + return node + + +def get_section_schema( + schema: dict[str, Any], section_key: str +) -> dict[str, Any] | None: + """Get the resolved schema for a top-level config section.""" + props = schema.get("properties", {}) + if section_key not in props: + return None + return resolve_schema_node(schema, props[section_key]) + + +def get_field_info( + schema: dict[str, Any], section_key: str, field_path: list[str] +) -> dict[str, Any] | None: + """Get schema info for a specific field path within a section. + + Args: + schema: Full JSON schema + section_key: Top-level section (e.g., "record") + field_path: List of nested keys (e.g., ["continuous", "days"]) + + Returns: + Resolved schema node for the field, or None if not found. + """ + section = get_section_schema(schema, section_key) + if section is None: + return None + + node = section + for key in field_path: + props = node.get("properties", {}) + if key not in props: + return None + node = resolve_schema_node(schema, props[key]) + + return node + + +def is_boolean_field(field_schema: dict[str, Any]) -> bool: + """Check if a schema node represents a boolean field.""" + return field_schema.get("type") == "boolean" + + +def is_enum_field(field_schema: dict[str, Any]) -> bool: + """Check if a schema node is an enum.""" + return "enum" in field_schema + + +def is_object_field(field_schema: dict[str, Any]) -> bool: + """Check if a schema node is an object with properties.""" + return field_schema.get("type") == "object" or "properties" in field_schema diff --git a/docs/scripts/lib/section_config_parser.py b/docs/scripts/lib/section_config_parser.py new file mode 100644 index 000000000..805ab2145 --- /dev/null +++ b/docs/scripts/lib/section_config_parser.py @@ -0,0 +1,130 @@ +"""Parse TypeScript section config files for hidden/advanced field 
info.""" + +import json +import re +from pathlib import Path +from typing import Any + +SECTION_CONFIGS_DIR = ( + Path(__file__).resolve().parents[3] + / "web" + / "src" + / "components" + / "config-form" + / "section-configs" +) + + +def _extract_string_array(text: str, field_name: str) -> list[str]: + """Extract a string array value from TypeScript object literal text.""" + pattern = rf"{field_name}\s*:\s*\[(.*?)\]" + match = re.search(pattern, text, re.DOTALL) + if not match: + return [] + content = match.group(1) + return re.findall(r'"([^"]*)"', content) + + +def _parse_section_file(filepath: Path) -> dict[str, Any]: + """Parse a single section config .ts file.""" + text = filepath.read_text() + + # Extract base block + base_match = re.search(r"base\s*:\s*\{(.*?)\n \}", text, re.DOTALL) + base_text = base_match.group(1) if base_match else "" + + # Extract global block + global_match = re.search(r"global\s*:\s*\{(.*?)\n \}", text, re.DOTALL) + global_text = global_match.group(1) if global_match else "" + + # Extract camera block + camera_match = re.search(r"camera\s*:\s*\{(.*?)\n \}", text, re.DOTALL) + camera_text = camera_match.group(1) if camera_match else "" + + result: dict[str, Any] = { + "fieldOrder": _extract_string_array(base_text, "fieldOrder"), + "hiddenFields": _extract_string_array(base_text, "hiddenFields"), + "advancedFields": _extract_string_array(base_text, "advancedFields"), + } + + # Merge global-level hidden fields + global_hidden = _extract_string_array(global_text, "hiddenFields") + if global_hidden: + result["globalHiddenFields"] = global_hidden + + # Merge camera-level hidden fields + camera_hidden = _extract_string_array(camera_text, "hiddenFields") + if camera_hidden: + result["cameraHiddenFields"] = camera_hidden + + return result + + +def load_section_configs() -> dict[str, dict[str, Any]]: + """Load all section configs from TypeScript files. + + Returns: + Dict mapping section name to parsed config. 
+ """ + # Read sectionConfigs.ts to get the mapping of section keys to filenames + registry_path = SECTION_CONFIGS_DIR.parent / "sectionConfigs.ts" + registry_text = registry_path.read_text() + + configs: dict[str, dict[str, Any]] = {} + + for ts_file in SECTION_CONFIGS_DIR.glob("*.ts"): + if ts_file.name == "types.ts": + continue + + section_name = ts_file.stem + configs[section_name] = _parse_section_file(ts_file) + + # Map section config keys from the registry (handles renames like + # "timestamp_style: timestampStyle") + key_map: dict[str, str] = {} + for match in re.finditer( + r"(\w+)(?:\s*:\s*\w+)?\s*,", registry_text[registry_text.find("{") :] + ): + key = match.group(1) + key_map[key] = key + + # Handle explicit key mappings like `timestamp_style: timestampStyle` + for match in re.finditer(r"(\w+)\s*:\s*(\w+)\s*,", registry_text): + key_map[match.group(1)] = match.group(2) + + return configs + + +def get_hidden_fields( + configs: dict[str, dict[str, Any]], + section_key: str, + level: str = "global", +) -> set[str]: + """Get the set of hidden fields for a section at a given level. 
+ + Args: + configs: Loaded section configs + section_key: Config section name (e.g., "record") + level: "global" or "camera" + + Returns: + Set of hidden field paths (e.g., {"enabled_in_config", "sync_recordings"}) + """ + config = configs.get(section_key, {}) + hidden = set(config.get("hiddenFields", [])) + + if level == "global": + hidden.update(config.get("globalHiddenFields", [])) + elif level == "camera": + hidden.update(config.get("cameraHiddenFields", [])) + + return hidden + + +def get_advanced_fields( + configs: dict[str, dict[str, Any]], + section_key: str, +) -> set[str]: + """Get the set of advanced fields for a section.""" + config = configs.get(section_key, {}) + return set(config.get("advancedFields", [])) diff --git a/docs/scripts/lib/ui_generator.py b/docs/scripts/lib/ui_generator.py new file mode 100644 index 000000000..7b9a59286 --- /dev/null +++ b/docs/scripts/lib/ui_generator.py @@ -0,0 +1,283 @@ +"""Generate UI tab markdown content from parsed YAML blocks.""" + +from typing import Any + +from .i18n_loader import get_field_description, get_field_label, get_value_label +from .nav_map import ALL_CONFIG_SECTIONS, detect_level, get_nav_path +from .schema_loader import is_boolean_field, is_object_field +from .section_config_parser import get_hidden_fields +from .yaml_extractor import YamlBlock, get_leaf_paths + + +def _format_value( + value: object, + field_schema: dict[str, Any] | None, + i18n: dict[str, Any] | None = None, +) -> str: + """Format a YAML value for UI display. + + Looks up i18n labels for enum/option values when available. 
+ """ + if field_schema and is_boolean_field(field_schema): + return "on" if value else "off" + if isinstance(value, bool): + return "on" if value else "off" + if isinstance(value, list): + if len(value) == 0: + return "an empty list" + items = [] + for v in value: + label = get_value_label(i18n, str(v)) if i18n else None + items.append(f"`{label}`" if label else f"`{v}`") + return ", ".join(items) + if value is None: + return "empty" + + # Try i18n label for the raw value (enum translations) + if i18n and isinstance(value, str): + label = get_value_label(i18n, value) + if label: + return f"`{label}`" + + return f"`{value}`" + + +def _build_field_label( + i18n: dict[str, Any], + section_key: str, + field_path: list[str], + level: str, +) -> str: + """Build the display label for a field using i18n labels. + + For a path like ["continuous", "days"], produces + "Continuous retention > Retention days" using the actual i18n labels. + """ + parts: list[str] = [] + + for depth in range(len(field_path)): + sub_path = field_path[: depth + 1] + label = get_field_label(i18n, section_key, sub_path, level) + + if label: + parts.append(label) + else: + # Fallback to title-cased field name + parts.append(field_path[depth].replace("_", " ").title()) + + return " > ".join(parts) + + +def _is_hidden( + field_key: str, + full_path: list[str], + hidden_fields: set[str], +) -> bool: + """Check if a field should be hidden from UI output.""" + # Check exact match + if field_key in hidden_fields: + return True + + # Check dotted path match (e.g., "alerts.enabled_in_config") + dotted = ".".join(str(p) for p in full_path) + if dotted in hidden_fields: + return True + + # Check wildcard patterns (e.g., "filters.*.mask") + for pattern in hidden_fields: + if "*" in pattern: + parts = pattern.split(".") + if len(parts) == len(full_path): + match = all( + p == "*" or p == fp for p, fp in zip(parts, full_path) + ) + if match: + return True + + return False + + +def generate_ui_content( + block: 
YamlBlock, + schema: dict[str, Any], + i18n: dict[str, Any], + section_configs: dict[str, dict[str, Any]], +) -> str | None: + """Generate UI tab markdown content for a YAML block. + + Args: + block: Parsed YAML block from a doc file + schema: Full JSON schema + i18n: Loaded i18n translations + section_configs: Parsed section config data + + Returns: + Generated markdown string for the UI tab, or None if the block + can't be converted (not a config block, etc.) + """ + if block.section_key is None: + return None + + # Determine which config data to walk + if block.is_camera_level: + # Camera-level: unwrap cameras.{name}.{section} + cam_data = block.parsed.get("cameras", {}) + cam_name = block.camera_name or next(iter(cam_data), None) + if not cam_name: + return None + inner = cam_data.get(cam_name, {}) + if not isinstance(inner, dict): + return None + level = "camera" + else: + inner = block.parsed + # Determine level from section key + level = detect_level(block.section_key) + + # Collect sections to process (may span multiple top-level keys) + sections_to_process: list[tuple[str, dict]] = [] + for key in inner: + if key in ALL_CONFIG_SECTIONS or key == block.section_key: + val = inner[key] + if isinstance(val, dict): + sections_to_process.append((key, val)) + else: + # Simple scalar at section level (e.g., record.enabled = True) + sections_to_process.append((key, {key: val})) + + # If inner is the section itself (e.g., parsed = {"record": {...}}) + if not sections_to_process and block.section_key in inner: + section_data = inner[block.section_key] + if isinstance(section_data, dict): + sections_to_process = [(block.section_key, section_data)] + + if not sections_to_process: + # Try treating the whole inner dict as the section data + sections_to_process = [(block.section_key, inner)] + + # Choose pattern based on whether YAML has comments (descriptive) or values + use_table = block.has_comments + + lines: list[str] = [] + step_num = 1 + + for section_key, 
section_data in sections_to_process: + # Get navigation path + i18n_level = "cameras" if level == "camera" else "global" + nav_path = get_nav_path(section_key, level) + if nav_path is None: + # Try global as fallback + nav_path = get_nav_path(section_key, "global") + if nav_path is None: + continue + + # Get hidden fields for this section + hidden = get_hidden_fields(section_configs, section_key, level) + + # Get leaf paths from the YAML data + leaves = get_leaf_paths(section_data) + + # Filter out hidden fields + visible_leaves: list[tuple[tuple[str, ...], object]] = [] + for path, value in leaves: + path_list = list(path) + if not _is_hidden(path_list[-1], path_list, hidden): + visible_leaves.append((path, value)) + + if not visible_leaves: + continue + + if use_table: + # Pattern A: Field table with descriptions + lines.append( + f'Navigate to .' + ) + lines.append("") + lines.append("| Field | Description |") + lines.append("|-------|-------------|") + + for path, _value in visible_leaves: + path_list = list(path) + label = _build_field_label( + i18n, section_key, path_list, i18n_level + ) + desc = get_field_description( + i18n, section_key, path_list, i18n_level + ) + if not desc: + desc = "" + lines.append(f"| **{label}** | {desc} |") + else: + # Pattern B: Set instructions + multi_section = len(sections_to_process) > 1 + + if multi_section: + camera_note = "" + if block.is_camera_level: + camera_note = ( + " and select your camera" + ) + lines.append( + f'{step_num}. Navigate to {camera_note}.' + ) + else: + if block.is_camera_level: + lines.append( + f'1. Navigate to and select your camera.' + ) + else: + lines.append( + f'Navigate to .' 
+ ) + lines.append("") + + from .schema_loader import get_field_info + + for path, value in visible_leaves: + path_list = list(path) + label = _build_field_label( + i18n, section_key, path_list, i18n_level + ) + field_info = get_field_info(schema, section_key, path_list) + formatted = _format_value(value, field_info, i18n) + + if multi_section or block.is_camera_level: + lines.append(f" - Set **{label}** to {formatted}") + else: + lines.append(f"- Set **{label}** to {formatted}") + + step_num += 1 + + if not lines: + return None + + return "\n".join(lines) + + +def wrap_with_config_tabs(ui_content: str, yaml_raw: str, highlight: str | None = None) -> str: + """Wrap UI content and YAML in ConfigTabs markup. + + Args: + ui_content: Generated UI tab markdown + yaml_raw: Original YAML text + highlight: Optional highlight spec (e.g., "{3-4}") + + Returns: + Full ConfigTabs MDX block + """ + highlight_str = f" {highlight}" if highlight else "" + + return f""" + + +{ui_content} + + + + +```yaml{highlight_str} +{yaml_raw} +``` + + +""" diff --git a/docs/scripts/lib/yaml_extractor.py b/docs/scripts/lib/yaml_extractor.py new file mode 100644 index 000000000..c01451cfc --- /dev/null +++ b/docs/scripts/lib/yaml_extractor.py @@ -0,0 +1,283 @@ +"""Extract YAML code blocks from markdown documentation files.""" + +import re +from dataclasses import dataclass, field + +import yaml + + +@dataclass +class YamlBlock: + """A YAML code block extracted from a markdown file.""" + + raw: str # Original YAML text + parsed: dict # Parsed YAML content + line_start: int # Line number in the markdown file (1-based) + line_end: int # End line number + highlight: str | None = None # Highlight spec (e.g., "{3-4}") + has_comments: bool = False # Whether the YAML has inline comments + inside_config_tabs: bool = False # Already wrapped in ConfigTabs + section_key: str | None = None # Detected top-level config section + is_camera_level: bool = False # Whether this is camera-level config + camera_name: 
str | None = None # Camera name if camera-level + config_keys: list[str] = field( + default_factory=list + ) # Top-level keys in the YAML + + +def extract_yaml_blocks(content: str) -> list[YamlBlock]: + """Extract all YAML fenced code blocks from markdown content. + + Args: + content: Markdown file content + + Returns: + List of YamlBlock instances + """ + blocks: list[YamlBlock] = [] + lines = content.split("\n") + i = 0 + in_config_tabs = False + + while i < len(lines): + line = lines[i] + + # Track ConfigTabs context + if "" in line: + in_config_tabs = True + elif "" in line: + in_config_tabs = False + + # Look for YAML fence opening + fence_match = re.match(r"^```yaml\s*(\{[^}]*\})?\s*$", line) + if fence_match: + highlight = fence_match.group(1) + start_line = i + 1 # 1-based + yaml_lines: list[str] = [] + i += 1 + + # Collect until closing fence + while i < len(lines) and not lines[i].startswith("```"): + yaml_lines.append(lines[i]) + i += 1 + + end_line = i + 1 # 1-based, inclusive of closing fence + raw = "\n".join(yaml_lines) + + # Check for inline comments + has_comments = any( + re.search(r"#\s*(<-|[A-Za-z])", yl) for yl in yaml_lines + ) + + # Parse YAML + try: + parsed = yaml.safe_load(raw) + except yaml.YAMLError: + i += 1 + continue + + if not isinstance(parsed, dict): + i += 1 + continue + + # Detect config section and level + config_keys = list(parsed.keys()) + section_key = None + is_camera = False + camera_name = None + + if "cameras" in parsed and isinstance(parsed["cameras"], dict): + is_camera = True + cam_entries = parsed["cameras"] + if len(cam_entries) == 1: + camera_name = list(cam_entries.keys())[0] + inner = cam_entries[camera_name] + if isinstance(inner, dict): + inner_keys = list(inner.keys()) + if len(inner_keys) >= 1: + section_key = inner_keys[0] + elif len(config_keys) >= 1: + section_key = config_keys[0] + + blocks.append( + YamlBlock( + raw=raw, + parsed=parsed, + line_start=start_line, + line_end=end_line, + highlight=highlight, 
+ has_comments=has_comments, + inside_config_tabs=in_config_tabs, + section_key=section_key, + is_camera_level=is_camera, + camera_name=camera_name, + config_keys=config_keys, + ) + ) + + i += 1 + + return blocks + + +@dataclass +class ConfigTabsBlock: + """An existing ConfigTabs block in a markdown file.""" + + line_start: int # 1-based line of + line_end: int # 1-based line of + ui_content: str # Content inside the UI TabItem + yaml_block: YamlBlock # The YAML block inside the YAML TabItem + raw_text: str # Full raw text of the ConfigTabs block + + +def extract_config_tabs_blocks(content: str) -> list[ConfigTabsBlock]: + """Extract existing ConfigTabs blocks from markdown content. + + Parses the structure: + + + ...ui content... + + + ```yaml + ...yaml... + ``` + + + + Returns: + List of ConfigTabsBlock instances + """ + blocks: list[ConfigTabsBlock] = [] + lines = content.split("\n") + i = 0 + + while i < len(lines): + if "" not in lines[i]: + i += 1 + continue + + block_start = i # 0-based + + # Find + j = i + 1 + while j < len(lines) and "" not in lines[j]: + j += 1 + + if j >= len(lines): + i += 1 + continue + + block_end = j # 0-based, line with + block_text = "\n".join(lines[block_start : block_end + 1]) + + # Extract UI content (between and ) + ui_match = re.search( + r'\s*\n(.*?)\n\s*', + block_text, + re.DOTALL, + ) + ui_content = ui_match.group(1).strip() if ui_match else "" + + # Extract YAML block from inside the yaml TabItem + yaml_tab_match = re.search( + r'\s*\n(.*?)\n\s*', + block_text, + re.DOTALL, + ) + + yaml_block = None + if yaml_tab_match: + yaml_tab_text = yaml_tab_match.group(1) + fence_match = re.search( + r"```yaml\s*(\{[^}]*\})?\s*\n(.*?)\n```", + yaml_tab_text, + re.DOTALL, + ) + if fence_match: + highlight = fence_match.group(1) + yaml_raw = fence_match.group(2) + has_comments = bool( + re.search(r"#\s*(<-|[A-Za-z])", yaml_raw) + ) + + try: + parsed = yaml.safe_load(yaml_raw) + except yaml.YAMLError: + parsed = {} + + if 
isinstance(parsed, dict): + config_keys = list(parsed.keys()) + section_key = None + is_camera = False + camera_name = None + + if "cameras" in parsed and isinstance( + parsed["cameras"], dict + ): + is_camera = True + cam_entries = parsed["cameras"] + if len(cam_entries) == 1: + camera_name = list(cam_entries.keys())[0] + inner = cam_entries[camera_name] + if isinstance(inner, dict): + inner_keys = list(inner.keys()) + if len(inner_keys) >= 1: + section_key = inner_keys[0] + elif len(config_keys) >= 1: + section_key = config_keys[0] + + yaml_block = YamlBlock( + raw=yaml_raw, + parsed=parsed, + line_start=block_start + 1, + line_end=block_end + 1, + highlight=highlight, + has_comments=has_comments, + inside_config_tabs=True, + section_key=section_key, + is_camera_level=is_camera, + camera_name=camera_name, + config_keys=config_keys, + ) + + if yaml_block: + blocks.append( + ConfigTabsBlock( + line_start=block_start + 1, # 1-based + line_end=block_end + 1, # 1-based + ui_content=ui_content, + yaml_block=yaml_block, + raw_text=block_text, + ) + ) + + i = j + 1 + + return blocks + + +def get_leaf_paths( + data: dict, prefix: tuple[str, ...] = () +) -> list[tuple[tuple[str, ...], object]]: + """Walk a parsed YAML dict and return all leaf key paths with values. + + Args: + data: Parsed YAML dict + prefix: Current key path prefix + + Returns: + List of (key_path_tuple, value) pairs. + e.g., [( ("record", "continuous", "days"), 3 ), ...] 
+ """ + results: list[tuple[tuple[str, ...], object]] = [] + + for key, value in data.items(): + path = prefix + (str(key),) + if isinstance(value, dict): + results.extend(get_leaf_paths(value, path)) + else: + results.append((path, value)) + + return results diff --git a/docs/src/components/ConfigTabs/index.jsx b/docs/src/components/ConfigTabs/index.jsx new file mode 100644 index 000000000..0fbc51897 --- /dev/null +++ b/docs/src/components/ConfigTabs/index.jsx @@ -0,0 +1,34 @@ +import React, { Children, cloneElement } from "react"; +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +export default function ConfigTabs({ children }) { + const wrapped = Children.map(children, (child) => { + if (child?.props?.value === "ui") { + return cloneElement(child, { + className: "config-tab-ui", + }); + } + if (child?.props?.value === "yaml") { + return cloneElement(child, { + className: "config-tab-yaml", + }); + } + return child; + }); + + return ( +
+ + {wrapped} + +
+ ); +} diff --git a/docs/src/components/NavPath/index.jsx b/docs/src/components/NavPath/index.jsx new file mode 100644 index 000000000..e5ec86bdc --- /dev/null +++ b/docs/src/components/NavPath/index.jsx @@ -0,0 +1,30 @@ +import React from "react"; + +export default function NavPath({ path }) { + const segments = path.split(" > "); + return ( + + {segments.map((seg, i) => ( + + {i > 0 && ( + + → + + )} + {seg} + + ))} + + ); +} diff --git a/docs/src/css/custom.css b/docs/src/css/custom.css index 5d8fc5055..6d9b7c82f 100644 --- a/docs/src/css/custom.css +++ b/docs/src/css/custom.css @@ -241,4 +241,50 @@ margin: 0 calc(-1 * var(--ifm-pre-padding)); padding: 0 var(--ifm-pre-padding); border-left: 3px solid #ff000080; +} + +/* ConfigTabs wrapper */ +.config-tabs-wrapper { + border: 1px solid var(--ifm-color-emphasis-300); + border-radius: 8px; + overflow: hidden; + margin-bottom: 16px; +} + +.config-tabs-wrapper .tabs-container { + margin-bottom: 0 !important; +} + +.config-tabs-wrapper .tabs { + background: var(--ifm-color-emphasis-100); + border-bottom: 1px solid var(--ifm-color-emphasis-300); + margin-bottom: 0; + padding: 0 12px; +} + +.config-tabs-wrapper .tabs__item { + padding: 8px 16px; + border-radius: 0; +} + +.config-tabs-wrapper .tabs__item--active { + border-bottom-color: var(--ifm-color-primary); +} + +.config-tabs-wrapper .config-tab-ui { + padding: 4px 16px 16px; +} + +.config-tabs-wrapper .config-tab-ui > :last-child { + margin-bottom: 0; +} + +.config-tabs-wrapper div[class*="codeBlockContainer"] { + border-top-left-radius: 0; + border-top-right-radius: 0; + margin: 0; +} + +.config-tabs-wrapper .tabs-container > .margin-top--md:has(.config-tab-yaml:not([hidden])) { + margin-top: 0 !important; } \ No newline at end of file diff --git a/frigate/api/app.py b/frigate/api/app.py index 498094ff8..af6778451 100644 --- a/frigate/api/app.py +++ b/frigate/api/app.py @@ -125,6 +125,16 @@ def metrics(request: Request): return Response(content=content, 
media_type=content_type) +@router.get( + "/genai/models", + dependencies=[Depends(allow_any_authenticated())], + summary="List available GenAI models", + description="Returns available models for each configured GenAI provider.", +) +def genai_models(request: Request): + return JSONResponse(content=request.app.genai_manager.list_models()) + + @router.get("/config", dependencies=[Depends(allow_any_authenticated())]) def config(request: Request): config_obj: FrigateConfig = request.app.frigate_config @@ -142,9 +152,20 @@ def config(request: Request): # remove the proxy secret config["proxy"].pop("auth_secret", None) + # remove genai api keys + for genai_name, genai_cfg in config.get("genai", {}).items(): + if isinstance(genai_cfg, dict): + genai_cfg.pop("api_key", None) + for camera_name, camera in request.app.frigate_config.cameras.items(): camera_dict = config["cameras"][camera_name] + # remove onvif credentials + onvif_dict = camera_dict.get("onvif", {}) + if onvif_dict: + onvif_dict.pop("user", None) + onvif_dict.pop("password", None) + # clean paths for input in camera_dict.get("ffmpeg", {}).get("inputs", []): input["path"] = clean_camera_user_pass(input["path"]) @@ -613,6 +634,34 @@ def config_set(request: Request, body: AppConfigSetBody): try: config = FrigateConfig.parse(new_raw_config) + except ValidationError as e: + with open(config_file, "w") as f: + f.write(old_raw_config) + f.close() + logger.error( + f"Config Validation Error:\n\n{str(traceback.format_exc())}" + ) + error_messages = [] + for err in e.errors(): + msg = err.get("msg", "") + # Strip pydantic "Value error, " prefix for cleaner display + if msg.startswith("Value error, "): + msg = msg[len("Value error, ") :] + error_messages.append(msg) + message = ( + "; ".join(error_messages) + if error_messages + else "Check logs for error message." 
+ ) + return JSONResponse( + content=( + { + "success": False, + "message": f"Error saving config: {message}", + } + ), + status_code=400, + ) except Exception: with open(config_file, "w") as f: f.write(old_raw_config) diff --git a/frigate/api/camera.py b/frigate/api/camera.py index e79864d66..1881bde6d 100644 --- a/frigate/api/camera.py +++ b/frigate/api/camera.py @@ -30,6 +30,7 @@ from frigate.config.camera.updater import ( CameraConfigUpdateEnum, CameraConfigUpdateTopic, ) +from frigate.config.env import FRIGATE_ENV_VARS from frigate.util.builtin import clean_camera_user_pass from frigate.util.camera_cleanup import cleanup_camera_db, cleanup_camera_files from frigate.util.config import find_config_file @@ -124,7 +125,10 @@ def go2rtc_add_stream(request: Request, stream_name: str, src: str = ""): try: params = {"name": stream_name} if src: - params["src"] = src + try: + params["src"] = src.format(**FRIGATE_ENV_VARS) + except KeyError: + params["src"] = src r = requests.put( "http://127.0.0.1:1984/api/streams", @@ -1220,6 +1224,15 @@ def camera_set( status_code=400, ) + if not sub_command and feature in _SUB_COMMAND_FEATURES: + return JSONResponse( + content={ + "success": False, + "message": f"Feature '{feature}' requires a sub-command (e.g. 
mask or zone name)", + }, + status_code=400, + ) + if camera_name == "*": cameras = list(frigate_config.cameras.keys()) elif camera_name not in frigate_config.cameras: diff --git a/frigate/api/chat.py b/frigate/api/chat.py index 136c425f2..3e10714ca 100644 --- a/frigate/api/chat.py +++ b/frigate/api/chat.py @@ -15,6 +15,7 @@ from pydantic import BaseModel from frigate.api.auth import ( allow_any_authenticated, get_allowed_cameras_for_filter, + require_camera_access, ) from frigate.api.defs.query.events_query_parameters import EventsQueryParams from frigate.api.defs.request.chat_body import ChatCompletionRequest @@ -293,6 +294,60 @@ def get_tool_definitions() -> List[Dict[str, Any]]: }, }, }, + { + "type": "function", + "function": { + "name": "get_profile_status", + "description": ( + "Get the current profile status including the active profile and " + "timestamps of when each profile was last activated. Use this to " + "determine time periods for recap requests — e.g. when the user asks " + "'what happened while I was away?', call this first to find the relevant " + "time window based on profile activation history." + ), + "parameters": { + "type": "object", + "properties": {}, + "required": [], + }, + }, + }, + { + "type": "function", + "function": { + "name": "get_recap", + "description": ( + "Get a recap of all activity (alerts and detections) for a given time period. " + "Use this after calling get_profile_status to retrieve what happened during " + "a specific window — e.g. 'what happened while I was away?'. Returns a " + "chronological list of activity with camera, objects, zones, and GenAI-generated " + "descriptions when available. Summarize the results for the user." + ), + "parameters": { + "type": "object", + "properties": { + "after": { + "type": "string", + "description": "Start of the time period in ISO 8601 format (e.g. '2025-03-15T08:00:00').", + }, + "before": { + "type": "string", + "description": "End of the time period in ISO 8601 format (e.g. 
'2025-03-15T17:00:00').", + }, + "cameras": { + "type": "string", + "description": "Comma-separated camera IDs to include, or 'all' for all cameras. Default is 'all'.", + }, + "severity": { + "type": "string", + "enum": ["alert", "detection"], + "description": "Filter by severity level. Omit to include both alerts and detections.", + }, + }, + "required": ["after", "before"], + }, + }, + }, ] @@ -465,45 +520,14 @@ async def _execute_get_live_context( "detections": list(tracked_objects_dict.values()), } - # Grab live frame and handle based on provider configuration + # Grab live frame when the chat model supports vision image_url = await _get_live_frame_image_url(request, camera, allowed_cameras) if image_url: - genai_manager = request.app.genai_manager - if genai_manager.tool_client is genai_manager.vision_client: - # Same provider handles both roles — pass image URL so it can - # be injected as a user message (images can't be in tool results) + chat_client = request.app.genai_manager.chat_client + if chat_client is not None and chat_client.supports_vision: + # Pass image URL so it can be injected as a user message + # (images can't be in tool results) result["_image_url"] = image_url - elif genai_manager.vision_client is not None: - # Separate vision provider — have it describe the image, - # providing detection context so it knows what to focus on - frame_bytes = _decode_data_url(image_url) - if frame_bytes: - detections = result.get("detections", []) - if detections: - detection_lines = [] - for d in detections: - parts = [d.get("label", "unknown")] - if d.get("sub_label"): - parts.append(f"({d['sub_label']})") - if d.get("zones"): - parts.append(f"in {', '.join(d['zones'])}") - detection_lines.append(" ".join(parts)) - context = ( - "The following objects are currently being tracked: " - + "; ".join(detection_lines) - + "." - ) - else: - context = "No objects are currently being tracked." 
- - description = genai_manager.vision_client._send( - f"Describe what you see in this security camera image. " - f"{context} Focus on the scene, any visible activity, " - f"and details about the tracked objects.", - [frame_bytes], - ) - if description: - result["image_description"] = description return result @@ -554,17 +578,6 @@ async def _get_live_frame_image_url( return None -def _decode_data_url(data_url: str) -> Optional[bytes]: - """Decode a base64 data URL to raw bytes.""" - try: - # Format: data:image/jpeg;base64, - _, encoded = data_url.split(",", 1) - return base64.b64decode(encoded) - except (ValueError, Exception) as e: - logger.debug("Failed to decode data URL: %s", e) - return None - - async def _execute_set_camera_state( request: Request, arguments: Dict[str, Any], @@ -645,10 +658,14 @@ async def _execute_tool_internal( return await _execute_start_camera_watch(request, arguments) elif tool_name == "stop_camera_watch": return _execute_stop_camera_watch() + elif tool_name == "get_profile_status": + return _execute_get_profile_status(request) + elif tool_name == "get_recap": + return _execute_get_recap(arguments, allowed_cameras) else: logger.error( "Tool call failed: unknown tool %r. Expected one of: search_objects, get_live_context, " - "start_camera_watch, stop_camera_watch. Arguments received: %s", + "start_camera_watch, stop_camera_watch, get_profile_status, get_recap. 
Arguments received: %s", tool_name, json.dumps(arguments), ) @@ -672,10 +689,12 @@ async def _execute_start_camera_watch( if camera not in config.cameras: return {"error": f"Camera '{camera}' not found."} + await require_camera_access(camera, request=request) + genai_manager = request.app.genai_manager - vision_client = genai_manager.vision_client or genai_manager.tool_client - if vision_client is None: - return {"error": "No vision/GenAI provider configured."} + chat_client = genai_manager.chat_client + if chat_client is None or not chat_client.supports_vision: + return {"error": "VLM watch requires a chat model with vision support."} try: job_id = start_vlm_watch_job( @@ -710,6 +729,168 @@ def _execute_stop_camera_watch() -> Dict[str, Any]: return {"success": False, "message": "No active watch job to cancel."} +def _execute_get_profile_status(request: Request) -> Dict[str, Any]: + """Return profile status including active profile and activation timestamps.""" + profile_manager = getattr(request.app, "profile_manager", None) + if profile_manager is None: + return {"error": "Profile manager is not available."} + + info = profile_manager.get_profile_info() + + # Convert timestamps to human-readable local times inline + last_activated = {} + for name, ts in info.get("last_activated", {}).items(): + try: + dt = datetime.fromtimestamp(ts) + last_activated[name] = dt.strftime("%Y-%m-%d %I:%M:%S %p") + except (TypeError, ValueError, OSError): + last_activated[name] = str(ts) + + return { + "active_profile": info.get("active_profile"), + "profiles": info.get("profiles", []), + "last_activated": last_activated, + } + + +def _execute_get_recap( + arguments: Dict[str, Any], + allowed_cameras: List[str], +) -> Dict[str, Any]: + """Fetch review segments with GenAI metadata for a time period.""" + from functools import reduce + + from peewee import operator + + from frigate.models import ReviewSegment + + after_str = arguments.get("after") + before_str = arguments.get("before") 
+ + def _parse_as_local_timestamp(s: str): + s = s.replace("Z", "").strip()[:19] + dt = datetime.strptime(s, "%Y-%m-%dT%H:%M:%S") + return time.mktime(dt.timetuple()) + + try: + after = _parse_as_local_timestamp(after_str) + except (ValueError, AttributeError, TypeError): + return {"error": f"Invalid 'after' timestamp: {after_str}"} + + try: + before = _parse_as_local_timestamp(before_str) + except (ValueError, AttributeError, TypeError): + return {"error": f"Invalid 'before' timestamp: {before_str}"} + + cameras = arguments.get("cameras", "all") + if cameras != "all": + requested = set(cameras.split(",")) + camera_list = list(requested.intersection(allowed_cameras)) + if not camera_list: + return {"events": [], "message": "No accessible cameras matched."} + else: + camera_list = allowed_cameras + + clauses = [ + (ReviewSegment.start_time < before) + & ((ReviewSegment.end_time.is_null(True)) | (ReviewSegment.end_time > after)), + (ReviewSegment.camera << camera_list), + ] + + severity_filter = arguments.get("severity") + if severity_filter: + clauses.append(ReviewSegment.severity == severity_filter) + + try: + rows = ( + ReviewSegment.select( + ReviewSegment.camera, + ReviewSegment.start_time, + ReviewSegment.end_time, + ReviewSegment.severity, + ReviewSegment.data, + ) + .where(reduce(operator.and_, clauses)) + .order_by(ReviewSegment.start_time.asc()) + .limit(100) + .dicts() + .iterator() + ) + + events: List[Dict[str, Any]] = [] + + for row in rows: + data = row.get("data") or {} + if isinstance(data, str): + try: + data = json.loads(data) + except json.JSONDecodeError: + data = {} + + camera = row["camera"] + event: Dict[str, Any] = { + "camera": camera.replace("_", " ").title(), + "severity": row.get("severity", "detection"), + } + + # Include GenAI metadata when available + metadata = data.get("metadata") + if metadata and isinstance(metadata, dict): + if metadata.get("title"): + event["title"] = metadata["title"] + if metadata.get("scene"): + 
event["description"] = metadata["scene"] + threat = metadata.get("potential_threat_level") + if threat is not None: + threat_labels = { + 0: "normal", + 1: "needs_review", + 2: "security_concern", + } + event["threat_level"] = threat_labels.get(threat, str(threat)) + + # Only include objects/zones/audio when there's no GenAI description + # to keep the payload concise — the description already covers these + if "description" not in event: + objects = data.get("objects", []) + if objects: + event["objects"] = objects + zones = data.get("zones", []) + if zones: + event["zones"] = zones + audio = data.get("audio", []) + if audio: + event["audio"] = audio + + start_ts = row.get("start_time") + end_ts = row.get("end_time") + if start_ts is not None: + try: + event["time"] = datetime.fromtimestamp(start_ts).strftime( + "%I:%M %p" + ) + except (TypeError, ValueError, OSError): + pass + if end_ts is not None and start_ts is not None: + try: + event["duration_seconds"] = round(end_ts - start_ts) + except (TypeError, ValueError): + pass + + events.append(event) + + if not events: + return { + "events": [], + "message": "No activity was found during this time period.", + } + + return {"events": events} + except Exception as e: + logger.error("Error executing get_recap: %s", e, exc_info=True) + return {"error": "Failed to fetch recap data."} + + async def _execute_pending_tools( pending_tool_calls: List[Dict[str, Any]], request: Request, @@ -847,7 +1028,7 @@ async def chat_completion( 6. Repeats until final answer 7. 
Returns response to user """ - genai_client = request.app.genai_manager.tool_client + genai_client = request.app.genai_manager.chat_client if not genai_client: return JSONResponse( content={ @@ -1156,12 +1337,14 @@ async def start_vlm_monitor( status_code=404, ) - vision_client = genai_manager.vision_client or genai_manager.tool_client - if vision_client is None: + await require_camera_access(body.camera, request=request) + + chat_client = genai_manager.chat_client + if chat_client is None or not chat_client.supports_vision: return JSONResponse( content={ "success": False, - "message": "No vision/GenAI provider configured.", + "message": "VLM watch requires a chat model with vision support.", }, status_code=400, ) diff --git a/frigate/api/media.py b/frigate/api/media.py index fd48a11e1..489c008b4 100644 --- a/frigate/api/media.py +++ b/frigate/api/media.py @@ -896,6 +896,7 @@ async def event_thumbnail( if event_id in camera_state.tracked_objects: tracked_obj = camera_state.tracked_objects.get(event_id) if tracked_obj is not None: + await require_camera_access(camera_state.name, request=request) thumbnail_bytes = tracked_obj.get_thumbnail(extension.value) except Exception: return JSONResponse( @@ -1066,7 +1067,7 @@ def grid_snapshot( @router.delete( - "/{camera_name}/region_grid", dependencies=[Depends(require_role("admin"))] + "/{camera_name}/region_grid", dependencies=[Depends(require_role(["admin"]))] ) def clear_region_grid(request: Request, camera_name: str): """Clear the region grid for a camera.""" diff --git a/frigate/api/review.py b/frigate/api/review.py index d2e8063d5..cb114db2a 100644 --- a/frigate/api/review.py +++ b/frigate/api/review.py @@ -742,11 +742,11 @@ async def set_not_reviewed( @router.post( "/review/summarize/start/{start_ts}/end/{end_ts}", - dependencies=[Depends(allow_any_authenticated())], + dependencies=[Depends(require_role(["admin"]))], description="Use GenAI to summarize review items over a period of time.", ) def 
generate_review_summary(request: Request, start_ts: float, end_ts: float): - if not request.app.genai_manager.vision_client: + if not request.app.genai_manager.description_client: return JSONResponse( content=( { diff --git a/frigate/camera/__init__.py b/frigate/camera/__init__.py index 0461c98cb..85831653e 100644 --- a/frigate/camera/__init__.py +++ b/frigate/camera/__init__.py @@ -1,26 +1,27 @@ import multiprocessing as mp -from multiprocessing.managers import SyncManager +import queue +from multiprocessing.managers import SyncManager, ValueProxy from multiprocessing.sharedctypes import Synchronized from multiprocessing.synchronize import Event class CameraMetrics: - camera_fps: Synchronized - detection_fps: Synchronized - detection_frame: Synchronized - process_fps: Synchronized - skipped_fps: Synchronized - read_start: Synchronized - audio_rms: Synchronized - audio_dBFS: Synchronized + camera_fps: ValueProxy[float] + detection_fps: ValueProxy[float] + detection_frame: ValueProxy[float] + process_fps: ValueProxy[float] + skipped_fps: ValueProxy[float] + read_start: ValueProxy[float] + audio_rms: ValueProxy[float] + audio_dBFS: ValueProxy[float] - frame_queue: mp.Queue + frame_queue: queue.Queue - process_pid: Synchronized - capture_process_pid: Synchronized - ffmpeg_pid: Synchronized - reconnects_last_hour: Synchronized - stalls_last_hour: Synchronized + process_pid: ValueProxy[int] + capture_process_pid: ValueProxy[int] + ffmpeg_pid: ValueProxy[int] + reconnects_last_hour: ValueProxy[int] + stalls_last_hour: ValueProxy[int] def __init__(self, manager: SyncManager): self.camera_fps = manager.Value("d", 0) @@ -56,14 +57,14 @@ class PTZMetrics: reset: Event def __init__(self, *, autotracker_enabled: bool): - self.autotracker_enabled = mp.Value("i", autotracker_enabled) + self.autotracker_enabled = mp.Value("i", autotracker_enabled) # type: ignore[assignment] - self.start_time = mp.Value("d", 0) - self.stop_time = mp.Value("d", 0) - self.frame_time = mp.Value("d", 
0) - self.zoom_level = mp.Value("d", 0) - self.max_zoom = mp.Value("d", 0) - self.min_zoom = mp.Value("d", 0) + self.start_time = mp.Value("d", 0) # type: ignore[assignment] + self.stop_time = mp.Value("d", 0) # type: ignore[assignment] + self.frame_time = mp.Value("d", 0) # type: ignore[assignment] + self.zoom_level = mp.Value("d", 0) # type: ignore[assignment] + self.max_zoom = mp.Value("d", 0) # type: ignore[assignment] + self.min_zoom = mp.Value("d", 0) # type: ignore[assignment] self.tracking_active = mp.Event() self.motor_stopped = mp.Event() diff --git a/frigate/camera/activity_manager.py b/frigate/camera/activity_manager.py index 3f229e490..38425add9 100644 --- a/frigate/camera/activity_manager.py +++ b/frigate/camera/activity_manager.py @@ -37,6 +37,9 @@ class CameraActivityManager: self.__init_camera(camera_config) def __init_camera(self, camera_config: CameraConfig) -> None: + if camera_config.name is None: + return + self.last_camera_activity[camera_config.name] = {} self.camera_all_object_counts[camera_config.name] = Counter() self.camera_active_object_counts[camera_config.name] = Counter() @@ -114,7 +117,7 @@ class CameraActivityManager: self.last_camera_activity = new_activity def compare_camera_activity( - self, camera: str, new_activity: dict[str, Any] + self, camera: str, new_activity: list[dict[str, Any]] ) -> None: all_objects = Counter( obj["label"].replace("-verified", "") for obj in new_activity @@ -175,6 +178,9 @@ class AudioActivityManager: self.__init_camera(camera_config) def __init_camera(self, camera_config: CameraConfig) -> None: + if camera_config.name is None: + return + self.current_audio_detections[camera_config.name] = {} def update_activity(self, new_activity: dict[str, dict[str, Any]]) -> None: @@ -202,7 +208,7 @@ class AudioActivityManager: def compare_audio_activity( self, camera: str, new_detections: list[tuple[str, float]], now: float - ) -> None: + ) -> bool: camera_config = self.config.cameras.get(camera) if camera_config 
is None: return False diff --git a/frigate/camera/maintainer.py b/frigate/camera/maintainer.py index 9cfdcc7f3..c4ddc51e8 100644 --- a/frigate/camera/maintainer.py +++ b/frigate/camera/maintainer.py @@ -102,7 +102,7 @@ class CameraMaintainer(threading.Thread): f"recommend increasing it to at least {shm_stats['min_shm']}MB." ) - return shm_stats["shm_frame_count"] + return int(shm_stats["shm_frame_count"]) def __start_camera_processor( self, name: str, config: CameraConfig, runtime: bool = False @@ -152,10 +152,10 @@ class CameraMaintainer(threading.Thread): camera_stop_event, self.config.logger, ) - self.camera_processes[config.name] = camera_process + self.camera_processes[name] = camera_process camera_process.start() - self.camera_metrics[config.name].process_pid.value = camera_process.pid - logger.info(f"Camera processor started for {config.name}: {camera_process.pid}") + self.camera_metrics[name].process_pid.value = camera_process.pid + logger.info(f"Camera processor started for {name}: {camera_process.pid}") def __start_camera_capture( self, name: str, config: CameraConfig, runtime: bool = False @@ -219,7 +219,7 @@ class CameraMaintainer(threading.Thread): logger.info(f"Closing frame queue for {camera}") empty_and_close_queue(self.camera_metrics[camera].frame_queue) - def run(self): + def run(self) -> None: self.__init_historical_regions() # start camera processes diff --git a/frigate/camera/state.py b/frigate/camera/state.py index f609a05f9..8d0b58602 100644 --- a/frigate/camera/state.py +++ b/frigate/camera/state.py @@ -31,29 +31,51 @@ logger = logging.getLogger(__name__) class CameraState: def __init__( self, - name, + name: str, config: FrigateConfig, frame_manager: SharedMemoryFrameManager, ptz_autotracker_thread: PtzAutoTrackerThread, - ): + ) -> None: self.name = name self.config = config self.camera_config = config.cameras[name] self.frame_manager = frame_manager self.best_objects: dict[str, TrackedObject] = {} self.tracked_objects: dict[str, 
TrackedObject] = {} - self.frame_cache = {} - self.zone_objects = defaultdict(list) + self.frame_cache: dict[float, dict[str, Any]] = {} + self.zone_objects: defaultdict[str, list[Any]] = defaultdict(list) self._current_frame = np.zeros(self.camera_config.frame_shape_yuv, np.uint8) self.current_frame_lock = threading.Lock() self.current_frame_time = 0.0 - self.motion_boxes = [] - self.regions = [] - self.previous_frame_id = None - self.callbacks = defaultdict(list) + self.motion_boxes: list[tuple[int, int, int, int]] = [] + self.regions: list[tuple[int, int, int, int]] = [] + self.previous_frame_id: str | None = None + self.callbacks: defaultdict[str, list[Callable]] = defaultdict(list) self.ptz_autotracker_thread = ptz_autotracker_thread self.prev_enabled = self.camera_config.enabled + # Minimum object area thresholds for fast-tracking updates to secondary + # face/LPR pipelines when using a model without built-in detection. + self.face_recognition_min_obj_area: int = 0 + self.lpr_min_obj_area: int = 0 + + if ( + self.camera_config.face_recognition.enabled + and "face" not in config.objects.all_objects + ): + # A face is roughly 1/8 of person box area; use a conservative + # multiplier so fast-tracking starts slightly before the optimal zone + self.face_recognition_min_obj_area = ( + self.camera_config.face_recognition.min_area * 6 + ) + + if ( + self.camera_config.lpr.enabled + and "license_plate" not in self.camera_config.objects.track + ): + # A plate is a smaller fraction of a vehicle box; use ~20x multiplier + self.lpr_min_obj_area = self.camera_config.lpr.min_area * 20 + def get_current_frame(self, draw_options: dict[str, Any] = {}) -> np.ndarray: with self.current_frame_lock: frame_copy = np.copy(self._current_frame) @@ -62,10 +84,10 @@ class CameraState: motion_boxes = self.motion_boxes.copy() regions = self.regions.copy() - frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_YUV2BGR_I420) + frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_YUV2BGR_I420) # type: 
ignore[assignment] # draw on the frame if draw_options.get("mask"): - mask_overlay = np.where(self.camera_config.motion.rasterized_mask == [0]) + mask_overlay = np.where(self.camera_config.motion.rasterized_mask == [0]) # type: ignore[attr-defined] frame_copy[mask_overlay] = [0, 0, 0] if draw_options.get("bounding_boxes"): @@ -97,7 +119,7 @@ class CameraState: and obj["id"] == self.ptz_autotracker_thread.ptz_autotracker.tracked_object[ self.name - ].obj_data["id"] + ].obj_data["id"] # type: ignore[attr-defined] and obj["frame_time"] == frame_time ): thickness = 5 @@ -109,10 +131,12 @@ class CameraState: if ( self.camera_config.onvif.autotracking.zooming != ZoomingModeEnum.disabled + and self.camera_config.detect.width is not None + and self.camera_config.detect.height is not None ): max_target_box = self.ptz_autotracker_thread.ptz_autotracker.tracked_object_metrics[ self.name - ]["max_target_box"] + ]["max_target_box"] # type: ignore[index] side_length = max_target_box * ( max( self.camera_config.detect.width, @@ -221,14 +245,14 @@ class CameraState: ) if draw_options.get("timestamp"): - color = self.camera_config.timestamp_style.color + ts_color = self.camera_config.timestamp_style.color draw_timestamp( frame_copy, frame_time, self.camera_config.timestamp_style.format, font_effect=self.camera_config.timestamp_style.effect, font_thickness=self.camera_config.timestamp_style.thickness, - font_color=(color.blue, color.green, color.red), + font_color=(ts_color.blue, ts_color.green, ts_color.red), position=self.camera_config.timestamp_style.position, ) @@ -273,10 +297,10 @@ class CameraState: return frame_copy - def finished(self, obj_id): + def finished(self, obj_id: str) -> None: del self.tracked_objects[obj_id] - def on(self, event_type: str, callback: Callable): + def on(self, event_type: str, callback: Callable[..., Any]) -> None: self.callbacks[event_type].append(callback) def update( @@ -286,7 +310,7 @@ class CameraState: current_detections: dict[str, dict[str, 
Any]], motion_boxes: list[tuple[int, int, int, int]], regions: list[tuple[int, int, int, int]], - ): + ) -> None: current_frame = self.frame_manager.get( frame_name, self.camera_config.frame_shape_yuv ) @@ -313,7 +337,7 @@ class CameraState: f"{self.name}: New object, adding {frame_time} to frame cache for {id}" ) self.frame_cache[frame_time] = { - "frame": np.copy(current_frame), + "frame": np.copy(current_frame), # type: ignore[arg-type] "object_id": id, } @@ -356,7 +380,8 @@ class CameraState: if thumb_update and current_frame is not None: # ensure this frame is stored in the cache if ( - updated_obj.thumbnail_data["frame_time"] == frame_time + updated_obj.thumbnail_data is not None + and updated_obj.thumbnail_data["frame_time"] == frame_time and frame_time not in self.frame_cache ): logger.debug( @@ -369,13 +394,30 @@ class CameraState: updated_obj.last_updated = frame_time - # if it has been more than 5 seconds since the last thumb update - # and the last update is greater than the last publish or - # the object has changed significantly or - # the object moved enough to update the path + # Determine the staleness threshold for publishing updates. + # Fast-track to 1s for objects in the optimal size range for + # secondary face/LPR recognition that don't yet have a sub_label. 
+ obj_area = updated_obj.obj_data.get("area", 0) + obj_label = updated_obj.obj_data.get("label") + publish_threshold = 5 + + if ( + obj_label == "person" + and self.face_recognition_min_obj_area > 0 + and obj_area >= self.face_recognition_min_obj_area + and updated_obj.obj_data.get("sub_label") is None + ) or ( + obj_label in ("car", "motorcycle") + and self.lpr_min_obj_area > 0 + and obj_area >= self.lpr_min_obj_area + and updated_obj.obj_data.get("sub_label") is None + and updated_obj.obj_data.get("recognized_license_plate") is None + ): + publish_threshold = 1 + if ( ( - frame_time - updated_obj.last_published > 5 + frame_time - updated_obj.last_published > publish_threshold and updated_obj.last_updated > updated_obj.last_published ) or significant_update @@ -386,6 +428,18 @@ class CameraState: c(self.name, updated_obj, frame_name) updated_obj.last_published = frame_time + # send MQTT snapshot when object first enters a required zone, + # since the initial snapshot at creation time is blocked before + # zone evaluation has run + if updated_obj.new_zone_entered and not updated_obj.false_positive: + mqtt_required = self.camera_config.mqtt.required_zones + if mqtt_required and set(updated_obj.entered_zones) & set( + mqtt_required + ): + object_type = updated_obj.obj_data["label"] + self.send_mqtt_snapshot(updated_obj, object_type) + updated_obj.new_zone_entered = False + for id in removed_ids: # publish events to mqtt removed_obj = tracked_objects[id] @@ -397,7 +451,7 @@ class CameraState: # TODO: can i switch to looking this up and only changing when an event ends? 
# maintain best objects - camera_activity: dict[str, list[Any]] = { + camera_activity: dict[str, Any] = { "motion": len(motion_boxes) > 0, "objects": [], } @@ -411,10 +465,7 @@ class CameraState: sub_label = None if obj.obj_data.get("sub_label"): - if ( - obj.obj_data.get("sub_label")[0] - in self.config.model.all_attributes - ): + if obj.obj_data["sub_label"][0] in self.config.model.all_attributes: label = obj.obj_data["sub_label"][0] else: label = f"{object_type}-verified" @@ -449,14 +500,19 @@ class CameraState: # if the object is a higher score than the current best score # or the current object is older than desired, use the new object if ( - is_better_thumbnail( + current_best.thumbnail_data is not None + and obj.thumbnail_data is not None + and is_better_thumbnail( object_type, current_best.thumbnail_data, obj.thumbnail_data, self.camera_config.frame_shape, ) - or (now - current_best.thumbnail_data["frame_time"]) - > self.camera_config.best_image_timeout + or ( + current_best.thumbnail_data is not None + and (now - current_best.thumbnail_data["frame_time"]) + > self.camera_config.best_image_timeout + ) ): self.send_mqtt_snapshot(obj, object_type) else: @@ -472,7 +528,9 @@ class CameraState: if obj.thumbnail_data is not None } current_best_frames = { - obj.thumbnail_data["frame_time"] for obj in self.best_objects.values() + obj.thumbnail_data["frame_time"] + for obj in self.best_objects.values() + if obj.thumbnail_data is not None } thumb_frames_to_delete = [ t @@ -540,7 +598,7 @@ class CameraState: with open( os.path.join( CLIPS_DIR, - f"{self.camera_config.name}-{event_id}-clean.webp", + f"{self.name}-{event_id}-clean.webp", ), "wb", ) as p: @@ -549,7 +607,7 @@ class CameraState: # create thumbnail with max height of 175 and save width = int(175 * img_frame.shape[1] / img_frame.shape[0]) thumb = cv2.resize(img_frame, dsize=(width, 175), interpolation=cv2.INTER_AREA) - thumb_path = os.path.join(THUMB_DIR, self.camera_config.name) + thumb_path = 
os.path.join(THUMB_DIR, self.name) os.makedirs(thumb_path, exist_ok=True) cv2.imwrite(os.path.join(thumb_path, f"{event_id}.webp"), thumb) diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py index 4eeb76396..27d5ef125 100644 --- a/frigate/comms/dispatcher.py +++ b/frigate/comms/dispatcher.py @@ -118,10 +118,21 @@ class Dispatcher: try: if command_type == "set": + # Commands that require a sub-command (mask/zone name) + sub_command_required = { + "motion_mask", + "object_mask", + "zone", + } if sub_command: self._camera_settings_handlers[command]( camera_name, sub_command, payload ) + elif command in sub_command_required: + logger.error( + "Command %s requires a sub-command (mask/zone name)", + command, + ) else: self._camera_settings_handlers[command](camera_name, payload) elif command_type == "ptz": diff --git a/frigate/comms/webpush.py b/frigate/comms/webpush.py index 53c223e8d..e4ed83268 100644 --- a/frigate/comms/webpush.py +++ b/frigate/comms/webpush.py @@ -542,9 +542,9 @@ class WebPushClient(Communicator): self.check_registrations() - reasoning: str = payload.get("reasoning", "") + text: str = payload.get("message") or payload.get("reasoning", "") title = f"{camera_name}: Monitoring Alert" - message = (reasoning[:197] + "...") if len(reasoning) > 200 else reasoning + message = (text[:197] + "...") if len(text) > 200 else text logger.debug(f"Sending camera monitoring push notification for {camera_name}") diff --git a/frigate/config/camera/detect.py b/frigate/config/camera/detect.py index c0a2e7036..71dbc3292 100644 --- a/frigate/config/camera/detect.py +++ b/frigate/config/camera/detect.py @@ -1,6 +1,6 @@ from typing import Optional -from pydantic import Field +from pydantic import Field, model_validator from ..base import FrigateBaseModel @@ -71,6 +71,7 @@ class DetectConfig(FrigateBaseModel): default=None, title="Minimum initialization frames", description="Number of consecutive detection hits required before creating a tracked object. 
Increase to reduce false initializations. Default value is fps divided by 2.", + ge=2, ) max_disappeared: Optional[int] = Field( default=None, @@ -87,3 +88,11 @@ class DetectConfig(FrigateBaseModel): title="Annotation offset", description="Milliseconds to shift detect annotations to better align timeline bounding boxes with recordings; can be positive or negative.", ) + + @model_validator(mode="after") + def validate_dimensions(self) -> "DetectConfig": + if (self.width is None) != (self.height is None): + raise ValueError( + "detect -> both width and height must be specified together, or both omitted" + ) + return self diff --git a/frigate/config/camera/genai.py b/frigate/config/camera/genai.py index fae0ae577..721eeb60d 100644 --- a/frigate/config/camera/genai.py +++ b/frigate/config/camera/genai.py @@ -18,8 +18,8 @@ class GenAIProviderEnum(str, Enum): class GenAIRoleEnum(str, Enum): - tools = "tools" - vision = "vision" + chat = "chat" + descriptions = "descriptions" embeddings = "embeddings" @@ -49,21 +49,21 @@ class GenAIConfig(FrigateBaseModel): roles: list[GenAIRoleEnum] = Field( default_factory=lambda: [ GenAIRoleEnum.embeddings, - GenAIRoleEnum.vision, - GenAIRoleEnum.tools, + GenAIRoleEnum.descriptions, + GenAIRoleEnum.chat, ], title="Roles", - description="GenAI roles (tools, vision, embeddings); one provider per role.", + description="GenAI roles (chat, descriptions, embeddings); one provider per role.", ) provider_options: dict[str, Any] = Field( default={}, title="Provider options", description="Additional provider-specific options to pass to the GenAI client.", - json_schema_extra={"additionalProperties": {"type": "string"}}, + json_schema_extra={"additionalProperties": {}}, ) runtime_options: dict[str, Any] = Field( default={}, title="Runtime options", description="Runtime options passed to the provider for each inference call.", - json_schema_extra={"additionalProperties": {"type": "string"}}, + json_schema_extra={"additionalProperties": {}}, ) diff 
--git a/frigate/config/camera/onvif.py b/frigate/config/camera/onvif.py index eb21e24bd..836dec6aa 100644 --- a/frigate/config/camera/onvif.py +++ b/frigate/config/camera/onvif.py @@ -117,6 +117,11 @@ class OnvifConfig(FrigateBaseModel): title="Disable TLS verify", description="Skip TLS verification and disable digest auth for ONVIF (unsafe; use in safe networks only).", ) + profile: Optional[str] = Field( + default=None, + title="ONVIF profile", + description="Specific ONVIF media profile to use for PTZ control, matched by token or name. If not set, the first profile with valid PTZ configuration is selected automatically.", + ) autotracking: PtzAutotrackConfig = Field( default_factory=PtzAutotrackConfig, title="Autotracking", diff --git a/frigate/config/camera/review.py b/frigate/config/camera/review.py index ff07fb368..fbe24c98c 100644 --- a/frigate/config/camera/review.py +++ b/frigate/config/camera/review.py @@ -188,7 +188,7 @@ class ReviewConfig(FrigateBaseModel): detections: DetectionsConfig = Field( default_factory=DetectionsConfig, title="Detections config", - description="Settings for creating detection events (non-alert) and how long to keep them.", + description="Settings for which tracked objects generate detections (non-alert) and how detections are retained.", ) genai: GenAIReviewConfig = Field( default_factory=GenAIReviewConfig, diff --git a/frigate/config/camera/updater.py b/frigate/config/camera/updater.py index a55f355fb..1965f3813 100644 --- a/frigate/config/camera/updater.py +++ b/frigate/config/camera/updater.py @@ -23,6 +23,7 @@ class CameraConfigUpdateEnum(str, Enum): notifications = "notifications" objects = "objects" object_genai = "object_genai" + onvif = "onvif" record = "record" remove = "remove" # for removing a camera review = "review" @@ -31,6 +32,7 @@ class CameraConfigUpdateEnum(str, Enum): face_recognition = "face_recognition" lpr = "lpr" snapshots = "snapshots" + timestamp_style = "timestamp_style" zones = "zones" @@ -130,6 
+132,10 @@ class CameraConfigUpdateSubscriber: config.lpr = updated_config elif update_type == CameraConfigUpdateEnum.snapshots: config.snapshots = updated_config + elif update_type == CameraConfigUpdateEnum.onvif: + config.onvif = updated_config + elif update_type == CameraConfigUpdateEnum.timestamp_style: + config.timestamp_style = updated_config elif update_type == CameraConfigUpdateEnum.zones: config.zones = updated_config diff --git a/frigate/config/classification.py b/frigate/config/classification.py index e507a7817..05d6edc76 100644 --- a/frigate/config/classification.py +++ b/frigate/config/classification.py @@ -1,7 +1,7 @@ from enum import Enum from typing import Dict, List, Optional, Union -from pydantic import ConfigDict, Field +from pydantic import ConfigDict, Field, field_validator from .base import FrigateBaseModel @@ -178,6 +178,17 @@ class SemanticSearchConfig(FrigateBaseModel): title="Semantic search model or GenAI provider name", description="The embeddings model to use for semantic search (for example 'jinav1'), or the name of a GenAI provider with the embeddings role.", ) + + @field_validator("model", mode="before") + @classmethod + def coerce_model_enum(cls, v): + if isinstance(v, str): + try: + return SemanticSearchModelEnum(v) + except ValueError: + return v + return v + model_size: str = Field( default="small", title="Model size", diff --git a/frigate/config/config.py b/frigate/config/config.py index 19d0b73a3..1d09016f6 100644 --- a/frigate/config/config.py +++ b/frigate/config/config.py @@ -25,6 +25,7 @@ from frigate.plus import PlusApi from frigate.util.builtin import ( deep_merge, get_ffmpeg_arg_list, + load_labels, ) from frigate.util.config import ( CURRENT_CONFIG_VERSION, @@ -40,7 +41,7 @@ from frigate.util.services import auto_detect_hwaccel from .auth import AuthConfig from .base import FrigateBaseModel from .camera import CameraConfig, CameraLiveConfig -from .camera.audio import AudioConfig +from .camera.audio import AudioConfig, 
AudioFilterConfig from .camera.birdseye import BirdseyeConfig from .camera.detect import DetectConfig from .camera.ffmpeg import FfmpegConfig @@ -473,7 +474,7 @@ class FrigateConfig(FrigateBaseModel): live: CameraLiveConfig = Field( default_factory=CameraLiveConfig, title="Live playback", - description="Settings used by the Web UI to control live stream resolution and quality.", + description="Settings to control the jsmpeg live stream resolution and quality. This does not affect restreamed cameras that use go2rtc for live view.", ) motion: Optional[MotionConfig] = Field( default=None, @@ -613,6 +614,21 @@ class FrigateConfig(FrigateBaseModel): if self.ffmpeg.hwaccel_args == "auto": self.ffmpeg.hwaccel_args = auto_detect_hwaccel() + # Populate global audio filters for all audio labels + all_audio_labels = { + label + for label in load_labels("/audio-labelmap.txt", prefill=521).values() + if label + } + + if self.audio.filters is None: + self.audio.filters = {} + + for key in sorted(all_audio_labels - self.audio.filters.keys()): + self.audio.filters[key] = AudioFilterConfig() + + self.audio.filters = dict(sorted(self.audio.filters.items())) + # Global config to propagate down to camera level global_config = self.model_dump( include={ @@ -748,7 +764,7 @@ class FrigateConfig(FrigateBaseModel): ) # Default min_initialized configuration - min_initialized = int(camera_config.detect.fps / 2) + min_initialized = max(int(camera_config.detect.fps / 2), 2) if camera_config.detect.min_initialized is None: camera_config.detect.min_initialized = min_initialized @@ -791,6 +807,16 @@ class FrigateConfig(FrigateBaseModel): camera_config.review.genai.enabled ) + if camera_config.audio.filters is None: + camera_config.audio.filters = {} + + for key in sorted(all_audio_labels - camera_config.audio.filters.keys()): + camera_config.audio.filters[key] = AudioFilterConfig() + + camera_config.audio.filters = dict( + sorted(camera_config.audio.filters.items()) + ) + # Add default filters 
object_keys = camera_config.objects.track if camera_config.objects.filters is None: diff --git a/frigate/config/profile_manager.py b/frigate/config/profile_manager.py index bb122cc1a..d109bdecb 100644 --- a/frigate/config/profile_manager.py +++ b/frigate/config/profile_manager.py @@ -1,7 +1,9 @@ """Profile manager for activating/deactivating named config profiles.""" import copy +import json import logging +from datetime import datetime, timezone from pathlib import Path from typing import Optional @@ -32,7 +34,7 @@ PROFILE_SECTION_UPDATES: dict[str, CameraConfigUpdateEnum] = { "zones": CameraConfigUpdateEnum.zones, } -PERSISTENCE_FILE = Path(CONFIG_DIR) / ".active_profile" +PERSISTENCE_FILE = Path(CONFIG_DIR) / ".profiles" class ProfileManager: @@ -291,25 +293,36 @@ class ProfileManager: ) def _persist_active_profile(self, profile_name: Optional[str]) -> None: - """Persist the active profile name to disk.""" + """Persist the active profile state to disk as JSON.""" try: - if profile_name is None: - PERSISTENCE_FILE.unlink(missing_ok=True) - else: - PERSISTENCE_FILE.write_text(profile_name) + data = self._load_persisted_data() + data["active"] = profile_name + if profile_name is not None: + data.setdefault("last_activated", {})[profile_name] = datetime.now( + timezone.utc + ).timestamp() + PERSISTENCE_FILE.write_text(json.dumps(data)) except OSError: logger.exception("Failed to persist active profile") @staticmethod - def load_persisted_profile() -> Optional[str]: - """Load the persisted active profile name from disk.""" + def _load_persisted_data() -> dict: + """Load the full persisted profile data from disk.""" try: if PERSISTENCE_FILE.exists(): - name = PERSISTENCE_FILE.read_text().strip() - return name if name else None - except OSError: - logger.exception("Failed to load persisted profile") - return None + raw = PERSISTENCE_FILE.read_text().strip() + if raw: + return json.loads(raw) + except (OSError, json.JSONDecodeError): + logger.exception("Failed to load 
persisted profile data") + return {"active": None, "last_activated": {}} + + @staticmethod + def load_persisted_profile() -> Optional[str]: + """Load the persisted active profile name from disk.""" + data = ProfileManager._load_persisted_data() + name = data.get("active") + return name if name else None def get_base_configs_for_api(self, camera_name: str) -> dict[str, dict]: """Return base (pre-profile) section configs for a camera. @@ -328,7 +341,9 @@ class ProfileManager: def get_profile_info(self) -> dict: """Get profile state info for API responses.""" + data = self._load_persisted_data() return { "profiles": self.get_available_profiles(), "active_profile": self.config.active_profile, + "last_activated": data.get("last_activated", {}), } diff --git a/frigate/data_processing/common/audio_transcription/model.py b/frigate/data_processing/common/audio_transcription/model.py index 82472ad62..a610ca9e9 100644 --- a/frigate/data_processing/common/audio_transcription/model.py +++ b/frigate/data_processing/common/audio_transcription/model.py @@ -53,7 +53,7 @@ class AudioTranscriptionModelRunner: self.downloader = ModelDownloader( model_name="sherpa-onnx", download_path=download_path, - file_names=self.model_files.keys(), + file_names=list(self.model_files.keys()), download_func=self.__download_models, ) self.downloader.ensure_model_files() diff --git a/frigate/data_processing/common/face/model.py b/frigate/data_processing/common/face/model.py index 51ee64938..45e8b8939 100644 --- a/frigate/data_processing/common/face/model.py +++ b/frigate/data_processing/common/face/model.py @@ -21,7 +21,7 @@ class FaceRecognizer(ABC): def __init__(self, config: FrigateConfig) -> None: self.config = config - self.landmark_detector: cv2.face.FacemarkLBF = None + self.landmark_detector: cv2.face.Facemark | None = None self.init_landmark_detector() @abstractmethod @@ -38,13 +38,14 @@ class FaceRecognizer(ABC): def classify(self, face_image: np.ndarray) -> tuple[str, float] | None: pass - 
@redirect_output_to_logger(logger, logging.DEBUG) + @redirect_output_to_logger(logger, logging.DEBUG) # type: ignore[misc] def init_landmark_detector(self) -> None: landmark_model = os.path.join(MODEL_CACHE_DIR, "facedet/landmarkdet.yaml") if os.path.exists(landmark_model): - self.landmark_detector = cv2.face.createFacemarkLBF() - self.landmark_detector.loadModel(landmark_model) + landmark_detector = cv2.face.createFacemarkLBF() + landmark_detector.loadModel(landmark_model) + self.landmark_detector = landmark_detector def align_face( self, @@ -52,8 +53,10 @@ class FaceRecognizer(ABC): output_width: int, output_height: int, ) -> np.ndarray: - # landmark is run on grayscale images + if not self.landmark_detector: + raise ValueError("Landmark detector not initialized") + # landmark is run on grayscale images if image.ndim == 3: land_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) else: @@ -131,8 +134,11 @@ class FaceRecognizer(ABC): def similarity_to_confidence( - cosine_similarity: float, median=0.3, range_width=0.6, slope_factor=12 -): + cosine_similarity: float, + median: float = 0.3, + range_width: float = 0.6, + slope_factor: float = 12, +) -> float: """ Default sigmoid function to map cosine similarity to confidence. 
@@ -151,14 +157,14 @@ def similarity_to_confidence( bias = median # Calculate confidence - confidence = 1 / (1 + np.exp(-slope * (cosine_similarity - bias))) + confidence: float = 1 / (1 + np.exp(-slope * (cosine_similarity - bias))) return confidence class FaceNetRecognizer(FaceRecognizer): def __init__(self, config: FrigateConfig): super().__init__(config) - self.mean_embs: dict[int, np.ndarray] = {} + self.mean_embs: dict[str, np.ndarray] = {} self.face_embedder: FaceNetEmbedding = FaceNetEmbedding() self.model_builder_queue: queue.Queue | None = None @@ -168,7 +174,7 @@ class FaceNetRecognizer(FaceRecognizer): def run_build_task(self) -> None: self.model_builder_queue = queue.Queue() - def build_model(): + def build_model() -> None: face_embeddings_map: dict[str, list[np.ndarray]] = {} idx = 0 @@ -187,7 +193,7 @@ class FaceNetRecognizer(FaceRecognizer): img = cv2.imread(os.path.join(face_folder, image)) if img is None: - continue + continue # type: ignore[unreachable] img = self.align_face(img, img.shape[1], img.shape[0]) emb = self.face_embedder([img])[0].squeeze() @@ -195,12 +201,13 @@ class FaceNetRecognizer(FaceRecognizer): idx += 1 + assert self.model_builder_queue is not None self.model_builder_queue.put(face_embeddings_map) thread = threading.Thread(target=build_model, daemon=True) thread.start() - def build(self): + def build(self) -> None: if not self.landmark_detector: self.init_landmark_detector() return None @@ -226,7 +233,7 @@ class FaceNetRecognizer(FaceRecognizer): logger.debug("Finished building ArcFace model") - def classify(self, face_image): + def classify(self, face_image: np.ndarray) -> tuple[str, float] | None: if not self.landmark_detector: return None @@ -245,7 +252,7 @@ class FaceNetRecognizer(FaceRecognizer): img = self.align_face(face_image, face_image.shape[1], face_image.shape[0]) embedding = self.face_embedder([img])[0].squeeze() - score = 0 + score: float = 0 label = "" for name, mean_emb in self.mean_embs.items(): @@ -268,7 
+275,7 @@ class FaceNetRecognizer(FaceRecognizer): class ArcFaceRecognizer(FaceRecognizer): def __init__(self, config: FrigateConfig): super().__init__(config) - self.mean_embs: dict[int, np.ndarray] = {} + self.mean_embs: dict[str, np.ndarray] = {} self.face_embedder: ArcfaceEmbedding = ArcfaceEmbedding(config.face_recognition) self.model_builder_queue: queue.Queue | None = None @@ -278,7 +285,7 @@ class ArcFaceRecognizer(FaceRecognizer): def run_build_task(self) -> None: self.model_builder_queue = queue.Queue() - def build_model(): + def build_model() -> None: face_embeddings_map: dict[str, list[np.ndarray]] = {} idx = 0 @@ -297,20 +304,21 @@ class ArcFaceRecognizer(FaceRecognizer): img = cv2.imread(os.path.join(face_folder, image)) if img is None: - continue + continue # type: ignore[unreachable] img = self.align_face(img, img.shape[1], img.shape[0]) - emb = self.face_embedder([img])[0].squeeze() + emb = self.face_embedder([img])[0].squeeze() # type: ignore[arg-type] face_embeddings_map[name].append(emb) idx += 1 + assert self.model_builder_queue is not None self.model_builder_queue.put(face_embeddings_map) thread = threading.Thread(target=build_model, daemon=True) thread.start() - def build(self): + def build(self) -> None: if not self.landmark_detector: self.init_landmark_detector() return None @@ -336,7 +344,7 @@ class ArcFaceRecognizer(FaceRecognizer): logger.debug("Finished building ArcFace model") - def classify(self, face_image): + def classify(self, face_image: np.ndarray) -> tuple[str, float] | None: if not self.landmark_detector: return None @@ -353,9 +361,9 @@ class ArcFaceRecognizer(FaceRecognizer): # align face and run recognition img = self.align_face(face_image, face_image.shape[1], face_image.shape[0]) - embedding = self.face_embedder([img])[0].squeeze() + embedding = self.face_embedder([img])[0].squeeze() # type: ignore[arg-type] - score = 0 + score: float = 0 label = "" for name, mean_emb in self.mean_embs.items(): diff --git 
a/frigate/data_processing/common/license_plate/mixin.py b/frigate/data_processing/common/license_plate/mixin.py index e4fbd1172..f767a5c2f 100644 --- a/frigate/data_processing/common/license_plate/mixin.py +++ b/frigate/data_processing/common/license_plate/mixin.py @@ -10,7 +10,7 @@ import random import re import string from pathlib import Path -from typing import Any, List, Optional, Tuple +from typing import Any, List, Tuple import cv2 import numpy as np @@ -22,19 +22,35 @@ from frigate.comms.event_metadata_updater import ( EventMetadataPublisher, EventMetadataTypeEnum, ) +from frigate.comms.inter_process import InterProcessRequestor +from frigate.config import FrigateConfig +from frigate.config.classification import LicensePlateRecognitionConfig from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR +from frigate.data_processing.common.license_plate.model import LicensePlateModelRunner from frigate.embeddings.onnx.lpr_embedding import LPR_EMBEDDING_SIZE from frigate.types import TrackedObjectUpdateTypesEnum from frigate.util.builtin import EventsPerSecond, InferenceSpeed from frigate.util.image import area +from ...types import DataProcessorMetrics + logger = logging.getLogger(__name__) WRITE_DEBUG_IMAGES = False class LicensePlateProcessingMixin: - def __init__(self, *args, **kwargs): + # Attributes expected from consuming classes (set before super().__init__) + config: FrigateConfig + metrics: DataProcessorMetrics + model_runner: LicensePlateModelRunner + lpr_config: LicensePlateRecognitionConfig + requestor: InterProcessRequestor + detected_license_plates: dict[str, dict[str, Any]] + camera_current_cars: dict[str, list[str]] + sub_label_publisher: EventMetadataPublisher + + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) self.plate_rec_speed = InferenceSpeed(self.metrics.alpr_speed) self.plates_rec_second = EventsPerSecond() @@ -97,7 +113,7 @@ class LicensePlateProcessingMixin: ) try: - outputs = 
self.model_runner.detection_model([normalized_image])[0] + outputs = self.model_runner.detection_model([normalized_image])[0] # type: ignore[arg-type] except Exception as e: logger.warning(f"Error running LPR box detection model: {e}") return [] @@ -105,18 +121,18 @@ class LicensePlateProcessingMixin: outputs = outputs[0, :, :] if False: - current_time = int(datetime.datetime.now().timestamp()) + current_time = int(datetime.datetime.now().timestamp()) # type: ignore[unreachable] cv2.imwrite( f"debug/frames/probability_map_{current_time}.jpg", (outputs * 255).astype(np.uint8), ) boxes, _ = self._boxes_from_bitmap(outputs, outputs > self.mask_thresh, w, h) - return self._filter_polygon(boxes, (h, w)) + return self._filter_polygon(boxes, (h, w)) # type: ignore[return-value,arg-type] def _classify( self, images: List[np.ndarray] - ) -> Tuple[List[np.ndarray], List[Tuple[str, float]]]: + ) -> Tuple[List[np.ndarray], List[Tuple[str, float]]] | None: """ Classify the orientation or category of each detected license plate. @@ -138,15 +154,15 @@ class LicensePlateProcessingMixin: norm_images.append(norm_img) try: - outputs = self.model_runner.classification_model(norm_images) + outputs = self.model_runner.classification_model(norm_images) # type: ignore[arg-type] except Exception as e: logger.warning(f"Error running LPR classification model: {e}") - return + return None return self._process_classification_output(images, outputs) def _recognize( - self, camera: string, images: List[np.ndarray] + self, camera: str, images: List[np.ndarray] ) -> Tuple[List[str], List[List[float]]]: """ Recognize the characters on the detected license plates using the recognition model. 
@@ -179,7 +195,7 @@ class LicensePlateProcessingMixin: norm_images.append(norm_image) try: - outputs = self.model_runner.recognition_model(norm_images) + outputs = self.model_runner.recognition_model(norm_images) # type: ignore[arg-type] except Exception as e: logger.warning(f"Error running LPR recognition model: {e}") return [], [] @@ -410,7 +426,8 @@ class LicensePlateProcessingMixin: ) if sorted_data: - return map(list, zip(*sorted_data)) + plates, confs, areas_list = zip(*sorted_data) + return list(plates), list(confs), list(areas_list) return [], [], [] @@ -532,7 +549,7 @@ class LicensePlateProcessingMixin: # Add the last box merged_boxes.append(current_box) - return np.array(merged_boxes, dtype=np.int32) + return np.array(merged_boxes, dtype=np.int32) # type: ignore[return-value] def _boxes_from_bitmap( self, output: np.ndarray, mask: np.ndarray, dest_width: int, dest_height: int @@ -560,38 +577,42 @@ class LicensePlateProcessingMixin: boxes = [] scores = [] - for index in range(len(contours)): - contour = contours[index] + for index in range(len(contours)): # type: ignore[arg-type] + contour = contours[index] # type: ignore[index] # get minimum bounding box (rotated rectangle) around the contour and the smallest side length. 
points, sside = self._get_min_boxes(contour) if sside < self.min_size: continue - points = np.array(points, dtype=np.float32) + points = np.array(points, dtype=np.float32) # type: ignore[assignment] score = self._box_score(output, contour) if self.box_thresh > score: continue - points = self._expand_box(points) + points = self._expand_box(points) # type: ignore[assignment] # Get the minimum area rectangle again after expansion - points, sside = self._get_min_boxes(points.reshape(-1, 1, 2)) + points, sside = self._get_min_boxes(points.reshape(-1, 1, 2)) # type: ignore[attr-defined] if sside < self.min_size + 2: continue - points = np.array(points, dtype=np.float32) + points = np.array(points, dtype=np.float32) # type: ignore[assignment] # normalize and clip box coordinates to fit within the destination image size. - points[:, 0] = np.clip( - np.round(points[:, 0] / width * dest_width), 0, dest_width + points[:, 0] = np.clip( # type: ignore[call-overload] + np.round(points[:, 0] / width * dest_width), # type: ignore[call-overload] + 0, + dest_width, ) - points[:, 1] = np.clip( - np.round(points[:, 1] / height * dest_height), 0, dest_height + points[:, 1] = np.clip( # type: ignore[call-overload] + np.round(points[:, 1] / height * dest_height), # type: ignore[call-overload] + 0, + dest_height, ) - boxes.append(points.astype("int32")) + boxes.append(points.astype("int32")) # type: ignore[attr-defined] scores.append(score) return np.array(boxes, dtype="int32"), scores @@ -632,7 +653,7 @@ class LicensePlateProcessingMixin: x1, y1 = np.clip(contour.min(axis=0), 0, [w - 1, h - 1]) x2, y2 = np.clip(contour.max(axis=0), 0, [w - 1, h - 1]) mask = np.zeros((y2 - y1 + 1, x2 - x1 + 1), dtype=np.uint8) - cv2.fillPoly(mask, [contour - [x1, y1]], 1) + cv2.fillPoly(mask, [contour - [x1, y1]], 1) # type: ignore[call-overload] return cv2.mean(bitmap[y1 : y2 + 1, x1 : x2 + 1], mask)[0] @staticmethod @@ -690,7 +711,7 @@ class LicensePlateProcessingMixin: Returns: bool: Whether the 
polygon is valid or not. """ - return ( + return bool( point[:, 0].min() >= 0 and point[:, 0].max() < width and point[:, 1].min() >= 0 @@ -735,7 +756,7 @@ class LicensePlateProcessingMixin: return np.array([tl, tr, br, bl]) @staticmethod - def _sort_boxes(boxes): + def _sort_boxes(boxes: list[np.ndarray]) -> list[np.ndarray]: """ Sort polygons based on their position in the image. If boxes are close in vertical position (within 5 pixels), sort them by horizontal position. @@ -837,16 +858,16 @@ class LicensePlateProcessingMixin: results = [["", 0.0]] * len(images) indices = np.argsort(np.array([x.shape[1] / x.shape[0] for x in images])) - outputs = np.stack(outputs) + stacked_outputs = np.stack(outputs) - outputs = [ - (labels[idx], outputs[i, idx]) - for i, idx in enumerate(outputs.argmax(axis=1)) + stacked_outputs = [ + (labels[idx], stacked_outputs[i, idx]) + for i, idx in enumerate(stacked_outputs.argmax(axis=1)) ] for i in range(0, len(images), self.batch_size): - for j in range(len(outputs)): - label, score = outputs[j] + for j in range(len(stacked_outputs)): + label, score = stacked_outputs[j] results[indices[i + j]] = [label, score] # make sure we have high confidence if we need to flip a box if "180" in label and score >= 0.7: @@ -854,10 +875,10 @@ class LicensePlateProcessingMixin: images[indices[i + j]], cv2.ROTATE_180 ) - return images, results + return images, results # type: ignore[return-value] def _preprocess_recognition_image( - self, camera: string, image: np.ndarray, max_wh_ratio: float + self, camera: str, image: np.ndarray, max_wh_ratio: float ) -> np.ndarray: """ Preprocess an image for recognition by dynamically adjusting its width. 
@@ -925,7 +946,7 @@ class LicensePlateProcessingMixin: input_w = int(input_h * max_wh_ratio) # check for model-specific input width - model_input_w = self.model_runner.recognition_model.runner.get_input_width() + model_input_w = self.model_runner.recognition_model.runner.get_input_width() # type: ignore[union-attr] if isinstance(model_input_w, int) and model_input_w > 0: input_w = model_input_w @@ -945,7 +966,7 @@ class LicensePlateProcessingMixin: padded_image[:, :, :resized_w] = resized_image if False: - current_time = int(datetime.datetime.now().timestamp() * 1000) + current_time = int(datetime.datetime.now().timestamp() * 1000) # type: ignore[unreachable] cv2.imwrite( f"debug/frames/preprocessed_recognition_{current_time}.jpg", image, @@ -983,8 +1004,9 @@ class LicensePlateProcessingMixin: np.linalg.norm(points[1] - points[2]), ) ) - pts_std = np.float32( - [[0, 0], [crop_width, 0], [crop_width, crop_height], [0, crop_height]] + pts_std = np.array( + [[0, 0], [crop_width, 0], [crop_width, crop_height], [0, crop_height]], + dtype=np.float32, ) matrix = cv2.getPerspectiveTransform(points, pts_std) image = cv2.warpPerspective( @@ -1000,15 +1022,15 @@ class LicensePlateProcessingMixin: return image def _detect_license_plate( - self, camera: string, input: np.ndarray - ) -> tuple[int, int, int, int]: + self, camera: str, input: np.ndarray + ) -> tuple[int, int, int, int] | None: """ Use a lightweight YOLOv9 model to detect license plates for users without Frigate+ Return the dimensions of the detected plate as [x1, y1, x2, y2]. """ try: - predictions = self.model_runner.yolov9_detection_model(input) + predictions = self.model_runner.yolov9_detection_model(input) # type: ignore[arg-type] except Exception as e: logger.warning(f"Error running YOLOv9 license plate detection model: {e}") return None @@ -1073,7 +1095,7 @@ class LicensePlateProcessingMixin: logger.debug( f"{camera}: Found license plate. 
Bounding box: {expanded_box.astype(int)}" ) - return tuple(expanded_box.astype(int)) + return tuple(int(x) for x in expanded_box) # type: ignore[return-value] else: return None # No detection above the threshold @@ -1097,7 +1119,7 @@ class LicensePlateProcessingMixin: f" Variant {i + 1}: '{p['plate']}' (conf: {p['conf']:.3f}, area: {p['area']})" ) - clusters = [] + clusters: list[list[dict[str, Any]]] = [] for i, plate in enumerate(plates): merged = False for j, cluster in enumerate(clusters): @@ -1132,7 +1154,7 @@ class LicensePlateProcessingMixin: ) # Best cluster: largest size, tiebroken by max conf - def cluster_score(c): + def cluster_score(c: list[dict[str, Any]]) -> tuple[int, float]: return (len(c), max(v["conf"] for v in c)) best_cluster_idx = max( @@ -1178,7 +1200,7 @@ class LicensePlateProcessingMixin: def lpr_process( self, obj_data: dict[str, Any], frame: np.ndarray, dedicated_lpr: bool = False - ): + ) -> None: """Look for license plates in image.""" self.metrics.alpr_pps.value = self.plates_rec_second.eps() self.metrics.yolov9_lpr_pps.value = self.plates_det_second.eps() @@ -1195,7 +1217,7 @@ class LicensePlateProcessingMixin: rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) # apply motion mask - rgb[self.config.cameras[obj_data].motion.rasterized_mask == 0] = [0, 0, 0] + rgb[self.config.cameras[camera].motion.rasterized_mask == 0] = [0, 0, 0] # type: ignore[attr-defined] if WRITE_DEBUG_IMAGES: cv2.imwrite( @@ -1261,7 +1283,7 @@ class LicensePlateProcessingMixin: "stationary", False ): logger.debug( - f"{camera}: Skipping LPR for non-stationary {obj_data['label']} object {id} with no position changes. (Detected in {self.config.cameras[camera].detect.min_initialized + 1} concurrent frames, threshold to run is {self.config.cameras[camera].detect.min_initialized + 2} frames)" + f"{camera}: Skipping LPR for non-stationary {obj_data['label']} object {id} with no position changes. 
(Detected in {self.config.cameras[camera].detect.min_initialized + 1} concurrent frames, threshold to run is {self.config.cameras[camera].detect.min_initialized + 2} frames)" # type: ignore[operator] ) return @@ -1288,7 +1310,7 @@ class LicensePlateProcessingMixin: if time_since_stationary > self.stationary_scan_duration: return - license_plate: Optional[dict[str, Any]] = None + license_plate = None if "license_plate" not in self.config.cameras[camera].objects.track: logger.debug(f"{camera}: Running manual license_plate detection.") @@ -1301,7 +1323,7 @@ class LicensePlateProcessingMixin: rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) # apply motion mask - rgb[self.config.cameras[camera].motion.rasterized_mask == 0] = [0, 0, 0] + rgb[self.config.cameras[camera].motion.rasterized_mask == 0] = [0, 0, 0] # type: ignore[attr-defined] left, top, right, bottom = car_box car = rgb[top:bottom, left:right] @@ -1378,10 +1400,10 @@ class LicensePlateProcessingMixin: if attr.get("label") != "license_plate": continue - if license_plate is None or attr.get( + if license_plate is None or attr.get( # type: ignore[unreachable] "score", 0.0 ) > license_plate.get("score", 0.0): - license_plate = attr + license_plate = attr # type: ignore[assignment] # no license plates detected in this frame if not license_plate: @@ -1389,9 +1411,9 @@ class LicensePlateProcessingMixin: # we are using dedicated lpr with frigate+ if obj_data.get("label") == "license_plate": - license_plate = obj_data + license_plate = obj_data # type: ignore[assignment] - license_plate_box = license_plate.get("box") + license_plate_box = license_plate.get("box") # type: ignore[attr-defined] # check that license plate is valid if ( @@ -1420,7 +1442,7 @@ class LicensePlateProcessingMixin: 0, [license_plate_frame.shape[1], license_plate_frame.shape[0]] * 2 ) - plate_box = tuple(int(x) for x in expanded_box) + plate_box = tuple(int(x) for x in expanded_box) # type: ignore[assignment] # Crop using the expanded box 
license_plate_frame = license_plate_frame[ @@ -1596,7 +1618,7 @@ class LicensePlateProcessingMixin: sub_label = next( ( label - for label, plates_list in self.lpr_config.known_plates.items() + for label, plates_list in self.lpr_config.known_plates.items() # type: ignore[union-attr] if any( re.match(f"^{plate}$", rep_plate) or Levenshtein.distance(plate, rep_plate) @@ -1649,14 +1671,16 @@ class LicensePlateProcessingMixin: frame_bgr = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) _, encoded_img = cv2.imencode(".jpg", frame_bgr) self.sub_label_publisher.publish( - (base64.b64encode(encoded_img).decode("ASCII"), id, camera), + (base64.b64encode(encoded_img.tobytes()).decode("ASCII"), id, camera), EventMetadataTypeEnum.save_lpr_snapshot.value, ) - def handle_request(self, topic, request_data) -> dict[str, Any] | None: - return + def handle_request( + self, topic: str, request_data: dict[str, Any] + ) -> dict[str, Any] | None: + return None - def lpr_expire(self, object_id: str, camera: str): + def lpr_expire(self, object_id: str, camera: str) -> None: if object_id in self.detected_license_plates: self.detected_license_plates.pop(object_id) @@ -1673,7 +1697,7 @@ class CTCDecoder: for each decoded character sequence. """ - def __init__(self, character_dict_path=None): + def __init__(self, character_dict_path: str | None = None) -> None: """ Initializes the CTCDecoder. :param character_dict_path: Path to the character dictionary file. 
diff --git a/frigate/data_processing/common/license_plate/model.py b/frigate/data_processing/common/license_plate/model.py index f53ed7d95..f7121e65d 100644 --- a/frigate/data_processing/common/license_plate/model.py +++ b/frigate/data_processing/common/license_plate/model.py @@ -1,3 +1,4 @@ +from frigate.comms.inter_process import InterProcessRequestor from frigate.embeddings.onnx.lpr_embedding import ( LicensePlateDetector, PaddleOCRClassification, @@ -9,7 +10,12 @@ from ...types import DataProcessorModelRunner class LicensePlateModelRunner(DataProcessorModelRunner): - def __init__(self, requestor, device: str = "CPU", model_size: str = "small"): + def __init__( + self, + requestor: InterProcessRequestor, + device: str = "CPU", + model_size: str = "small", + ): super().__init__(requestor, device, model_size) self.detection_model = PaddleOCRDetection( model_size=model_size, requestor=requestor, device=device diff --git a/frigate/data_processing/post/api.py b/frigate/data_processing/post/api.py index 2c1359d96..044e5d245 100644 --- a/frigate/data_processing/post/api.py +++ b/frigate/data_processing/post/api.py @@ -17,7 +17,7 @@ class PostProcessorApi(ABC): self, config: FrigateConfig, metrics: DataProcessorMetrics, - model_runner: DataProcessorModelRunner, + model_runner: DataProcessorModelRunner | None, ) -> None: self.config = config self.metrics = metrics @@ -41,7 +41,7 @@ class PostProcessorApi(ABC): @abstractmethod def handle_request( self, topic: str, request_data: dict[str, Any] - ) -> dict[str, Any] | None: + ) -> dict[str, Any] | str | None: """Handle metadata requests. Args: request_data (dict): containing data about requested change to process. 
diff --git a/frigate/data_processing/post/audio_transcription.py b/frigate/data_processing/post/audio_transcription.py index 558ab433e..dbeb21028 100644 --- a/frigate/data_processing/post/audio_transcription.py +++ b/frigate/data_processing/post/audio_transcription.py @@ -4,7 +4,7 @@ import logging import os import threading import time -from typing import Optional +from typing import Any, Optional from peewee import DoesNotExist @@ -17,6 +17,7 @@ from frigate.const import ( UPDATE_EVENT_DESCRIPTION, ) from frigate.data_processing.types import PostProcessDataEnum +from frigate.embeddings.embeddings import Embeddings from frigate.types import TrackedObjectUpdateTypesEnum from frigate.util.audio import get_audio_from_recording @@ -31,7 +32,7 @@ class AudioTranscriptionPostProcessor(PostProcessorApi): self, config: FrigateConfig, requestor: InterProcessRequestor, - embeddings, + embeddings: Embeddings, metrics: DataProcessorMetrics, ): super().__init__(config, metrics, None) @@ -40,7 +41,7 @@ class AudioTranscriptionPostProcessor(PostProcessorApi): self.embeddings = embeddings self.recognizer = None self.transcription_lock = threading.Lock() - self.transcription_thread = None + self.transcription_thread: threading.Thread | None = None self.transcription_running = False # faster-whisper handles model downloading automatically @@ -69,7 +70,7 @@ class AudioTranscriptionPostProcessor(PostProcessorApi): self.recognizer = None def process_data( - self, data: dict[str, any], data_type: PostProcessDataEnum + self, data: dict[str, Any], data_type: PostProcessDataEnum ) -> None: """Transcribe audio from a recording. 
@@ -141,13 +142,13 @@ class AudioTranscriptionPostProcessor(PostProcessorApi): except Exception as e: logger.error(f"Error in audio transcription post-processing: {e}") - def __transcribe_audio(self, audio_data: bytes) -> Optional[tuple[str, float]]: + def __transcribe_audio(self, audio_data: bytes) -> Optional[str]: """Transcribe WAV audio data using faster-whisper.""" if not self.recognizer: logger.debug("Recognizer not initialized") return None - try: + try: # type: ignore[unreachable] # Save audio data to a temporary wav (faster-whisper expects a file) temp_wav = os.path.join(CACHE_DIR, f"temp_audio_{int(time.time())}.wav") with open(temp_wav, "wb") as f: @@ -176,7 +177,7 @@ class AudioTranscriptionPostProcessor(PostProcessorApi): logger.error(f"Error transcribing audio: {e}") return None - def _transcription_wrapper(self, event: dict[str, any]) -> None: + def _transcription_wrapper(self, event: dict[str, Any]) -> None: """Wrapper to run transcription and reset running flag when done.""" try: self.process_data( @@ -194,7 +195,7 @@ class AudioTranscriptionPostProcessor(PostProcessorApi): self.requestor.send_data(UPDATE_AUDIO_TRANSCRIPTION_STATE, "idle") - def handle_request(self, topic: str, request_data: dict[str, any]) -> str | None: + def handle_request(self, topic: str, request_data: dict[str, Any]) -> str | None: if topic == "transcribe_audio": event = request_data["event"] diff --git a/frigate/data_processing/post/license_plate.py b/frigate/data_processing/post/license_plate.py index 6f5149b9f..aa89aeb12 100644 --- a/frigate/data_processing/post/license_plate.py +++ b/frigate/data_processing/post/license_plate.py @@ -29,7 +29,7 @@ from .api import PostProcessorApi logger = logging.getLogger(__name__) -class LicensePlatePostProcessor(LicensePlateProcessingMixin, PostProcessorApi): +class LicensePlatePostProcessor(LicensePlateProcessingMixin, PostProcessorApi): # type: ignore[misc] def __init__( self, config: FrigateConfig, @@ -71,7 +71,7 @@ class 
LicensePlatePostProcessor(LicensePlateProcessingMixin, PostProcessorApi): # don't run LPR post processing for now return - event_id = data["event_id"] + event_id = data["event_id"] # type: ignore[unreachable] camera_name = data["camera"] if data_type == PostProcessDataEnum.recording: @@ -225,7 +225,7 @@ class LicensePlatePostProcessor(LicensePlateProcessingMixin, PostProcessorApi): logger.debug(f"Post processing plate: {event_id}, {frame_time}") self.lpr_process(keyframe_obj_data, frame) - def handle_request(self, topic, request_data) -> dict[str, Any] | None: + def handle_request(self, topic: str, request_data: dict) -> dict[str, Any] | None: if topic == EmbeddingsRequestEnum.reprocess_plate.value: event = request_data["event"] @@ -242,3 +242,5 @@ class LicensePlatePostProcessor(LicensePlateProcessingMixin, PostProcessorApi): "message": "Successfully requested reprocessing of license plate.", "success": True, } + + return None diff --git a/frigate/data_processing/post/object_descriptions.py b/frigate/data_processing/post/object_descriptions.py index 65ab6f7c3..babdb7252 100644 --- a/frigate/data_processing/post/object_descriptions.py +++ b/frigate/data_processing/post/object_descriptions.py @@ -16,7 +16,7 @@ from frigate.config import CameraConfig, FrigateConfig from frigate.const import CLIPS_DIR, UPDATE_EVENT_DESCRIPTION from frigate.data_processing.post.semantic_trigger import SemanticTriggerProcessor from frigate.data_processing.types import PostProcessDataEnum -from frigate.genai import GenAIClient +from frigate.genai.manager import GenAIClientManager from frigate.models import Event from frigate.types import TrackedObjectUpdateTypesEnum from frigate.util.builtin import EventsPerSecond, InferenceSpeed @@ -24,7 +24,7 @@ from frigate.util.file import get_event_thumbnail_bytes, load_event_snapshot_ima from frigate.util.image import create_thumbnail, ensure_jpeg_bytes if TYPE_CHECKING: - from frigate.embeddings import Embeddings + from 
frigate.embeddings.embeddings import Embeddings from ..post.api import PostProcessorApi from ..types import DataProcessorMetrics @@ -41,7 +41,7 @@ class ObjectDescriptionProcessor(PostProcessorApi): embeddings: "Embeddings", requestor: InterProcessRequestor, metrics: DataProcessorMetrics, - client: GenAIClient, + genai_manager: GenAIClientManager, semantic_trigger_processor: SemanticTriggerProcessor | None, ): super().__init__(config, metrics, None) @@ -49,7 +49,7 @@ class ObjectDescriptionProcessor(PostProcessorApi): self.embeddings = embeddings self.requestor = requestor self.metrics = metrics - self.genai_client = client + self.genai_manager = genai_manager self.semantic_trigger_processor = semantic_trigger_processor self.tracked_events: dict[str, list[Any]] = {} self.early_request_sent: dict[str, bool] = {} @@ -139,7 +139,7 @@ class ObjectDescriptionProcessor(PostProcessorApi): ): self._process_genai_description(event, camera_config, thumbnail) else: - self.cleanup_event(event.id) + self.cleanup_event(str(event.id)) def __regenerate_description(self, event_id: str, source: str, force: bool) -> None: """Regenerate the description for an event.""" @@ -149,17 +149,17 @@ class ObjectDescriptionProcessor(PostProcessorApi): logger.error(f"Event {event_id} not found for description regeneration") return - if self.genai_client is None: - logger.error("GenAI not enabled") - return - - camera_config = self.config.cameras[event.camera] + camera_config = self.config.cameras[str(event.camera)] if not camera_config.objects.genai.enabled and not force: logger.error(f"GenAI not enabled for camera {event.camera}") return thumbnail = get_event_thumbnail_bytes(event) + if thumbnail is None: + logger.error("No thumbnail available for %s", event.id) + return + # ensure we have a jpeg to pass to the model thumbnail = ensure_jpeg_bytes(thumbnail) @@ -187,7 +187,9 @@ class ObjectDescriptionProcessor(PostProcessorApi): ) ) - self._genai_embed_description(event, embed_image) + 
self._genai_embed_description( + event, [img for img in embed_image if img is not None] + ) def process_data(self, frame_data: dict, data_type: PostProcessDataEnum) -> None: """Process a frame update.""" @@ -196,6 +198,9 @@ class ObjectDescriptionProcessor(PostProcessorApi): if data_type != PostProcessDataEnum.tracked_object: return + if self.genai_manager.description_client is None: + return + state: str | None = frame_data.get("state", None) if state is not None: @@ -241,7 +246,7 @@ class ObjectDescriptionProcessor(PostProcessorApi): # Crop snapshot based on region # provide full image if region doesn't exist (manual events) height, width = img.shape[:2] - x1_rel, y1_rel, width_rel, height_rel = event.data.get( + x1_rel, y1_rel, width_rel, height_rel = event.data.get( # type: ignore[attr-defined] "region", [0, 0, 1, 1] ) x1, y1 = int(x1_rel * width), int(y1_rel * height) @@ -258,14 +263,16 @@ class ObjectDescriptionProcessor(PostProcessorApi): return None def _process_genai_description( - self, event: Event, camera_config: CameraConfig, thumbnail + self, event: Event, camera_config: CameraConfig, thumbnail: bytes ) -> None: + event_id = str(event.id) + if event.has_snapshot and camera_config.objects.genai.use_snapshot: snapshot_image = self._read_and_crop_snapshot(event) if not snapshot_image: return - num_thumbnails = len(self.tracked_events.get(event.id, [])) + num_thumbnails = len(self.tracked_events.get(event_id, [])) # ensure we have a jpeg to pass to the model thumbnail = ensure_jpeg_bytes(thumbnail) @@ -277,7 +284,7 @@ class ObjectDescriptionProcessor(PostProcessorApi): else ( [ data["thumbnail"][:] if data.get("thumbnail") else None - for data in self.tracked_events[event.id] + for data in self.tracked_events[event_id] if data.get("thumbnail") ] if num_thumbnails > 0 @@ -286,22 +293,22 @@ class ObjectDescriptionProcessor(PostProcessorApi): ) if camera_config.objects.genai.debug_save_thumbnails and num_thumbnails > 0: - logger.debug(f"Saving 
{num_thumbnails} thumbnails for event {event.id}") + logger.debug(f"Saving {num_thumbnails} thumbnails for event {event_id}") - Path(os.path.join(CLIPS_DIR, f"genai-requests/{event.id}")).mkdir( + Path(os.path.join(CLIPS_DIR, f"genai-requests/{event_id}")).mkdir( parents=True, exist_ok=True ) - for idx, data in enumerate(self.tracked_events[event.id], 1): + for idx, data in enumerate(self.tracked_events[event_id], 1): jpg_bytes: bytes | None = data["thumbnail"] if jpg_bytes is None: - logger.warning(f"Unable to save thumbnail {idx} for {event.id}.") + logger.warning(f"Unable to save thumbnail {idx} for {event_id}.") else: with open( os.path.join( CLIPS_DIR, - f"genai-requests/{event.id}/{idx}.jpg", + f"genai-requests/{event_id}/{idx}.jpg", ), "wb", ) as j: @@ -310,7 +317,7 @@ class ObjectDescriptionProcessor(PostProcessorApi): # Generate the description. Call happens in a thread since it is network bound. threading.Thread( target=self._genai_embed_description, - name=f"_genai_embed_description_{event.id}", + name=f"_genai_embed_description_{event_id}", daemon=True, args=( event, @@ -319,13 +326,18 @@ class ObjectDescriptionProcessor(PostProcessorApi): ).start() # Clean up tracked events and early request state - self.cleanup_event(event.id) + self.cleanup_event(event_id) def _genai_embed_description(self, event: Event, thumbnails: list[bytes]) -> None: """Embed the description for an event.""" start = datetime.datetime.now().timestamp() - camera_config = self.config.cameras[event.camera] - description = self.genai_client.generate_object_description( + camera_config = self.config.cameras[str(event.camera)] + client = self.genai_manager.description_client + + if client is None: + return + + description = client.generate_object_description( camera_config, thumbnails, event ) @@ -346,7 +358,7 @@ class ObjectDescriptionProcessor(PostProcessorApi): # Embed the description if self.config.semantic_search.enabled: - self.embeddings.embed_description(event.id, description) + 
self.embeddings.embed_description(str(event.id), description) # Check semantic trigger for this description if self.semantic_trigger_processor is not None: diff --git a/frigate/data_processing/post/review_descriptions.py b/frigate/data_processing/post/review_descriptions.py index 57bf0f7d1..4bb2deac2 100644 --- a/frigate/data_processing/post/review_descriptions.py +++ b/frigate/data_processing/post/review_descriptions.py @@ -22,6 +22,7 @@ from frigate.config.camera.review import GenAIReviewConfig, ImageSourceEnum from frigate.const import CACHE_DIR, CLIPS_DIR, UPDATE_REVIEW_DESCRIPTION from frigate.data_processing.types import PostProcessDataEnum from frigate.genai import GenAIClient +from frigate.genai.manager import GenAIClientManager from frigate.models import Recordings, ReviewSegment from frigate.util.builtin import EventsPerSecond, InferenceSpeed from frigate.util.image import get_image_from_recording @@ -41,15 +42,15 @@ class ReviewDescriptionProcessor(PostProcessorApi): config: FrigateConfig, requestor: InterProcessRequestor, metrics: DataProcessorMetrics, - client: GenAIClient, + genai_manager: GenAIClientManager, ): super().__init__(config, metrics, None) self.requestor = requestor self.metrics = metrics - self.genai_client = client + self.genai_manager = genai_manager self.review_desc_speed = InferenceSpeed(self.metrics.review_desc_speed) - self.review_descs_dps = EventsPerSecond() - self.review_descs_dps.start() + self.review_desc_dps = EventsPerSecond() + self.review_desc_dps.start() def calculate_frame_count( self, @@ -59,16 +60,25 @@ class ReviewDescriptionProcessor(PostProcessorApi): ) -> int: """Calculate optimal number of frames based on context size, image source, and resolution. - Token usage varies by resolution: larger images (ultrawide aspect ratios) use more tokens. + Token usage varies by resolution: larger images (ultra-wide aspect ratios) use more tokens. Estimates ~1 token per 1250 pixels. 
Targets 98% context utilization with safety margin. Capped at 20 frames. """ - context_size = self.genai_client.get_context_size() + client = self.genai_manager.description_client + + if client is None: + return 3 + + context_size = client.get_context_size() camera_config = self.config.cameras[camera] detect_width = camera_config.detect.width detect_height = camera_config.detect.height - aspect_ratio = detect_width / detect_height + + if not detect_width or not detect_height: + aspect_ratio = 16 / 9 + else: + aspect_ratio = detect_width / detect_height if image_source == ImageSourceEnum.recordings: if aspect_ratio >= 1: @@ -99,12 +109,17 @@ class ReviewDescriptionProcessor(PostProcessorApi): return min(max(max_frames, 3), 20) - def process_data(self, data, data_type): - self.metrics.review_desc_dps.value = self.review_descs_dps.eps() + def process_data( + self, data: dict[str, Any], data_type: PostProcessDataEnum + ) -> None: + self.metrics.review_desc_dps.value = self.review_desc_dps.eps() if data_type != PostProcessDataEnum.review: return + if self.genai_manager.description_client is None: + return + camera = data["after"]["camera"] camera_config = self.config.cameras[camera] @@ -143,10 +158,13 @@ class ReviewDescriptionProcessor(PostProcessorApi): additional_buffer_per_side = (MIN_RECORDING_DURATION - duration) / 2 buffer_extension = min(5, additional_buffer_per_side) + final_data["start_time"] -= buffer_extension + final_data["end_time"] += buffer_extension + thumbs = self.get_recording_frames( camera, - final_data["start_time"] - buffer_extension, - final_data["end_time"] + buffer_extension, + final_data["start_time"], + final_data["end_time"], height=480, # Use 480p for good balance between quality and token usage ) @@ -186,12 +204,12 @@ class ReviewDescriptionProcessor(PostProcessorApi): ) # kickoff analysis - self.review_descs_dps.update() + self.review_desc_dps.update() threading.Thread( target=run_analysis, args=( self.requestor, - self.genai_client, + 
self.genai_manager.description_client, self.review_desc_speed, camera_config, final_data, @@ -202,7 +220,7 @@ class ReviewDescriptionProcessor(PostProcessorApi): ), ).start() - def handle_request(self, topic, request_data): + def handle_request(self, topic: str, request_data: dict[str, Any]) -> str | None: if topic == EmbeddingsRequestEnum.summarize_review.value: start_ts = request_data["start_ts"] end_ts = request_data["end_ts"] @@ -307,7 +325,12 @@ class ReviewDescriptionProcessor(PostProcessorApi): os.path.join(CLIPS_DIR, "genai-requests", f"{start_ts}-{end_ts}") ).mkdir(parents=True, exist_ok=True) - return self.genai_client.generate_review_summary( + client = self.genai_manager.description_client + + if client is None: + return None + + return client.generate_review_summary( start_ts, end_ts, events_with_context, @@ -327,7 +350,7 @@ class ReviewDescriptionProcessor(PostProcessorApi): file_start = f"preview_{camera}-" start_file = f"{file_start}{start_time}.webp" end_file = f"{file_start}{end_time}.webp" - all_frames = [] + all_frames: list[str] = [] for file in sorted(os.listdir(preview_dir)): if not file.startswith(file_start): @@ -465,7 +488,7 @@ class ReviewDescriptionProcessor(PostProcessorApi): thumb_data = cv2.imread(thumb_path) if thumb_data is None: - logger.warning( + logger.warning( # type: ignore[unreachable] "Could not read preview frame at %s, skipping", thumb_path ) continue @@ -488,13 +511,12 @@ class ReviewDescriptionProcessor(PostProcessorApi): return thumbs -@staticmethod def run_analysis( requestor: InterProcessRequestor, genai_client: GenAIClient, review_inference_speed: InferenceSpeed, camera_config: CameraConfig, - final_data: dict[str, str], + final_data: dict[str, Any], thumbs: list[bytes], genai_config: GenAIReviewConfig, labelmap_objects: list[str], diff --git a/frigate/data_processing/post/semantic_trigger.py b/frigate/data_processing/post/semantic_trigger.py index ec9e5d220..08f8a2e76 100644 --- 
a/frigate/data_processing/post/semantic_trigger.py +++ b/frigate/data_processing/post/semantic_trigger.py @@ -19,6 +19,7 @@ from frigate.config import FrigateConfig from frigate.const import CONFIG_DIR from frigate.data_processing.types import PostProcessDataEnum from frigate.db.sqlitevecq import SqliteVecQueueDatabase +from frigate.embeddings.embeddings import Embeddings from frigate.embeddings.util import ZScoreNormalization from frigate.models import Event, Trigger from frigate.util.builtin import cosine_distance @@ -40,8 +41,8 @@ class SemanticTriggerProcessor(PostProcessorApi): requestor: InterProcessRequestor, sub_label_publisher: EventMetadataPublisher, metrics: DataProcessorMetrics, - embeddings, - ): + embeddings: Embeddings, + ) -> None: super().__init__(config, metrics, None) self.db = db self.embeddings = embeddings @@ -236,11 +237,14 @@ class SemanticTriggerProcessor(PostProcessorApi): return # Skip the event if not an object - if event.data.get("type") != "object": + if event.data.get("type") != "object": # type: ignore[attr-defined] return thumbnail_bytes = get_event_thumbnail_bytes(event) + if thumbnail_bytes is None: + return + nparr = np.frombuffer(thumbnail_bytes, np.uint8) thumbnail = cv2.imdecode(nparr, cv2.IMREAD_COLOR) @@ -262,8 +266,10 @@ class SemanticTriggerProcessor(PostProcessorApi): thumbnail, ) - def handle_request(self, topic, request_data): + def handle_request( + self, topic: str, request_data: dict[str, Any] + ) -> dict[str, Any] | str | None: return None - def expire_object(self, object_id, camera): + def expire_object(self, object_id: str, camera: str) -> None: pass diff --git a/frigate/data_processing/real_time/audio_transcription.py b/frigate/data_processing/real_time/audio_transcription.py index 2e6d599eb..3d1536f73 100644 --- a/frigate/data_processing/real_time/audio_transcription.py +++ b/frigate/data_processing/real_time/audio_transcription.py @@ -4,7 +4,7 @@ import logging import os import queue import threading -from 
typing import Optional +from typing import Any, Optional import numpy as np @@ -39,11 +39,11 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): self.config = config self.camera_config = camera_config self.requestor = requestor - self.stream = None - self.whisper_model = None + self.stream: Any = None + self.whisper_model: FasterWhisperASR | None = None self.model_runner = model_runner - self.transcription_segments = [] - self.audio_queue = queue.Queue() + self.transcription_segments: list[str] = [] + self.audio_queue: queue.Queue[tuple[dict[str, Any], np.ndarray]] = queue.Queue() self.stop_event = stop_event def __build_recognizer(self) -> None: @@ -142,10 +142,10 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): logger.error(f"Error processing audio stream: {e}") return None - def process_frame(self, obj_data: dict[str, any], frame: np.ndarray) -> None: + def process_frame(self, obj_data: dict[str, Any], frame: np.ndarray) -> None: pass - def process_audio(self, obj_data: dict[str, any], audio: np.ndarray) -> bool | None: + def process_audio(self, obj_data: dict[str, Any], audio: np.ndarray) -> bool | None: if audio is None or audio.size == 0: logger.debug("No audio data provided for transcription") return None @@ -269,13 +269,13 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi): ) def handle_request( - self, topic: str, request_data: dict[str, any] - ) -> dict[str, any] | None: + self, topic: str, request_data: dict[str, Any] + ) -> dict[str, Any] | None: if topic == "clear_audio_recognizer": self.stream = None self.__build_recognizer() return {"message": "Audio recognizer cleared and rebuilt", "success": True} return None - def expire_object(self, object_id: str) -> None: + def expire_object(self, object_id: str, camera: str) -> None: pass diff --git a/frigate/data_processing/real_time/bird.py b/frigate/data_processing/real_time/bird.py index 38ff1a950..48663f971 100644 --- 
a/frigate/data_processing/real_time/bird.py +++ b/frigate/data_processing/real_time/bird.py @@ -14,7 +14,7 @@ from frigate.comms.event_metadata_updater import ( from frigate.config import FrigateConfig from frigate.const import MODEL_CACHE_DIR from frigate.log import suppress_stderr_during -from frigate.util.object import calculate_region +from frigate.util.image import calculate_region from ..types import DataProcessorMetrics from .api import RealTimeProcessorApi @@ -35,10 +35,10 @@ class BirdRealTimeProcessor(RealTimeProcessorApi): metrics: DataProcessorMetrics, ): super().__init__(config, metrics) - self.interpreter: Interpreter = None + self.interpreter: Interpreter | None = None self.sub_label_publisher = sub_label_publisher - self.tensor_input_details: dict[str, Any] = None - self.tensor_output_details: dict[str, Any] = None + self.tensor_input_details: list[dict[str, Any]] | None = None + self.tensor_output_details: list[dict[str, Any]] | None = None self.detected_birds: dict[str, float] = {} self.labelmap: dict[int, str] = {} @@ -61,7 +61,7 @@ class BirdRealTimeProcessor(RealTimeProcessorApi): self.downloader = ModelDownloader( model_name="bird", download_path=download_path, - file_names=self.model_files.keys(), + file_names=list(self.model_files.keys()), download_func=self.__download_models, complete_func=self.__build_detector, ) @@ -102,8 +102,12 @@ class BirdRealTimeProcessor(RealTimeProcessorApi): i += 1 line = f.readline() - def process_frame(self, obj_data, frame): - if not self.interpreter: + def process_frame(self, obj_data: dict[str, Any], frame: np.ndarray) -> None: + if ( + not self.interpreter + or not self.tensor_input_details + or not self.tensor_output_details + ): return if obj_data["label"] != "bird": @@ -145,7 +149,7 @@ class BirdRealTimeProcessor(RealTimeProcessorApi): self.tensor_output_details[0]["index"] )[0] probs = res / res.sum(axis=0) - best_id = np.argmax(probs) + best_id = int(np.argmax(probs)) if best_id == 964: logger.debug("No 
bird classification was detected.") @@ -179,9 +183,11 @@ class BirdRealTimeProcessor(RealTimeProcessorApi): self.config.classification = payload logger.debug("Bird classification config updated dynamically") - def handle_request(self, topic, request_data): + def handle_request( + self, topic: str, request_data: dict[str, Any] + ) -> dict[str, Any] | None: return None - def expire_object(self, object_id, camera): + def expire_object(self, object_id: str, camera: str) -> None: if object_id in self.detected_birds: self.detected_birds.pop(object_id) diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index 1a2512e43..1dcf59052 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -24,7 +24,8 @@ from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR from frigate.log import suppress_stderr_during from frigate.types import TrackedObjectUpdateTypesEnum from frigate.util.builtin import EventsPerSecond, InferenceSpeed, load_labels -from frigate.util.object import box_overlaps, calculate_region +from frigate.util.image import calculate_region +from frigate.util.object import box_overlaps from ..types import DataProcessorMetrics from .api import RealTimeProcessorApi @@ -49,12 +50,16 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): ): super().__init__(config, metrics) self.model_config = model_config + + if not self.model_config.name: + raise ValueError("Custom classification model name must be set.") + self.requestor = requestor self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name) self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train") - self.interpreter: Interpreter = None - self.tensor_input_details: dict[str, Any] | None = None - self.tensor_output_details: dict[str, Any] | None = None + self.interpreter: Interpreter | None = None + self.tensor_input_details: 
list[dict[str, Any]] | None = None + self.tensor_output_details: list[dict[str, Any]] | None = None self.labelmap: dict[int, str] = {} self.classifications_per_second = EventsPerSecond() self.state_history: dict[str, dict[str, Any]] = {} @@ -63,7 +68,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): self.metrics and self.model_config.name in self.metrics.classification_speeds ): - self.inference_speed = InferenceSpeed( + self.inference_speed: InferenceSpeed | None = InferenceSpeed( self.metrics.classification_speeds[self.model_config.name] ) else: @@ -172,12 +177,20 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): return None - def process_frame(self, frame_data: dict[str, Any], frame: np.ndarray): + def process_frame(self, frame_data: dict[str, Any], frame: np.ndarray) -> None: + if ( + not self.model_config.name + or not self.model_config.state_config + or not self.tensor_input_details + or not self.tensor_output_details + ): + return + if self.metrics and self.model_config.name in self.metrics.classification_cps: self.metrics.classification_cps[ self.model_config.name ].value = self.classifications_per_second.eps() - camera = frame_data.get("camera") + camera = str(frame_data.get("camera")) if camera not in self.model_config.state_config.cameras: return @@ -283,7 +296,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): logger.debug( f"{self.model_config.name} Ran state classification with probabilities: {probs}" ) - best_id = np.argmax(probs) + best_id = int(np.argmax(probs)) score = round(probs[best_id], 2) self.__update_metrics(datetime.datetime.now().timestamp() - now) @@ -319,7 +332,9 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): verified_state, ) - def handle_request(self, topic, request_data): + def handle_request( + self, topic: str, request_data: dict[str, Any] + ) -> dict[str, Any] | None: if topic == EmbeddingsRequestEnum.reload_classification_model.value: if 
request_data.get("model_name") == self.model_config.name: self.__build_detector() @@ -335,7 +350,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): else: return None - def expire_object(self, object_id, camera): + def expire_object(self, object_id: str, camera: str) -> None: pass @@ -350,13 +365,17 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): ): super().__init__(config, metrics) self.model_config = model_config + + if not self.model_config.name: + raise ValueError("Custom classification model name must be set.") + self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name) self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train") - self.interpreter: Interpreter = None + self.interpreter: Interpreter | None = None self.sub_label_publisher = sub_label_publisher self.requestor = requestor - self.tensor_input_details: dict[str, Any] | None = None - self.tensor_output_details: dict[str, Any] | None = None + self.tensor_input_details: list[dict[str, Any]] | None = None + self.tensor_output_details: list[dict[str, Any]] | None = None self.classification_history: dict[str, list[tuple[str, float, float]]] = {} self.labelmap: dict[int, str] = {} self.classifications_per_second = EventsPerSecond() @@ -365,7 +384,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): self.metrics and self.model_config.name in self.metrics.classification_speeds ): - self.inference_speed = InferenceSpeed( + self.inference_speed: InferenceSpeed | None = InferenceSpeed( self.metrics.classification_speeds[self.model_config.name] ) else: @@ -431,8 +450,8 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): ) return None, 0.0 - label_counts = {} - label_scores = {} + label_counts: dict[str, int] = {} + label_scores: dict[str, list[float]] = {} total_attempts = len(history) for label, score, timestamp in history: @@ -443,7 +462,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): 
label_counts[label] += 1 label_scores[label].append(score) - best_label = max(label_counts, key=label_counts.get) + best_label = max(label_counts, key=lambda k: label_counts[k]) best_count = label_counts[best_label] consensus_threshold = total_attempts * 0.6 @@ -470,7 +489,15 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): ) return best_label, avg_score - def process_frame(self, obj_data, frame): + def process_frame(self, obj_data: dict[str, Any], frame: np.ndarray) -> None: + if ( + not self.model_config.name + or not self.model_config.object_config + or not self.tensor_input_details + or not self.tensor_output_details + ): + return + if self.metrics and self.model_config.name in self.metrics.classification_cps: self.metrics.classification_cps[ self.model_config.name @@ -555,7 +582,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): logger.debug( f"{self.model_config.name} Ran object classification with probabilities: {probs}" ) - best_id = np.argmax(probs) + best_id = int(np.argmax(probs)) score = round(probs[best_id], 2) self.__update_metrics(datetime.datetime.now().timestamp() - now) @@ -650,7 +677,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): ), ) - def handle_request(self, topic, request_data): + def handle_request(self, topic: str, request_data: dict) -> dict | None: if topic == EmbeddingsRequestEnum.reload_classification_model.value: if request_data.get("model_name") == self.model_config.name: self.__build_detector() @@ -666,12 +693,11 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): else: return None - def expire_object(self, object_id, camera): + def expire_object(self, object_id: str, camera: str) -> None: if object_id in self.classification_history: self.classification_history.pop(object_id) -@staticmethod def write_classification_attempt( folder: str, frame: np.ndarray, diff --git a/frigate/data_processing/real_time/face.py b/frigate/data_processing/real_time/face.py index 
d886a86e5..c6b6346b5 100644 --- a/frigate/data_processing/real_time/face.py +++ b/frigate/data_processing/real_time/face.py @@ -52,11 +52,11 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): self.face_config = config.face_recognition self.requestor = requestor self.sub_label_publisher = sub_label_publisher - self.face_detector: cv2.FaceDetectorYN = None + self.face_detector: cv2.FaceDetectorYN | None = None self.requires_face_detection = "face" not in self.config.objects.all_objects self.person_face_history: dict[str, list[tuple[str, float, int]]] = {} self.camera_current_people: dict[str, list[str]] = {} - self.recognizer: FaceRecognizer | None = None + self.recognizer: FaceRecognizer self.faces_per_second = EventsPerSecond() self.inference_speed = InferenceSpeed(self.metrics.face_rec_speed) @@ -78,7 +78,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): self.downloader = ModelDownloader( model_name="facedet", download_path=download_path, - file_names=self.model_files.keys(), + file_names=list(self.model_files.keys()), download_func=self.__download_models, complete_func=self.__build_detector, ) @@ -134,7 +134,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): def __detect_face( self, input: np.ndarray, threshold: float - ) -> tuple[int, int, int, int]: + ) -> tuple[int, int, int, int] | None: """Detect faces in input image.""" if not self.face_detector: return None @@ -153,7 +153,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): faces = self.face_detector.detect(input) if faces is None or faces[1] is None: - return None + return None # type: ignore[unreachable] face = None @@ -168,7 +168,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): h: int = int(raw_bbox[3] / scale_factor) bbox = (x, y, x + w, y + h) - if face is None or area(bbox) > area(face): + if face is None or area(bbox) > area(face): # type: ignore[unreachable] face = bbox return face @@ -177,7 +177,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): 
self.faces_per_second.update() self.inference_speed.update(duration) - def process_frame(self, obj_data: dict[str, Any], frame: np.ndarray): + def process_frame(self, obj_data: dict[str, Any], frame: np.ndarray) -> None: """Look for faces in image.""" self.metrics.face_rec_fps.value = self.faces_per_second.eps() camera = obj_data["camera"] @@ -349,7 +349,9 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): self.__update_metrics(datetime.datetime.now().timestamp() - start) - def handle_request(self, topic, request_data) -> dict[str, Any] | None: + def handle_request( + self, topic: str, request_data: dict[str, Any] + ) -> dict[str, Any] | None: if topic == EmbeddingsRequestEnum.clear_face_classifier.value: self.recognizer.clear() return {"success": True, "message": "Face classifier cleared."} @@ -432,7 +434,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): img = cv2.imread(current_file) if img is None: - return { + return { # type: ignore[unreachable] "message": "Invalid image file.", "success": False, } @@ -469,7 +471,9 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): "score": score, } - def expire_object(self, object_id: str, camera: str): + return None + + def expire_object(self, object_id: str, camera: str) -> None: if object_id in self.person_face_history: self.person_face_history.pop(object_id) @@ -478,7 +482,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): def weighted_average( self, results_list: list[tuple[str, float, int]], max_weight: int = 4000 - ): + ) -> tuple[str | None, float]: """ Calculates a robust weighted average, capping the area weight and giving more weight to higher scores. 
@@ -493,8 +497,8 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): return None, 0.0 counts: dict[str, int] = {} - weighted_scores: dict[str, int] = {} - total_weights: dict[str, int] = {} + weighted_scores: dict[str, float] = {} + total_weights: dict[str, float] = {} for name, score, face_area in results_list: if name == "unknown": @@ -509,7 +513,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): counts[name] += 1 # Capped weight based on face area - weight = min(face_area, max_weight) + weight: float = min(face_area, max_weight) # Score-based weighting (higher scores get more weight) weight *= (score - self.face_config.unknown_score) * 10 @@ -519,7 +523,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): if not weighted_scores: return None, 0.0 - best_name = max(weighted_scores, key=weighted_scores.get) + best_name = max(weighted_scores, key=lambda k: weighted_scores[k]) # If the number of faces for this person < min_faces, we are not confident it is a correct result if counts[best_name] < self.face_config.min_faces: diff --git a/frigate/data_processing/real_time/license_plate.py b/frigate/data_processing/real_time/license_plate.py index 298989c82..c2ea28b23 100644 --- a/frigate/data_processing/real_time/license_plate.py +++ b/frigate/data_processing/real_time/license_plate.py @@ -61,14 +61,16 @@ class LicensePlateRealTimeProcessor(LicensePlateProcessingMixin, RealTimeProcess self, obj_data: dict[str, Any], frame: np.ndarray, - dedicated_lpr: bool | None = False, - ): + dedicated_lpr: bool = False, + ) -> None: """Look for license plates in image.""" self.lpr_process(obj_data, frame, dedicated_lpr) - def handle_request(self, topic, request_data) -> dict[str, Any] | None: - return + def handle_request( + self, topic: str, request_data: dict[str, Any] + ) -> dict[str, Any] | None: + return None - def expire_object(self, object_id: str, camera: str): + def expire_object(self, object_id: str, camera: str) -> None: """Expire lpr objects.""" 
self.lpr_expire(object_id, camera) diff --git a/frigate/data_processing/types.py b/frigate/data_processing/types.py index 263a8b987..5cd1f5008 100644 --- a/frigate/data_processing/types.py +++ b/frigate/data_processing/types.py @@ -1,8 +1,10 @@ """Embeddings types.""" +from __future__ import annotations + from enum import Enum -from multiprocessing.managers import SyncManager -from multiprocessing.sharedctypes import Synchronized +from multiprocessing.managers import DictProxy, SyncManager, ValueProxy +from typing import Any import sherpa_onnx @@ -10,22 +12,22 @@ from frigate.data_processing.real_time.whisper_online import FasterWhisperASR class DataProcessorMetrics: - image_embeddings_speed: Synchronized - image_embeddings_eps: Synchronized - text_embeddings_speed: Synchronized - text_embeddings_eps: Synchronized - face_rec_speed: Synchronized - face_rec_fps: Synchronized - alpr_speed: Synchronized - alpr_pps: Synchronized - yolov9_lpr_speed: Synchronized - yolov9_lpr_pps: Synchronized - review_desc_speed: Synchronized - review_desc_dps: Synchronized - object_desc_speed: Synchronized - object_desc_dps: Synchronized - classification_speeds: dict[str, Synchronized] - classification_cps: dict[str, Synchronized] + image_embeddings_speed: ValueProxy[float] + image_embeddings_eps: ValueProxy[float] + text_embeddings_speed: ValueProxy[float] + text_embeddings_eps: ValueProxy[float] + face_rec_speed: ValueProxy[float] + face_rec_fps: ValueProxy[float] + alpr_speed: ValueProxy[float] + alpr_pps: ValueProxy[float] + yolov9_lpr_speed: ValueProxy[float] + yolov9_lpr_pps: ValueProxy[float] + review_desc_speed: ValueProxy[float] + review_desc_dps: ValueProxy[float] + object_desc_speed: ValueProxy[float] + object_desc_dps: ValueProxy[float] + classification_speeds: DictProxy[str, ValueProxy[float]] + classification_cps: DictProxy[str, ValueProxy[float]] def __init__(self, manager: SyncManager, custom_classification_models: list[str]): self.image_embeddings_speed = 
manager.Value("d", 0.0) @@ -52,7 +54,7 @@ class DataProcessorMetrics: class DataProcessorModelRunner: - def __init__(self, requestor, device: str = "CPU", model_size: str = "large"): + def __init__(self, requestor: Any, device: str = "CPU", model_size: str = "large"): self.requestor = requestor self.device = device self.model_size = model_size diff --git a/frigate/db/sqlitevecq.py b/frigate/db/sqlitevecq.py index aa4928e84..a72e99b6a 100644 --- a/frigate/db/sqlitevecq.py +++ b/frigate/db/sqlitevecq.py @@ -1,18 +1,21 @@ import re import sqlite3 +from typing import Any from playhouse.sqliteq import SqliteQueueDatabase class SqliteVecQueueDatabase(SqliteQueueDatabase): - def __init__(self, *args, load_vec_extension: bool = False, **kwargs) -> None: + def __init__( + self, *args: Any, load_vec_extension: bool = False, **kwargs: Any + ) -> None: self.load_vec_extension: bool = load_vec_extension # no extension necessary, sqlite will load correctly for each platform self.sqlite_vec_path = "/usr/local/lib/vec0" super().__init__(*args, **kwargs) - def _connect(self, *args, **kwargs) -> sqlite3.Connection: - conn: sqlite3.Connection = super()._connect(*args, **kwargs) + def _connect(self, *args: Any, **kwargs: Any) -> sqlite3.Connection: + conn: sqlite3.Connection = super()._connect(*args, **kwargs) # type: ignore[misc] if self.load_vec_extension: self._load_vec_extension(conn) @@ -27,7 +30,7 @@ class SqliteVecQueueDatabase(SqliteQueueDatabase): conn.enable_load_extension(False) def _register_regexp(self, conn: sqlite3.Connection) -> None: - def regexp(expr: str, item: str) -> bool: + def regexp(expr: str, item: str | None) -> bool: if item is None: return False try: diff --git a/frigate/detectors/detector_config.py b/frigate/detectors/detector_config.py index 22623c7d7..5071e3a74 100644 --- a/frigate/detectors/detector_config.py +++ b/frigate/detectors/detector_config.py @@ -47,7 +47,7 @@ class ModelTypeEnum(str, Enum): class ModelConfig(BaseModel): path: Optional[str] = 
Field( None, - title="Custom Object detection model path", + title="Custom object detector model path", description="Path to a custom detection model file (or plus:// for Frigate+ models).", ) labelmap_path: Optional[str] = Field( diff --git a/frigate/detectors/plugins/memryx.py b/frigate/detectors/plugins/memryx.py index e0ad401cb..2c03d14a4 100644 --- a/frigate/detectors/plugins/memryx.py +++ b/frigate/detectors/plugins/memryx.py @@ -317,7 +317,7 @@ class MemryXDetector(DetectionApi): f"Failed to remove downloaded zip {zip_path}: {e}" ) - def send_input(self, connection_id, tensor_input: np.ndarray): + def send_input(self, connection_id, tensor_input: np.ndarray) -> None: """Pre-process (if needed) and send frame to MemryX input queue""" if tensor_input is None: raise ValueError("[send_input] No image data provided for inference") diff --git a/frigate/detectors/plugins/onnx.py b/frigate/detectors/plugins/onnx.py index c52480642..b9aa00fbd 100644 --- a/frigate/detectors/plugins/onnx.py +++ b/frigate/detectors/plugins/onnx.py @@ -8,6 +8,8 @@ from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detection_runners import get_optimized_runner from frigate.detectors.detector_config import ( BaseDetectorConfig, + InputDTypeEnum, + InputTensorEnum, ModelTypeEnum, ) from frigate.util.model import ( @@ -59,8 +61,34 @@ class ONNXDetector(DetectionApi): if self.onnx_model_type == ModelTypeEnum.yolox: self.calculate_grids_strides() + self._warmup(detector_config) logger.info(f"ONNX: {path} loaded") + def _warmup(self, detector_config: ONNXDetectorConfig) -> None: + """Run a warmup inference to front-load one-time compilation costs. + + Some GPU backends have a slow first inference: CUDA may need PTX JIT + compilation on newer architectures (e.g. NVIDIA 50-series / Blackwell), + and MIGraphX compiles the model graph on first run. Running it here + (during detector creation) keeps the watchdog start_time at 0.0 so the + process won't be killed. 
+ """ + if detector_config.model.input_tensor == InputTensorEnum.nchw: + shape = (1, 3, detector_config.model.height, detector_config.model.width) + else: + shape = (1, detector_config.model.height, detector_config.model.width, 3) + + if detector_config.model.input_dtype in ( + InputDTypeEnum.float, + InputDTypeEnum.float_denorm, + ): + dtype = np.float32 + else: + dtype = np.uint8 + + logger.info("ONNX: warming up detector (may take a while on first run)...") + self.detect_raw(np.zeros(shape, dtype=dtype)) + def detect_raw(self, tensor_input: np.ndarray): if self.onnx_model_type == ModelTypeEnum.dfine: tensor_output = self.runner.run( diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index 18b757743..9247f4fb4 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -202,15 +202,13 @@ class EmbeddingMaintainer(threading.Thread): # post processors self.post_processors: list[PostProcessorApi] = [] - if self.genai_manager.vision_client is not None and any( - c.review.genai.enabled_in_config for c in self.config.cameras.values() - ): + if any(c.review.genai.enabled_in_config for c in self.config.cameras.values()): self.post_processors.append( ReviewDescriptionProcessor( self.config, self.requestor, self.metrics, - self.genai_manager.vision_client, + self.genai_manager, ) ) @@ -248,16 +246,14 @@ class EmbeddingMaintainer(threading.Thread): ) self.post_processors.append(semantic_trigger_processor) - if self.genai_manager.vision_client is not None and any( - c.objects.genai.enabled_in_config for c in self.config.cameras.values() - ): + if any(c.objects.genai.enabled_in_config for c in self.config.cameras.values()): self.post_processors.append( ObjectDescriptionProcessor( self.config, self.embeddings, self.requestor, self.metrics, - self.genai_manager.vision_client, + self.genai_manager, semantic_trigger_processor, ) ) diff --git a/frigate/events/audio.py b/frigate/events/audio.py index 6afa1c237..f6c41fa30 
100644 --- a/frigate/events/audio.py +++ b/frigate/events/audio.py @@ -2,17 +2,19 @@ import datetime import logging +import subprocess import threading import time from multiprocessing.managers import DictProxy from multiprocessing.synchronize import Event as MpEvent -from typing import Tuple +from typing import Any, Tuple import numpy as np from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum from frigate.comms.inter_process import InterProcessRequestor -from frigate.config import CameraConfig, CameraInput, FfmpegConfig, FrigateConfig +from frigate.config import CameraConfig, CameraInput, FrigateConfig +from frigate.config.camera.ffmpeg import CameraFfmpegConfig from frigate.config.camera.updater import ( CameraConfigUpdateEnum, CameraConfigUpdateSubscriber, @@ -35,10 +37,9 @@ from frigate.data_processing.real_time.audio_transcription import ( ) from frigate.ffmpeg_presets import parse_preset_input from frigate.log import LogPipe, suppress_stderr_during -from frigate.object_detection.base import load_labels -from frigate.util.builtin import get_ffmpeg_arg_list +from frigate.util.builtin import get_ffmpeg_arg_list, load_labels +from frigate.util.ffmpeg import start_or_restart_ffmpeg, stop_ffmpeg from frigate.util.process import FrigateProcess -from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg try: from tflite_runtime.interpreter import Interpreter @@ -49,7 +50,7 @@ except ModuleNotFoundError: logger = logging.getLogger(__name__) -def get_ffmpeg_command(ffmpeg: FfmpegConfig) -> list[str]: +def get_ffmpeg_command(ffmpeg: CameraFfmpegConfig) -> list[str]: ffmpeg_input: CameraInput = [i for i in ffmpeg.inputs if "audio" in i.roles][0] input_args = get_ffmpeg_arg_list(ffmpeg.global_args) + ( parse_preset_input(ffmpeg_input.input_args, 1) @@ -102,9 +103,11 @@ class AudioProcessor(FrigateProcess): threading.current_thread().name = "process:audio_manager" if self.config.audio_transcription.enabled: - 
self.transcription_model_runner = AudioTranscriptionModelRunner( - self.config.audio_transcription.device, - self.config.audio_transcription.model_size, + self.transcription_model_runner: AudioTranscriptionModelRunner | None = ( + AudioTranscriptionModelRunner( + self.config.audio_transcription.device or "AUTO", + self.config.audio_transcription.model_size, + ) ) else: self.transcription_model_runner = None @@ -118,7 +121,7 @@ class AudioProcessor(FrigateProcess): self.config, self.camera_metrics, self.transcription_model_runner, - self.stop_event, + self.stop_event, # type: ignore[arg-type] ) audio_threads.append(audio_thread) audio_thread.start() @@ -162,7 +165,7 @@ class AudioEventMaintainer(threading.Thread): self.logger = logging.getLogger(f"audio.{self.camera_config.name}") self.ffmpeg_cmd = get_ffmpeg_command(self.camera_config.ffmpeg) self.logpipe = LogPipe(f"ffmpeg.{self.camera_config.name}.audio") - self.audio_listener = None + self.audio_listener: subprocess.Popen[Any] | None = None self.audio_transcription_model_runner = audio_transcription_model_runner self.transcription_processor = None self.transcription_thread = None @@ -171,7 +174,7 @@ class AudioEventMaintainer(threading.Thread): self.requestor = InterProcessRequestor() self.config_subscriber = CameraConfigUpdateSubscriber( None, - {self.camera_config.name: self.camera_config}, + {str(self.camera_config.name): self.camera_config}, [ CameraConfigUpdateEnum.audio, CameraConfigUpdateEnum.enabled, @@ -180,7 +183,10 @@ class AudioEventMaintainer(threading.Thread): ) self.detection_publisher = DetectionPublisher(DetectionTypeEnum.audio.value) - if self.config.audio_transcription.enabled: + if ( + self.config.audio_transcription.enabled + and self.audio_transcription_model_runner is not None + ): # init the transcription processor for this camera self.transcription_processor = AudioTranscriptionRealTimeProcessor( config=self.config, @@ -200,11 +206,11 @@ class AudioEventMaintainer(threading.Thread): 
self.was_enabled = camera.enabled - def detect_audio(self, audio) -> None: + def detect_audio(self, audio: np.ndarray) -> None: if not self.camera_config.audio.enabled or self.stop_event.is_set(): return - audio_as_float = audio.astype(np.float32) + audio_as_float: np.ndarray = audio.astype(np.float32) rms, dBFS = self.calculate_audio_levels(audio_as_float) self.camera_metrics[self.camera_config.name].audio_rms.value = rms @@ -261,7 +267,7 @@ class AudioEventMaintainer(threading.Thread): else: self.transcription_processor.check_unload_model() - def calculate_audio_levels(self, audio_as_float: np.float32) -> Tuple[float, float]: + def calculate_audio_levels(self, audio_as_float: np.ndarray) -> Tuple[float, float]: # Calculate RMS (Root-Mean-Square) which represents the average signal amplitude # Note: np.float32 isn't serializable, we must use np.float64 to publish the message rms = np.sqrt(np.mean(np.absolute(np.square(audio_as_float)))) @@ -296,6 +302,10 @@ class AudioEventMaintainer(threading.Thread): self.logpipe.dump() self.start_or_restart_ffmpeg() + if self.audio_listener is None or self.audio_listener.stdout is None: + log_and_restart() + return + try: chunk = self.audio_listener.stdout.read(self.chunk_size) @@ -341,7 +351,10 @@ class AudioEventMaintainer(threading.Thread): self.requestor.send_data( EXPIRE_AUDIO_ACTIVITY, self.camera_config.name ) - stop_ffmpeg(self.audio_listener, self.logger) + + if self.audio_listener: + stop_ffmpeg(self.audio_listener, self.logger) + self.audio_listener = None self.was_enabled = enabled continue @@ -367,7 +380,7 @@ class AudioEventMaintainer(threading.Thread): class AudioTfl: - def __init__(self, stop_event: threading.Event, num_threads=2): + def __init__(self, stop_event: threading.Event, num_threads: int = 2) -> None: self.stop_event = stop_event self.num_threads = num_threads self.labels = load_labels("/audio-labelmap.txt", prefill=521) @@ -382,7 +395,7 @@ class AudioTfl: self.tensor_input_details = 
self.interpreter.get_input_details() self.tensor_output_details = self.interpreter.get_output_details() - def _detect_raw(self, tensor_input): + def _detect_raw(self, tensor_input: np.ndarray) -> np.ndarray: self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input) self.interpreter.invoke() detections = np.zeros((20, 6), np.float32) @@ -410,8 +423,10 @@ class AudioTfl: return detections - def detect(self, tensor_input, threshold=AUDIO_MIN_CONFIDENCE): - detections = [] + def detect( + self, tensor_input: np.ndarray, threshold: float = AUDIO_MIN_CONFIDENCE + ) -> list[tuple[str, float, tuple[float, float, float, float]]]: + detections: list[tuple[str, float, tuple[float, float, float, float]]] = [] if self.stop_event.is_set(): return detections diff --git a/frigate/events/cleanup.py b/frigate/events/cleanup.py index 263c5f18e..b867bf947 100644 --- a/frigate/events/cleanup.py +++ b/frigate/events/cleanup.py @@ -29,7 +29,7 @@ class EventCleanup(threading.Thread): self.stop_event = stop_event self.db = db self.camera_keys = list(self.config.cameras.keys()) - self.removed_camera_labels: list[str] = None + self.removed_camera_labels: list[Event] | None = None self.camera_labels: dict[str, dict[str, Any]] = {} def get_removed_camera_labels(self) -> list[Event]: @@ -37,7 +37,7 @@ class EventCleanup(threading.Thread): if self.removed_camera_labels is None: self.removed_camera_labels = list( Event.select(Event.label) - .where(Event.camera.not_in(self.camera_keys)) + .where(Event.camera.not_in(self.camera_keys)) # type: ignore[arg-type,call-arg,misc] .distinct() .execute() ) @@ -61,7 +61,7 @@ class EventCleanup(threading.Thread): ), } - return self.camera_labels[camera]["labels"] + return self.camera_labels[camera]["labels"] # type: ignore[no-any-return] def expire_snapshots(self) -> list[str]: ## Expire events from unlisted cameras based on the global config @@ -74,7 +74,9 @@ class EventCleanup(threading.Thread): # loop over object types in db for 
event in distinct_labels: # get expiration time for this label - expire_days = retain_config.objects.get(event.label, retain_config.default) + expire_days = retain_config.objects.get( + str(event.label), retain_config.default + ) expire_after = ( datetime.datetime.now() - datetime.timedelta(days=expire_days) @@ -87,7 +89,7 @@ class EventCleanup(threading.Thread): Event.thumbnail, ) .where( - Event.camera.not_in(self.camera_keys), + Event.camera.not_in(self.camera_keys), # type: ignore[arg-type,call-arg,misc] Event.start_time < expire_after, Event.label == event.label, Event.retain_indefinitely == False, @@ -109,16 +111,16 @@ class EventCleanup(threading.Thread): # update the clips attribute for the db entry query = Event.select(Event.id).where( - Event.camera.not_in(self.camera_keys), + Event.camera.not_in(self.camera_keys), # type: ignore[arg-type,call-arg,misc] Event.start_time < expire_after, Event.label == event.label, Event.retain_indefinitely == False, ) - events_to_update = [] + events_to_update: list[str] = [] for event in query.iterator(): - events_to_update.append(event.id) + events_to_update.append(str(event.id)) if len(events_to_update) >= CHUNK_SIZE: logger.debug( f"Updating {update_params} for {len(events_to_update)} events" @@ -150,7 +152,7 @@ class EventCleanup(threading.Thread): for event in distinct_labels: # get expiration time for this label expire_days = retain_config.objects.get( - event.label, retain_config.default + str(event.label), retain_config.default ) expire_after = ( @@ -177,7 +179,7 @@ class EventCleanup(threading.Thread): # only snapshots are stored in /clips # so no need to delete mp4 files for event in expired_events: - events_to_update.append(event.id) + events_to_update.append(str(event.id)) deleted = delete_event_snapshot(event) if not deleted: @@ -214,7 +216,7 @@ class EventCleanup(threading.Thread): Event.camera, ) .where( - Event.camera.not_in(self.camera_keys), + Event.camera.not_in(self.camera_keys), # type: 
ignore[arg-type,call-arg,misc] Event.start_time < expire_after, Event.retain_indefinitely == False, ) @@ -245,7 +247,7 @@ class EventCleanup(threading.Thread): # update the clips attribute for the db entry query = Event.select(Event.id).where( - Event.camera.not_in(self.camera_keys), + Event.camera.not_in(self.camera_keys), # type: ignore[arg-type,call-arg,misc] Event.start_time < expire_after, Event.retain_indefinitely == False, ) @@ -358,7 +360,7 @@ class EventCleanup(threading.Thread): logger.debug(f"Found {len(events_to_delete)} events that can be expired") if len(events_to_delete) > 0: - ids_to_delete = [e.id for e in events_to_delete] + ids_to_delete = [str(e.id) for e in events_to_delete] for i in range(0, len(ids_to_delete), CHUNK_SIZE): chunk = ids_to_delete[i : i + CHUNK_SIZE] logger.debug(f"Deleting {len(chunk)} events from the database") diff --git a/frigate/events/maintainer.py b/frigate/events/maintainer.py index 6a8da45b2..80bdaccd3 100644 --- a/frigate/events/maintainer.py +++ b/frigate/events/maintainer.py @@ -2,7 +2,7 @@ import logging import threading from multiprocessing import Queue from multiprocessing.synchronize import Event as MpEvent -from typing import Dict +from typing import Any, Dict from frigate.comms.events_updater import EventEndPublisher, EventUpdateSubscriber from frigate.config import FrigateConfig @@ -15,7 +15,7 @@ from frigate.util.builtin import to_relative_box logger = logging.getLogger(__name__) -def should_update_db(prev_event: Event, current_event: Event) -> bool: +def should_update_db(prev_event: dict[str, Any], current_event: dict[str, Any]) -> bool: """If current_event has updated fields and (clip or snapshot).""" # If event is ending and was previously saved, always update to set end_time # This ensures events are properly ended even when alerts/detections are disabled @@ -47,7 +47,9 @@ def should_update_db(prev_event: Event, current_event: Event) -> bool: return False -def should_update_state(prev_event: Event, 
current_event: Event) -> bool: +def should_update_state( + prev_event: dict[str, Any], current_event: dict[str, Any] +) -> bool: """If current event should update state, but not necessarily update the db.""" if prev_event["stationary"] != current_event["stationary"]: return True @@ -74,7 +76,7 @@ class EventProcessor(threading.Thread): super().__init__(name="event_processor") self.config = config self.timeline_queue = timeline_queue - self.events_in_process: Dict[str, Event] = {} + self.events_in_process: Dict[str, dict[str, Any]] = {} self.stop_event = stop_event self.event_receiver = EventUpdateSubscriber() @@ -92,7 +94,7 @@ class EventProcessor(threading.Thread): if update == None: continue - source_type, event_type, camera, _, event_data = update + source_type, event_type, camera, _, event_data = update # type: ignore[misc] logger.debug( f"Event received: {source_type} {event_type} {camera} {event_data['id']}" @@ -140,7 +142,7 @@ class EventProcessor(threading.Thread): self, event_type: str, camera: str, - event_data: Event, + event_data: dict[str, Any], ) -> None: """handle tracked object event updates.""" updated_db = False @@ -150,8 +152,13 @@ class EventProcessor(threading.Thread): camera_config = self.config.cameras.get(camera) if camera_config is None: return + width = camera_config.detect.width height = camera_config.detect.height + + if width is None or height is None: + return + first_detector = list(self.config.detectors.values())[0] start_time = event_data["start_time"] @@ -222,8 +229,12 @@ class EventProcessor(threading.Thread): Event.thumbnail: event_data.get("thumbnail"), Event.has_clip: event_data["has_clip"], Event.has_snapshot: event_data["has_snapshot"], - Event.model_hash: first_detector.model.model_hash, - Event.model_type: first_detector.model.model_type, + Event.model_hash: first_detector.model.model_hash + if first_detector.model + else None, + Event.model_type: first_detector.model.model_type + if first_detector.model + else None, 
Event.detector_type: first_detector.type, Event.data: { "box": box, @@ -287,10 +298,10 @@ class EventProcessor(threading.Thread): if event_type == EventStateEnum.end: del self.events_in_process[event_data["id"]] - self.event_end_publisher.publish((event_data["id"], camera, updated_db)) + self.event_end_publisher.publish((event_data["id"], camera, updated_db)) # type: ignore[arg-type] def handle_external_detection( - self, event_type: EventStateEnum, event_data: Event + self, event_type: EventStateEnum, event_data: dict[str, Any] ) -> None: # Skip replay cameras if event_data.get("camera", "").startswith(REPLAY_CAMERA_PREFIX): diff --git a/frigate/ffmpeg_presets.py b/frigate/ffmpeg_presets.py index 0652ec645..c314b30ea 100644 --- a/frigate/ffmpeg_presets.py +++ b/frigate/ffmpeg_presets.py @@ -3,7 +3,7 @@ import logging import os from enum import Enum -from typing import Any +from typing import Any, Optional from frigate.const import ( FFMPEG_HVC1_ARGS, @@ -215,7 +215,7 @@ def parse_preset_hardware_acceleration_decode( width: int, height: int, gpu: int, -) -> list[str]: +) -> Optional[list[str]]: """Return the correct preset if in preset format otherwise return None.""" if not isinstance(arg, str): return None @@ -242,9 +242,9 @@ def parse_preset_hardware_acceleration_scale( else: scale = PRESETS_HW_ACCEL_SCALE.get(arg, PRESETS_HW_ACCEL_SCALE["default"]) - scale = scale.format(fps, width, height).split(" ") - scale.extend(detect_args) - return scale + scale_args = scale.format(fps, width, height).split(" ") + scale_args.extend(detect_args) + return scale_args class EncodeTypeEnum(str, Enum): @@ -420,7 +420,7 @@ PRESETS_INPUT = { } -def parse_preset_input(arg: Any, detect_fps: int) -> list[str]: +def parse_preset_input(arg: Any, detect_fps: int) -> Optional[list[str]]: """Return the correct preset if in preset format otherwise return None.""" if not isinstance(arg, str): return None @@ -465,6 +465,16 @@ PRESETS_RECORD_OUTPUT = { "-c:a", "aac", ], + # NOTE: This preset 
originally used "-c:a copy" to pass through audio + # without re-encoding. FFmpeg 7.x introduced a threaded pipeline where + # demuxing, encoding, and muxing run in parallel via a Scheduler. This + # broke audio streamcopy from RTSP sources: packets are demuxed correctly + # but silently dropped before reaching the muxer (0 bytes written). The + # issue is specific to RTSP + streamcopy; file inputs and transcoding both + # work. Transcoding AAC audio is very lightweight (~30KiB per 10s segment) + # and adds negligible CPU overhead, so this is an acceptable workaround. + # The benefits of FFmpeg 7.x — particularly the removal of gamma correction + # hacks required by earlier versions — outweigh this trade-off. "preset-record-generic-audio-copy": [ "-f", "segment", @@ -476,8 +486,10 @@ PRESETS_RECORD_OUTPUT = { "1", "-strftime", "1", - "-c", + "-c:v", "copy", + "-c:a", + "aac", ], "preset-record-mjpeg": [ "-f", @@ -530,7 +542,9 @@ PRESETS_RECORD_OUTPUT = { } -def parse_preset_output_record(arg: Any, force_record_hvc1: bool) -> list[str]: +def parse_preset_output_record( + arg: Any, force_record_hvc1: bool +) -> Optional[list[str]]: """Return the correct preset if in preset format otherwise return None.""" if not isinstance(arg, str): return None diff --git a/frigate/genai/__init__.py b/frigate/genai/__init__.py index f799931ec..438661f5d 100644 --- a/frigate/genai/__init__.py +++ b/frigate/genai/__init__.py @@ -5,7 +5,7 @@ import importlib import logging import os import re -from typing import Any, Optional +from typing import Any, Callable, Optional import numpy as np from playhouse.shortcuts import model_to_dict @@ -31,10 +31,10 @@ __all__ = [ PROVIDERS = {} -def register_genai_provider(key: GenAIProviderEnum): +def register_genai_provider(key: GenAIProviderEnum) -> Callable: """Register a GenAI provider.""" - def decorator(cls): + def decorator(cls: type) -> type: PROVIDERS[key] = cls return cls @@ -297,7 +297,7 @@ Guidelines: """Generate a description for the 
frame.""" try: prompt = camera_config.objects.genai.object_prompts.get( - event.label, + str(event.label), camera_config.objects.genai.prompt, ).format(**model_to_dict(event)) except KeyError as e: @@ -307,7 +307,7 @@ Guidelines: logger.debug(f"Sending images to genai provider with prompt: {prompt}") return self._send(prompt, thumbnails) - def _init_provider(self): + def _init_provider(self) -> Any: """Initialize the client.""" return None @@ -320,6 +320,22 @@ Guidelines: """Submit a request to the provider.""" return None + @property + def supports_vision(self) -> bool: + """Whether the model supports vision/image input. + + Defaults to True for cloud providers. Providers that can detect + capability at runtime (e.g. llama.cpp) should override this. + """ + return True + + def list_models(self) -> list[str]: + """Return the list of model names available from this provider. + + Providers should override this to query their backend. + """ + return [] + def get_context_size(self) -> int: """Get the context window size for this provider in tokens.""" return 4096 @@ -402,7 +418,7 @@ Guidelines: } -def load_providers(): +def load_providers() -> None: package_dir = os.path.dirname(__file__) for filename in os.listdir(package_dir): if filename.endswith(".py") and filename != "__init__.py": diff --git a/frigate/genai/azure-openai.py b/frigate/genai/azure-openai.py index f424f7610..66d7d1568 100644 --- a/frigate/genai/azure-openai.py +++ b/frigate/genai/azure-openai.py @@ -3,7 +3,7 @@ import base64 import json import logging -from typing import Any, Optional +from typing import Any, AsyncGenerator, Optional from urllib.parse import parse_qs, urlparse from openai import AzureOpenAI @@ -20,10 +20,10 @@ class OpenAIClient(GenAIClient): provider: AzureOpenAI - def _init_provider(self): + def _init_provider(self) -> AzureOpenAI | None: """Initialize the client.""" try: - parsed_url = urlparse(self.genai_config.base_url) + parsed_url = urlparse(self.genai_config.base_url or "") 
query_params = parse_qs(parsed_url.query) api_version = query_params.get("api-version", [None])[0] azure_endpoint = f"{parsed_url.scheme}://{parsed_url.netloc}/" @@ -79,9 +79,17 @@ class OpenAIClient(GenAIClient): logger.warning("Azure OpenAI returned an error: %s", str(e)) return None if len(result.choices) > 0: - return result.choices[0].message.content.strip() + return str(result.choices[0].message.content.strip()) return None + def list_models(self) -> list[str]: + """Return available model IDs from Azure OpenAI.""" + try: + return sorted(m.id for m in self.provider.models.list().data) + except Exception as e: + logger.warning("Failed to list Azure OpenAI models: %s", e) + return [] + def get_context_size(self) -> int: """Get the context window size for Azure OpenAI.""" return 128000 @@ -113,7 +121,7 @@ class OpenAIClient(GenAIClient): if openai_tool_choice is not None: request_params["tool_choice"] = openai_tool_choice - result = self.provider.chat.completions.create(**request_params) + result = self.provider.chat.completions.create(**request_params) # type: ignore[call-overload] if ( result is None @@ -181,7 +189,7 @@ class OpenAIClient(GenAIClient): messages: list[dict[str, Any]], tools: Optional[list[dict[str, Any]]] = None, tool_choice: Optional[str] = "auto", - ): + ) -> AsyncGenerator[tuple[str, Any], None]: """ Stream chat with tools; yields content deltas then final message. 
@@ -214,7 +222,7 @@ class OpenAIClient(GenAIClient): tool_calls_by_index: dict[int, dict[str, Any]] = {} finish_reason = "stop" - stream = self.provider.chat.completions.create(**request_params) + stream = self.provider.chat.completions.create(**request_params) # type: ignore[call-overload] for chunk in stream: if not chunk or not chunk.choices: diff --git a/frigate/genai/gemini.py b/frigate/genai/gemini.py index f32d37e80..cfa9cb802 100644 --- a/frigate/genai/gemini.py +++ b/frigate/genai/gemini.py @@ -2,10 +2,11 @@ import json import logging -from typing import Any, Optional +from typing import Any, AsyncGenerator, Optional from google import genai from google.genai import errors, types +from google.genai.types import FunctionCallingConfigMode from frigate.config import GenAIProviderEnum from frigate.genai import GenAIClient, register_genai_provider @@ -19,10 +20,10 @@ class GeminiClient(GenAIClient): provider: genai.Client - def _init_provider(self): + def _init_provider(self) -> genai.Client: """Initialize the client.""" # Merge provider_options into HttpOptions - http_options_dict = { + http_options_dict: dict[str, Any] = { "timeout": int(self.timeout * 1000), # requires milliseconds "retry_options": types.HttpRetryOptions( attempts=3, @@ -49,12 +50,12 @@ class GeminiClient(GenAIClient): response_format: Optional[dict] = None, ) -> Optional[str]: """Submit a request to Gemini.""" - contents = [ + contents = [prompt] + [ types.Part.from_bytes(data=img, mime_type="image/jpeg") for img in images - ] + [prompt] + ] try: # Merge runtime_options into generation_config if provided - generation_config_dict = {"candidate_count": 1} + generation_config_dict: dict[str, Any] = {"candidate_count": 1} generation_config_dict.update(self.genai_config.runtime_options) if response_format and response_format.get("type") == "json_schema": @@ -65,7 +66,7 @@ class GeminiClient(GenAIClient): response = self.provider.models.generate_content( model=self.genai_config.model, - 
contents=contents, + contents=contents, # type: ignore[arg-type] config=types.GenerateContentConfig( **generation_config_dict, ), @@ -78,12 +79,22 @@ class GeminiClient(GenAIClient): return None try: + if response.text is None: + return None description = response.text.strip() except (ValueError, AttributeError): # No description was generated return None return description + def list_models(self) -> list[str]: + """Return available model names from Gemini.""" + try: + return sorted(m.name or "" for m in self.provider.models.list()) + except Exception as e: + logger.warning("Failed to list Gemini models: %s", e) + return [] + def get_context_size(self) -> int: """Get the context window size for Gemini.""" # Gemini Pro Vision has a 1M token context window @@ -102,7 +113,7 @@ class GeminiClient(GenAIClient): """ try: # Convert messages to Gemini format - gemini_messages = [] + gemini_messages: list[types.Content] = [] for msg in messages: role = msg.get("role", "user") content = msg.get("content", "") @@ -110,7 +121,11 @@ class GeminiClient(GenAIClient): # Map roles to Gemini format if role == "system": # Gemini doesn't have system role, prepend to first user message - if gemini_messages and gemini_messages[0].role == "user": + if ( + gemini_messages + and gemini_messages[0].role == "user" + and gemini_messages[0].parts + ): gemini_messages[0].parts[ 0 ].text = f"{content}\n\n{gemini_messages[0].parts[0].text}" @@ -136,7 +151,7 @@ class GeminiClient(GenAIClient): types.Content( role="function", parts=[ - types.Part.from_function_response(function_response) + types.Part.from_function_response(function_response) # type: ignore[misc,call-arg,arg-type] ], ) ) @@ -171,19 +186,25 @@ class GeminiClient(GenAIClient): if tool_choice: if tool_choice == "none": tool_config = types.ToolConfig( - function_calling_config=types.FunctionCallingConfig(mode="NONE") + function_calling_config=types.FunctionCallingConfig( + mode=FunctionCallingConfigMode.NONE + ) ) elif tool_choice == 
"auto": tool_config = types.ToolConfig( - function_calling_config=types.FunctionCallingConfig(mode="AUTO") + function_calling_config=types.FunctionCallingConfig( + mode=FunctionCallingConfigMode.AUTO + ) ) elif tool_choice == "required": tool_config = types.ToolConfig( - function_calling_config=types.FunctionCallingConfig(mode="ANY") + function_calling_config=types.FunctionCallingConfig( + mode=FunctionCallingConfigMode.ANY + ) ) # Build request config - config_params = {"candidate_count": 1} + config_params: dict[str, Any] = {"candidate_count": 1} if gemini_tools: config_params["tools"] = gemini_tools @@ -197,7 +218,7 @@ class GeminiClient(GenAIClient): response = self.provider.models.generate_content( model=self.genai_config.model, - contents=gemini_messages, + contents=gemini_messages, # type: ignore[arg-type] config=types.GenerateContentConfig(**config_params), ) @@ -291,7 +312,7 @@ class GeminiClient(GenAIClient): messages: list[dict[str, Any]], tools: Optional[list[dict[str, Any]]] = None, tool_choice: Optional[str] = "auto", - ): + ) -> AsyncGenerator[tuple[str, Any], None]: """ Stream chat with tools; yields content deltas then final message. 
@@ -299,7 +320,7 @@ class GeminiClient(GenAIClient): """ try: # Convert messages to Gemini format - gemini_messages = [] + gemini_messages: list[types.Content] = [] for msg in messages: role = msg.get("role", "user") content = msg.get("content", "") @@ -307,7 +328,11 @@ class GeminiClient(GenAIClient): # Map roles to Gemini format if role == "system": # Gemini doesn't have system role, prepend to first user message - if gemini_messages and gemini_messages[0].role == "user": + if ( + gemini_messages + and gemini_messages[0].role == "user" + and gemini_messages[0].parts + ): gemini_messages[0].parts[ 0 ].text = f"{content}\n\n{gemini_messages[0].parts[0].text}" @@ -333,7 +358,7 @@ class GeminiClient(GenAIClient): types.Content( role="function", parts=[ - types.Part.from_function_response(function_response) + types.Part.from_function_response(function_response) # type: ignore[misc,call-arg,arg-type] ], ) ) @@ -368,19 +393,25 @@ class GeminiClient(GenAIClient): if tool_choice: if tool_choice == "none": tool_config = types.ToolConfig( - function_calling_config=types.FunctionCallingConfig(mode="NONE") + function_calling_config=types.FunctionCallingConfig( + mode=FunctionCallingConfigMode.NONE + ) ) elif tool_choice == "auto": tool_config = types.ToolConfig( - function_calling_config=types.FunctionCallingConfig(mode="AUTO") + function_calling_config=types.FunctionCallingConfig( + mode=FunctionCallingConfigMode.AUTO + ) ) elif tool_choice == "required": tool_config = types.ToolConfig( - function_calling_config=types.FunctionCallingConfig(mode="ANY") + function_calling_config=types.FunctionCallingConfig( + mode=FunctionCallingConfigMode.ANY + ) ) # Build request config - config_params = {"candidate_count": 1} + config_params: dict[str, Any] = {"candidate_count": 1} if gemini_tools: config_params["tools"] = gemini_tools @@ -399,7 +430,7 @@ class GeminiClient(GenAIClient): stream = await self.provider.aio.models.generate_content_stream( model=self.genai_config.model, - 
contents=gemini_messages, + contents=gemini_messages, # type: ignore[arg-type] config=types.GenerateContentConfig(**config_params), ) diff --git a/frigate/genai/llama_cpp.py b/frigate/genai/llama_cpp.py index 48ea9747c..51e8d160d 100644 --- a/frigate/genai/llama_cpp.py +++ b/frigate/genai/llama_cpp.py @@ -4,7 +4,7 @@ import base64 import io import json import logging -from typing import Any, Optional +from typing import Any, AsyncGenerator, Optional import httpx import numpy as np @@ -23,7 +23,7 @@ def _to_jpeg(img_bytes: bytes) -> bytes | None: try: img = Image.open(io.BytesIO(img_bytes)) if img.mode != "RGB": - img = img.convert("RGB") + img = img.convert("RGB") # type: ignore[assignment] buf = io.BytesIO() img.save(buf, format="JPEG", quality=85) return buf.getvalue() @@ -36,20 +36,124 @@ def _to_jpeg(img_bytes: bytes) -> bytes | None: class LlamaCppClient(GenAIClient): """Generative AI client for Frigate using llama.cpp server.""" - provider: str # base_url + provider: str | None # base_url provider_options: dict[str, Any] + _context_size: int | None + _supports_vision: bool + _supports_audio: bool + _supports_tools: bool - def _init_provider(self): - """Initialize the client.""" + def _init_provider(self) -> str | None: + """Initialize the client and query model metadata from the server.""" self.provider_options = { **self.genai_config.provider_options, } - return ( + self._context_size = None + self._supports_vision = False + self._supports_audio = False + self._supports_tools = False + + base_url = ( self.genai_config.base_url.rstrip("/") if self.genai_config.base_url else None ) + if base_url is None: + return None + + configured_model = self.genai_config.model + + # Query /v1/models to validate the configured model exists + try: + response = requests.get( + f"{base_url}/v1/models", + timeout=10, + ) + response.raise_for_status() + models_data = response.json() + + model_found = False + for model in models_data.get("data", []): + model_ids = 
{model.get("id")} + for alias in model.get("aliases", []): + model_ids.add(alias) + if configured_model in model_ids: + model_found = True + break + + if not model_found: + available = [] + for m in models_data.get("data", []): + available.append(m.get("id", "unknown")) + for alias in m.get("aliases", []): + available.append(alias) + logger.error( + "Model '%s' not found on llama.cpp server. Available models: %s", + configured_model, + available, + ) + return None + except Exception as e: + logger.warning( + "Failed to query llama.cpp /v1/models endpoint: %s. " + "Model validation skipped.", + e, + ) + + # Query /props for context size, modalities, and tool support. + # The standard /props?model= endpoint works with llama-server. + # If it fails, try the llama-swap per-model passthrough endpoint which + # returns props for a specific model without requiring it to be loaded. + try: + try: + response = requests.get( + f"{base_url}/props", + params={"model": configured_model}, + timeout=10, + ) + response.raise_for_status() + props = response.json() + except Exception: + response = requests.get( + f"{base_url}/upstream/{configured_model}/props", + timeout=10, + ) + response.raise_for_status() + props = response.json() + + # Context size from server runtime config + default_settings = props.get("default_generation_settings", {}) + n_ctx = default_settings.get("n_ctx") + if n_ctx: + self._context_size = int(n_ctx) + + # Modalities (vision, audio) + modalities = props.get("modalities", {}) + self._supports_vision = modalities.get("vision", False) + self._supports_audio = modalities.get("audio", False) + + # Tool support from chat template capabilities + chat_caps = props.get("chat_template_caps", {}) + self._supports_tools = chat_caps.get("supports_tools", False) + + logger.info( + "llama.cpp model '%s' initialized — context: %s, vision: %s, audio: %s, tools: %s", + configured_model, + self._context_size or "unknown", + self._supports_vision, + self._supports_audio, + 
self._supports_tools, + ) + except Exception as e: + logger.warning( + "Failed to query llama.cpp /props endpoint: %s. " + "Using defaults for context size and capabilities.", + e, + ) + + return base_url + def _send( self, prompt: str, @@ -75,7 +179,7 @@ class LlamaCppClient(GenAIClient): content.append( { "type": "image_url", - "image_url": { + "image_url": { # type: ignore[dict-item] "url": f"data:image/jpeg;base64,{encoded_image}", }, } @@ -111,15 +215,57 @@ class LlamaCppClient(GenAIClient): ): choice = result["choices"][0] if "message" in choice and "content" in choice["message"]: - return choice["message"]["content"].strip() + return str(choice["message"]["content"].strip()) return None except Exception as e: logger.warning("llama.cpp returned an error: %s", str(e)) return None + @property + def supports_vision(self) -> bool: + """Whether the loaded model supports vision/image input.""" + return self._supports_vision + + @property + def supports_audio(self) -> bool: + """Whether the loaded model supports audio input.""" + return self._supports_audio + + @property + def supports_tools(self) -> bool: + """Whether the loaded model supports tool/function calling.""" + return self._supports_tools + + def list_models(self) -> list[str]: + """Return available model IDs from the llama.cpp server.""" + if self.provider is None: + return [] + try: + response = requests.get(f"{self.provider}/v1/models", timeout=10) + response.raise_for_status() + models = [] + for m in response.json().get("data", []): + models.append(m.get("id", "unknown")) + for alias in m.get("aliases", []): + models.append(alias) + return sorted(models) + except Exception as e: + logger.warning("Failed to list llama.cpp models: %s", e) + return [] + def get_context_size(self) -> int: - """Get the context window size for llama.cpp.""" - return int(self.provider_options.get("context_size", 4096)) + """Get the context window size for llama.cpp. + + Resolution order: + 1. 
provider_options["context_size"] (user override) + 2. Value queried from llama.cpp server at init + 3. Default fallback of 4096 + """ + if "context_size" in self.provider_options: + return int(self.provider_options["context_size"]) + if self._context_size is not None: + return self._context_size + return 4096 def _build_payload( self, @@ -229,7 +375,7 @@ class LlamaCppClient(GenAIClient): content.append( { "prompt_string": "<__media__>\n", - "multimodal_data": [encoded], + "multimodal_data": [encoded], # type: ignore[dict-item] } ) @@ -367,7 +513,7 @@ class LlamaCppClient(GenAIClient): messages: list[dict[str, Any]], tools: Optional[list[dict[str, Any]]] = None, tool_choice: Optional[str] = "auto", - ): + ) -> AsyncGenerator[tuple[str, Any], None]: """Stream chat with tools via OpenAI-compatible streaming API.""" if self.provider is None: logger.warning( diff --git a/frigate/genai/manager.py b/frigate/genai/manager.py index 01daa35e0..94719f429 100644 --- a/frigate/genai/manager.py +++ b/frigate/genai/manager.py @@ -1,15 +1,15 @@ """GenAI client manager for Frigate. -Manages GenAI provider clients from Frigate config. Configuration is read only -in _update_config(); no other code should read config.genai. Exposes clients -by role: tool_client, vision_client, embeddings_client. +Manages GenAI provider clients from Frigate config. Clients are created lazily +on first access so that providers whose roles are never used (e.g. chat when +no chat feature is active) are never initialized. 
""" import logging from typing import TYPE_CHECKING, Optional from frigate.config import FrigateConfig -from frigate.config.camera.genai import GenAIRoleEnum +from frigate.config.camera.genai import GenAIConfig, GenAIRoleEnum if TYPE_CHECKING: from frigate.genai import GenAIClient @@ -21,68 +21,98 @@ class GenAIClientManager: """Manages GenAI provider clients from Frigate config.""" def __init__(self, config: FrigateConfig) -> None: - self._tool_client: Optional[GenAIClient] = None - self._vision_client: Optional[GenAIClient] = None - self._embeddings_client: Optional[GenAIClient] = None + self._configs: dict[str, GenAIConfig] = {} + self._role_map: dict[GenAIRoleEnum, str] = {} + self._clients: dict[str, "GenAIClient"] = {} self.update_config(config) def update_config(self, config: FrigateConfig) -> None: - """Build role clients from current Frigate config.genai. + """Store provider configs and build the role→name mapping. Called from __init__ and can be called again when config is reloaded. - Each role (tools, vision, embeddings) gets the client for the provider - that has that role in its roles list. + Clients are not created here; they are instantiated lazily on first + access via a role property or list_models(). 
""" from frigate.genai import PROVIDERS, load_providers - self._tool_client = None - self._vision_client = None - self._embeddings_client = None + self._configs = {} + self._role_map = {} + self._clients = {} if not config.genai: return load_providers() - for _name, genai_cfg in config.genai.items(): + for name, genai_cfg in config.genai.items(): if not genai_cfg.provider: continue - provider_cls = PROVIDERS.get(genai_cfg.provider) - if not provider_cls: + if genai_cfg.provider not in PROVIDERS: logger.warning( "Unknown GenAI provider %s in config, skipping.", genai_cfg.provider, ) continue - try: - client = provider_cls(genai_cfg) - except Exception as e: - logger.exception( - "Failed to create GenAI client for provider %s: %s", - genai_cfg.provider, - e, - ) - continue + + self._configs[name] = genai_cfg for role in genai_cfg.roles: - if role == GenAIRoleEnum.tools: - self._tool_client = client - elif role == GenAIRoleEnum.vision: - self._vision_client = client - elif role == GenAIRoleEnum.embeddings: - self._embeddings_client = client + self._role_map[role] = name + + def _get_client(self, name: str) -> "Optional[GenAIClient]": + """Return the client for *name*, creating it on first access.""" + if name in self._clients: + return self._clients[name] + + from frigate.genai import PROVIDERS + + genai_cfg = self._configs.get(name) + if not genai_cfg: + return None + + if not genai_cfg.provider: + return None + + provider_cls = PROVIDERS.get(genai_cfg.provider) + if not provider_cls: + return None + + try: + client: "GenAIClient" = provider_cls(genai_cfg) + except Exception as e: + logger.exception( + "Failed to create GenAI client for provider %s: %s", + genai_cfg.provider, + e, + ) + return None + + self._clients[name] = client + return client @property - def tool_client(self) -> "Optional[GenAIClient]": - """Client configured for the tools role (e.g. 
chat with function calling).""" - return self._tool_client + def chat_client(self) -> "Optional[GenAIClient]": + """Client configured for the chat role (e.g. chat with function calling).""" + name = self._role_map.get(GenAIRoleEnum.chat) + return self._get_client(name) if name else None @property - def vision_client(self) -> "Optional[GenAIClient]": - """Client configured for the vision role (e.g. review descriptions, object descriptions).""" - return self._vision_client + def description_client(self) -> "Optional[GenAIClient]": + """Client configured for the descriptions role (e.g. review descriptions, object descriptions).""" + name = self._role_map.get(GenAIRoleEnum.descriptions) + return self._get_client(name) if name else None @property def embeddings_client(self) -> "Optional[GenAIClient]": """Client configured for the embeddings role.""" - return self._embeddings_client + name = self._role_map.get(GenAIRoleEnum.embeddings) + return self._get_client(name) if name else None + + def list_models(self) -> dict[str, list[str]]: + """Return available models keyed by config entry name.""" + result: dict[str, list[str]] = {} + for name in self._configs: + client = self._get_client(name) + if client: + result[name] = client.list_models() + return result diff --git a/frigate/genai/ollama.py b/frigate/genai/ollama.py index 0bfb95000..7315b6e39 100644 --- a/frigate/genai/ollama.py +++ b/frigate/genai/ollama.py @@ -2,7 +2,7 @@ import json import logging -from typing import Any, Optional +from typing import Any, AsyncGenerator, Optional from httpx import RemoteProtocolError, TimeoutException from ollama import AsyncClient as OllamaAsyncClient @@ -28,10 +28,10 @@ class OllamaClient(GenAIClient): }, } - provider: ApiClient + provider: ApiClient | None provider_options: dict[str, Any] - def _init_provider(self): + def _init_provider(self) -> ApiClient | None: """Initialize the client.""" self.provider_options = { **self.LOCAL_OPTIMIZED_OPTIONS, @@ -73,7 +73,7 @@ class 
OllamaClient(GenAIClient): "exclusiveMinimum", "exclusiveMaximum", } - result = {} + result: dict[str, Any] = {} for key, value in schema.items(): if not _is_properties and key in STRIP_KEYS: continue @@ -122,7 +122,7 @@ class OllamaClient(GenAIClient): logger.debug( f"Ollama tokens used: eval_count={result.get('eval_count')}, prompt_eval_count={result.get('prompt_eval_count')}" ) - return result["response"].strip() + return str(result["response"]).strip() except ( TimeoutException, ResponseError, @@ -132,6 +132,19 @@ class OllamaClient(GenAIClient): logger.warning("Ollama returned an error: %s", str(e)) return None + def list_models(self) -> list[str]: + """Return available model names from the Ollama server.""" + if self.provider is None: + return [] + try: + response = self.provider.list() + return sorted( + m.get("name", m.get("model", "")) for m in response.get("models", []) + ) + except Exception as e: + logger.warning("Failed to list Ollama models: %s", e) + return [] + def get_context_size(self) -> int: """Get the context window size for Ollama.""" return int( @@ -263,7 +276,7 @@ class OllamaClient(GenAIClient): messages: list[dict[str, Any]], tools: Optional[list[dict[str, Any]]] = None, tool_choice: Optional[str] = "auto", - ): + ) -> AsyncGenerator[tuple[str, Any], None]: """Stream chat with tools; yields content deltas then final message. 
When tools are provided, Ollama streaming does not include tool_calls diff --git a/frigate/genai/openai.py b/frigate/genai/openai.py index 7d8700579..88108e730 100644 --- a/frigate/genai/openai.py +++ b/frigate/genai/openai.py @@ -3,7 +3,7 @@ import base64 import json import logging -from typing import Any, Optional +from typing import Any, AsyncGenerator, Optional from httpx import TimeoutException from openai import OpenAI @@ -21,7 +21,7 @@ class OpenAIClient(GenAIClient): provider: OpenAI context_size: Optional[int] = None - def _init_provider(self): + def _init_provider(self) -> OpenAI: """Initialize the client.""" # Extract context_size from provider_options as it's not a valid OpenAI client parameter # It will be used in get_context_size() instead @@ -44,7 +44,12 @@ class OpenAIClient(GenAIClient): ) -> Optional[str]: """Submit a request to OpenAI.""" encoded_images = [base64.b64encode(image).decode("utf-8") for image in images] - messages_content = [] + messages_content: list[dict] = [ + { + "type": "text", + "text": prompt, + } + ] for image in encoded_images: messages_content.append( { @@ -55,12 +60,6 @@ class OpenAIClient(GenAIClient): }, } ) - messages_content.append( - { - "type": "text", - "text": prompt, - } - ) try: request_params = { "model": self.genai_config.model, @@ -81,12 +80,20 @@ class OpenAIClient(GenAIClient): and hasattr(result, "choices") and len(result.choices) > 0 ): - return result.choices[0].message.content.strip() + return str(result.choices[0].message.content.strip()) return None except (TimeoutException, Exception) as e: logger.warning("OpenAI returned an error: %s", str(e)) return None + def list_models(self) -> list[str]: + """Return available model IDs from the OpenAI-compatible API.""" + try: + return sorted(m.id for m in self.provider.models.list().data) + except Exception as e: + logger.warning("Failed to list OpenAI models: %s", e) + return [] + def get_context_size(self) -> int: """Get the context window size for OpenAI.""" 
if self.context_size is not None: @@ -171,7 +178,7 @@ class OpenAIClient(GenAIClient): } request_params.update(provider_opts) - result = self.provider.chat.completions.create(**request_params) + result = self.provider.chat.completions.create(**request_params) # type: ignore[call-overload] if ( result is None @@ -245,7 +252,7 @@ class OpenAIClient(GenAIClient): messages: list[dict[str, Any]], tools: Optional[list[dict[str, Any]]] = None, tool_choice: Optional[str] = "auto", - ): + ) -> AsyncGenerator[tuple[str, Any], None]: """ Stream chat with tools; yields content deltas then final message. @@ -287,7 +294,7 @@ class OpenAIClient(GenAIClient): tool_calls_by_index: dict[int, dict[str, Any]] = {} finish_reason = "stop" - stream = self.provider.chat.completions.create(**request_params) + stream = self.provider.chat.completions.create(**request_params) # type: ignore[call-overload] for chunk in stream: if not chunk or not chunk.choices: diff --git a/frigate/jobs/media_sync.py b/frigate/jobs/media_sync.py index 803a80a9d..4a3fdc355 100644 --- a/frigate/jobs/media_sync.py +++ b/frigate/jobs/media_sync.py @@ -5,7 +5,7 @@ import os import threading from dataclasses import dataclass, field from datetime import datetime -from typing import Optional +from typing import Optional, cast from frigate.comms.inter_process import InterProcessRequestor from frigate.const import CONFIG_DIR, UPDATE_JOB_STATE @@ -122,7 +122,7 @@ def start_media_sync_job( if job_is_running("media_sync"): current = get_current_job("media_sync") logger.warning( - f"Media sync job {current.id} is already running. Rejecting new request." + f"Media sync job {current.id if current else 'unknown'} is already running. Rejecting new request." 
) return None @@ -146,9 +146,9 @@ def start_media_sync_job( def get_current_media_sync_job() -> Optional[MediaSyncJob]: """Get the current running/queued media sync job, if any.""" - return get_current_job("media_sync") + return cast(Optional[MediaSyncJob], get_current_job("media_sync")) def get_media_sync_job_by_id(job_id: str) -> Optional[MediaSyncJob]: """Get media sync job by ID. Currently only tracks the current job.""" - return get_job_by_id("media_sync", job_id) + return cast(Optional[MediaSyncJob], get_job_by_id("media_sync", job_id)) diff --git a/frigate/jobs/motion_search.py b/frigate/jobs/motion_search.py index d7c8f8fbc..1a90f0bb9 100644 --- a/frigate/jobs/motion_search.py +++ b/frigate/jobs/motion_search.py @@ -6,7 +6,7 @@ import threading from concurrent.futures import Future, ThreadPoolExecutor, as_completed from dataclasses import asdict, dataclass, field from datetime import datetime -from typing import Any, Optional +from typing import Any, Optional, cast import cv2 import numpy as np @@ -96,7 +96,7 @@ def create_polygon_mask( dtype=np.int32, ) mask = np.zeros((frame_height, frame_width), dtype=np.uint8) - cv2.fillPoly(mask, [motion_points], 255) + cv2.fillPoly(mask, [motion_points], (255,)) return mask @@ -116,7 +116,7 @@ def compute_roi_bbox_normalized( def heatmap_overlaps_roi( - heatmap: dict[str, int], roi_bbox: tuple[float, float, float, float] + heatmap: object, roi_bbox: tuple[float, float, float, float] ) -> bool: """Check if a sparse motion heatmap has any overlap with the ROI bounding box. @@ -155,9 +155,9 @@ def segment_passes_activity_gate(recording: Recordings) -> bool: Returns True if any of motion, objects, or regions is non-zero/non-null. Returns True if all are null (old segments without data). 
""" - motion = recording.motion - objects = recording.objects - regions = recording.regions + motion: Any = recording.motion + objects: Any = recording.objects + regions: Any = recording.regions # Old segments without metadata - pass through (conservative) if motion is None and objects is None and regions is None: @@ -278,6 +278,9 @@ class MotionSearchRunner(threading.Thread): frame_width = camera_config.detect.width frame_height = camera_config.detect.height + if frame_width is None or frame_height is None: + raise ValueError(f"Camera {camera_name} detect dimensions not configured") + # Create polygon mask polygon_mask = create_polygon_mask( self.job.polygon_points, frame_width, frame_height @@ -415,11 +418,13 @@ class MotionSearchRunner(threading.Thread): if self._should_stop(): break + rec_start: float = recording.start_time # type: ignore[assignment] + rec_end: float = recording.end_time # type: ignore[assignment] future = executor.submit( self._process_recording_for_motion, - recording.path, - recording.start_time, - recording.end_time, + str(recording.path), + rec_start, + rec_end, self.job.start_time_range, self.job.end_time_range, polygon_mask, @@ -524,10 +529,12 @@ class MotionSearchRunner(threading.Thread): break try: + rec_start: float = recording.start_time # type: ignore[assignment] + rec_end: float = recording.end_time # type: ignore[assignment] results, frames = self._process_recording_for_motion( - recording.path, - recording.start_time, - recording.end_time, + str(recording.path), + rec_start, + rec_end, self.job.start_time_range, self.job.end_time_range, polygon_mask, @@ -672,7 +679,9 @@ class MotionSearchRunner(threading.Thread): # Handle frame dimension changes if gray.shape != polygon_mask.shape: resized_mask = cv2.resize( - polygon_mask, (gray.shape[1], gray.shape[0]), cv2.INTER_NEAREST + polygon_mask, + (gray.shape[1], gray.shape[0]), + interpolation=cv2.INTER_NEAREST, ) current_bbox = cv2.boundingRect(resized_mask) else: @@ -698,7 +707,7 @@ 
class MotionSearchRunner(threading.Thread): ) if prev_frame_gray is not None: - diff = cv2.absdiff(prev_frame_gray, masked_gray) + diff = cv2.absdiff(prev_frame_gray, masked_gray) # type: ignore[unreachable] diff_blurred = cv2.GaussianBlur(diff, (3, 3), 0) _, thresh = cv2.threshold( diff_blurred, threshold, 255, cv2.THRESH_BINARY @@ -825,7 +834,7 @@ def get_motion_search_job(job_id: str) -> Optional[MotionSearchJob]: if job_entry: return job_entry[0] # Check completed jobs via manager - return get_job_by_id("motion_search", job_id) + return cast(Optional[MotionSearchJob], get_job_by_id("motion_search", job_id)) def cancel_motion_search_job(job_id: str) -> bool: diff --git a/frigate/jobs/vlm_watch.py b/frigate/jobs/vlm_watch.py index dae5e5f41..cd64325d0 100644 --- a/frigate/jobs/vlm_watch.py +++ b/frigate/jobs/vlm_watch.py @@ -25,6 +25,9 @@ logger = logging.getLogger(__name__) _MIN_INTERVAL = 1 _MAX_INTERVAL = 300 +# Minimum seconds between VLM iterations when woken by detections (no zone filter) +_DETECTION_COOLDOWN_WITHOUT_ZONE = 10 + # Max user/assistant turn pairs to keep in conversation history _MAX_HISTORY = 10 @@ -40,6 +43,7 @@ class VLMWatchJob(Job): labels: list = field(default_factory=list) zones: list = field(default_factory=list) last_reasoning: str = "" + notification_message: str = "" iteration_count: int = 0 def to_dict(self) -> dict[str, Any]: @@ -54,9 +58,9 @@ class VLMWatchRunner(threading.Thread): job: VLMWatchJob, config: FrigateConfig, cancel_event: threading.Event, - frame_processor, - genai_manager, - dispatcher, + frame_processor: Any, + genai_manager: Any, + dispatcher: Any, ) -> None: super().__init__(daemon=True, name=f"vlm_watch_{job.id}") self.job = job @@ -117,11 +121,12 @@ class VLMWatchRunner(threading.Thread): def _run_iteration(self) -> float: """Run one VLM analysis iteration. 
Returns seconds until next run.""" - vision_client = ( - self.genai_manager.vision_client or self.genai_manager.tool_client - ) - if vision_client is None: - logger.warning("VLM watch job %s: no vision client available", self.job.id) + chat_client = self.genai_manager.chat_client + if chat_client is None or not chat_client.supports_vision: + logger.warning( + "VLM watch job %s: no chat client with vision support available", + self.job.id, + ) return 30 frame = self.frame_processor.get_current_frame(self.job.camera, {}) @@ -159,7 +164,7 @@ class VLMWatchRunner(threading.Thread): } ) - response = vision_client.chat_with_tools( + response = chat_client.chat_with_tools( messages=self.conversation, tools=None, tool_choice=None, @@ -196,6 +201,7 @@ class VLMWatchRunner(threading.Thread): min(_MAX_INTERVAL, int(parsed.get("next_run_in", 30))), ) reasoning = str(parsed.get("reasoning", "")) + notification_message = str(parsed.get("notification_message", "")) except (json.JSONDecodeError, ValueError, TypeError) as e: logger.warning( "VLM watch job %s: failed to parse VLM response: %s", self.job.id, e @@ -203,6 +209,7 @@ class VLMWatchRunner(threading.Thread): return 30 self.job.last_reasoning = reasoning + self.job.notification_message = notification_message self.job.iteration_count += 1 self._broadcast_status() @@ -213,22 +220,41 @@ class VLMWatchRunner(threading.Thread): self.job.camera, reasoning, ) - self._send_notification(reasoning) + self._send_notification(notification_message or reasoning) self.job.status = JobStatusTypesEnum.success return 0 return next_run_in def _wait_for_trigger(self, max_wait: float) -> None: - """Wait up to max_wait seconds, returning early if a relevant detection fires on the target camera.""" - deadline = time.time() + max_wait + """Wait up to max_wait seconds, returning early if a relevant detection fires on the target camera. + + With zones configured, a matching detection wakes immediately (events + are already filtered). 
Without zones, detections are frequent so a + cooldown is enforced: messages are continuously drained to prevent + queue backup, but the loop only exits once a match has been seen + *and* the cooldown period has elapsed. + """ + now = time.time() + deadline = now + max_wait + use_cooldown = not self.job.zones + earliest_wake = now + _DETECTION_COOLDOWN_WITHOUT_ZONE if use_cooldown else 0 + triggered = False + while not self.cancel_event.is_set(): remaining = deadline - time.time() if remaining <= 0: break - topic, payload = self.detection_subscriber.check_for_update( + + if triggered and time.time() >= earliest_wake: + break + + result = self.detection_subscriber.check_for_update( timeout=min(1.0, remaining) ) + if result is None: + continue + topic, payload = result if topic is None or payload is None: continue # payload = (camera, frame_name, frame_time, tracked_objects, motion_boxes, regions) @@ -247,12 +273,22 @@ class VLMWatchRunner(threading.Thread): if cam != self.job.camera or not tracked_objects: continue if self._detection_matches_filters(tracked_objects): - logger.debug( - "VLM watch job %s: woken early by detection event on %s", - self.job.id, - self.job.camera, - ) - break + if not use_cooldown: + logger.debug( + "VLM watch job %s: woken early by detection event on %s", + self.job.id, + self.job.camera, + ) + break + + if not triggered: + logger.debug( + "VLM watch job %s: detection match on %s, draining for %.0fs", + self.job.id, + self.job.camera, + max(0, earliest_wake - time.time()), + ) + triggered = True def _detection_matches_filters(self, tracked_objects: list) -> bool: """Return True if any tracked object passes the label and zone filters.""" @@ -281,7 +317,11 @@ class VLMWatchRunner(threading.Thread): f"You will receive a sequence of frames over time. Use the conversation history to understand " f"what is stationary vs. 
actively changing.\n\n" f"For each frame respond with JSON only:\n" - f'{{"condition_met": , "next_run_in": , "reasoning": ""}}\n\n' + f'{{"condition_met": , "next_run_in": , "reasoning": "", "notification_message": ""}}\n\n' + f"Guidelines for notification_message:\n" + f"- Only required when condition_met is true.\n" + f"- Write a short, natural notification a user would want to receive on their phone.\n" + f'- Example: "Your package has been delivered to the front porch."\n\n' f"Guidelines for next_run_in:\n" f"- Scene is empty / nothing of interest visible: 60-300.\n" f"- Relevant object(s) visible anywhere in frame (even outside the target zone): 3-10. " @@ -291,12 +331,13 @@ class VLMWatchRunner(threading.Thread): f"- Keep reasoning to 1-2 sentences." ) - def _send_notification(self, reasoning: str) -> None: + def _send_notification(self, message: str) -> None: """Publish a camera_monitoring event so downstream handlers (web push, MQTT) can notify users.""" payload = { "camera": self.job.camera, "condition": self.job.condition, - "reasoning": reasoning, + "message": message, + "reasoning": self.job.last_reasoning, "job_id": self.job.id, } @@ -328,9 +369,9 @@ def start_vlm_watch_job( condition: str, max_duration_minutes: int, config: FrigateConfig, - frame_processor, - genai_manager, - dispatcher, + frame_processor: Any, + genai_manager: Any, + dispatcher: Any, labels: list[str] | None = None, zones: list[str] | None = None, ) -> str: diff --git a/frigate/motion/__init__.py b/frigate/motion/__init__.py index 1f6785d5d..58f781f46 100644 --- a/frigate/motion/__init__.py +++ b/frigate/motion/__init__.py @@ -13,10 +13,10 @@ class MotionDetector(ABC): frame_shape: Tuple[int, int, int], config: MotionConfig, fps: int, - improve_contrast, - threshold, - contour_area, - ): + improve_contrast: bool, + threshold: int, + contour_area: int | None, + ) -> None: pass @abstractmethod @@ -25,7 +25,7 @@ class MotionDetector(ABC): pass @abstractmethod - def 
is_calibrating(self): + def is_calibrating(self) -> bool: """Return if motion is recalibrating.""" pass @@ -35,6 +35,6 @@ class MotionDetector(ABC): pass @abstractmethod - def stop(self): + def stop(self) -> None: """Stop any ongoing work and processes.""" pass diff --git a/frigate/motion/frigate_motion.py b/frigate/motion/frigate_motion.py index d49b0e861..8a067e1da 100644 --- a/frigate/motion/frigate_motion.py +++ b/frigate/motion/frigate_motion.py @@ -1,7 +1,9 @@ +from typing import Any + import cv2 import numpy as np -from frigate.config import MotionConfig +from frigate.config.config import RuntimeMotionConfig from frigate.motion import MotionDetector from frigate.util.image import grab_cv2_contours @@ -9,19 +11,20 @@ from frigate.util.image import grab_cv2_contours class FrigateMotionDetector(MotionDetector): def __init__( self, - frame_shape, - config: MotionConfig, + frame_shape: tuple[int, ...], + config: RuntimeMotionConfig, fps: int, - improve_contrast, - threshold, - contour_area, - ): + improve_contrast: Any, + threshold: Any, + contour_area: Any, + ) -> None: self.config = config self.frame_shape = frame_shape - self.resize_factor = frame_shape[0] / config.frame_height + frame_height = config.frame_height or frame_shape[0] + self.resize_factor = frame_shape[0] / frame_height self.motion_frame_size = ( - config.frame_height, - config.frame_height * frame_shape[1] // frame_shape[0], + frame_height, + frame_height * frame_shape[1] // frame_shape[0], ) self.avg_frame = np.zeros(self.motion_frame_size, np.float32) self.avg_delta = np.zeros(self.motion_frame_size, np.float32) @@ -38,10 +41,10 @@ class FrigateMotionDetector(MotionDetector): self.threshold = threshold self.contour_area = contour_area - def is_calibrating(self): + def is_calibrating(self) -> bool: return False - def detect(self, frame): + def detect(self, frame: np.ndarray) -> list: motion_boxes = [] gray = frame[0 : self.frame_shape[0], 0 : self.frame_shape[1]] @@ -99,7 +102,7 @@ class 
FrigateMotionDetector(MotionDetector): # dilate the thresholded image to fill in holes, then find contours # on thresholded image - thresh_dilated = cv2.dilate(thresh, None, iterations=2) + thresh_dilated = cv2.dilate(thresh, None, iterations=2) # type: ignore[call-overload] contours = cv2.findContours( thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE ) diff --git a/frigate/motion/improved_motion.py b/frigate/motion/improved_motion.py index b821e9532..6694dafff 100644 --- a/frigate/motion/improved_motion.py +++ b/frigate/motion/improved_motion.py @@ -1,11 +1,12 @@ import logging +from typing import Optional import cv2 import numpy as np from scipy.ndimage import gaussian_filter from frigate.camera import PTZMetrics -from frigate.config import MotionConfig +from frigate.config.config import RuntimeMotionConfig from frigate.motion import MotionDetector from frigate.util.image import grab_cv2_contours @@ -15,22 +16,23 @@ logger = logging.getLogger(__name__) class ImprovedMotionDetector(MotionDetector): def __init__( self, - frame_shape, - config: MotionConfig, + frame_shape: tuple[int, ...], + config: RuntimeMotionConfig, fps: int, - ptz_metrics: PTZMetrics = None, - name="improved", - blur_radius=1, - interpolation=cv2.INTER_NEAREST, - contrast_frame_history=50, - ): + ptz_metrics: Optional[PTZMetrics] = None, + name: str = "improved", + blur_radius: int = 1, + interpolation: int = cv2.INTER_NEAREST, + contrast_frame_history: int = 50, + ) -> None: self.name = name self.config = config self.frame_shape = frame_shape - self.resize_factor = frame_shape[0] / config.frame_height + frame_height = config.frame_height or frame_shape[0] + self.resize_factor = frame_shape[0] / frame_height self.motion_frame_size = ( - config.frame_height, - config.frame_height * frame_shape[1] // frame_shape[0], + frame_height, + frame_height * frame_shape[1] // frame_shape[0], ) self.avg_frame = np.zeros(self.motion_frame_size, np.float32) self.motion_frame_count = 0 @@ -44,20 
+46,20 @@ class ImprovedMotionDetector(MotionDetector): self.contrast_values[:, 1:2] = 255 self.contrast_values_index = 0 self.ptz_metrics = ptz_metrics - self.last_stop_time = None + self.last_stop_time: float | None = None - def is_calibrating(self): + def is_calibrating(self) -> bool: return self.calibrating - def detect(self, frame): - motion_boxes = [] + def detect(self, frame: np.ndarray) -> list[tuple[int, int, int, int]]: + motion_boxes: list[tuple[int, int, int, int]] = [] if not self.config.enabled: return motion_boxes # if ptz motor is moving from autotracking, quickly return # a single box that is 80% of the frame - if ( + if self.ptz_metrics is not None and ( self.ptz_metrics.autotracker_enabled.value and not self.ptz_metrics.motor_stopped.is_set() ): @@ -130,19 +132,19 @@ class ImprovedMotionDetector(MotionDetector): # dilate the thresholded image to fill in holes, then find contours # on thresholded image - thresh_dilated = cv2.dilate(thresh, None, iterations=1) + thresh_dilated = cv2.dilate(thresh, None, iterations=1) # type: ignore[call-overload] contours = cv2.findContours( thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE ) contours = grab_cv2_contours(contours) # loop over the contours - total_contour_area = 0 + total_contour_area: float = 0 for c in contours: # if the contour is big enough, count it as motion contour_area = cv2.contourArea(c) total_contour_area += contour_area - if contour_area > self.config.contour_area: + if contour_area > (self.config.contour_area or 0): x, y, w, h = cv2.boundingRect(c) motion_boxes.append( ( @@ -159,7 +161,7 @@ class ImprovedMotionDetector(MotionDetector): # check if the motor has just stopped from autotracking # if so, reassign the average to the current frame so we begin with a new baseline - if ( + if self.ptz_metrics is not None and ( # ensure we only do this for cameras with autotracking enabled self.ptz_metrics.autotracker_enabled.value and self.ptz_metrics.motor_stopped.is_set() diff --git 
a/frigate/mypy.ini b/frigate/mypy.ini index 5bad10f49..3c643236f 100644 --- a/frigate/mypy.ini +++ b/frigate/mypy.ini @@ -22,50 +22,43 @@ warn_unreachable = true no_implicit_reexport = true [mypy-frigate.*] +ignore_errors = false + +# Third-party code imported from https://github.com/ufal/whisper_streaming +[mypy-frigate.data_processing.real_time.whisper_online] ignore_errors = true -[mypy-frigate.__main__] -ignore_errors = false -disallow_untyped_calls = false +# TODO: Remove ignores for these modules as they are updated with type annotations. -[mypy-frigate.app] -ignore_errors = false -disallow_untyped_calls = false +[mypy-frigate.api.*] +ignore_errors = true -[mypy-frigate.const] -ignore_errors = false +[mypy-frigate.config.*] +ignore_errors = true -[mypy-frigate.comms.*] -ignore_errors = false +[mypy-frigate.debug_replay] +ignore_errors = true -[mypy-frigate.events] -ignore_errors = false +[mypy-frigate.detectors.*] +ignore_errors = true -[mypy-frigate.log] -ignore_errors = false +[mypy-frigate.embeddings.*] +ignore_errors = true -[mypy-frigate.models] -ignore_errors = false +[mypy-frigate.http] +ignore_errors = true -[mypy-frigate.plus] -ignore_errors = false +[mypy-frigate.ptz.*] +ignore_errors = true -[mypy-frigate.stats] -ignore_errors = false +[mypy-frigate.stats.*] +ignore_errors = true -[mypy-frigate.track.*] -ignore_errors = false +[mypy-frigate.test.*] +ignore_errors = true -[mypy-frigate.types] -ignore_errors = false +[mypy-frigate.util.*] +ignore_errors = true -[mypy-frigate.version] -ignore_errors = false - -[mypy-frigate.watchdog] -ignore_errors = false -disallow_untyped_calls = false - - -[mypy-frigate.service_manager.*] -ignore_errors = false +[mypy-frigate.video.*] +ignore_errors = true diff --git a/frigate/object_detection/base.py b/frigate/object_detection/base.py index d2a54afbc..a62fe4843 100644 --- a/frigate/object_detection/base.py +++ b/frigate/object_detection/base.py @@ -7,6 +7,7 @@ from abc import ABC, abstractmethod from collections 
import deque from multiprocessing import Queue, Value from multiprocessing.synchronize import Event as MpEvent +from typing import Any, Optional import numpy as np import zmq @@ -34,26 +35,25 @@ logger = logging.getLogger(__name__) class ObjectDetector(ABC): @abstractmethod - def detect(self, tensor_input, threshold: float = 0.4): + def detect(self, tensor_input: np.ndarray, threshold: float = 0.4) -> list: pass class BaseLocalDetector(ObjectDetector): def __init__( self, - detector_config: BaseDetectorConfig = None, - labels: str = None, - stop_event: MpEvent = None, - ): + detector_config: Optional[BaseDetectorConfig] = None, + labels: Optional[str] = None, + stop_event: Optional[MpEvent] = None, + ) -> None: self.fps = EventsPerSecond() if labels is None: - self.labels = {} + self.labels: dict[int, str] = {} else: self.labels = load_labels(labels) - if detector_config: + if detector_config and detector_config.model: self.input_transform = tensor_transform(detector_config.model.input_tensor) - self.dtype = detector_config.model.input_dtype else: self.input_transform = None @@ -77,10 +77,10 @@ class BaseLocalDetector(ObjectDetector): return tensor_input - def detect(self, tensor_input: np.ndarray, threshold=0.4): + def detect(self, tensor_input: np.ndarray, threshold: float = 0.4) -> list: detections = [] - raw_detections = self.detect_raw(tensor_input) + raw_detections = self.detect_raw(tensor_input) # type: ignore[attr-defined] for d in raw_detections: if int(d[0]) < 0 or int(d[0]) >= len(self.labels): @@ -96,28 +96,28 @@ class BaseLocalDetector(ObjectDetector): class LocalObjectDetector(BaseLocalDetector): - def detect_raw(self, tensor_input: np.ndarray): + def detect_raw(self, tensor_input: np.ndarray) -> np.ndarray: tensor_input = self._transform_input(tensor_input) - return self.detect_api.detect_raw(tensor_input=tensor_input) + return self.detect_api.detect_raw(tensor_input=tensor_input) # type: ignore[no-any-return] class 
AsyncLocalObjectDetector(BaseLocalDetector): - def async_send_input(self, tensor_input: np.ndarray, connection_id: str): + def async_send_input(self, tensor_input: np.ndarray, connection_id: str) -> None: tensor_input = self._transform_input(tensor_input) - return self.detect_api.send_input(connection_id, tensor_input) + self.detect_api.send_input(connection_id, tensor_input) - def async_receive_output(self): + def async_receive_output(self) -> Any: return self.detect_api.receive_output() class DetectorRunner(FrigateProcess): def __init__( self, - name, + name: str, detection_queue: Queue, cameras: list[str], - avg_speed: Value, - start_time: Value, + avg_speed: Any, + start_time: Any, config: FrigateConfig, detector_config: BaseDetectorConfig, stop_event: MpEvent, @@ -129,11 +129,11 @@ class DetectorRunner(FrigateProcess): self.start_time = start_time self.config = config self.detector_config = detector_config - self.outputs: dict = {} + self.outputs: dict[str, Any] = {} - def create_output_shm(self, name: str): + def create_output_shm(self, name: str) -> None: out_shm = UntrackedSharedMemory(name=f"out-{name}", create=False) - out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf) + out_np: np.ndarray = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf) self.outputs[name] = {"shm": out_shm, "np": out_np} def run(self) -> None: @@ -155,8 +155,8 @@ class DetectorRunner(FrigateProcess): connection_id, ( 1, - self.detector_config.model.height, - self.detector_config.model.width, + self.detector_config.model.height, # type: ignore[union-attr] + self.detector_config.model.width, # type: ignore[union-attr] 3, ), ) @@ -187,11 +187,11 @@ class DetectorRunner(FrigateProcess): class AsyncDetectorRunner(FrigateProcess): def __init__( self, - name, + name: str, detection_queue: Queue, cameras: list[str], - avg_speed: Value, - start_time: Value, + avg_speed: Any, + start_time: Any, config: FrigateConfig, detector_config: BaseDetectorConfig, stop_event: 
MpEvent, @@ -203,15 +203,15 @@ class AsyncDetectorRunner(FrigateProcess): self.start_time = start_time self.config = config self.detector_config = detector_config - self.outputs: dict = {} + self.outputs: dict[str, Any] = {} self._frame_manager: SharedMemoryFrameManager | None = None self._publisher: ObjectDetectorPublisher | None = None self._detector: AsyncLocalObjectDetector | None = None - self.send_times = deque() + self.send_times: deque[float] = deque() - def create_output_shm(self, name: str): + def create_output_shm(self, name: str) -> None: out_shm = UntrackedSharedMemory(name=f"out-{name}", create=False) - out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf) + out_np: np.ndarray = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf) self.outputs[name] = {"shm": out_shm, "np": out_np} def _detect_worker(self) -> None: @@ -222,12 +222,13 @@ class AsyncDetectorRunner(FrigateProcess): except queue.Empty: continue + assert self._frame_manager is not None input_frame = self._frame_manager.get( connection_id, ( 1, - self.detector_config.model.height, - self.detector_config.model.width, + self.detector_config.model.height, # type: ignore[union-attr] + self.detector_config.model.width, # type: ignore[union-attr] 3, ), ) @@ -238,11 +239,13 @@ class AsyncDetectorRunner(FrigateProcess): # mark start time and send to accelerator self.send_times.append(time.perf_counter()) + assert self._detector is not None self._detector.async_send_input(input_frame, connection_id) def _result_worker(self) -> None: logger.info("Starting Result Worker Thread") while not self.stop_event.is_set(): + assert self._detector is not None connection_id, detections = self._detector.async_receive_output() # Handle timeout case (queue.Empty) - just continue @@ -256,6 +259,7 @@ class AsyncDetectorRunner(FrigateProcess): duration = time.perf_counter() - ts # release input buffer + assert self._frame_manager is not None self._frame_manager.close(connection_id) if connection_id 
not in self.outputs: @@ -264,6 +268,7 @@ class AsyncDetectorRunner(FrigateProcess): # write results and publish if detections is not None: self.outputs[connection_id]["np"][:] = detections[:] + assert self._publisher is not None self._publisher.publish(connection_id) # update timers @@ -330,11 +335,14 @@ class ObjectDetectProcess: self.stop_event = stop_event self.start_or_restart() - def stop(self): + def stop(self) -> None: # if the process has already exited on its own, just return if self.detect_process and self.detect_process.exitcode: return + if self.detect_process is None: + return + logging.info("Waiting for detection process to exit gracefully...") self.detect_process.join(timeout=30) if self.detect_process.exitcode is None: @@ -343,8 +351,8 @@ class ObjectDetectProcess: self.detect_process.join() logging.info("Detection process has exited...") - def start_or_restart(self): - self.detection_start.value = 0.0 + def start_or_restart(self) -> None: + self.detection_start.value = 0.0 # type: ignore[attr-defined] if (self.detect_process is not None) and self.detect_process.is_alive(): self.stop() @@ -389,17 +397,19 @@ class RemoteObjectDetector: self.detection_queue = detection_queue self.stop_event = stop_event self.shm = UntrackedSharedMemory(name=self.name, create=False) - self.np_shm = np.ndarray( + self.np_shm: np.ndarray = np.ndarray( (1, model_config.height, model_config.width, 3), dtype=np.uint8, buffer=self.shm.buf, ) self.out_shm = UntrackedSharedMemory(name=f"out-{self.name}", create=False) - self.out_np_shm = np.ndarray((20, 6), dtype=np.float32, buffer=self.out_shm.buf) + self.out_np_shm: np.ndarray = np.ndarray( + (20, 6), dtype=np.float32, buffer=self.out_shm.buf + ) self.detector_subscriber = ObjectDetectorSubscriber(name) - def detect(self, tensor_input, threshold=0.4): - detections = [] + def detect(self, tensor_input: np.ndarray, threshold: float = 0.4) -> list: + detections: list = [] if self.stop_event.is_set(): return detections @@ -431,7 
+441,7 @@ class RemoteObjectDetector: self.fps.update() return detections - def cleanup(self): + def cleanup(self) -> None: self.detector_subscriber.stop() self.shm.unlink() self.out_shm.unlink() diff --git a/frigate/object_detection/util.py b/frigate/object_detection/util.py index ea8bd4226..4e351d66a 100644 --- a/frigate/object_detection/util.py +++ b/frigate/object_detection/util.py @@ -13,10 +13,10 @@ class RequestStore: A thread-safe hash-based response store that handles creating requests. """ - def __init__(self): + def __init__(self) -> None: self.request_counter = 0 self.request_counter_lock = threading.Lock() - self.input_queue = queue.Queue() + self.input_queue: queue.Queue[tuple[int, ndarray]] = queue.Queue() def __get_request_id(self) -> int: with self.request_counter_lock: @@ -45,17 +45,19 @@ class ResponseStore: their request's result appears. """ - def __init__(self): - self.responses = {} # Maps request_id -> (original_input, infer_results) + def __init__(self) -> None: + self.responses: dict[ + int, ndarray + ] = {} # Maps request_id -> (original_input, infer_results) self.lock = threading.Lock() self.cond = threading.Condition(self.lock) - def put(self, request_id: int, response: ndarray): + def put(self, request_id: int, response: ndarray) -> None: with self.cond: self.responses[request_id] = response self.cond.notify_all() - def get(self, request_id: int, timeout=None) -> ndarray: + def get(self, request_id: int, timeout: float | None = None) -> ndarray: with self.cond: if not self.cond.wait_for( lambda: request_id in self.responses, timeout=timeout @@ -65,7 +67,9 @@ class ResponseStore: return self.responses.pop(request_id) -def tensor_transform(desired_shape: InputTensorEnum): +def tensor_transform( + desired_shape: InputTensorEnum, +) -> tuple[int, int, int, int] | None: # Currently this function only supports BHWC permutations if desired_shape == InputTensorEnum.nhwc: return None diff --git a/frigate/output/birdseye.py 
b/frigate/output/birdseye.py index 5d80de33c..8b0fea6d7 100644 --- a/frigate/output/birdseye.py +++ b/frigate/output/birdseye.py @@ -4,13 +4,13 @@ import datetime import glob import logging import math -import multiprocessing as mp import os import queue import subprocess as sp import threading import time import traceback +from multiprocessing.synchronize import Event as MpEvent from typing import Any, Optional import cv2 @@ -74,25 +74,25 @@ class Canvas: self, canvas_width: int, canvas_height: int, - scaling_factor: int, + scaling_factor: float, ) -> None: self.scaling_factor = scaling_factor gcd = math.gcd(canvas_width, canvas_height) self.aspect = get_standard_aspect_ratio( - (canvas_width / gcd), (canvas_height / gcd) + int(canvas_width / gcd), int(canvas_height / gcd) ) self.width = canvas_width - self.height = (self.width * self.aspect[1]) / self.aspect[0] - self.coefficient_cache: dict[int, int] = {} + self.height: float = (self.width * self.aspect[1]) / self.aspect[0] + self.coefficient_cache: dict[int, float] = {} self.aspect_cache: dict[str, tuple[int, int]] = {} - def get_aspect(self, coefficient: int) -> tuple[int, int]: + def get_aspect(self, coefficient: float) -> tuple[float, float]: return (self.aspect[0] * coefficient, self.aspect[1] * coefficient) - def get_coefficient(self, camera_count: int) -> int: + def get_coefficient(self, camera_count: int) -> float: return self.coefficient_cache.get(camera_count, self.scaling_factor) - def set_coefficient(self, camera_count: int, coefficient: int) -> None: + def set_coefficient(self, camera_count: int, coefficient: float) -> None: self.coefficient_cache[camera_count] = coefficient def get_camera_aspect( @@ -105,7 +105,7 @@ class Canvas: gcd = math.gcd(camera_width, camera_height) camera_aspect = get_standard_aspect_ratio( - camera_width / gcd, camera_height / gcd + int(camera_width / gcd), int(camera_height / gcd) ) self.aspect_cache[cam_name] = camera_aspect return camera_aspect @@ -116,7 +116,7 @@ class 
FFMpegConverter(threading.Thread): self, ffmpeg: FfmpegConfig, input_queue: queue.Queue, - stop_event: mp.Event, + stop_event: MpEvent, in_width: int, in_height: int, out_width: int, @@ -128,7 +128,7 @@ class FFMpegConverter(threading.Thread): self.camera = "birdseye" self.input_queue = input_queue self.stop_event = stop_event - self.bd_pipe = None + self.bd_pipe: int | None = None if birdseye_rtsp: self.recreate_birdseye_pipe() @@ -181,7 +181,8 @@ class FFMpegConverter(threading.Thread): os.close(stdin) self.reading_birdseye = False - def __write(self, b) -> None: + def __write(self, b: bytes) -> None: + assert self.process.stdin is not None self.process.stdin.write(b) if self.bd_pipe: @@ -200,13 +201,13 @@ class FFMpegConverter(threading.Thread): return - def read(self, length): + def read(self, length: int) -> Any: try: - return self.process.stdout.read1(length) + return self.process.stdout.read1(length) # type: ignore[union-attr] except ValueError: return False - def exit(self): + def exit(self) -> None: if self.bd_pipe: os.close(self.bd_pipe) @@ -233,8 +234,8 @@ class BroadcastThread(threading.Thread): self, camera: str, converter: FFMpegConverter, - websocket_server, - stop_event: mp.Event, + websocket_server: Any, + stop_event: MpEvent, ): super().__init__() self.camera = camera @@ -242,7 +243,7 @@ class BroadcastThread(threading.Thread): self.websocket_server = websocket_server self.stop_event = stop_event - def run(self): + def run(self) -> None: while not self.stop_event.is_set(): buf = self.converter.read(65536) if buf: @@ -270,16 +271,16 @@ class BirdsEyeFrameManager: def __init__( self, config: FrigateConfig, - stop_event: mp.Event, + stop_event: MpEvent, ): self.config = config width, height = get_canvas_shape(config.birdseye.width, config.birdseye.height) self.frame_shape = (height, width) self.yuv_shape = (height * 3 // 2, width) - self.frame = np.ndarray(self.yuv_shape, dtype=np.uint8) + self.frame: np.ndarray = np.ndarray(self.yuv_shape, 
dtype=np.uint8) self.canvas = Canvas(width, height, config.birdseye.layout.scaling_factor) self.stop_event = stop_event - self.last_refresh_time = 0 + self.last_refresh_time: float = 0 # initialize the frame as black and with the Frigate logo self.blank_frame = np.zeros(self.yuv_shape, np.uint8) @@ -323,15 +324,15 @@ class BirdsEyeFrameManager: self.frame[:] = self.blank_frame - self.cameras = {} + self.cameras: dict[str, Any] = {} for camera in self.config.cameras.keys(): self.add_camera(camera) - self.camera_layout = [] - self.active_cameras = set() + self.camera_layout: list[Any] = [] + self.active_cameras: set[str] = set() self.last_output_time = 0.0 - def add_camera(self, cam: str): + def add_camera(self, cam: str) -> None: """Add a camera to self.cameras with the correct structure.""" settings = self.config.cameras[cam] # precalculate the coordinates for all the channels @@ -361,16 +362,21 @@ class BirdsEyeFrameManager: }, } - def remove_camera(self, cam: str): + def remove_camera(self, cam: str) -> None: """Remove a camera from self.cameras.""" if cam in self.cameras: del self.cameras[cam] - def clear_frame(self): + def clear_frame(self) -> None: logger.debug("Clearing the birdseye frame") self.frame[:] = self.blank_frame - def copy_to_position(self, position, camera=None, frame: np.ndarray = None): + def copy_to_position( + self, + position: Any, + camera: Optional[str] = None, + frame: Optional[np.ndarray] = None, + ) -> None: if camera is None: frame = None channel_dims = None @@ -389,7 +395,9 @@ class BirdsEyeFrameManager: channel_dims, ) - def camera_active(self, mode, object_box_count, motion_box_count): + def camera_active( + self, mode: Any, object_box_count: int, motion_box_count: int + ) -> bool: if mode == BirdseyeModeEnum.continuous: return True @@ -399,6 +407,8 @@ class BirdsEyeFrameManager: if mode == BirdseyeModeEnum.objects and object_box_count > 0: return True + return False + def get_camera_coordinates(self) -> dict[str, dict[str, int]]: 
"""Return the coordinates of each camera in the current layout.""" coordinates = {} @@ -451,7 +461,7 @@ class BirdsEyeFrameManager: - self.cameras[active_camera]["last_active_frame"] ), ) - active_cameras = limited_active_cameras[:max_cameras] + active_cameras = set(limited_active_cameras[:max_cameras]) max_camera_refresh = True self.last_refresh_time = now @@ -510,7 +520,7 @@ class BirdsEyeFrameManager: # center camera view in canvas and ensure that it fits if scaled_width < self.canvas.width: - coefficient = 1 + coefficient: float = 1 x_offset = int((self.canvas.width - scaled_width) / 2) else: coefficient = self.canvas.width / scaled_width @@ -557,7 +567,7 @@ class BirdsEyeFrameManager: calculating = False self.canvas.set_coefficient(len(active_cameras), coefficient) - self.camera_layout = layout_candidate + self.camera_layout = layout_candidate or [] frame_changed = True # Draw the layout @@ -577,10 +587,12 @@ class BirdsEyeFrameManager: self, cameras_to_add: list[str], coefficient: float, - ) -> tuple[Any]: + ) -> Optional[list[list[Any]]]: """Calculate the optimal layout for 2+ cameras.""" - def map_layout(camera_layout: list[list[Any]], row_height: int): + def map_layout( + camera_layout: list[list[Any]], row_height: int + ) -> tuple[int, int, Optional[list[list[Any]]]]: """Map the calculated layout.""" candidate_layout = [] starting_x = 0 @@ -777,11 +789,11 @@ class Birdseye: def __init__( self, config: FrigateConfig, - stop_event: mp.Event, - websocket_server, + stop_event: MpEvent, + websocket_server: Any, ) -> None: self.config = config - self.input = queue.Queue(maxsize=10) + self.input: queue.Queue[bytes] = queue.Queue(maxsize=10) self.converter = FFMpegConverter( config.ffmpeg, self.input, @@ -806,7 +818,7 @@ class Birdseye: ) if config.birdseye.restream: - self.birdseye_buffer = self.frame_manager.create( + self.birdseye_buffer: Any = self.frame_manager.create( "birdseye", self.birdseye_manager.yuv_shape[0] * self.birdseye_manager.yuv_shape[1], ) 
diff --git a/frigate/output/camera.py b/frigate/output/camera.py index 2311ec659..917e38dd1 100644 --- a/frigate/output/camera.py +++ b/frigate/output/camera.py @@ -1,10 +1,11 @@ """Handle outputting individual cameras via jsmpeg.""" import logging -import multiprocessing as mp import queue import subprocess as sp import threading +from multiprocessing.synchronize import Event as MpEvent +from typing import Any from frigate.config import CameraConfig, FfmpegConfig @@ -17,7 +18,7 @@ class FFMpegConverter(threading.Thread): camera: str, ffmpeg: FfmpegConfig, input_queue: queue.Queue, - stop_event: mp.Event, + stop_event: MpEvent, in_width: int, in_height: int, out_width: int, @@ -64,16 +65,17 @@ class FFMpegConverter(threading.Thread): start_new_session=True, ) - def __write(self, b) -> None: + def __write(self, b: bytes) -> None: + assert self.process.stdin is not None self.process.stdin.write(b) - def read(self, length): + def read(self, length: int) -> Any: try: - return self.process.stdout.read1(length) + return self.process.stdout.read1(length) # type: ignore[union-attr] except ValueError: return False - def exit(self): + def exit(self) -> None: self.process.terminate() try: @@ -98,8 +100,8 @@ class BroadcastThread(threading.Thread): self, camera: str, converter: FFMpegConverter, - websocket_server, - stop_event: mp.Event, + websocket_server: Any, + stop_event: MpEvent, ): super().__init__() self.camera = camera @@ -107,7 +109,7 @@ class BroadcastThread(threading.Thread): self.websocket_server = websocket_server self.stop_event = stop_event - def run(self): + def run(self) -> None: while not self.stop_event.is_set(): buf = self.converter.read(65536) if buf: @@ -133,15 +135,15 @@ class BroadcastThread(threading.Thread): class JsmpegCamera: def __init__( - self, config: CameraConfig, stop_event: mp.Event, websocket_server + self, config: CameraConfig, stop_event: MpEvent, websocket_server: Any ) -> None: self.config = config - self.input = 
queue.Queue(maxsize=config.detect.fps) + self.input: queue.Queue[bytes] = queue.Queue(maxsize=config.detect.fps) width = int( config.live.height * (config.frame_shape[1] / config.frame_shape[0]) ) self.converter = FFMpegConverter( - config.name, + config.name or "", config.ffmpeg, self.input, stop_event, @@ -152,13 +154,13 @@ class JsmpegCamera: config.live.quality, ) self.broadcaster = BroadcastThread( - config.name, self.converter, websocket_server, stop_event + config.name or "", self.converter, websocket_server, stop_event ) self.converter.start() self.broadcaster.start() - def write_frame(self, frame_bytes) -> None: + def write_frame(self, frame_bytes: bytes) -> None: try: self.input.put_nowait(frame_bytes) except queue.Full: diff --git a/frigate/output/output.py b/frigate/output/output.py index 83962e1c9..22bcbb31f 100644 --- a/frigate/output/output.py +++ b/frigate/output/output.py @@ -61,6 +61,12 @@ def check_disabled_camera_update( # last camera update was more than 1 second ago # need to send empty data to birdseye because current # frame is now out of date + cam_width = config.cameras[camera].detect.width + cam_height = config.cameras[camera].detect.height + + if cam_width is None or cam_height is None: + raise ValueError(f"Camera {camera} detect dimensions not configured") + if birdseye and offline_time < 10: # we only need to send blank frames to birdseye at the beginning of a camera being offline birdseye.write_data( @@ -68,10 +74,7 @@ def check_disabled_camera_update( [], [], now, - get_blank_yuv_frame( - config.cameras[camera].detect.width, - config.cameras[camera].detect.height, - ), + get_blank_yuv_frame(cam_width, cam_height), ) if not has_enabled_camera and birdseye: @@ -173,7 +176,7 @@ class OutputProcess(FrigateProcess): birdseye_config_subscriber.check_for_update() ) - if update_topic is not None: + if update_topic is not None and birdseye_config is not None: previous_global_mode = self.config.birdseye.mode self.config.birdseye = 
birdseye_config @@ -198,7 +201,10 @@ class OutputProcess(FrigateProcess): birdseye, ) - (topic, data) = detection_subscriber.check_for_update(timeout=1) + _result = detection_subscriber.check_for_update(timeout=1) + if _result is None: + continue + (topic, data) = _result now = datetime.datetime.now().timestamp() if now - last_disabled_cam_check > 5: @@ -208,7 +214,7 @@ class OutputProcess(FrigateProcess): self.config, birdseye, preview_recorders, preview_write_times ) - if not topic: + if not topic or data is None: continue ( @@ -262,11 +268,15 @@ class OutputProcess(FrigateProcess): jsmpeg_cameras[camera].write_frame(frame.tobytes()) # send output data to birdseye if websocket is connected or restreaming - if self.config.birdseye.enabled and ( - self.config.birdseye.restream - or any( - ws.environ["PATH_INFO"].endswith("birdseye") - for ws in websocket_server.manager + if ( + self.config.birdseye.enabled + and birdseye is not None + and ( + self.config.birdseye.restream + or any( + ws.environ["PATH_INFO"].endswith("birdseye") + for ws in websocket_server.manager + ) ) ): birdseye.write_data( @@ -282,9 +292,12 @@ class OutputProcess(FrigateProcess): move_preview_frames("clips") while True: - (topic, data) = detection_subscriber.check_for_update(timeout=0) + _cleanup_result = detection_subscriber.check_for_update(timeout=0) + if _cleanup_result is None: + break + (topic, data) = _cleanup_result - if not topic: + if not topic or data is None: break ( @@ -322,7 +335,7 @@ class OutputProcess(FrigateProcess): logger.info("exiting output process...") -def move_preview_frames(loc: str): +def move_preview_frames(loc: str) -> None: preview_holdover = os.path.join(CLIPS_DIR, "preview_restart_cache") preview_cache = os.path.join(CACHE_DIR, "preview_frames") diff --git a/frigate/output/preview.py b/frigate/output/preview.py index 2c439038a..389a3c207 100644 --- a/frigate/output/preview.py +++ b/frigate/output/preview.py @@ -22,7 +22,6 @@ from frigate.ffmpeg_presets import ( 
parse_preset_hardware_acceleration_encode, ) from frigate.models import Previews -from frigate.track.object_processing import TrackedObject from frigate.util.image import copy_yuv_to_position, get_blank_yuv_frame, get_yuv_crop logger = logging.getLogger(__name__) @@ -66,7 +65,9 @@ def get_cache_image_name(camera: str, frame_time: float) -> str: ) -def get_most_recent_preview_frame(camera: str, before: float = None) -> str | None: +def get_most_recent_preview_frame( + camera: str, before: float | None = None +) -> str | None: """Get the most recent preview frame for a camera.""" if not os.path.exists(PREVIEW_CACHE_DIR): return None @@ -147,12 +148,12 @@ class FFMpegConverter(threading.Thread): if t_idx == item_count - 1: # last frame does not get a duration playlist.append( - f"file '{get_cache_image_name(self.config.name, self.frame_times[t_idx])}'" + f"file '{get_cache_image_name(self.config.name, self.frame_times[t_idx])}'" # type: ignore[arg-type] ) continue playlist.append( - f"file '{get_cache_image_name(self.config.name, self.frame_times[t_idx])}'" + f"file '{get_cache_image_name(self.config.name, self.frame_times[t_idx])}'" # type: ignore[arg-type] ) playlist.append( f"duration {self.frame_times[t_idx + 1] - self.frame_times[t_idx]}" @@ -199,30 +200,33 @@ class FFMpegConverter(threading.Thread): # unlink files from cache # don't delete last frame as it will be used as first frame in next segment for t in self.frame_times[0:-1]: - Path(get_cache_image_name(self.config.name, t)).unlink(missing_ok=True) + Path(get_cache_image_name(self.config.name, t)).unlink(missing_ok=True) # type: ignore[arg-type] class PreviewRecorder: def __init__(self, config: CameraConfig) -> None: self.config = config - self.start_time = 0 - self.last_output_time = 0 + self.camera_name: str = config.name or "" + self.start_time: float = 0 + self.last_output_time: float = 0 self.offline = False - self.output_frames = [] + self.output_frames: list[float] = [] - if config.detect.width > 
config.detect.height: + if config.detect.width is None or config.detect.height is None: + raise ValueError("Detect width and height must be set for previews.") + + self.detect_width: int = config.detect.width + self.detect_height: int = config.detect.height + + if self.detect_width > self.detect_height: self.out_height = PREVIEW_HEIGHT self.out_width = ( - int((config.detect.width / config.detect.height) * self.out_height) - // 4 - * 4 + int((self.detect_width / self.detect_height) * self.out_height) // 4 * 4 ) else: self.out_width = PREVIEW_HEIGHT self.out_height = ( - int((config.detect.height / config.detect.width) * self.out_width) - // 4 - * 4 + int((self.detect_height / self.detect_width) * self.out_width) // 4 * 4 ) # create communication for finished previews @@ -245,10 +249,9 @@ class PreviewRecorder: "v2": v2, } - # end segment at end of hour + # end segment at end of hour (use UTC to avoid DST issues) self.segment_end = ( - (datetime.datetime.now() + datetime.timedelta(hours=1)) - .astimezone(datetime.timezone.utc) + (datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(hours=1)) .replace(minute=0, second=0, microsecond=0) .timestamp() ) @@ -260,8 +263,7 @@ class PreviewRecorder: # check for existing items in cache start_ts = ( - datetime.datetime.now() - .astimezone(datetime.timezone.utc) + datetime.datetime.now(datetime.timezone.utc) .replace(minute=0, second=0, microsecond=0) .timestamp() ) @@ -295,14 +297,16 @@ class PreviewRecorder: def reset_frame_cache(self, frame_time: float) -> None: self.segment_end = ( - (datetime.datetime.now() + datetime.timedelta(hours=1)) - .astimezone(datetime.timezone.utc) + ( + datetime.datetime.fromtimestamp(frame_time, tz=datetime.timezone.utc) + + datetime.timedelta(hours=1) + ) .replace(minute=0, second=0, microsecond=0) .timestamp() ) self.start_time = frame_time self.last_output_time = frame_time - self.output_frames: list[float] = [] + self.output_frames = [] def should_write_frame( self, @@ -342,7 
+346,9 @@ class PreviewRecorder: def write_frame_to_cache(self, frame_time: float, frame: np.ndarray) -> None: # resize yuv frame - small_frame = np.zeros((self.out_height * 3 // 2, self.out_width), np.uint8) + small_frame: np.ndarray = np.zeros( + (self.out_height * 3 // 2, self.out_width), np.uint8 + ) copy_yuv_to_position( small_frame, (0, 0), @@ -356,7 +362,7 @@ class PreviewRecorder: cv2.COLOR_YUV2BGR_I420, ) cv2.imwrite( - get_cache_image_name(self.config.name, frame_time), + get_cache_image_name(self.camera_name, frame_time), small_frame, [ int(cv2.IMWRITE_WEBP_QUALITY), @@ -396,7 +402,7 @@ class PreviewRecorder: ).start() else: logger.debug( - f"Not saving preview for {self.config.name} because there are no saved frames." + f"Not saving preview for {self.camera_name} because there are no saved frames." ) self.reset_frame_cache(frame_time) @@ -416,9 +422,7 @@ class PreviewRecorder: if not self.offline: self.write_frame_to_cache( frame_time, - get_blank_yuv_frame( - self.config.detect.width, self.config.detect.height - ), + get_blank_yuv_frame(self.detect_width, self.detect_height), ) self.offline = True @@ -431,9 +435,9 @@ class PreviewRecorder: return old_frame_path = get_cache_image_name( - self.config.name, self.output_frames[-1] + self.camera_name, self.output_frames[-1] ) - new_frame_path = get_cache_image_name(self.config.name, frame_time) + new_frame_path = get_cache_image_name(self.camera_name, frame_time) shutil.copy(old_frame_path, new_frame_path) # save last frame to ensure consistent duration @@ -447,13 +451,12 @@ class PreviewRecorder: self.reset_frame_cache(frame_time) def stop(self) -> None: - self.config_subscriber.stop() self.requestor.stop() def get_active_objects( - frame_time: float, camera_config: CameraConfig, all_objects: list[TrackedObject] -) -> list[TrackedObject]: + frame_time: float, camera_config: CameraConfig, all_objects: list[dict[str, Any]] +) -> list[dict[str, Any]]: """get active objects for detection.""" return [ o diff 
--git a/frigate/ptz/onvif.py b/frigate/ptz/onvif.py index 488dbd278..79b771cb2 100644 --- a/frigate/ptz/onvif.py +++ b/frigate/ptz/onvif.py @@ -15,6 +15,10 @@ from zeep.exceptions import Fault, TransportError from frigate.camera import PTZMetrics from frigate.config import FrigateConfig, ZoomingModeEnum +from frigate.config.camera.updater import ( + CameraConfigUpdateEnum, + CameraConfigUpdateSubscriber, +) from frigate.util.builtin import find_by_key logger = logging.getLogger(__name__) @@ -65,7 +69,14 @@ class OnvifController: self.camera_configs[cam_name] = cam self.status_locks[cam_name] = asyncio.Lock() + self.config_subscriber = CameraConfigUpdateSubscriber( + self.config, + self.config.cameras, + [CameraConfigUpdateEnum.onvif], + ) + asyncio.run_coroutine_threadsafe(self._init_cameras(), self.loop) + asyncio.run_coroutine_threadsafe(self._poll_config_updates(), self.loop) def _run_event_loop(self) -> None: """Run the event loop in a separate thread.""" @@ -80,6 +91,52 @@ class OnvifController: for cam_name in self.camera_configs: await self._init_single_camera(cam_name) + async def _poll_config_updates(self) -> None: + """Poll for ONVIF config updates and re-initialize cameras as needed.""" + while True: + await asyncio.sleep(1) + try: + updates = self.config_subscriber.check_for_updates() + for update_type, cameras in updates.items(): + if update_type == CameraConfigUpdateEnum.onvif.name: + for cam_name in cameras: + await self._reinit_camera(cam_name) + except Exception: + logger.error("Error checking for ONVIF config updates") + + async def _close_camera(self, cam_name: str) -> None: + """Close the ONVIF client session for a camera.""" + cam_state = self.cams.get(cam_name) + if cam_state and "onvif" in cam_state: + try: + await cam_state["onvif"].close() + except Exception: + logger.debug(f"Error closing ONVIF session for {cam_name}") + + async def _reinit_camera(self, cam_name: str) -> None: + """Re-initialize a camera after config change.""" + 
logger.info(f"Re-initializing ONVIF for {cam_name} due to config change") + + # close existing session before re-init + await self._close_camera(cam_name) + + cam = self.config.cameras.get(cam_name) + if not cam or not cam.onvif.host: + # ONVIF removed from config, clean up + self.cams.pop(cam_name, None) + self.camera_configs.pop(cam_name, None) + self.failed_cams.pop(cam_name, None) + return + + # update stored config and reset state + self.camera_configs[cam_name] = cam + if cam_name not in self.status_locks: + self.status_locks[cam_name] = asyncio.Lock() + self.cams.pop(cam_name, None) + self.failed_cams.pop(cam_name, None) + + await self._init_single_camera(cam_name) + async def _init_single_camera(self, cam_name: str) -> bool: """Initialize a single camera by name. @@ -118,6 +175,7 @@ class OnvifController: "active": False, "features": [], "presets": {}, + "profiles": [], } return True except (Fault, ONVIFError, TransportError, Exception) as e: @@ -161,22 +219,60 @@ class OnvifController: ) return False + # build list of valid PTZ profiles + valid_profiles = [ + p + for p in profiles + if p.VideoEncoderConfiguration + and p.PTZConfiguration + and ( + p.PTZConfiguration.DefaultContinuousPanTiltVelocitySpace is not None + or p.PTZConfiguration.DefaultContinuousZoomVelocitySpace is not None + ) + ] + + # store available profiles for API response and log for debugging + self.cams[camera_name]["profiles"] = [ + {"name": getattr(p, "Name", None) or p.token, "token": p.token} + for p in valid_profiles + ] + for p in valid_profiles: + logger.debug( + "Onvif profile for %s: name='%s', token='%s'", + camera_name, + getattr(p, "Name", None), + p.token, + ) + + configured_profile = self.config.cameras[camera_name].onvif.profile profile = None - for _, onvif_profile in enumerate(profiles): - if ( - onvif_profile.VideoEncoderConfiguration - and onvif_profile.PTZConfiguration - and ( - onvif_profile.PTZConfiguration.DefaultContinuousPanTiltVelocitySpace - is not None - or 
onvif_profile.PTZConfiguration.DefaultContinuousZoomVelocitySpace - is not None + + if configured_profile is not None: + # match by exact token first, then by name + for p in valid_profiles: + if p.token == configured_profile: + profile = p + break + if profile is None: + for p in valid_profiles: + if getattr(p, "Name", None) == configured_profile: + profile = p + break + if profile is None: + available = [ + f"name='{getattr(p, 'Name', None)}', token='{p.token}'" + for p in valid_profiles + ] + logger.error( + "Onvif profile '%s' not found for camera %s. Available profiles: %s", + configured_profile, + camera_name, + available, ) - ): - # use the first profile that has a valid ptz configuration - profile = onvif_profile - logger.debug(f"Selected Onvif profile for {camera_name}: {profile}") - break + return False + else: + # use the first profile that has a valid ptz configuration + profile = valid_profiles[0] if valid_profiles else None if profile is None: logger.error( @@ -184,6 +280,8 @@ class OnvifController: ) return False + logger.debug(f"Selected Onvif profile for {camera_name}: {profile}") + # get the PTZ config for the profile try: configs = profile.PTZConfiguration @@ -218,48 +316,92 @@ class OnvifController: move_request.ProfileToken = profile.token self.cams[camera_name]["move_request"] = move_request - # extra setup for autotracking cameras - if ( - self.config.cameras[camera_name].onvif.autotracking.enabled_in_config - and self.config.cameras[camera_name].onvif.autotracking.enabled - ): + # get PTZ configuration options for feature detection and relative movement + ptz_config = None + fov_space_id = None + + try: request = ptz.create_type("GetConfigurationOptions") request.ConfigurationToken = profile.PTZConfiguration.token ptz_config = await ptz.GetConfigurationOptions(request) - logger.debug(f"Onvif config for {camera_name}: {ptz_config}") + logger.debug( + f"Onvif PTZ configuration options for {camera_name}: {ptz_config}" + ) + except (Fault, 
ONVIFError, TransportError, Exception) as e: + logger.debug( + f"Unable to get PTZ configuration options for {camera_name}: {e}" + ) + + # detect FOV translation space for relative movement + if ptz_config is not None: + try: + fov_space_id = next( + ( + i + for i, space in enumerate( + ptz_config.Spaces.RelativePanTiltTranslationSpace + ) + if "TranslationSpaceFov" in space["URI"] + ), + None, + ) + except (AttributeError, TypeError): + fov_space_id = None + + autotracking_config = self.config.cameras[camera_name].onvif.autotracking + autotracking_enabled = ( + autotracking_config.enabled_in_config and autotracking_config.enabled + ) + + # autotracking-only: status request and service capabilities + if autotracking_enabled: + status_request = ptz.create_type("GetStatus") + status_request.ProfileToken = profile.token + self.cams[camera_name]["status_request"] = status_request service_capabilities_request = ptz.create_type("GetServiceCapabilities") self.cams[camera_name]["service_capabilities_request"] = ( service_capabilities_request ) - fov_space_id = next( - ( - i - for i, space in enumerate( - ptz_config.Spaces.RelativePanTiltTranslationSpace - ) - if "TranslationSpaceFov" in space["URI"] - ), - None, - ) - - # status request for autotracking and filling ptz-parameters - status_request = ptz.create_type("GetStatus") - status_request.ProfileToken = profile.token - self.cams[camera_name]["status_request"] = status_request + # setup relative move request when FOV relative movement is supported + if ( + fov_space_id is not None + and configs.DefaultRelativePanTiltTranslationSpace is not None + ): + # one-off GetStatus to seed Translation field + status = None try: - status = await ptz.GetStatus(status_request) - logger.debug(f"Onvif status config for {camera_name}: {status}") + one_off_status_request = ptz.create_type("GetStatus") + one_off_status_request.ProfileToken = profile.token + status = await ptz.GetStatus(one_off_status_request) + logger.debug(f"Onvif 
status for {camera_name}: {status}") except Exception as e: - logger.warning(f"Unable to get status from camera: {camera_name}: {e}") - status = None + logger.warning(f"Unable to get status from camera {camera_name}: {e}") - # autotracking relative panning/tilting needs a relative zoom value set to 0 - # if camera supports relative movement + rel_move_request = ptz.create_type("RelativeMove") + rel_move_request.ProfileToken = profile.token + logger.debug(f"{camera_name}: Relative move request: {rel_move_request}") + + fov_uri = ptz_config["Spaces"]["RelativePanTiltTranslationSpace"][ + fov_space_id + ]["URI"] + + if rel_move_request.Translation is None: + if status is not None: + # seed from current position + rel_move_request.Translation = status.Position + rel_move_request.Translation.PanTilt.space = fov_uri + else: + # fallback: construct Translation explicitly + rel_move_request.Translation = { + "PanTilt": {"x": 0, "y": 0, "space": fov_uri} + } + + # configure zoom on relative move request if ( - self.config.cameras[camera_name].onvif.autotracking.zooming - != ZoomingModeEnum.disabled + autotracking_enabled + and autotracking_config.zooming != ZoomingModeEnum.disabled ): zoom_space_id = next( ( @@ -271,60 +413,43 @@ class OnvifController: ), None, ) - - # setup relative moving request for autotracking - move_request = ptz.create_type("RelativeMove") - move_request.ProfileToken = profile.token - logger.debug(f"{camera_name}: Relative move request: {move_request}") - if move_request.Translation is None and fov_space_id is not None: - move_request.Translation = status.Position - move_request.Translation.PanTilt.space = ptz_config["Spaces"][ - "RelativePanTiltTranslationSpace" - ][fov_space_id]["URI"] - - # try setting relative zoom translation space - try: - if ( - self.config.cameras[camera_name].onvif.autotracking.zooming - != ZoomingModeEnum.disabled - ): + try: if zoom_space_id is not None: - move_request.Translation.Zoom.space = ptz_config["Spaces"][ + 
rel_move_request.Translation.Zoom.space = ptz_config["Spaces"][ "RelativeZoomTranslationSpace" ][zoom_space_id]["URI"] - else: - if ( - move_request["Translation"] is not None - and "Zoom" in move_request["Translation"] - ): - del move_request["Translation"]["Zoom"] - if ( - move_request["Speed"] is not None - and "Zoom" in move_request["Speed"] - ): - del move_request["Speed"]["Zoom"] - logger.debug( - f"{camera_name}: Relative move request after deleting zoom: {move_request}" + except Exception as e: + autotracking_config.zooming = ZoomingModeEnum.disabled + logger.warning( + f"Disabling autotracking zooming for {camera_name}: Relative zoom not supported. Exception: {e}" ) - except Exception as e: - self.config.cameras[ - camera_name - ].onvif.autotracking.zooming = ZoomingModeEnum.disabled - logger.warning( - f"Disabling autotracking zooming for {camera_name}: Relative zoom not supported. Exception: {e}" + else: + # remove zoom fields from relative move request + if ( + rel_move_request["Translation"] is not None + and "Zoom" in rel_move_request["Translation"] + ): + del rel_move_request["Translation"]["Zoom"] + if ( + rel_move_request["Speed"] is not None + and "Zoom" in rel_move_request["Speed"] + ): + del rel_move_request["Speed"]["Zoom"] + logger.debug( + f"{camera_name}: Relative move request after deleting zoom: {rel_move_request}" ) - if move_request.Speed is None: - move_request.Speed = configs.DefaultPTZSpeed if configs else None + if rel_move_request.Speed is None: + rel_move_request.Speed = configs.DefaultPTZSpeed if configs else None logger.debug( - f"{camera_name}: Relative move request after setup: {move_request}" + f"{camera_name}: Relative move request after setup: {rel_move_request}" ) - self.cams[camera_name]["relative_move_request"] = move_request + self.cams[camera_name]["relative_move_request"] = rel_move_request - # setup absolute moving request for autotracking zooming - move_request = ptz.create_type("AbsoluteMove") - 
move_request.ProfileToken = profile.token - self.cams[camera_name]["absolute_move_request"] = move_request + # setup absolute move request + abs_move_request = ptz.create_type("AbsoluteMove") + abs_move_request.ProfileToken = profile.token + self.cams[camera_name]["absolute_move_request"] = abs_move_request # setup existing presets try: @@ -358,48 +483,48 @@ class OnvifController: if configs.DefaultRelativeZoomTranslationSpace: supported_features.append("zoom-r") - if ( - self.config.cameras[camera_name].onvif.autotracking.enabled_in_config - and self.config.cameras[camera_name].onvif.autotracking.enabled - ): + if ptz_config is not None: try: - # get camera's zoom limits from onvif config self.cams[camera_name]["relative_zoom_range"] = ( ptz_config.Spaces.RelativeZoomTranslationSpace[0] ) except Exception as e: - if ( - self.config.cameras[camera_name].onvif.autotracking.zooming - == ZoomingModeEnum.relative - ): - self.config.cameras[ - camera_name - ].onvif.autotracking.zooming = ZoomingModeEnum.disabled + if autotracking_config.zooming == ZoomingModeEnum.relative: + autotracking_config.zooming = ZoomingModeEnum.disabled logger.warning( f"Disabling autotracking zooming for {camera_name}: Relative zoom not supported. 
Exception: {e}" ) if configs.DefaultAbsoluteZoomPositionSpace: supported_features.append("zoom-a") - if ( - self.config.cameras[camera_name].onvif.autotracking.enabled_in_config - and self.config.cameras[camera_name].onvif.autotracking.enabled - ): + if ptz_config is not None: try: - # get camera's zoom limits from onvif config self.cams[camera_name]["absolute_zoom_range"] = ( ptz_config.Spaces.AbsoluteZoomPositionSpace[0] ) self.cams[camera_name]["zoom_limits"] = configs.ZoomLimits except Exception as e: - if self.config.cameras[camera_name].onvif.autotracking.zooming: - self.config.cameras[ - camera_name - ].onvif.autotracking.zooming = ZoomingModeEnum.disabled + if autotracking_config.zooming != ZoomingModeEnum.disabled: + autotracking_config.zooming = ZoomingModeEnum.disabled logger.warning( f"Disabling autotracking zooming for {camera_name}: Absolute zoom not supported. Exception: {e}" ) + # disable autotracking zoom if required ranges are unavailable + if autotracking_config.zooming != ZoomingModeEnum.disabled: + if autotracking_config.zooming == ZoomingModeEnum.relative: + if "relative_zoom_range" not in self.cams[camera_name]: + autotracking_config.zooming = ZoomingModeEnum.disabled + logger.warning( + f"Disabling autotracking zooming for {camera_name}: Relative zoom range unavailable" + ) + if autotracking_config.zooming == ZoomingModeEnum.absolute: + if "absolute_zoom_range" not in self.cams[camera_name]: + autotracking_config.zooming = ZoomingModeEnum.disabled + logger.warning( + f"Disabling autotracking zooming for {camera_name}: Absolute zoom range unavailable" + ) + if ( self.cams[camera_name]["video_source_token"] is not None and imaging is not None @@ -416,10 +541,9 @@ class OnvifController: except (Fault, ONVIFError, TransportError, Exception) as e: logger.debug(f"Focus not supported for {camera_name}: {e}") + # detect FOV relative movement support if ( - self.config.cameras[camera_name].onvif.autotracking.enabled_in_config - and 
self.config.cameras[camera_name].onvif.autotracking.enabled - and fov_space_id is not None + fov_space_id is not None and configs.DefaultRelativePanTiltTranslationSpace is not None ): supported_features.append("pt-r-fov") @@ -548,11 +672,8 @@ class OnvifController: move_request.Translation.PanTilt.x = pan move_request.Translation.PanTilt.y = tilt - if ( - "zoom-r" in self.cams[camera_name]["features"] - and self.config.cameras[camera_name].onvif.autotracking.zooming - == ZoomingModeEnum.relative - ): + # include zoom if requested and camera supports relative zoom + if zoom != 0 and "zoom-r" in self.cams[camera_name]["features"]: move_request.Speed = { "PanTilt": { "x": speed, @@ -560,7 +681,7 @@ class OnvifController: }, "Zoom": {"x": speed}, } - move_request.Translation.Zoom.x = zoom + move_request["Translation"]["Zoom"] = {"x": zoom} await self.cams[camera_name]["ptz"].RelativeMove(move_request) @@ -568,12 +689,8 @@ class OnvifController: move_request.Translation.PanTilt.x = 0 move_request.Translation.PanTilt.y = 0 - if ( - "zoom-r" in self.cams[camera_name]["features"] - and self.config.cameras[camera_name].onvif.autotracking.zooming - == ZoomingModeEnum.relative - ): - move_request.Translation.Zoom.x = 0 + if zoom != 0 and "zoom-r" in self.cams[camera_name]["features"]: + del move_request["Translation"]["Zoom"] self.cams[camera_name]["active"] = False @@ -717,8 +834,18 @@ class OnvifController: elif command == OnvifCommandEnum.preset: await self._move_to_preset(camera_name, param) elif command == OnvifCommandEnum.move_relative: - _, pan, tilt = param.split("_") - await self._move_relative(camera_name, float(pan), float(tilt), 0, 1) + parts = param.split("_") + if len(parts) == 3: + _, pan, tilt = parts + zoom = 0.0 + elif len(parts) == 4: + _, pan, tilt, zoom = parts + else: + logger.error(f"Invalid move_relative params: {param}") + return + await self._move_relative( + camera_name, float(pan), float(tilt), float(zoom), 1 + ) elif command in 
(OnvifCommandEnum.zoom_in, OnvifCommandEnum.zoom_out): await self._zoom(camera_name, command) elif command in (OnvifCommandEnum.focus_in, OnvifCommandEnum.focus_out): @@ -773,6 +900,7 @@ class OnvifController: "name": camera_name, "features": self.cams[camera_name]["features"], "presets": list(self.cams[camera_name]["presets"].keys()), + "profiles": self.cams[camera_name].get("profiles", []), } if camera_name not in self.cams.keys() and camera_name in self.config.cameras: @@ -970,6 +1098,7 @@ class OnvifController: return logger.info("Exiting ONVIF controller...") + self.config_subscriber.stop() def stop_and_cleanup(): try: diff --git a/frigate/record/cleanup.py b/frigate/record/cleanup.py index 9122934a1..e41a5bf39 100644 --- a/frigate/record/cleanup.py +++ b/frigate/record/cleanup.py @@ -7,6 +7,7 @@ import os import threading from multiprocessing.synchronize import Event as MpEvent from pathlib import Path +from typing import Any from playhouse.sqlite_ext import SqliteExtDatabase @@ -60,7 +61,9 @@ class RecordingCleanup(threading.Thread): db.execute_sql("PRAGMA wal_checkpoint(TRUNCATE);") db.close() - def expire_review_segments(self, config: CameraConfig, now: datetime) -> set[Path]: + def expire_review_segments( + self, config: CameraConfig, now: datetime.datetime + ) -> set[Path]: """Delete review segments that are expired""" alert_expire_date = ( now - datetime.timedelta(days=config.record.alerts.retain.days) @@ -68,7 +71,7 @@ class RecordingCleanup(threading.Thread): detection_expire_date = ( now - datetime.timedelta(days=config.record.detections.retain.days) ).timestamp() - expired_reviews: ReviewSegment = ( + expired_reviews = ( ReviewSegment.select(ReviewSegment.id, ReviewSegment.thumb_path) .where(ReviewSegment.camera == config.name) .where( @@ -109,13 +112,13 @@ class RecordingCleanup(threading.Thread): continuous_expire_date: float, motion_expire_date: float, config: CameraConfig, - reviews: ReviewSegment, + reviews: list[Any], ) -> set[Path]: """Delete 
recordings for existing camera based on retention config.""" # Get the timestamp for cutoff of retained days # Get recordings to check for expiration - recordings: Recordings = ( + recordings = ( Recordings.select( Recordings.id, Recordings.start_time, @@ -148,13 +151,12 @@ class RecordingCleanup(threading.Thread): review_start = 0 deleted_recordings = set() kept_recordings: list[tuple[float, float]] = [] - recording: Recordings for recording in recordings: keep = False mode = None # Now look for a reason to keep this recording segment for idx in range(review_start, len(reviews)): - review: ReviewSegment = reviews[idx] + review = reviews[idx] severity = review.severity pre_capture = config.record.get_review_pre_capture(severity) post_capture = config.record.get_review_post_capture(severity) @@ -214,7 +216,7 @@ class RecordingCleanup(threading.Thread): Recordings.id << deleted_recordings_list[i : i + max_deletes] ).execute() - previews: list[Previews] = ( + previews = ( Previews.select( Previews.id, Previews.start_time, @@ -290,13 +292,13 @@ class RecordingCleanup(threading.Thread): expire_before = ( datetime.datetime.now() - datetime.timedelta(days=expire_days) ).timestamp() - no_camera_recordings: Recordings = ( + no_camera_recordings = ( Recordings.select( Recordings.id, Recordings.path, ) .where( - Recordings.camera.not_in(list(self.config.cameras.keys())), + Recordings.camera.not_in(list(self.config.cameras.keys())), # type: ignore[call-arg, arg-type, misc] Recordings.end_time < expire_before, ) .namedtuples() @@ -341,7 +343,7 @@ class RecordingCleanup(threading.Thread): ).timestamp() # Get all the reviews to check against - reviews: ReviewSegment = ( + reviews = ( ReviewSegment.select( ReviewSegment.start_time, ReviewSegment.end_time, diff --git a/frigate/record/export.py b/frigate/record/export.py index f8a72a79a..ddd14dfd6 100644 --- a/frigate/record/export.py +++ b/frigate/record/export.py @@ -36,7 +36,9 @@ logger = logging.getLogger(__name__) 
DEFAULT_TIME_LAPSE_FFMPEG_ARGS = "-vf setpts=0.04*PTS -r 30" TIMELAPSE_DATA_INPUT_ARGS = "-an -skip_frame nokey" -# ffmpeg flags that can read from or write to arbitrary files +# ffmpeg flags that can read from or write to arbitrary files. +# filter flags are blocked because source filters like movie= and +# amovie= can read arbitrary files from the filesystem. BLOCKED_FFMPEG_ARGS = frozenset( { "-i", @@ -45,6 +47,12 @@ BLOCKED_FFMPEG_ARGS = frozenset( "-passlogfile", "-sdp_file", "-dump_attachment", + "-filter_complex", + "-lavfi", + "-vf", + "-af", + "-filter", + "-attach", } ) @@ -85,10 +93,6 @@ def validate_ffmpeg_args(args: str) -> tuple[bool, str]: return True, "" -def lower_priority(): - os.nice(PROCESS_PRIORITY_LOW) - - class PlaybackSourceEnum(str, Enum): recordings = "recordings" preview = "preview" @@ -150,7 +154,7 @@ class RecordingExporter(threading.Thread): ): # has preview mp4 try: - preview: Previews = ( + preview = ( Previews.select( Previews.camera, Previews.path, @@ -231,20 +235,19 @@ class RecordingExporter(threading.Thread): def get_record_export_command( self, video_path: str, use_hwaccel: bool = True - ) -> list[str]: + ) -> tuple[list[str], str | list[str]]: # handle case where internal port is a string with ip:port internal_port = self.config.networking.listen.internal if type(internal_port) is str: internal_port = int(internal_port.split(":")[-1]) + playlist_lines: list[str] = [] if (self.end_time - self.start_time) <= MAX_PLAYLIST_SECONDS: - playlist_lines = f"http://127.0.0.1:{internal_port}/vod/{self.camera}/start/{self.start_time}/end/{self.end_time}/index.m3u8" + playlist_url = f"http://127.0.0.1:{internal_port}/vod/{self.camera}/start/{self.start_time}/end/{self.end_time}/index.m3u8" ffmpeg_input = ( - f"-y -protocol_whitelist pipe,file,http,tcp -i {playlist_lines}" + f"-y -protocol_whitelist pipe,file,http,tcp -i {playlist_url}" ) else: - playlist_lines = [] - # get full set of recordings export_recordings = ( Recordings.select( @@ 
-305,7 +308,7 @@ class RecordingExporter(threading.Thread): def get_preview_export_command( self, video_path: str, use_hwaccel: bool = True - ) -> list[str]: + ) -> tuple[list[str], list[str]]: playlist_lines = [] codec = "-c copy" @@ -355,7 +358,6 @@ class RecordingExporter(threading.Thread): .iterator() ) - preview: Previews for preview in export_previews: playlist_lines.append(f"file '{preview.path}'") @@ -441,10 +443,9 @@ class RecordingExporter(threading.Thread): return p = sp.run( - ffmpeg_cmd, + ["nice", "-n", str(PROCESS_PRIORITY_LOW)] + ffmpeg_cmd, input="\n".join(playlist_lines), encoding="ascii", - preexec_fn=lower_priority, capture_output=True, ) @@ -469,10 +470,9 @@ class RecordingExporter(threading.Thread): ) p = sp.run( - ffmpeg_cmd, + ["nice", "-n", str(PROCESS_PRIORITY_LOW)] + ffmpeg_cmd, input="\n".join(playlist_lines), encoding="ascii", - preexec_fn=lower_priority, capture_output=True, ) @@ -493,7 +493,7 @@ class RecordingExporter(threading.Thread): logger.debug(f"Finished exporting {video_path}") -def migrate_exports(ffmpeg: FfmpegConfig, camera_names: list[str]): +def migrate_exports(ffmpeg: FfmpegConfig, camera_names: list[str]) -> None: Path(os.path.join(CLIPS_DIR, "export")).mkdir(exist_ok=True) exports = [] diff --git a/frigate/record/maintainer.py b/frigate/record/maintainer.py index 6290a2405..e3409652e 100644 --- a/frigate/record/maintainer.py +++ b/frigate/record/maintainer.py @@ -266,7 +266,7 @@ class RecordingMaintainer(threading.Thread): # get all reviews with the end time after the start of the oldest cache file # or with end_time None - reviews: ReviewSegment = ( + reviews = ( ReviewSegment.select( ReviewSegment.start_time, ReviewSegment.end_time, @@ -301,7 +301,9 @@ class RecordingMaintainer(threading.Thread): RecordingsDataTypeEnum.saved.value, ) - recordings_to_insert: list[Optional[Recordings]] = await asyncio.gather(*tasks) + recordings_to_insert: list[Optional[dict[str, Any]]] = await asyncio.gather( + *tasks + ) # fire and 
forget recordings entries self.requestor.send_data( @@ -314,8 +316,8 @@ class RecordingMaintainer(threading.Thread): self.end_time_cache.pop(cache_path, None) async def validate_and_move_segment( - self, camera: str, reviews: list[ReviewSegment], recording: dict[str, Any] - ) -> Optional[Recordings]: + self, camera: str, reviews: Any, recording: dict[str, Any] + ) -> Optional[dict[str, Any]]: cache_path: str = recording["cache_path"] start_time: datetime.datetime = recording["start_time"] @@ -456,6 +458,8 @@ class RecordingMaintainer(threading.Thread): if end_time < retain_cutoff: self.drop_segment(cache_path) + return None + def _compute_motion_heatmap( self, camera: str, motion_boxes: list[tuple[int, int, int, int]] ) -> dict[str, int] | None: @@ -481,7 +485,7 @@ class RecordingMaintainer(threading.Thread): frame_width = camera_config.detect.width frame_height = camera_config.detect.height - if frame_width <= 0 or frame_height <= 0: + if not frame_width or frame_width <= 0 or not frame_height or frame_height <= 0: return None GRID_SIZE = 16 @@ -575,13 +579,13 @@ class RecordingMaintainer(threading.Thread): duration: float, cache_path: str, store_mode: RetainModeEnum, - ) -> Optional[Recordings]: + ) -> Optional[dict[str, Any]]: segment_info = self.segment_stats(camera, start_time, end_time) # check if the segment shouldn't be stored if segment_info.should_discard_segment(store_mode): self.drop_segment(cache_path) - return + return None # directory will be in utc due to start_time being in utc directory = os.path.join( @@ -620,7 +624,8 @@ class RecordingMaintainer(threading.Thread): if p.returncode != 0: logger.error(f"Unable to convert {cache_path} to {file_path}") - logger.error((await p.stderr.read()).decode("ascii")) + if p.stderr: + logger.error((await p.stderr.read()).decode("ascii")) return None else: logger.debug( @@ -684,11 +689,16 @@ class RecordingMaintainer(threading.Thread): stale_frame_count_threshold = 10 # empty the object recordings info queue 
while True: - (topic, data) = self.detection_subscriber.check_for_update( + result = self.detection_subscriber.check_for_update( timeout=FAST_QUEUE_TIMEOUT ) - if not topic: + if not result: + break + + topic, data = result + + if not topic or not data: break if topic == DetectionTypeEnum.video.value: diff --git a/frigate/review/maintainer.py b/frigate/review/maintainer.py index 4dc1d8e6a..cfc59744c 100644 --- a/frigate/review/maintainer.py +++ b/frigate/review/maintainer.py @@ -31,7 +31,7 @@ from frigate.const import ( ) from frigate.models import ReviewSegment from frigate.review.types import SeverityEnum -from frigate.track.object_processing import ManualEventState, TrackedObject +from frigate.track.object_processing import ManualEventState from frigate.util.image import SharedMemoryFrameManager, calculate_16_9_crop logger = logging.getLogger(__name__) @@ -69,7 +69,9 @@ class PendingReviewSegment: self.last_alert_time = frame_time # thumbnail - self._frame = np.zeros((THUMB_HEIGHT * 3 // 2, THUMB_WIDTH), np.uint8) + self._frame: np.ndarray[Any, Any] = np.zeros( + (THUMB_HEIGHT * 3 // 2, THUMB_WIDTH), np.uint8 + ) self.has_frame = False self.frame_active_count = 0 self.frame_path = os.path.join( @@ -77,8 +79,11 @@ class PendingReviewSegment: ) def update_frame( - self, camera_config: CameraConfig, frame, objects: list[TrackedObject] - ): + self, + camera_config: CameraConfig, + frame: np.ndarray, + objects: list[dict[str, Any]], + ) -> None: min_x = camera_config.frame_shape[1] min_y = camera_config.frame_shape[0] max_x = 0 @@ -114,7 +119,7 @@ class PendingReviewSegment: self.frame_path, self._frame, [int(cv2.IMWRITE_WEBP_QUALITY), 60] ) - def save_full_frame(self, camera_config: CameraConfig, frame): + def save_full_frame(self, camera_config: CameraConfig, frame: np.ndarray) -> None: color_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) width = int(THUMB_HEIGHT * color_frame.shape[1] / color_frame.shape[0]) self._frame = cv2.resize( @@ -165,13 +170,13 @@ 
class ActiveObjects: self, frame_time: float, camera_config: CameraConfig, - all_objects: list[TrackedObject], + all_objects: list[dict[str, Any]], ): self.camera_config = camera_config # get current categorization of objects to know if # these objects are currently being categorized - self.categorized_objects = { + self.categorized_objects: dict[str, list[dict[str, Any]]] = { "alerts": [], "detections": [], } @@ -250,7 +255,7 @@ class ActiveObjects: return False - def get_all_objects(self) -> list[TrackedObject]: + def get_all_objects(self) -> list[dict[str, Any]]: return ( self.categorized_objects["alerts"] + self.categorized_objects["detections"] ) @@ -309,7 +314,7 @@ class ReviewSegmentMaintainer(threading.Thread): "reviews", json.dumps(review_update), ) - self.review_publisher.publish(review_update, segment.camera) + self.review_publisher.publish(review_update, segment.camera) # type: ignore[arg-type] self.requestor.send_data( f"{segment.camera}/review_status", segment.severity.value.upper() ) @@ -318,8 +323,8 @@ class ReviewSegmentMaintainer(threading.Thread): self, segment: PendingReviewSegment, camera_config: CameraConfig, - frame, - objects: list[TrackedObject], + frame: Optional[np.ndarray], + objects: list[dict[str, Any]], prev_data: dict[str, Any], ) -> None: """Update segment.""" @@ -337,7 +342,7 @@ class ReviewSegmentMaintainer(threading.Thread): "reviews", json.dumps(review_update), ) - self.review_publisher.publish(review_update, segment.camera) + self.review_publisher.publish(review_update, segment.camera) # type: ignore[arg-type] self.requestor.send_data( f"{segment.camera}/review_status", segment.severity.value.upper() ) @@ -346,7 +351,7 @@ class ReviewSegmentMaintainer(threading.Thread): self, segment: PendingReviewSegment, prev_data: dict[str, Any], - ) -> float: + ) -> Any: """End segment.""" final_data = segment.get_data(ended=True) end_time = final_data[ReviewSegment.end_time.name] @@ -360,24 +365,25 @@ class 
ReviewSegmentMaintainer(threading.Thread): "reviews", json.dumps(review_update), ) - self.review_publisher.publish(review_update, segment.camera) + self.review_publisher.publish(review_update, segment.camera) # type: ignore[arg-type] self.requestor.send_data(f"{segment.camera}/review_status", "NONE") self.active_review_segments[segment.camera] = None return end_time - def forcibly_end_segment(self, camera: str) -> float: + def forcibly_end_segment(self, camera: str) -> Any: """Forcibly end the pending segment for a camera.""" segment = self.active_review_segments.get(camera) if segment: prev_data = segment.get_data(False) return self._publish_segment_end(segment, prev_data) + return None def update_existing_segment( self, segment: PendingReviewSegment, frame_name: str, frame_time: float, - objects: list[TrackedObject], + objects: list[dict[str, Any]], ) -> None: """Validate if existing review segment should continue.""" camera_config = self.config.cameras[segment.camera] @@ -492,8 +498,11 @@ class ReviewSegmentMaintainer(threading.Thread): except FileNotFoundError: return - if segment.severity == SeverityEnum.alert and frame_time > ( - segment.last_alert_time + camera_config.review.alerts.cutoff_time + if ( + segment.severity == SeverityEnum.alert + and segment.last_alert_time is not None + and frame_time + > (segment.last_alert_time + camera_config.review.alerts.cutoff_time) ): needs_new_detection = ( segment.last_detection_time > segment.last_alert_time @@ -516,23 +525,18 @@ class ReviewSegmentMaintainer(threading.Thread): new_zones.update(o["current_zones"]) if new_detections: - self.active_review_segments[activity.camera_config.name] = ( - PendingReviewSegment( - activity.camera_config.name, - end_time, - SeverityEnum.detection, - new_detections, - sub_labels={}, - audio=set(), - zones=list(new_zones), - ) + new_segment = PendingReviewSegment( + segment.camera, + end_time, + SeverityEnum.detection, + new_detections, + sub_labels={}, + audio=set(), + 
zones=list(new_zones), ) - self._publish_segment_start( - self.active_review_segments[activity.camera_config.name] - ) - self.active_review_segments[ - activity.camera_config.name - ].last_detection_time = last_detection_time + self.active_review_segments[segment.camera] = new_segment + self._publish_segment_start(new_segment) + new_segment.last_detection_time = last_detection_time elif segment.severity == SeverityEnum.detection and frame_time > ( segment.last_detection_time + camera_config.review.detections.cutoff_time @@ -544,7 +548,7 @@ class ReviewSegmentMaintainer(threading.Thread): camera: str, frame_name: str, frame_time: float, - objects: list[TrackedObject], + objects: list[dict[str, Any]], ) -> None: """Check if a new review segment should be created.""" camera_config = self.config.cameras[camera] @@ -581,7 +585,7 @@ class ReviewSegmentMaintainer(threading.Thread): zones.append(zone) if severity: - self.active_review_segments[camera] = PendingReviewSegment( + new_segment = PendingReviewSegment( camera, frame_time, severity, @@ -590,6 +594,7 @@ class ReviewSegmentMaintainer(threading.Thread): audio=set(), zones=zones, ) + self.active_review_segments[camera] = new_segment try: yuv_frame = self.frame_manager.get( @@ -600,11 +605,11 @@ class ReviewSegmentMaintainer(threading.Thread): logger.debug(f"Failed to get frame {frame_name} from SHM") return - self.active_review_segments[camera].update_frame( + new_segment.update_frame( camera_config, yuv_frame, activity.get_all_objects() ) self.frame_manager.close(frame_name) - self._publish_segment_start(self.active_review_segments[camera]) + self._publish_segment_start(new_segment) except FileNotFoundError: return @@ -621,9 +626,14 @@ class ReviewSegmentMaintainer(threading.Thread): for camera in updated_topics["enabled"]: self.forcibly_end_segment(camera) - (topic, data) = self.detection_subscriber.check_for_update(timeout=1) + result = self.detection_subscriber.check_for_update(timeout=1) - if not topic: + if not 
result: + continue + + topic, data = result + + if not topic or not data: continue if topic == DetectionTypeEnum.video.value: @@ -712,10 +722,13 @@ class ReviewSegmentMaintainer(threading.Thread): if topic == DetectionTypeEnum.api: # manual_info["label"] contains 'label: sub_label' # so split out the label without modifying manual_info + det_labels = self.config.cameras[ + camera + ].review.detections.labels if ( self.config.cameras[camera].review.detections.enabled - and manual_info["label"].split(": ")[0] - in self.config.cameras[camera].review.detections.labels + and det_labels is not None + and manual_info["label"].split(": ")[0] in det_labels ): current_segment.last_detection_time = manual_info[ "end_time" @@ -744,14 +757,15 @@ class ReviewSegmentMaintainer(threading.Thread): ): # manual_info["label"] contains 'label: sub_label' # so split out the label without modifying manual_info + det_labels = self.config.cameras[ + camera + ].review.detections.labels if ( not self.config.cameras[ camera ].review.detections.enabled - or manual_info["label"].split(": ")[0] - not in self.config.cameras[ - camera - ].review.detections.labels + or det_labels is None + or manual_info["label"].split(": ")[0] not in det_labels ): current_segment.severity = SeverityEnum.alert elif ( @@ -828,17 +842,18 @@ class ReviewSegmentMaintainer(threading.Thread): severity = None # manual_info["label"] contains 'label: sub_label' # so split out the label without modifying manual_info + det_labels = self.config.cameras[camera].review.detections.labels if ( self.config.cameras[camera].review.detections.enabled - and manual_info["label"].split(": ")[0] - in self.config.cameras[camera].review.detections.labels + and det_labels is not None + and manual_info["label"].split(": ")[0] in det_labels ): severity = SeverityEnum.detection elif self.config.cameras[camera].review.alerts.enabled: severity = SeverityEnum.alert if severity: - self.active_review_segments[camera] = PendingReviewSegment( + 
api_segment = PendingReviewSegment( camera, frame_time, severity, @@ -847,32 +862,25 @@ class ReviewSegmentMaintainer(threading.Thread): [], set(), ) + self.active_review_segments[camera] = api_segment if manual_info["state"] == ManualEventState.start: self.indefinite_events[camera][manual_info["event_id"]] = ( manual_info["label"] ) # temporarily make it so this event can not end - self.active_review_segments[ - camera - ].last_alert_time = sys.maxsize - self.active_review_segments[ - camera - ].last_detection_time = sys.maxsize + api_segment.last_alert_time = sys.maxsize + api_segment.last_detection_time = sys.maxsize elif manual_info["state"] == ManualEventState.complete: - self.active_review_segments[ - camera - ].last_alert_time = manual_info["end_time"] - self.active_review_segments[ - camera - ].last_detection_time = manual_info["end_time"] + api_segment.last_alert_time = manual_info["end_time"] + api_segment.last_detection_time = manual_info["end_time"] else: logger.warning( f"Manual event API has been called for {camera}, but alerts and detections are disabled. This manual event will not appear as an alert or detection." 
) elif topic == DetectionTypeEnum.lpr: if self.config.cameras[camera].review.detections.enabled: - self.active_review_segments[camera] = PendingReviewSegment( + lpr_segment = PendingReviewSegment( camera, frame_time, SeverityEnum.detection, @@ -881,25 +889,18 @@ class ReviewSegmentMaintainer(threading.Thread): [], set(), ) + self.active_review_segments[camera] = lpr_segment if manual_info["state"] == ManualEventState.start: self.indefinite_events[camera][manual_info["event_id"]] = ( manual_info["label"] ) # temporarily make it so this event can not end - self.active_review_segments[ - camera - ].last_alert_time = sys.maxsize - self.active_review_segments[ - camera - ].last_detection_time = sys.maxsize + lpr_segment.last_alert_time = sys.maxsize + lpr_segment.last_detection_time = sys.maxsize elif manual_info["state"] == ManualEventState.complete: - self.active_review_segments[ - camera - ].last_alert_time = manual_info["end_time"] - self.active_review_segments[ - camera - ].last_detection_time = manual_info["end_time"] + lpr_segment.last_alert_time = manual_info["end_time"] + lpr_segment.last_detection_time = manual_info["end_time"] else: logger.warning( f"Dedicated LPR camera API has been called for {camera}, but detections are disabled. LPR events will not appear as a detection." diff --git a/frigate/stats/emitter.py b/frigate/stats/emitter.py index 42d4c16a8..2b34c7c4e 100644 --- a/frigate/stats/emitter.py +++ b/frigate/stats/emitter.py @@ -52,18 +52,66 @@ class StatsEmitter(threading.Thread): def get_stats_history( self, keys: Optional[list[str]] = None ) -> list[dict[str, Any]]: - """Get stats history.""" + """Get stats history. + + Supports dot-notation for nested keys to avoid returning large objects + when only specific subfields are needed. 
Handles two patterns: + + - Flat dict: "service.last_updated" returns {"service": {"last_updated": ...}} + - Dict-of-dicts: "cameras.camera_fps" returns each camera entry filtered + to only include "camera_fps" + """ if not keys: return self.stats_history + # Pre-parse keys into top-level keys and dot-notation fields + top_level_keys: list[str] = [] + nested_keys: dict[str, list[str]] = {} + + for k in keys: + if "." in k: + parent_key, child_key = k.split(".", 1) + nested_keys.setdefault(parent_key, []).append(child_key) + else: + top_level_keys.append(k) + selected_stats: list[dict[str, Any]] = [] for s in self.stats_history: - selected = {} + selected: dict[str, Any] = {} - for k in keys: + for k in top_level_keys: selected[k] = s.get(k) + for parent_key, child_keys in nested_keys.items(): + parent = s.get(parent_key) + + if not isinstance(parent, dict): + selected[parent_key] = parent + continue + + # Check if values are dicts (dict-of-dicts like cameras/detectors) + first_value = next(iter(parent.values()), None) + + if isinstance(first_value, dict): + # Filter each nested entry to only requested fields, + # omitting None values to preserve key-absence semantics + selected[parent_key] = { + entry_key: { + field: val + for field in child_keys + if (val := entry.get(field)) is not None + } + for entry_key, entry in parent.items() + } + else: + # Flat dict (like service) - pick individual fields + if parent_key not in selected: + selected[parent_key] = {} + + for child_key in child_keys: + selected[parent_key][child_key] = parent.get(child_key) + selected_stats.append(selected) return selected_stats diff --git a/frigate/stats/prometheus.py b/frigate/stats/prometheus.py index 67d8d03d8..d2e229568 100644 --- a/frigate/stats/prometheus.py +++ b/frigate/stats/prometheus.py @@ -355,16 +355,37 @@ class CustomCollector(object): gpu_mem_usages = GaugeMetricFamily( "frigate_gpu_mem_usage_percent", "GPU memory usage %", labels=["gpu_name"] ) + gpu_enc_usages = 
GaugeMetricFamily( + "frigate_gpu_encoder_usage_percent", + "GPU encoder utilisation %", + labels=["gpu_name"], + ) + gpu_compute_usages = GaugeMetricFamily( + "frigate_gpu_compute_usage_percent", + "GPU compute / encode utilisation %", + labels=["gpu_name"], + ) + gpu_dec_usages = GaugeMetricFamily( + "frigate_gpu_decoder_usage_percent", + "GPU decoder utilisation %", + labels=["gpu_name"], + ) try: for gpu_name, gpu_stats in stats["gpu_usages"].items(): self.add_metric(gpu_usages, [gpu_name], gpu_stats, "gpu") self.add_metric(gpu_mem_usages, [gpu_name], gpu_stats, "mem") + self.add_metric(gpu_enc_usages, [gpu_name], gpu_stats, "enc") + self.add_metric(gpu_compute_usages, [gpu_name], gpu_stats, "compute") + self.add_metric(gpu_dec_usages, [gpu_name], gpu_stats, "dec") except KeyError: pass yield gpu_usages yield gpu_mem_usages + yield gpu_enc_usages + yield gpu_compute_usages + yield gpu_dec_usages # service stats uptime_seconds = GaugeMetricFamily( diff --git a/frigate/stats/util.py b/frigate/stats/util.py index 40337268e..07b410ad2 100644 --- a/frigate/stats/util.py +++ b/frigate/stats/util.py @@ -19,6 +19,7 @@ from frigate.types import StatsTrackingTypes from frigate.util.services import ( calculate_shm_requirements, get_amd_gpu_stats, + get_axcl_npu_stats, get_bandwidth_stats, get_cpu_stats, get_fs_type, @@ -260,45 +261,33 @@ async def set_gpu_stats( else: stats["jetson-gpu"] = {"gpu": "", "mem": ""} hwaccel_errors.append(args) - elif "qsv" in args: + elif "qsv" in args or ("vaapi" in args and not is_vaapi_amd_driver()): if not config.telemetry.stats.intel_gpu_stats: continue - # intel QSV GPU - intel_usage = get_intel_gpu_stats(config.telemetry.stats.intel_gpu_device) - - if intel_usage is not None: - stats["intel-qsv"] = intel_usage or {"gpu": "", "mem": ""} - else: - stats["intel-qsv"] = {"gpu": "", "mem": ""} - hwaccel_errors.append(args) - elif "vaapi" in args: - if is_vaapi_amd_driver(): - if not config.telemetry.stats.amd_gpu_stats: - continue - - # AMD 
VAAPI GPU - amd_usage = get_amd_gpu_stats() - - if amd_usage: - stats["amd-vaapi"] = amd_usage - else: - stats["amd-vaapi"] = {"gpu": "", "mem": ""} - hwaccel_errors.append(args) - else: - if not config.telemetry.stats.intel_gpu_stats: - continue - - # intel VAAPI GPU + if "intel-gpu" not in stats: + # intel GPU (QSV or VAAPI both use the same physical GPU) intel_usage = get_intel_gpu_stats( config.telemetry.stats.intel_gpu_device ) if intel_usage is not None: - stats["intel-vaapi"] = intel_usage or {"gpu": "", "mem": ""} + stats["intel-gpu"] = intel_usage or {"gpu": "", "mem": ""} else: - stats["intel-vaapi"] = {"gpu": "", "mem": ""} + stats["intel-gpu"] = {"gpu": "", "mem": ""} hwaccel_errors.append(args) + elif "vaapi" in args: + if not config.telemetry.stats.amd_gpu_stats: + continue + + # AMD VAAPI GPU + amd_usage = get_amd_gpu_stats() + + if amd_usage: + stats["amd-vaapi"] = amd_usage + else: + stats["amd-vaapi"] = {"gpu": "", "mem": ""} + hwaccel_errors.append(args) elif "preset-rk" in args: rga_usage = get_rockchip_gpu_stats() @@ -324,6 +313,10 @@ async def set_npu_usages(config: FrigateConfig, all_stats: dict[str, Any]) -> No # OpenVINO NPU usage ov_usage = get_openvino_npu_stats() stats["openvino"] = ov_usage + elif detector.type == "axengine": + # AXERA NPU usage + axcl_usage = get_axcl_npu_stats() + stats["axengine"] = axcl_usage if stats: all_stats["npu_usages"] = stats @@ -505,4 +498,30 @@ def stats_snapshot( "pid": pid, } + # Embed cpu/mem stats into detectors, cameras, and processes + # so history consumers don't need the full cpu_usages dict + cpu_usages = stats.get("cpu_usages", {}) + + for det_stats in stats["detectors"].values(): + pid_str = str(det_stats.get("pid", "")) + usage = cpu_usages.get(pid_str, {}) + det_stats["cpu"] = usage.get("cpu") + det_stats["mem"] = usage.get("mem") + + for cam_stats in stats["cameras"].values(): + for pid_key, field in [ + ("ffmpeg_pid", "ffmpeg_cpu"), + ("capture_pid", "capture_cpu"), + ("pid", "detect_cpu"), 
+ ]: + pid_str = str(cam_stats.get(pid_key, "")) + usage = cpu_usages.get(pid_str, {}) + cam_stats[field] = usage.get("cpu") + + for proc_stats in stats["processes"].values(): + pid_str = str(proc_stats.get("pid", "")) + usage = cpu_usages.get(pid_str, {}) + proc_stats["cpu"] = usage.get("cpu") + proc_stats["mem"] = usage.get("mem") + return stats diff --git a/frigate/storage.py b/frigate/storage.py index dad3c6e9c..8cc199a1b 100644 --- a/frigate/storage.py +++ b/frigate/storage.py @@ -3,6 +3,7 @@ import logging import shutil import threading +from multiprocessing.synchronize import Event as MpEvent from pathlib import Path from peewee import SQL, fn @@ -23,7 +24,7 @@ MAX_CALCULATED_BANDWIDTH = 10000 # 10Gb/hr class StorageMaintainer(threading.Thread): """Maintain frigates recording storage.""" - def __init__(self, config: FrigateConfig, stop_event) -> None: + def __init__(self, config: FrigateConfig, stop_event: MpEvent) -> None: super().__init__(name="storage_maintainer") self.config = config self.stop_event = stop_event @@ -114,7 +115,7 @@ class StorageMaintainer(threading.Thread): logger.debug( f"Storage cleanup check: {hourly_bandwidth} hourly with remaining storage: {remaining_storage}." 
) - return remaining_storage < hourly_bandwidth + return remaining_storage < float(hourly_bandwidth) def reduce_storage_consumption(self) -> None: """Remove oldest hour of recordings.""" @@ -124,7 +125,7 @@ class StorageMaintainer(threading.Thread): [b["bandwidth"] for b in self.camera_storage_stats.values()] ) - recordings: Recordings = ( + recordings = ( Recordings.select( Recordings.id, Recordings.camera, @@ -138,7 +139,7 @@ class StorageMaintainer(threading.Thread): .iterator() ) - retained_events: Event = ( + retained_events = ( Event.select( Event.start_time, Event.end_time, @@ -278,7 +279,7 @@ class StorageMaintainer(threading.Thread): Recordings.id << deleted_recordings_list[i : i + max_deletes] ).execute() - def run(self): + def run(self) -> None: """Check every 5 minutes if storage needs to be cleaned up.""" if self.config.safe_mode: logger.info("Safe mode enabled, skipping storage maintenance") diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py index 61184c769..e82b688c6 100644 --- a/frigate/test/test_config.py +++ b/frigate/test/test_config.py @@ -10,7 +10,7 @@ from ruamel.yaml.constructor import DuplicateKeyError from frigate.config import BirdseyeModeEnum, FrigateConfig from frigate.const import MODEL_CACHE_DIR from frigate.detectors import DetectorTypeEnum -from frigate.util.builtin import deep_merge +from frigate.util.builtin import deep_merge, load_labels class TestConfig(unittest.TestCase): @@ -288,6 +288,65 @@ class TestConfig(unittest.TestCase): frigate_config = FrigateConfig(**config) assert "dog" in frigate_config.cameras["back"].objects.filters + def test_default_audio_filters(self): + config = { + "mqtt": {"host": "mqtt"}, + "audio": {"listen": ["speech", "yell"]}, + "cameras": { + "back": { + "ffmpeg": { + "inputs": [ + {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]} + ] + }, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, + } + }, + } + + frigate_config = FrigateConfig(**config) + 
all_audio_labels = { + label + for label in load_labels("/audio-labelmap.txt", prefill=521).values() + if label + } + + assert all_audio_labels.issubset( + set(frigate_config.cameras["back"].audio.filters.keys()) + ) + + def test_override_audio_filters(self): + config = { + "mqtt": {"host": "mqtt"}, + "cameras": { + "back": { + "ffmpeg": { + "inputs": [ + {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]} + ] + }, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, + "audio": { + "listen": ["speech", "yell"], + "filters": {"speech": {"threshold": 0.9}}, + }, + } + }, + } + + frigate_config = FrigateConfig(**config) + assert "speech" in frigate_config.cameras["back"].audio.filters + assert frigate_config.cameras["back"].audio.filters["speech"].threshold == 0.9 + assert "babbling" in frigate_config.cameras["back"].audio.filters + def test_inherit_object_filters(self): config = { "mqtt": {"host": "mqtt"}, @@ -1129,7 +1188,7 @@ class TestConfig(unittest.TestCase): def test_global_detect_merge(self): config = { "mqtt": {"host": "mqtt"}, - "detect": {"max_disappeared": 1, "height": 720}, + "detect": {"max_disappeared": 1, "height": 720, "width": 1280}, "cameras": { "back": { "ffmpeg": { diff --git a/frigate/test/test_gpu_stats.py b/frigate/test/test_gpu_stats.py index fd0df94c4..2604c4002 100644 --- a/frigate/test/test_gpu_stats.py +++ b/frigate/test/test_gpu_stats.py @@ -39,8 +39,12 @@ class TestGpuStats(unittest.TestCase): process.stdout = self.intel_results sp.return_value = process intel_stats = get_intel_gpu_stats(False) - print(f"the intel stats are {intel_stats}") + # rc6 values: 47.844741 and 100.0 → avg 73.92 → gpu = 100 - 73.92 = 26.08% + # Render/3D/0: 0.0 and 0.0 → enc = 0.0% + # Video/0: 4.533124 and 0.0 → dec = 2.27% assert intel_stats == { - "gpu": "1.13%", + "gpu": "26.08%", "mem": "-%", + "compute": "0.0%", + "dec": "2.27%", } diff --git a/frigate/test/test_profiles.py b/frigate/test/test_profiles.py index b77d3ebb6..b73fa74a0 
100644 --- a/frigate/test/test_profiles.py +++ b/frigate/test/test_profiles.py @@ -1,7 +1,7 @@ """Tests for the profiles system.""" +import json import os -import tempfile import unittest from unittest.mock import MagicMock, patch @@ -549,10 +549,17 @@ class TestProfileManager(unittest.TestCase): def test_get_profile_info(self): """Profile info returns correct structure with friendly names.""" - info = self.manager.get_profile_info() + with patch.object( + ProfileManager, + "_load_persisted_data", + return_value={"active": None, "last_activated": {}}, + ): + info = self.manager.get_profile_info() assert "profiles" in info assert "active_profile" in info + assert "last_activated" in info assert info["active_profile"] is None + assert info["last_activated"] == {} names = [p["name"] for p in info["profiles"]] assert "armed" in names assert "disarmed" in names @@ -590,33 +597,22 @@ class TestProfilePersistence(unittest.TestCase): """Test profile persistence to disk.""" def test_persist_and_load(self): - """Active profile name can be persisted and loaded.""" - with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as f: - temp_path = f.name - - try: - from pathlib import Path - - path = Path(temp_path) - path.write_text("armed") - loaded = path.read_text().strip() - assert loaded == "armed" - finally: - os.unlink(temp_path) + """Active profile name can be persisted and loaded via JSON.""" + data = {"active": "armed", "last_activated": {"armed": 1700000000.0}} + with patch.object( + ProfileManager, + "_load_persisted_data", + return_value=data, + ): + result = ProfileManager.load_persisted_profile() + assert result == "armed" def test_load_empty_file(self): """Empty persistence file returns None.""" - with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as f: - f.write("") - temp_path = f.name - - try: - with patch.object(type(PERSISTENCE_FILE), "exists", return_value=True): - with patch.object(type(PERSISTENCE_FILE), "read_text", 
return_value=""): - result = ProfileManager.load_persisted_profile() - assert result is None - finally: - os.unlink(temp_path) + with patch.object(type(PERSISTENCE_FILE), "exists", return_value=True): + with patch.object(type(PERSISTENCE_FILE), "read_text", return_value=""): + result = ProfileManager.load_persisted_profile() + assert result is None def test_load_missing_file(self): """Missing persistence file returns None.""" @@ -624,6 +620,118 @@ class TestProfilePersistence(unittest.TestCase): result = ProfileManager.load_persisted_profile() assert result is None + def test_load_persisted_data_valid_json(self): + """Valid JSON file is loaded correctly.""" + data = {"active": "home", "last_activated": {"home": 1700000000.0}} + with patch.object(type(PERSISTENCE_FILE), "exists", return_value=True): + with patch.object( + type(PERSISTENCE_FILE), + "read_text", + return_value=json.dumps(data), + ): + result = ProfileManager._load_persisted_data() + assert result == data + + def test_load_persisted_data_invalid_json(self): + """Invalid JSON returns default structure.""" + with patch.object(type(PERSISTENCE_FILE), "exists", return_value=True): + with patch.object( + type(PERSISTENCE_FILE), "read_text", return_value="not json" + ): + result = ProfileManager._load_persisted_data() + assert result == {"active": None, "last_activated": {}} + + def test_load_persisted_data_missing_file(self): + """Missing file returns default structure.""" + with patch.object(type(PERSISTENCE_FILE), "exists", return_value=False): + result = ProfileManager._load_persisted_data() + assert result == {"active": None, "last_activated": {}} + + def test_persist_records_timestamp(self): + """Persisting a profile records the activation timestamp.""" + config_data = { + "mqtt": {"host": "mqtt"}, + "profiles": {"armed": {"friendly_name": "Armed"}}, + "cameras": { + "front": { + "ffmpeg": { + "inputs": [ + { + "path": "rtsp://10.0.0.1:554/video", + "roles": ["detect"], + } + ] + }, + "detect": 
{"height": 1080, "width": 1920, "fps": 5}, + "profiles": {"armed": {"detect": {"enabled": True}}}, + }, + }, + } + if not os.path.exists(MODEL_CACHE_DIR) and not os.path.islink(MODEL_CACHE_DIR): + os.makedirs(MODEL_CACHE_DIR) + config = FrigateConfig(**config_data) + manager = ProfileManager(config, MagicMock()) + + written_data = {} + + def mock_write(_self, content): + written_data.update(json.loads(content)) + + with patch.object( + ProfileManager, + "_load_persisted_data", + return_value={"active": None, "last_activated": {}}, + ): + with patch.object(type(PERSISTENCE_FILE), "write_text", mock_write): + manager._persist_active_profile("armed") + + assert written_data["active"] == "armed" + assert "armed" in written_data["last_activated"] + assert isinstance(written_data["last_activated"]["armed"], float) + + def test_persist_deactivate_keeps_timestamps(self): + """Deactivating sets active to None but preserves last_activated.""" + existing = { + "active": "armed", + "last_activated": {"armed": 1700000000.0}, + } + written_data = {} + + def mock_write(_self, content): + written_data.update(json.loads(content)) + + config_data = { + "mqtt": {"host": "mqtt"}, + "profiles": {"armed": {"friendly_name": "Armed"}}, + "cameras": { + "front": { + "ffmpeg": { + "inputs": [ + { + "path": "rtsp://10.0.0.1:554/video", + "roles": ["detect"], + } + ] + }, + "detect": {"height": 1080, "width": 1920, "fps": 5}, + "profiles": {"armed": {"detect": {"enabled": True}}}, + }, + }, + } + if not os.path.exists(MODEL_CACHE_DIR) and not os.path.islink(MODEL_CACHE_DIR): + os.makedirs(MODEL_CACHE_DIR) + config = FrigateConfig(**config_data) + manager = ProfileManager(config, MagicMock()) + + with patch.object( + ProfileManager, "_load_persisted_data", return_value=existing + ): + with patch.object(type(PERSISTENCE_FILE), "write_text", mock_write): + manager._persist_active_profile(None) + + assert written_data["active"] is None + assert written_data["last_activated"]["armed"] == 
1700000000.0 + if __name__ == "__main__": unittest.main() diff --git a/frigate/timeline.py b/frigate/timeline.py index 3ec866176..6a62da2df 100644 --- a/frigate/timeline.py +++ b/frigate/timeline.py @@ -8,7 +8,7 @@ from multiprocessing.synchronize import Event as MpEvent from typing import Any from frigate.config import FrigateConfig -from frigate.events.maintainer import EventStateEnum, EventTypeEnum +from frigate.events.types import EventStateEnum, EventTypeEnum from frigate.models import Timeline from frigate.util.builtin import to_relative_box @@ -28,7 +28,7 @@ class TimelineProcessor(threading.Thread): self.config = config self.queue = queue self.stop_event = stop_event - self.pre_event_cache: dict[str, list[dict[str, Any]]] = {} + self.pre_event_cache: dict[str, list[dict[Any, Any]]] = {} def run(self) -> None: while not self.stop_event.is_set(): @@ -56,7 +56,7 @@ class TimelineProcessor(threading.Thread): def insert_or_save( self, - entry: dict[str, Any], + entry: dict[Any, Any], prev_event_data: dict[Any, Any], event_data: dict[Any, Any], ) -> None: @@ -84,11 +84,15 @@ class TimelineProcessor(threading.Thread): event_type: str, prev_event_data: dict[Any, Any], event_data: dict[Any, Any], - ) -> bool: + ) -> None: """Handle object detection.""" camera_config = self.config.cameras.get(camera) - if camera_config is None: - return False + if ( + camera_config is None + or camera_config.detect.width is None + or camera_config.detect.height is None + ): + return event_id = event_data["id"] # Base timeline entry data that all entries will share diff --git a/frigate/track/object_processing.py b/frigate/track/object_processing.py index 1a15e27ee..3fae8da6f 100644 --- a/frigate/track/object_processing.py +++ b/frigate/track/object_processing.py @@ -81,6 +81,7 @@ class TrackedObjectProcessor(threading.Thread): CameraConfigUpdateEnum.motion, CameraConfigUpdateEnum.objects, CameraConfigUpdateEnum.remove, + CameraConfigUpdateEnum.timestamp_style, 
CameraConfigUpdateEnum.zones, ], ) diff --git a/frigate/track/tracked_object.py b/frigate/track/tracked_object.py index 7d46b72fd..f5c33d1e6 100644 --- a/frigate/track/tracked_object.py +++ b/frigate/track/tracked_object.py @@ -61,14 +61,15 @@ class TrackedObject: self.zone_loitering: dict[str, int] = {} self.current_zones: list[str] = [] self.entered_zones: list[str] = [] + self.new_zone_entered: bool = False self.attributes: dict[str, float] = defaultdict(float) self.false_positive = True self.has_clip = False self.has_snapshot = False self.top_score = self.computed_score = 0.0 self.thumbnail_data: dict[str, Any] | None = None - self.last_updated = 0 - self.last_published = 0 + self.last_updated: float = 0 + self.last_published: float = 0 self.frame = None self.active = True self.pending_loitering = False @@ -278,6 +279,7 @@ class TrackedObject: if name not in self.entered_zones: self.entered_zones.append(name) + self.new_zone_entered = True else: self.zone_loitering[name] = loitering_score diff --git a/frigate/util/builtin.py b/frigate/util/builtin.py index 42aa18c0a..bd45a4a1f 100644 --- a/frigate/util/builtin.py +++ b/frigate/util/builtin.py @@ -12,7 +12,7 @@ import shlex import struct import urllib.parse from collections.abc import Mapping -from multiprocessing.sharedctypes import Synchronized +from multiprocessing.managers import ValueProxy from pathlib import Path from typing import Any, Dict, Optional, Tuple, Union @@ -64,7 +64,7 @@ class EventsPerSecond: class InferenceSpeed: - def __init__(self, metric: Synchronized) -> None: + def __init__(self, metric: ValueProxy[float]) -> None: self.__metric = metric self.__initialized = False diff --git a/frigate/util/ffmpeg.py b/frigate/util/ffmpeg.py new file mode 100644 index 000000000..9abacd4ed --- /dev/null +++ b/frigate/util/ffmpeg.py @@ -0,0 +1,48 @@ +"""FFmpeg utility functions for managing ffmpeg processes.""" + +import logging +import subprocess as sp +from typing import Any + +from frigate.log import 
LogPipe + + +def stop_ffmpeg(ffmpeg_process: sp.Popen[Any], logger: logging.Logger): + logger.info("Terminating the existing ffmpeg process...") + ffmpeg_process.terminate() + try: + logger.info("Waiting for ffmpeg to exit gracefully...") + ffmpeg_process.communicate(timeout=30) + logger.info("FFmpeg has exited") + except sp.TimeoutExpired: + logger.info("FFmpeg didn't exit. Force killing...") + ffmpeg_process.kill() + ffmpeg_process.communicate() + logger.info("FFmpeg has been killed") + ffmpeg_process = None + + +def start_or_restart_ffmpeg( + ffmpeg_cmd, logger, logpipe: LogPipe, frame_size=None, ffmpeg_process=None +) -> sp.Popen[Any]: + if ffmpeg_process is not None: + stop_ffmpeg(ffmpeg_process, logger) + + if frame_size is None: + process = sp.Popen( + ffmpeg_cmd, + stdout=sp.DEVNULL, + stderr=logpipe, + stdin=sp.DEVNULL, + start_new_session=True, + ) + else: + process = sp.Popen( + ffmpeg_cmd, + stdout=sp.PIPE, + stderr=logpipe, + stdin=sp.DEVNULL, + bufsize=frame_size * 10, + start_new_session=True, + ) + return process diff --git a/frigate/util/services.py b/frigate/util/services.py index 52ce5a698..f0bf2de1e 100644 --- a/frigate/util/services.py +++ b/frigate/util/services.py @@ -265,14 +265,30 @@ def get_amd_gpu_stats() -> Optional[dict[str, str]]: def get_intel_gpu_stats(intel_gpu_device: Optional[str]) -> Optional[dict[str, str]]: - """Get stats using intel_gpu_top.""" + """Get stats using intel_gpu_top. 
+ + Returns overall GPU usage derived from rc6 residency (idle time), + plus individual engine breakdowns: + - enc: Render/3D engine (compute/shader encoder, used by QSV) + - dec: Video engines (fixed-function codec, used by VAAPI) + """ def get_stats_manually(output: str) -> dict[str, str]: """Find global stats via regex when json fails to parse.""" reading = "".join(output) results: dict[str, str] = {} - # render is used for qsv + # rc6 residency for overall GPU usage + rc6_match = re.search(r'"rc6":\{"value":([\d.]+)', reading) + if rc6_match: + rc6_value = float(rc6_match.group(1)) + results["gpu"] = f"{round(100.0 - rc6_value, 2)}%" + else: + results["gpu"] = "-%" + + results["mem"] = "-%" + + # Render/3D is the compute/encode engine render = [] for result in re.findall(r'"Render/3D/0":{[a-z":\d.,%]+}', reading): packet = json.loads(result[14:]) @@ -280,11 +296,9 @@ def get_intel_gpu_stats(intel_gpu_device: Optional[str]) -> Optional[dict[str, s render.append(float(single)) if render: - render_avg = sum(render) / len(render) - else: - render_avg = 1 + results["compute"] = f"{round(sum(render) / len(render), 2)}%" - # video is used for vaapi + # Video engines are the fixed-function decode engines video = [] for result in re.findall(r'"Video/\d":{[a-z":\d.,%]+}', reading): packet = json.loads(result[10:]) @@ -292,12 +306,8 @@ def get_intel_gpu_stats(intel_gpu_device: Optional[str]) -> Optional[dict[str, s video.append(float(single)) if video: - video_avg = sum(video) / len(video) - else: - video_avg = 1 + results["dec"] = f"{round(sum(video) / len(video), 2)}%" - results["gpu"] = f"{round((video_avg + render_avg) / 2, 2)}%" - results["mem"] = "-%" return results intel_gpu_top_command = [ @@ -336,10 +346,18 @@ def get_intel_gpu_stats(intel_gpu_device: Optional[str]) -> Optional[dict[str, s return get_stats_manually(output) results: dict[str, str] = {} - render = {"global": []} - video = {"global": []} + rc6_values = [] + render_global = [] + video_global = [] + # 
per-client: {pid: [total_busy_per_sample, ...]} + client_usages: dict[str, list[float]] = {} for block in data: + # rc6 residency: percentage of time GPU is idle + rc6 = block.get("rc6", {}).get("value") + if rc6 is not None: + rc6_values.append(float(rc6)) + global_engine = block.get("engines") if global_engine: @@ -347,48 +365,53 @@ def get_intel_gpu_stats(intel_gpu_device: Optional[str]) -> Optional[dict[str, s video_frame = global_engine.get("Video/0", {}).get("busy") if render_frame is not None: - render["global"].append(float(render_frame)) + render_global.append(float(render_frame)) if video_frame is not None: - video["global"].append(float(video_frame)) + video_global.append(float(video_frame)) clients = block.get("clients", {}) - if clients and len(clients): + if clients: for client_block in clients.values(): - key = client_block["pid"] + pid = client_block["pid"] - if render.get(key) is None: - render[key] = [] - video[key] = [] + if pid not in client_usages: + client_usages[pid] = [] - client_engine = client_block.get("engine-classes", {}) + # Sum all engine-class busy values for this client + total_busy = 0.0 + for engine in client_block.get("engine-classes", {}).values(): + busy = engine.get("busy") + if busy is not None: + total_busy += float(busy) - render_frame = client_engine.get("Render/3D", {}).get("busy") - video_frame = client_engine.get("Video", {}).get("busy") + client_usages[pid].append(total_busy) - if render_frame is not None: - render[key].append(float(render_frame)) + # Overall GPU usage from rc6 (idle) residency + if rc6_values: + rc6_avg = sum(rc6_values) / len(rc6_values) + results["gpu"] = f"{round(100.0 - rc6_avg, 2)}%" - if video_frame is not None: - video[key].append(float(video_frame)) + results["mem"] = "-%" - if render["global"] and video["global"]: - results["gpu"] = ( - f"{round(((sum(render['global']) / len(render['global'])) + (sum(video['global']) / len(video['global']))) / 2, 2)}%" - ) - results["mem"] = "-%" + # Compute: 
Render/3D engine (compute/shader workloads and QSV encode) + if render_global: + results["compute"] = f"{round(sum(render_global) / len(render_global), 2)}%" - if len(render.keys()) > 1: + # Decoder: Video engine (fixed-function codec) + if video_global: + results["dec"] = f"{round(sum(video_global) / len(video_global), 2)}%" + + # Per-client GPU usage (sum of all engines per process) + if client_usages: results["clients"] = {} - for key in render.keys(): - if key == "global" or not render[key] or not video[key]: - continue - - results["clients"][key] = ( - f"{round(((sum(render[key]) / len(render[key])) + (sum(video[key]) / len(video[key]))) / 2, 2)}%" - ) + for pid, samples in client_usages.items(): + if samples: + results["clients"][pid] = ( + f"{round(sum(samples) / len(samples), 2)}%" + ) return results @@ -488,6 +511,43 @@ def get_rockchip_npu_stats() -> Optional[dict[str, float | str]]: return stats +def get_axcl_npu_stats() -> Optional[dict[str, str | float]]: + """Get NPU stats using axcl.""" + # Check if axcl-smi exists + axcl_smi_path = "/usr/bin/axcl/axcl-smi" + if not os.path.exists(axcl_smi_path): + return None + + try: + # Run axcl-smi command to get NPU stats + axcl_command = [axcl_smi_path, "sh", "cat", "/proc/ax_proc/npu/top"] + p = sp.run( + axcl_command, + capture_output=True, + text=True, + ) + + if p.returncode != 0: + pass + else: + utilization = None + + for line in p.stdout.strip().splitlines(): + line = line.strip() + if line.startswith("utilization:"): + match = re.search(r"utilization:(\d+)%", line) + if match: + utilization = float(match.group(1)) + + if utilization is not None: + stats: dict[str, str | float] = {"npu": utilization, "mem": "-%"} + return stats + except Exception: + pass + + return None + + def try_get_info(f, h, default="N/A", sensor=None): try: if h: diff --git a/frigate/video/__init__.py b/frigate/video/__init__.py new file mode 100644 index 000000000..24589835c --- /dev/null +++ b/frigate/video/__init__.py @@ -0,0 
+1,2 @@ +from .detect import * # noqa: F403 +from .ffmpeg import * # noqa: F403 diff --git a/frigate/video/detect.py b/frigate/video/detect.py new file mode 100644 index 000000000..339b11e53 --- /dev/null +++ b/frigate/video/detect.py @@ -0,0 +1,563 @@ +"""Manages camera object detection processes.""" + +import logging +import queue +import time +from datetime import datetime, timezone +from multiprocessing import Queue +from multiprocessing.synchronize import Event as MpEvent +from typing import Any + +import cv2 + +from frigate.camera import CameraMetrics, PTZMetrics +from frigate.comms.inter_process import InterProcessRequestor +from frigate.config import CameraConfig, DetectConfig, LoggerConfig, ModelConfig +from frigate.config.camera.camera import CameraTypeEnum +from frigate.config.camera.updater import ( + CameraConfigUpdateEnum, + CameraConfigUpdateSubscriber, +) +from frigate.const import ( + PROCESS_PRIORITY_HIGH, + REQUEST_REGION_GRID, +) +from frigate.motion import MotionDetector +from frigate.motion.improved_motion import ImprovedMotionDetector +from frigate.object_detection.base import RemoteObjectDetector +from frigate.ptz.autotrack import ptz_moving_at_frame_time +from frigate.track import ObjectTracker +from frigate.track.norfair_tracker import NorfairTracker +from frigate.track.tracked_object import TrackedObjectAttribute +from frigate.util.builtin import EventsPerSecond +from frigate.util.image import ( + FrameManager, + SharedMemoryFrameManager, + draw_box_with_label, +) +from frigate.util.object import ( + create_tensor_input, + get_cluster_candidates, + get_cluster_region, + get_cluster_region_from_grid, + get_min_region_size, + get_startup_regions, + inside_any, + intersects_any, + is_object_filtered, + reduce_detections, +) +from frigate.util.process import FrigateProcess +from frigate.util.time import get_tomorrow_at_time + +logger = logging.getLogger(__name__) + + +class CameraTracker(FrigateProcess): + def __init__( + self, + config: 
CameraConfig, + model_config: ModelConfig, + labelmap: dict[int, str], + detection_queue: Queue, + detected_objects_queue, + camera_metrics: CameraMetrics, + ptz_metrics: PTZMetrics, + region_grid: list[list[dict[str, Any]]], + stop_event: MpEvent, + log_config: LoggerConfig | None = None, + ) -> None: + super().__init__( + stop_event, + PROCESS_PRIORITY_HIGH, + name=f"frigate.process:{config.name}", + daemon=True, + ) + self.config = config + self.model_config = model_config + self.labelmap = labelmap + self.detection_queue = detection_queue + self.detected_objects_queue = detected_objects_queue + self.camera_metrics = camera_metrics + self.ptz_metrics = ptz_metrics + self.region_grid = region_grid + self.log_config = log_config + + def run(self) -> None: + self.pre_run_setup(self.log_config) + frame_queue = self.camera_metrics.frame_queue + frame_shape = self.config.frame_shape + + motion_detector = ImprovedMotionDetector( + frame_shape, + self.config.motion, + self.config.detect.fps, + name=self.config.name, + ptz_metrics=self.ptz_metrics, + ) + object_detector = RemoteObjectDetector( + self.config.name, + self.labelmap, + self.detection_queue, + self.model_config, + self.stop_event, + ) + + object_tracker = NorfairTracker(self.config, self.ptz_metrics) + + frame_manager = SharedMemoryFrameManager() + + # create communication for region grid updates + requestor = InterProcessRequestor() + + process_frames( + requestor, + frame_queue, + frame_shape, + self.model_config, + self.config, + frame_manager, + motion_detector, + object_detector, + object_tracker, + self.detected_objects_queue, + self.camera_metrics, + self.stop_event, + self.ptz_metrics, + self.region_grid, + ) + + # empty the frame queue + logger.info(f"{self.config.name}: emptying frame queue") + while not frame_queue.empty(): + (frame_name, _) = frame_queue.get(False) + frame_manager.delete(frame_name) + + logger.info(f"{self.config.name}: exiting subprocess") + + +def detect( + detect_config: 
DetectConfig, + object_detector, + frame, + model_config: ModelConfig, + region, + objects_to_track, + object_filters, +): + tensor_input = create_tensor_input(frame, model_config, region) + + detections = [] + region_detections = object_detector.detect(tensor_input) + for d in region_detections: + box = d[2] + size = region[2] - region[0] + x_min = int(max(0, (box[1] * size) + region[0])) + y_min = int(max(0, (box[0] * size) + region[1])) + x_max = int(min(detect_config.width - 1, (box[3] * size) + region[0])) + y_max = int(min(detect_config.height - 1, (box[2] * size) + region[1])) + + # ignore objects that were detected outside the frame + if (x_min >= detect_config.width - 1) or (y_min >= detect_config.height - 1): + continue + + width = x_max - x_min + height = y_max - y_min + area = width * height + ratio = width / max(1, height) + det = (d[0], d[1], (x_min, y_min, x_max, y_max), area, ratio, region) + # apply object filters + if is_object_filtered(det, objects_to_track, object_filters): + continue + detections.append(det) + return detections + + +def process_frames( + requestor: InterProcessRequestor, + frame_queue: Queue, + frame_shape: tuple[int, int], + model_config: ModelConfig, + camera_config: CameraConfig, + frame_manager: FrameManager, + motion_detector: MotionDetector, + object_detector: RemoteObjectDetector, + object_tracker: ObjectTracker, + detected_objects_queue: Queue, + camera_metrics: CameraMetrics, + stop_event: MpEvent, + ptz_metrics: PTZMetrics, + region_grid: list[list[dict[str, Any]]], + exit_on_empty: bool = False, +): + next_region_update = get_tomorrow_at_time(2) + config_subscriber = CameraConfigUpdateSubscriber( + None, + {camera_config.name: camera_config}, + [ + CameraConfigUpdateEnum.detect, + CameraConfigUpdateEnum.enabled, + CameraConfigUpdateEnum.motion, + CameraConfigUpdateEnum.objects, + ], + ) + + fps_tracker = EventsPerSecond() + fps_tracker.start() + + startup_scan = True + stationary_frame_counter = 0 + camera_enabled = 
True + + region_min_size = get_min_region_size(model_config) + + attributes_map = model_config.attributes_map + all_attributes = model_config.all_attributes + + # remove license_plate from attributes if this camera is a dedicated LPR cam + if camera_config.type == CameraTypeEnum.lpr: + modified_attributes_map = model_config.attributes_map.copy() + + if ( + "car" in modified_attributes_map + and "license_plate" in modified_attributes_map["car"] + ): + modified_attributes_map["car"] = [ + attr + for attr in modified_attributes_map["car"] + if attr != "license_plate" + ] + + attributes_map = modified_attributes_map + + all_attributes = [ + attr for attr in model_config.all_attributes if attr != "license_plate" + ] + + while not stop_event.is_set(): + updated_configs = config_subscriber.check_for_updates() + + if "enabled" in updated_configs: + prev_enabled = camera_enabled + camera_enabled = camera_config.enabled + + if "motion" in updated_configs: + motion_detector.config = camera_config.motion + motion_detector.update_mask() + + if ( + not camera_enabled + and prev_enabled != camera_enabled + and camera_metrics.frame_queue.empty() + ): + logger.debug( + f"Camera {camera_config.name} disabled, clearing tracked objects" + ) + prev_enabled = camera_enabled + + # Clear norfair's dictionaries + object_tracker.tracked_objects.clear() + object_tracker.disappeared.clear() + object_tracker.stationary_box_history.clear() + object_tracker.positions.clear() + object_tracker.track_id_map.clear() + + # Clear internal norfair states + for trackers_by_type in object_tracker.trackers.values(): + for tracker in trackers_by_type.values(): + tracker.tracked_objects = [] + for tracker in object_tracker.default_tracker.values(): + tracker.tracked_objects = [] + + if not camera_enabled: + time.sleep(0.1) + continue + + if datetime.now().astimezone(timezone.utc) > next_region_update: + region_grid = requestor.send_data(REQUEST_REGION_GRID, camera_config.name) + next_region_update = 
get_tomorrow_at_time(2) + + try: + if exit_on_empty: + frame_name, frame_time = frame_queue.get(False) + else: + frame_name, frame_time = frame_queue.get(True, 1) + except queue.Empty: + if exit_on_empty: + logger.info("Exiting track_objects...") + break + continue + + camera_metrics.detection_frame.value = frame_time + ptz_metrics.frame_time.value = frame_time + + frame = frame_manager.get(frame_name, (frame_shape[0] * 3 // 2, frame_shape[1])) + + if frame is None: + logger.debug( + f"{camera_config.name}: frame {frame_time} is not in memory store." + ) + continue + + # look for motion if enabled + motion_boxes = motion_detector.detect(frame) + + regions = [] + consolidated_detections = [] + + # if detection is disabled + if not camera_config.detect.enabled: + object_tracker.match_and_update(frame_name, frame_time, []) + else: + # get stationary object ids + # check every Nth frame for stationary objects + # disappeared objects are not stationary + # also check for overlapping motion boxes + if stationary_frame_counter == camera_config.detect.stationary.interval: + stationary_frame_counter = 0 + stationary_object_ids = [] + else: + stationary_frame_counter += 1 + stationary_object_ids = [ + obj["id"] + for obj in object_tracker.tracked_objects.values() + # if it has exceeded the stationary threshold + if obj["motionless_count"] + >= camera_config.detect.stationary.threshold + # and it hasn't disappeared + and object_tracker.disappeared[obj["id"]] == 0 + # and it doesn't overlap with any current motion boxes when not calibrating + and not intersects_any( + obj["box"], + [] if motion_detector.is_calibrating() else motion_boxes, + ) + ] + + # get tracked object boxes that aren't stationary + tracked_object_boxes = [ + ( + # use existing object box for stationary objects + obj["estimate"] + if obj["motionless_count"] + < camera_config.detect.stationary.threshold + else obj["box"] + ) + for obj in object_tracker.tracked_objects.values() + if obj["id"] not in 
stationary_object_ids + ] + object_boxes = tracked_object_boxes + object_tracker.untracked_object_boxes + + # get consolidated regions for tracked objects + regions = [ + get_cluster_region( + frame_shape, region_min_size, candidate, object_boxes + ) + for candidate in get_cluster_candidates( + frame_shape, region_min_size, object_boxes + ) + ] + + # only add in the motion boxes when not calibrating and a ptz is not moving via autotracking + # ptz_moving_at_frame_time() always returns False for non-autotracking cameras + if not motion_detector.is_calibrating() and not ptz_moving_at_frame_time( + frame_time, + ptz_metrics.start_time.value, + ptz_metrics.stop_time.value, + ): + # find motion boxes that are not inside tracked object regions + standalone_motion_boxes = [ + b for b in motion_boxes if not inside_any(b, regions) + ] + + if standalone_motion_boxes: + motion_clusters = get_cluster_candidates( + frame_shape, + region_min_size, + standalone_motion_boxes, + ) + motion_regions = [ + get_cluster_region_from_grid( + frame_shape, + region_min_size, + candidate, + standalone_motion_boxes, + region_grid, + ) + for candidate in motion_clusters + ] + regions += motion_regions + + # if starting up, get the next startup scan region + if startup_scan: + for region in get_startup_regions( + frame_shape, region_min_size, region_grid + ): + regions.append(region) + startup_scan = False + + # resize regions and detect + # seed with stationary objects + detections = [ + ( + obj["label"], + obj["score"], + obj["box"], + obj["area"], + obj["ratio"], + obj["region"], + ) + for obj in object_tracker.tracked_objects.values() + if obj["id"] in stationary_object_ids + ] + + for region in regions: + detections.extend( + detect( + camera_config.detect, + object_detector, + frame, + model_config, + region, + camera_config.objects.track, + camera_config.objects.filters, + ) + ) + + consolidated_detections = reduce_detections(frame_shape, detections) + + # if detection was run on this 
frame, consolidate + if len(regions) > 0: + tracked_detections = [ + d for d in consolidated_detections if d[0] not in all_attributes + ] + # now that we have refined our detections, we need to track objects + object_tracker.match_and_update( + frame_name, frame_time, tracked_detections + ) + # else, just update the frame times for the stationary objects + else: + object_tracker.update_frame_times(frame_name, frame_time) + + # group the attribute detections based on what label they apply to + attribute_detections: dict[str, list[TrackedObjectAttribute]] = {} + for label, attribute_labels in attributes_map.items(): + attribute_detections[label] = [ + TrackedObjectAttribute(d) + for d in consolidated_detections + if d[0] in attribute_labels + ] + + # build detections + detections = {} + for obj in object_tracker.tracked_objects.values(): + detections[obj["id"]] = {**obj, "attributes": []} + + # find the best object for each attribute to be assigned to + all_objects: list[dict[str, Any]] = object_tracker.tracked_objects.values() + for attributes in attribute_detections.values(): + for attribute in attributes: + filtered_objects = filter( + lambda o: attribute.label in attributes_map.get(o["label"], []), + all_objects, + ) + selected_object_id = attribute.find_best_object(filtered_objects) + + if selected_object_id is not None: + detections[selected_object_id]["attributes"].append( + attribute.get_tracking_data() + ) + + # debug object tracking + if False: + bgr_frame = cv2.cvtColor( + frame, + cv2.COLOR_YUV2BGR_I420, + ) + object_tracker.debug_draw(bgr_frame, frame_time) + cv2.imwrite( + f"debug/frames/track-{'{:.6f}'.format(frame_time)}.jpg", bgr_frame + ) + # debug + if False: + bgr_frame = cv2.cvtColor( + frame, + cv2.COLOR_YUV2BGR_I420, + ) + + for m_box in motion_boxes: + cv2.rectangle( + bgr_frame, + (m_box[0], m_box[1]), + (m_box[2], m_box[3]), + (0, 0, 255), + 2, + ) + + for b in tracked_object_boxes: + cv2.rectangle( + bgr_frame, + (b[0], b[1]), + (b[2], 
b[3]), + (255, 0, 0), + 2, + ) + + for obj in object_tracker.tracked_objects.values(): + if obj["frame_time"] == frame_time: + thickness = 2 + color = model_config.colormap.get(obj["label"], (255, 255, 255)) + else: + thickness = 1 + color = (255, 0, 0) + + # draw the bounding boxes on the frame + box = obj["box"] + + draw_box_with_label( + bgr_frame, + box[0], + box[1], + box[2], + box[3], + obj["label"], + obj["id"], + thickness=thickness, + color=color, + ) + + for region in regions: + cv2.rectangle( + bgr_frame, + (region[0], region[1]), + (region[2], region[3]), + (0, 255, 0), + 2, + ) + + cv2.imwrite( + f"debug/frames/{camera_config.name}-{'{:.6f}'.format(frame_time)}.jpg", + bgr_frame, + ) + # add to the queue if not full + if detected_objects_queue.full(): + frame_manager.close(frame_name) + continue + else: + fps_tracker.update() + camera_metrics.process_fps.value = fps_tracker.eps() + detected_objects_queue.put( + ( + camera_config.name, + frame_name, + frame_time, + detections, + motion_boxes, + regions, + ) + ) + camera_metrics.detection_fps.value = object_detector.fps.eps() + frame_manager.close(frame_name) + + motion_detector.stop() + requestor.stop() + config_subscriber.stop() diff --git a/frigate/video.py b/frigate/video/ffmpeg.py old mode 100755 new mode 100644 similarity index 55% rename from frigate/video.py rename to frigate/video/ffmpeg.py index 289027bb4..852ea4a16 --- a/frigate/video.py +++ b/frigate/video/ffmpeg.py @@ -1,3 +1,5 @@ +"""Manages ffmpeg processes for camera frame capture.""" + import logging import queue import subprocess as sp @@ -9,97 +11,30 @@ from multiprocessing import Queue, Value from multiprocessing.synchronize import Event as MpEvent from typing import Any -import cv2 - -from frigate.camera import CameraMetrics, PTZMetrics +from frigate.camera import CameraMetrics from frigate.comms.inter_process import InterProcessRequestor from frigate.comms.recordings_updater import ( RecordingsDataSubscriber, RecordingsDataTypeEnum, 
) -from frigate.config import CameraConfig, DetectConfig, LoggerConfig, ModelConfig -from frigate.config.camera.camera import CameraTypeEnum +from frigate.config import CameraConfig, LoggerConfig from frigate.config.camera.updater import ( CameraConfigUpdateEnum, CameraConfigUpdateSubscriber, ) -from frigate.const import ( - PROCESS_PRIORITY_HIGH, - REQUEST_REGION_GRID, -) +from frigate.const import PROCESS_PRIORITY_HIGH from frigate.log import LogPipe -from frigate.motion import MotionDetector -from frigate.motion.improved_motion import ImprovedMotionDetector -from frigate.object_detection.base import RemoteObjectDetector -from frigate.ptz.autotrack import ptz_moving_at_frame_time -from frigate.track import ObjectTracker -from frigate.track.norfair_tracker import NorfairTracker -from frigate.track.tracked_object import TrackedObjectAttribute from frigate.util.builtin import EventsPerSecond +from frigate.util.ffmpeg import start_or_restart_ffmpeg, stop_ffmpeg from frigate.util.image import ( FrameManager, SharedMemoryFrameManager, - draw_box_with_label, -) -from frigate.util.object import ( - create_tensor_input, - get_cluster_candidates, - get_cluster_region, - get_cluster_region_from_grid, - get_min_region_size, - get_startup_regions, - inside_any, - intersects_any, - is_object_filtered, - reduce_detections, ) from frigate.util.process import FrigateProcess -from frigate.util.time import get_tomorrow_at_time logger = logging.getLogger(__name__) -def stop_ffmpeg(ffmpeg_process: sp.Popen[Any], logger: logging.Logger): - logger.info("Terminating the existing ffmpeg process...") - ffmpeg_process.terminate() - try: - logger.info("Waiting for ffmpeg to exit gracefully...") - ffmpeg_process.communicate(timeout=30) - logger.info("FFmpeg has exited") - except sp.TimeoutExpired: - logger.info("FFmpeg didn't exit. 
Force killing...") - ffmpeg_process.kill() - ffmpeg_process.communicate() - logger.info("FFmpeg has been killed") - ffmpeg_process = None - - -def start_or_restart_ffmpeg( - ffmpeg_cmd, logger, logpipe: LogPipe, frame_size=None, ffmpeg_process=None -) -> sp.Popen[Any]: - if ffmpeg_process is not None: - stop_ffmpeg(ffmpeg_process, logger) - - if frame_size is None: - process = sp.Popen( - ffmpeg_cmd, - stdout=sp.DEVNULL, - stderr=logpipe, - stdin=sp.DEVNULL, - start_new_session=True, - ) - else: - process = sp.Popen( - ffmpeg_cmd, - stdout=sp.PIPE, - stderr=logpipe, - stdin=sp.DEVNULL, - bufsize=frame_size * 10, - start_new_session=True, - ) - return process - - def capture_frames( ffmpeg_process: sp.Popen[Any], config: CameraConfig, @@ -708,513 +643,3 @@ class CameraCapture(FrigateProcess): ) camera_watchdog.start() camera_watchdog.join() - - -class CameraTracker(FrigateProcess): - def __init__( - self, - config: CameraConfig, - model_config: ModelConfig, - labelmap: dict[int, str], - detection_queue: Queue, - detected_objects_queue, - camera_metrics: CameraMetrics, - ptz_metrics: PTZMetrics, - region_grid: list[list[dict[str, Any]]], - stop_event: MpEvent, - log_config: LoggerConfig | None = None, - ) -> None: - super().__init__( - stop_event, - PROCESS_PRIORITY_HIGH, - name=f"frigate.process:{config.name}", - daemon=True, - ) - self.config = config - self.model_config = model_config - self.labelmap = labelmap - self.detection_queue = detection_queue - self.detected_objects_queue = detected_objects_queue - self.camera_metrics = camera_metrics - self.ptz_metrics = ptz_metrics - self.region_grid = region_grid - self.log_config = log_config - - def run(self) -> None: - self.pre_run_setup(self.log_config) - frame_queue = self.camera_metrics.frame_queue - frame_shape = self.config.frame_shape - - motion_detector = ImprovedMotionDetector( - frame_shape, - self.config.motion, - self.config.detect.fps, - name=self.config.name, - ptz_metrics=self.ptz_metrics, - ) - 
object_detector = RemoteObjectDetector( - self.config.name, - self.labelmap, - self.detection_queue, - self.model_config, - self.stop_event, - ) - - object_tracker = NorfairTracker(self.config, self.ptz_metrics) - - frame_manager = SharedMemoryFrameManager() - - # create communication for region grid updates - requestor = InterProcessRequestor() - - process_frames( - requestor, - frame_queue, - frame_shape, - self.model_config, - self.config, - frame_manager, - motion_detector, - object_detector, - object_tracker, - self.detected_objects_queue, - self.camera_metrics, - self.stop_event, - self.ptz_metrics, - self.region_grid, - ) - - # empty the frame queue - logger.info(f"{self.config.name}: emptying frame queue") - while not frame_queue.empty(): - (frame_name, _) = frame_queue.get(False) - frame_manager.delete(frame_name) - - logger.info(f"{self.config.name}: exiting subprocess") - - -def detect( - detect_config: DetectConfig, - object_detector, - frame, - model_config: ModelConfig, - region, - objects_to_track, - object_filters, -): - tensor_input = create_tensor_input(frame, model_config, region) - - detections = [] - region_detections = object_detector.detect(tensor_input) - for d in region_detections: - box = d[2] - size = region[2] - region[0] - x_min = int(max(0, (box[1] * size) + region[0])) - y_min = int(max(0, (box[0] * size) + region[1])) - x_max = int(min(detect_config.width - 1, (box[3] * size) + region[0])) - y_max = int(min(detect_config.height - 1, (box[2] * size) + region[1])) - - # ignore objects that were detected outside the frame - if (x_min >= detect_config.width - 1) or (y_min >= detect_config.height - 1): - continue - - width = x_max - x_min - height = y_max - y_min - area = width * height - ratio = width / max(1, height) - det = (d[0], d[1], (x_min, y_min, x_max, y_max), area, ratio, region) - # apply object filters - if is_object_filtered(det, objects_to_track, object_filters): - continue - detections.append(det) - return detections - - 
-def process_frames( - requestor: InterProcessRequestor, - frame_queue: Queue, - frame_shape: tuple[int, int], - model_config: ModelConfig, - camera_config: CameraConfig, - frame_manager: FrameManager, - motion_detector: MotionDetector, - object_detector: RemoteObjectDetector, - object_tracker: ObjectTracker, - detected_objects_queue: Queue, - camera_metrics: CameraMetrics, - stop_event: MpEvent, - ptz_metrics: PTZMetrics, - region_grid: list[list[dict[str, Any]]], - exit_on_empty: bool = False, -): - next_region_update = get_tomorrow_at_time(2) - config_subscriber = CameraConfigUpdateSubscriber( - None, - {camera_config.name: camera_config}, - [ - CameraConfigUpdateEnum.detect, - CameraConfigUpdateEnum.enabled, - CameraConfigUpdateEnum.motion, - CameraConfigUpdateEnum.objects, - ], - ) - - fps_tracker = EventsPerSecond() - fps_tracker.start() - - startup_scan = True - stationary_frame_counter = 0 - camera_enabled = True - - region_min_size = get_min_region_size(model_config) - - attributes_map = model_config.attributes_map - all_attributes = model_config.all_attributes - - # remove license_plate from attributes if this camera is a dedicated LPR cam - if camera_config.type == CameraTypeEnum.lpr: - modified_attributes_map = model_config.attributes_map.copy() - - if ( - "car" in modified_attributes_map - and "license_plate" in modified_attributes_map["car"] - ): - modified_attributes_map["car"] = [ - attr - for attr in modified_attributes_map["car"] - if attr != "license_plate" - ] - - attributes_map = modified_attributes_map - - all_attributes = [ - attr for attr in model_config.all_attributes if attr != "license_plate" - ] - - while not stop_event.is_set(): - updated_configs = config_subscriber.check_for_updates() - - if "enabled" in updated_configs: - prev_enabled = camera_enabled - camera_enabled = camera_config.enabled - - if "motion" in updated_configs: - motion_detector.config = camera_config.motion - motion_detector.update_mask() - - if ( - not camera_enabled 
- and prev_enabled != camera_enabled - and camera_metrics.frame_queue.empty() - ): - logger.debug( - f"Camera {camera_config.name} disabled, clearing tracked objects" - ) - prev_enabled = camera_enabled - - # Clear norfair's dictionaries - object_tracker.tracked_objects.clear() - object_tracker.disappeared.clear() - object_tracker.stationary_box_history.clear() - object_tracker.positions.clear() - object_tracker.track_id_map.clear() - - # Clear internal norfair states - for trackers_by_type in object_tracker.trackers.values(): - for tracker in trackers_by_type.values(): - tracker.tracked_objects = [] - for tracker in object_tracker.default_tracker.values(): - tracker.tracked_objects = [] - - if not camera_enabled: - time.sleep(0.1) - continue - - if datetime.now().astimezone(timezone.utc) > next_region_update: - region_grid = requestor.send_data(REQUEST_REGION_GRID, camera_config.name) - next_region_update = get_tomorrow_at_time(2) - - try: - if exit_on_empty: - frame_name, frame_time = frame_queue.get(False) - else: - frame_name, frame_time = frame_queue.get(True, 1) - except queue.Empty: - if exit_on_empty: - logger.info("Exiting track_objects...") - break - continue - - camera_metrics.detection_frame.value = frame_time - ptz_metrics.frame_time.value = frame_time - - frame = frame_manager.get(frame_name, (frame_shape[0] * 3 // 2, frame_shape[1])) - - if frame is None: - logger.debug( - f"{camera_config.name}: frame {frame_time} is not in memory store." 
- ) - continue - - # look for motion if enabled - motion_boxes = motion_detector.detect(frame) - - regions = [] - consolidated_detections = [] - - # if detection is disabled - if not camera_config.detect.enabled: - object_tracker.match_and_update(frame_name, frame_time, []) - else: - # get stationary object ids - # check every Nth frame for stationary objects - # disappeared objects are not stationary - # also check for overlapping motion boxes - if stationary_frame_counter == camera_config.detect.stationary.interval: - stationary_frame_counter = 0 - stationary_object_ids = [] - else: - stationary_frame_counter += 1 - stationary_object_ids = [ - obj["id"] - for obj in object_tracker.tracked_objects.values() - # if it has exceeded the stationary threshold - if obj["motionless_count"] - >= camera_config.detect.stationary.threshold - # and it hasn't disappeared - and object_tracker.disappeared[obj["id"]] == 0 - # and it doesn't overlap with any current motion boxes when not calibrating - and not intersects_any( - obj["box"], - [] if motion_detector.is_calibrating() else motion_boxes, - ) - ] - - # get tracked object boxes that aren't stationary - tracked_object_boxes = [ - ( - # use existing object box for stationary objects - obj["estimate"] - if obj["motionless_count"] - < camera_config.detect.stationary.threshold - else obj["box"] - ) - for obj in object_tracker.tracked_objects.values() - if obj["id"] not in stationary_object_ids - ] - object_boxes = tracked_object_boxes + object_tracker.untracked_object_boxes - - # get consolidated regions for tracked objects - regions = [ - get_cluster_region( - frame_shape, region_min_size, candidate, object_boxes - ) - for candidate in get_cluster_candidates( - frame_shape, region_min_size, object_boxes - ) - ] - - # only add in the motion boxes when not calibrating and a ptz is not moving via autotracking - # ptz_moving_at_frame_time() always returns False for non-autotracking cameras - if not motion_detector.is_calibrating() 
and not ptz_moving_at_frame_time( - frame_time, - ptz_metrics.start_time.value, - ptz_metrics.stop_time.value, - ): - # find motion boxes that are not inside tracked object regions - standalone_motion_boxes = [ - b for b in motion_boxes if not inside_any(b, regions) - ] - - if standalone_motion_boxes: - motion_clusters = get_cluster_candidates( - frame_shape, - region_min_size, - standalone_motion_boxes, - ) - motion_regions = [ - get_cluster_region_from_grid( - frame_shape, - region_min_size, - candidate, - standalone_motion_boxes, - region_grid, - ) - for candidate in motion_clusters - ] - regions += motion_regions - - # if starting up, get the next startup scan region - if startup_scan: - for region in get_startup_regions( - frame_shape, region_min_size, region_grid - ): - regions.append(region) - startup_scan = False - - # resize regions and detect - # seed with stationary objects - detections = [ - ( - obj["label"], - obj["score"], - obj["box"], - obj["area"], - obj["ratio"], - obj["region"], - ) - for obj in object_tracker.tracked_objects.values() - if obj["id"] in stationary_object_ids - ] - - for region in regions: - detections.extend( - detect( - camera_config.detect, - object_detector, - frame, - model_config, - region, - camera_config.objects.track, - camera_config.objects.filters, - ) - ) - - consolidated_detections = reduce_detections(frame_shape, detections) - - # if detection was run on this frame, consolidate - if len(regions) > 0: - tracked_detections = [ - d for d in consolidated_detections if d[0] not in all_attributes - ] - # now that we have refined our detections, we need to track objects - object_tracker.match_and_update( - frame_name, frame_time, tracked_detections - ) - # else, just update the frame times for the stationary objects - else: - object_tracker.update_frame_times(frame_name, frame_time) - - # group the attribute detections based on what label they apply to - attribute_detections: dict[str, list[TrackedObjectAttribute]] = {} - 
for label, attribute_labels in attributes_map.items(): - attribute_detections[label] = [ - TrackedObjectAttribute(d) - for d in consolidated_detections - if d[0] in attribute_labels - ] - - # build detections - detections = {} - for obj in object_tracker.tracked_objects.values(): - detections[obj["id"]] = {**obj, "attributes": []} - - # find the best object for each attribute to be assigned to - all_objects: list[dict[str, Any]] = object_tracker.tracked_objects.values() - for attributes in attribute_detections.values(): - for attribute in attributes: - filtered_objects = filter( - lambda o: attribute.label in attributes_map.get(o["label"], []), - all_objects, - ) - selected_object_id = attribute.find_best_object(filtered_objects) - - if selected_object_id is not None: - detections[selected_object_id]["attributes"].append( - attribute.get_tracking_data() - ) - - # debug object tracking - if False: - bgr_frame = cv2.cvtColor( - frame, - cv2.COLOR_YUV2BGR_I420, - ) - object_tracker.debug_draw(bgr_frame, frame_time) - cv2.imwrite( - f"debug/frames/track-{'{:.6f}'.format(frame_time)}.jpg", bgr_frame - ) - # debug - if False: - bgr_frame = cv2.cvtColor( - frame, - cv2.COLOR_YUV2BGR_I420, - ) - - for m_box in motion_boxes: - cv2.rectangle( - bgr_frame, - (m_box[0], m_box[1]), - (m_box[2], m_box[3]), - (0, 0, 255), - 2, - ) - - for b in tracked_object_boxes: - cv2.rectangle( - bgr_frame, - (b[0], b[1]), - (b[2], b[3]), - (255, 0, 0), - 2, - ) - - for obj in object_tracker.tracked_objects.values(): - if obj["frame_time"] == frame_time: - thickness = 2 - color = model_config.colormap.get(obj["label"], (255, 255, 255)) - else: - thickness = 1 - color = (255, 0, 0) - - # draw the bounding boxes on the frame - box = obj["box"] - - draw_box_with_label( - bgr_frame, - box[0], - box[1], - box[2], - box[3], - obj["label"], - obj["id"], - thickness=thickness, - color=color, - ) - - for region in regions: - cv2.rectangle( - bgr_frame, - (region[0], region[1]), - (region[2], 
region[3]), - (0, 255, 0), - 2, - ) - - cv2.imwrite( - f"debug/frames/{camera_config.name}-{'{:.6f}'.format(frame_time)}.jpg", - bgr_frame, - ) - # add to the queue if not full - if detected_objects_queue.full(): - frame_manager.close(frame_name) - continue - else: - fps_tracker.update() - camera_metrics.process_fps.value = fps_tracker.eps() - detected_objects_queue.put( - ( - camera_config.name, - frame_name, - frame_time, - detections, - motion_boxes, - regions, - ) - ) - camera_metrics.detection_fps.value = object_detector.fps.eps() - frame_manager.close(frame_name) - - motion_detector.stop() - requestor.stop() - config_subscriber.stop() diff --git a/web/package-lock.json b/web/package-lock.json index f910cb4ad..494498f30 100644 --- a/web/package-lock.json +++ b/web/package-lock.json @@ -52,6 +52,7 @@ "i18next-http-backend": "^3.0.1", "idb-keyval": "^6.2.1", "immer": "^10.1.1", + "js-yaml": "^4.1.1", "konva": "^10.2.3", "lodash": "^4.17.23", "lucide-react": "^0.577.0", @@ -90,6 +91,7 @@ "devDependencies": { "@tailwindcss/forms": "^0.5.9", "@testing-library/jest-dom": "^6.6.2", + "@types/js-yaml": "^4.0.9", "@types/lodash": "^4.17.12", "@types/node": "^20.14.10", "@types/react": "^19.2.14", @@ -5494,6 +5496,13 @@ "@types/unist": "*" } }, + "node_modules/@types/js-yaml": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/@types/js-yaml/-/js-yaml-4.0.9.tgz", + "integrity": "sha512-k4MGaQl5TGo/iipqb2UDG2UwjXziSWkh0uysQelTlJpX1qGlpUZYm8PnO4DxG1qBomtJUdYJ6qR6xdIah10JLg==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/json-schema": { "version": "7.0.15", "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", @@ -6132,7 +6141,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true, "license": "Python-2.0" }, "node_modules/aria-hidden": { @@ 
-9178,10 +9186,9 @@ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" }, "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dev": true, + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "license": "MIT", "dependencies": { "argparse": "^2.0.1" diff --git a/web/package.json b/web/package.json index ed522b827..960b556ff 100644 --- a/web/package.json +++ b/web/package.json @@ -61,6 +61,7 @@ "i18next-http-backend": "^3.0.1", "idb-keyval": "^6.2.1", "immer": "^10.1.1", + "js-yaml": "^4.1.1", "konva": "^10.2.3", "lodash": "^4.17.23", "lucide-react": "^0.577.0", @@ -99,6 +100,7 @@ "devDependencies": { "@tailwindcss/forms": "^0.5.9", "@testing-library/jest-dom": "^6.6.2", + "@types/js-yaml": "^4.0.9", "@types/lodash": "^4.17.12", "@types/node": "^20.14.10", "@types/react": "^19.2.14", diff --git a/web/public/locales/ar/config/cameras.json b/web/public/locales/ar/config/cameras.json index 0967ef424..a5ec98238 100644 --- a/web/public/locales/ar/config/cameras.json +++ b/web/public/locales/ar/config/cameras.json @@ -1 +1,3 @@ -{} +{ + "label": "اعدادات الكاميرا" +} diff --git a/web/public/locales/ar/config/groups.json b/web/public/locales/ar/config/groups.json index 0967ef424..2254e0308 100644 --- a/web/public/locales/ar/config/groups.json +++ b/web/public/locales/ar/config/groups.json @@ -1 +1,7 @@ -{} +{ + "audio": { + "global": { + "detection": "التحري العام" + } + } +} diff --git a/web/public/locales/ar/views/system.json b/web/public/locales/ar/views/system.json index e68d544e4..261b7e929 100644 --- a/web/public/locales/ar/views/system.json +++ 
b/web/public/locales/ar/views/system.json @@ -7,7 +7,8 @@ "logs": { "frigate": "سجلات Frigate - Frigate", "go2rtc": "Go2RTC سجلات - Frigate", - "nginx": "سجلات إنجنإكس - Frigate" + "nginx": "سجلات إنجنإكس - Frigate", + "websocket": "سجلات الرسائل" } }, "metrics": "مقاييس النظام", @@ -22,9 +23,33 @@ }, "type": { "label": "النوع", - "timestamp": "الختم الزمني" + "timestamp": "الختم الزمني", + "message": "رسالة" }, - "tips": "يتم بث السجلات من الخادم" + "tips": "يتم الآن جلب السجلات من الخادم", + "websocket": { + "label": "الرسائل", + "pause": "إيقاف مؤقت", + "resume": "استئناف", + "filter": { + "all": "كافة المواضيع", + "topics": "المسارات", + "events": "الأحداث", + "reviews": "المراجعات", + "classification": "التصنيف", + "face_recognition": "التعرف على الوجه", + "camera_activity": "نشاط الكاميرا", + "system": "النظام", + "camera": "الكاميرا", + "all_cameras": "كافة الكاميرات" + } + }, + "toast": { + "error": { + "fetchingLogsFailed": "خطأ أثناء جلب السجلات: {{errorMessage}}", + "whileStreamingLogs": "خطأ أثناء تدفق السجلات: {{errorMessage}}" + } + } }, "title": "النظام", "general": { @@ -34,19 +59,38 @@ "gpuInfo": { "vainfoOutput": { "title": "مخرجات Vainfo", - "processOutput": "ناتج العملية:", - "processError": "خطأ في العملية:" + "processOutput": "مخرجات العملية :", + "processError": "خطأ في العملية:", + "returnCode": "كود الاستجابة: {{code}}" }, "nvidiaSMIOutput": { "title": "مخرجات Nvidia SMI", "name": "الاسم: {{name}}", "driver": "برنامج التشغيل: {{driver}}", - "cudaComputerCapability": "قدرة الحوسبة CUDA: {{cuda_compute}}" + "cudaComputerCapability": "قدرة الحوسبة CUDA: {{cuda_compute}}", + "vbios": "" + }, + "closeInfo": { + "label": "إغلاق معلومات المعالج الرسومي" + }, + "copyInfo": { + "label": "نسخ معلومات المعالج الرسومي" + }, + "toast": { + "success": "تم نسخ معلومات المعالج الرسومي إلى الحافظة" } }, "title": "معلومات الاجهزة المادية", "gpuUsage": "مقدار استخدام GPU", - "gpuMemory": "ذاكرة GPU" + "gpuMemory": "ذاكرة GPU", + "gpuTemperature": "درجة حرارة 
الـ GPU", + "npuUsage": "معلومات وحدة معالجة الشبكة", + "npuMemory": "استخدام وحدة المعالجة العصبية", + "npuTemperature": "درجة حرارة الـ NPU", + "intelGpuWarning": { + "title": "تحذير إحصائيات معالج Intel الرسومي", + "description": "هذا خطأ برمي معروف في أدوات تقارير إحصائيات معالجات Intel الرسومية (intel_gpu_top)، حيث تتوقف الأداة عن العمل وتُظهر استهلاك المعالج الرسومي (GPU) بنسبة 0% بشكل متكرر، حتى في الحالات التي يعمل فيها تسريع العتاد وكشف الكائنات بشكل صحيح على المعالج الرسومي المدمج (iGPU). هذا ليس خطأً في برنامج فرايجيت (Frigate). يمكنك إعادة تشغيل الجهاز المضيف لحل المشكلة مؤقتاً والتأكد من أن المعالج الرسومي يعمل بشكل صحيح. علماً بأن هذا الخلل لا يؤثر على الأداء." + } }, "title": "لمحة عامة", "detector": { @@ -54,7 +98,8 @@ "inferenceSpeed": "سرعة استنتاج الكاشف", "temperature": "درجة حرارة الكاشف", "cpuUsage": "كشف استخدام CPU", - "memoryUsage": "كشف استخدام الذاكرة" + "memoryUsage": "كشف استخدام الذاكرة", + "cpuUsageInformation": "المعالج المستخدم في تجهيز بيانات الإدخال والإخراج من وإلى نماذج الكشف. هذه القيمة لا تقيس استهلاك الاستنتاج (Inference)، حتى عند استخدام معالج رسومي (GPU) أو مسرع." }, "otherProcesses": { "title": "عمليات أخرى", @@ -69,12 +114,36 @@ "title": "التسجيلات", "tips": "تمثل هذه القيمة إجمالي مساحة التخزين المستخدمة للتسجيلات في قاعدة بيانات Frigate. لا يتتبع Frigate استخدام مساحة التخزين لجميع الملفات الموجودة على القرص.", "earliestRecording": "أقدم تسجيل متاح:" + }, + "shm": { + "warning": "حجم ذاكرة SHM الحالي البالغ {{total}} ميجابايت صغير جداً. يرجى زيادته إلى {{min_shm}} ميجابايت على الأقل.", + "frameLifetime": { + "description": "تمتلك كل كاميرا {{frames}} خانة (slots) للإطارات في الذاكرة المشتركة. عند أعلى معدل إطارات للكاميرا، يكون كل إطار متاحاً لمدة {{lifetime}} ثانية تقريباً قبل أن يتم الكتابة فوقه." + } + }, + "cameraStorage": { + "unused": { + "tips": "قد لا تمثل هذه القيمة بدقة المساحة الخالية المتاحة لبرنامج فرايجيت (Frigate) إذا كان لديك ملفات أخرى مخزنة على القرص بخلاف تسجيلات البرنامج نفسه. 
لا يقوم فرايجيت بتتبع استهلاك التخزين خارج نطاق تسجيلاته الخاصة." + } } }, "cameras": { "overview": "نظرة عامة", "info": { "unknown": "غير معروف" + }, + "connectionQuality": { + "fair": "متوسط", + "poor": "ضعيف", + "unusable": "غير قابل للاستخدام", + "fps": "معدل الإطارات", + "expectedFps": "معدل الإطارات المتوقع", + "reconnectsLastHour": "إعادات الاتصال (خلال الساعة الماضية)", + "stallsLastHour": "توقفات البث (خلال الساعة الماضية)" } + }, + "stats": { + "detectIsSlow": "عملية الكشف {{detect}} بطيئة ({{speed}} مللي ثانية)", + "detectIsVerySlow": "عملية الكشف {{detect}} بطيئة جداً ({{speed}} مللي ثانية)" } } diff --git a/web/public/locales/ca/common.json b/web/public/locales/ca/common.json index b583d02bb..d1593e948 100644 --- a/web/public/locales/ca/common.json +++ b/web/public/locales/ca/common.json @@ -108,7 +108,8 @@ }, "classification": "Classificació", "chat": "Xat", - "actions": "Accions" + "actions": "Accions", + "profiles": "Perfils" }, "pagination": { "previous": { @@ -281,7 +282,8 @@ "saveAll": "Desa-ho tot", "savingAll": "S'està desant tot…", "undoAll": "Desfés-ho tot", - "applying": "S'està aplicant…" + "applying": "S'està aplicant…", + "retry": "Torna a intentar" }, "toast": { "copyUrlToClipboard": "URL copiada al porta-retalls.", @@ -290,7 +292,8 @@ "error": { "title": "No s'han pogut guardar els canvis de configuració: {{errorMessage}}", "noMessage": "No s'han pogut guardar els canvis de configuració" - } + }, + "success": "S'han desat correctament els canvis de configuració." 
} }, "accessDenied": { @@ -316,5 +319,7 @@ "field": { "optional": "Opcional", "internalID": "L'ID intern que Frigate s'utilitza a la configuració i a la base de dades" - } + }, + "no_items": "Sense elements", + "validation_errors": "Errors de validació" } diff --git a/web/public/locales/ca/components/camera.json b/web/public/locales/ca/components/camera.json index bfa8ea161..e2309db0a 100644 --- a/web/public/locales/ca/components/camera.json +++ b/web/public/locales/ca/components/camera.json @@ -82,6 +82,7 @@ "zones": "Zones", "mask": "Màscara", "motion": "Moviment", - "regions": "Regions" + "regions": "Regions", + "paths": "Rutes" } } diff --git a/web/public/locales/ca/config/cameras.json b/web/public/locales/ca/config/cameras.json index d88f3c81a..090de49fb 100644 --- a/web/public/locales/ca/config/cameras.json +++ b/web/public/locales/ca/config/cameras.json @@ -79,8 +79,8 @@ "label": "Detecció d'objectes", "description": "Configuració del rol de detecció utilitzat per executar la detecció d'objectes i inicialitzar els rastrejadors.", "enabled": { - "label": "Detecció activada", - "description": "Activa o desactiva la detecció d'objectes per a aquesta càmera. La detecció s'ha d'activar perquè s'executi el seguiment d'objectes." + "label": "Habilita la detecció d'objectes", + "description": "Activa o desactiva la detecció d'objectes per a aquesta càmera." }, "height": { "label": "Detecta l'alçada", @@ -149,7 +149,7 @@ }, "ffmpeg": { "label": "FFmpeg", - "description": "Paràmetres del FFmpeg que inclouen camins binaris, args, opcions de hwaccel i args de sortida per rol.", + "description": "Paràmetres del FFmpeg que inclouen la ruta dels binaris, args, opcions de hwaccel i args de sortida per rol.", "path": { "label": "Ruta FFmpeg", "description": "Ruta al binari FFmpeg a usar o un àlies de versió («5.0» o «7.0»)." 
@@ -192,7 +192,7 @@ }, "inputs": { "label": "Entrada de la càmera", - "description": "Llista de definicions de flux d'entrada (camins i rols) per a aquesta càmera.", + "description": "Llista de definicions de flux d'entrada (rutes i rols) per a aquesta càmera.", "path": { "label": "Ruta d'entrada", "description": "URL o camí del flux d'entrada de la càmera." @@ -303,7 +303,7 @@ }, "skip_motion_threshold": { "label": "Omet el llindar de moviment", - "description": "Si més d'aquesta fracció de la imatge canvia en un sol fotograma, el detector no retornarà cap caixa de moviment i recalibrarà immediatament. Això pot estalviar CPU i reduir falsos positius durant el llamp, tempestes, etc., però pot perdre esdeveniments reals com una càmera PTZ que fa un seguiment automàtic d'un objecte. La compensació es troba entre deixar caure uns quants megabytes d'enregistraments versus revisar un parell de clips curts. Interval de 0,0 a 1,0." + "description": "Si s'estableix a un valor entre 0.0 i 1.0, i més d'aquesta fracció de la imatge canvia en un sol fotograma, el detector no retornarà cap caixa de moviment i recalibrarà immediatament. Això pot estalviar CPU i reduir falsos positius durant el llamp, tempestes, etc., però pot perdre esdeveniments reals com una càmera PTZ que fa un seguiment automàtic d'un objecte. La compensació es troba entre deixar caure uns quants megabytes d'enregistraments versus revisar un parell de clips curts. Deixa sense establir (Cap) per desactivar aquesta característica." } }, "objects": { @@ -529,7 +529,7 @@ }, "detections": { "label": "Configuració de les deteccions", - "description": "Paràmetres per a crear esdeveniments de detecció (no-alerta) i quant de temps conservar-los.", + "description": "Paràmetres per als quals els objectes rastrejats generen deteccions (sense-alerta) i com es mantenen les deteccions.", "enabled": { "label": "Habilita les deteccions", "description": "Activa o desactiva els esdeveniments de detecció d'aquesta càmera." 
@@ -626,9 +626,9 @@ }, "snapshots": { "label": "Instantànies", - "description": "Configuració per a les instantànies JPEG desades dels objectes seguits per a aquesta càmera.", + "description": "Configuració per a les instantànies API-generades dels objectes seguits per a aquesta càmera.", "enabled": { - "label": "Instantànies habilitades", + "label": "Habilita les instantànies", "description": "Activa o desactiva el desament de les instantànies d'aquesta càmera." }, "clean_copy": { @@ -637,15 +637,15 @@ }, "timestamp": { "label": "Superposició de marca horària", - "description": "Superposa una marca horària a les instantànies desades." + "description": "Superposa una marca horària a les instantànies de l'API." }, "bounding_box": { "label": "Superposició de la caixa contenidora", - "description": "Dibuixa caixes contenidores per als objectes seguits en les instantànies desades." + "description": "Dibuixa caixes contenidores per als objectes seguits en les instantànies de l'API." }, "crop": { "label": "Retalla la instantània", - "description": "Retalla les instantànies desades a la caixa contenidora de l'objecte detectat." + "description": "Retalla les instantànies de l'API a la caixa contenidora de l'objecte detectat." }, "required_zones": { "label": "Zones requerides", @@ -653,11 +653,11 @@ }, "height": { "label": "Alçada de la instantània", - "description": "Alçada (píxels) per a canviar la mida de les instantànies desades; deixeu-ho buit per a preservar la mida original." + "description": "Alçada (píxels) per a canviar la mida de les instantànies de l'API; deixeu-ho buit per a preservar la mida original." 
}, "retain": { "label": "Retenció de la instantània", - "description": "Paràmetres de retenció per a les instantànies desades, inclosos els dies predeterminats i les anul·lacions per objecte.", + "description": "Paràmetres de retenció per a les instantànies, inclosos els dies predeterminats i les anul·lacions per objecte.", "default": { "label": "Retenció predeterminada", "description": "Nombre predeterminat de dies per a retenir les instantànies." @@ -672,8 +672,8 @@ } }, "quality": { - "label": "Qualitat JPEG", - "description": "Qualitat del codi JPEG per a les instantànies desades (0-100)." + "label": "Qualitat captura", + "description": "Qualitat per a les instantànies desades (0-100)." } }, "timestamp_style": { @@ -838,6 +838,10 @@ "ignore_time_mismatch": { "label": "Ignora el desajust de temps", "description": "Ignora les diferències de sincronització de temps entre càmera i servidor Frigate per a la comunicació ONVIF." + }, + "profile": { + "label": "Perfil ONVIF", + "description": "Perfil multimèdia ONVIF específic a utilitzar per al control PTZ, que coincideix amb el token o el nom. Si no s'estableix, el primer perfil amb configuració PTZ vàlida se selecciona automàticament." } }, "type": { @@ -865,7 +869,7 @@ "description": "Les zones permeten definir una àrea específica del marc perquè pugueu determinar si un objecte es troba dins d'una àrea determinada.", "friendly_name": { "label": "Nom de la zona", - "description": "Un nom fàcil d'utilitzar per a la zona, que es mostra a la interfície d'usuari de la fragata. Si no s'estableix, s'utilitzarà una versió amb format del nom de la zona." + "description": "Un nom fàcil d'utilitzar per a la zona, que es mostra a la interfície d'usuari de Friagte. Si no s'estableix, s'utilitzarà una versió amb format del nom de la zona." }, "enabled": { "label": "Habilitat", @@ -937,5 +941,9 @@ "enabled_in_config": { "label": "Estat original de la càmera", "description": "Feu un seguiment de l'estat original de la càmera." 
+ }, + "profiles": { + "label": "Perfils", + "description": "Perfils de configuració amb nom amb anul·lacions parcials que es poden activar en temps d'execució." } } diff --git a/web/public/locales/ca/config/global.json b/web/public/locales/ca/config/global.json index 6b0210364..d81735a61 100644 --- a/web/public/locales/ca/config/global.json +++ b/web/public/locales/ca/config/global.json @@ -4,7 +4,7 @@ "description": "Activa l'etiquetatge HEVC per a una millor compatibilitat amb el reproductor d'Apple en gravar H.265.", "label": "Compatibilitat d'Apple" }, - "description": "Paràmetres del FFmpeg que inclouen camins binaris, args, opcions de hwaccel i args de sortida per rol.", + "description": "Paràmetres del FFmpeg que inclouen la ruta dels binaris, args, opcions de hwaccel i args de sortida per rol.", "path": { "label": "Ruta FFmpeg", "description": "Ruta al binari FFmpeg a usar o un àlies de versió («5.0» o «7.0»)." @@ -43,7 +43,7 @@ }, "inputs": { "label": "Entrada de la càmera", - "description": "Llista de definicions de flux d'entrada (camins i rols) per a aquesta càmera.", + "description": "Llista de definicions de flux d'entrada (rutes i rols) per a aquesta càmera.", "path": { "label": "Ruta d'entrada", "description": "URL o camí del flux d'entrada de la càmera." @@ -73,7 +73,7 @@ "label": "Alçada del directe" }, "label": "Reproducció en directe", - "description": "Configuració utilitzada per la interfície d'usuari web per controlar la resolució i la qualitat del flux en viu.", + "description": "Configuració per a controlar la resolució i la qualitat del flux en viu del jsmpeg. Això no afecta les càmeres restreamed que utilitzen go2rtc per a la vista en directe.", "streams": { "label": "Noms de flux en viu", "description": "Assignació de noms de flux configurats per a restream/go2rtc noms utilitzats per a la reproducció en viu." 
@@ -159,7 +159,7 @@ }, "skip_motion_threshold": { "label": "Omet el llindar de moviment", - "description": "Si més d'aquesta fracció de la imatge canvia en un sol fotograma, el detector no retornarà cap caixa de moviment i recalibrarà immediatament. Això pot estalviar CPU i reduir falsos positius durant el llamp, tempestes, etc., però pot perdre esdeveniments reals com una càmera PTZ que fa un seguiment automàtic d'un objecte. La compensació es troba entre deixar caure uns quants megabytes d'enregistraments versus revisar un parell de clips curts. Interval de 0,0 a 1,0." + "description": "Si s'estableix a un valor entre 0.0 i 1.0, i més d'aquesta fracció de la imatge canvia en un sol fotograma, el detector no retornarà cap caixa de moviment i recalibrarà immediatament. Això pot estalviar CPU i reduir falsos positius durant el llamp, tempestes, etc., però pot perdre esdeveniments reals com una càmera PTZ que fa un seguiment automàtic d'un objecte. La compensació es troba entre deixar caure uns quants megabytes d'enregistraments versus revisar un parell de clips curts. Deixa sense establir (Cap) per desactivar aquesta característica." } }, "objects": { @@ -363,7 +363,7 @@ "label": "Zones requerides" }, "label": "Configuració de les deteccions", - "description": "Paràmetres per a crear esdeveniments de detecció (no-alerta) i quant de temps conservar-los.", + "description": "Paràmetres per als quals els objectes rastrejats generen deteccions (sense-alerta) i com es mantenen les deteccions.", "enabled": { "label": "Habilita les deteccions", "description": "Habilita o inhabilita els esdeveniments de detecció per a totes les càmeres; es poden sobreescriure per càmera." @@ -488,8 +488,8 @@ "description": "Activa un reíndex complet d'objectes rastrejats històrics a la base de dades d'incrustacions." }, "model": { - "label": "Model de cerca semàntic", - "description": "El model d'incrustació a utilitzar per a la cerca semàntica (per exemple 'jinav1')." 
+ "label": "Model de cerca semàntica o nom del proveïdor GenAI", + "description": "El model d'incrustació a utilitzar per a la cerca semàntica (per exemple 'jinav1'), o el nom d'un proveïdor de GenAI amb el rol d'incrustació." }, "model_size": { "label": "Mida del model", @@ -502,9 +502,9 @@ }, "snapshots": { "label": "Instantànies", - "description": "Arranjament per a les instantànies JPEG desades dels objectes rastrejats per a totes les càmeres; es pot sobreescriure per càmera.", + "description": "Arranjament per a les instantànies de l'API dels objectes rastrejats per a totes les càmeres; es pot sobreescriure per càmera.", "enabled": { - "label": "Instantànies habilitades", + "label": "Habilita les instantànies", "description": "Habilita o inhabilita les instantànies de desament per a totes les càmeres; es pot sobreescriure per càmera." }, "clean_copy": { @@ -513,15 +513,15 @@ }, "timestamp": { "label": "Superposició de marca horària", - "description": "Superposa una marca horària a les instantànies desades." + "description": "Superposa una marca horària a les instantànies de l'API." }, "bounding_box": { "label": "Superposició de la caixa contenidora", - "description": "Dibuixa caixes contenidores per als objectes seguits en les instantànies desades." + "description": "Dibuixa caixes contenidores per als objectes seguits en les instantànies de l'API." }, "crop": { "label": "Retalla la instantània", - "description": "Retalla les instantànies desades a la caixa contenidora de l'objecte detectat." + "description": "Retalla les instantànies de l'API a la caixa contenidora de l'objecte detectat." }, "required_zones": { "label": "Zones requerides", @@ -529,11 +529,11 @@ }, "height": { "label": "Alçada de la instantània", - "description": "Alçada (píxels) per a canviar la mida de les instantànies desades; deixeu-ho buit per a preservar la mida original." 
+ "description": "Alçada (píxels) per a canviar la mida de les instantànies de l'API; deixeu-ho buit per a preservar la mida original." }, "retain": { "label": "Retenció de la instantània", - "description": "Paràmetres de retenció per a les instantànies desades, inclosos els dies predeterminats i les anul·lacions per objecte.", + "description": "Paràmetres de retenció per a les instantànies, inclosos els dies predeterminats i les anul·lacions per objecte.", "default": { "label": "Retenció predeterminada", "description": "Nombre predeterminat de dies per a retenir les instantànies." @@ -548,8 +548,8 @@ } }, "quality": { - "label": "Qualitat JPEG", - "description": "Qualitat del codi JPEG per a les instantànies desades (0-100)." + "label": "Qualitat captura", + "description": "Qualitat per a les instantànies desades (0-100)." } }, "timestamp_style": { @@ -658,6 +658,10 @@ "ignore_time_mismatch": { "label": "Ignora el desajust de temps", "description": "Ignora les diferències de sincronització de temps entre càmera i servidor Frigate per a la comunicació ONVIF." + }, + "profile": { + "label": "Perfil ONVIF", + "description": "Perfil multimèdia ONVIF específic a utilitzar per al control PTZ, que coincideix amb el token o el nom. Si no s'estableix, el primer perfil amb configuració PTZ vàlida se selecciona automàticament." } }, "audio_transcription": { @@ -992,7 +996,7 @@ "label": "Detector de hardware", "description": "Configuració per a detectors d'objectes (CPU, GPU, dorsals ONNX) i qualsevol configuració de model específica per a detectors.", "type": { - "label": "Tipus de detector", + "label": "Tipus", "description": "Tipus de detector a utilitzar per a la detecció d'objectes (per exemple 'cpu', 'edgetpu', 'openvino')." }, "cpu": { @@ -1777,13 +1781,116 @@ "description": "Període de permanència del socket en mil·lisegons." 
}, "label": "ZMQ IPC" + }, + "axengine": { + "label": "AXEngine NPU", + "description": "Detector AXERA AX650N/AX8850N NPU executant fitxers .axmodel compilats a través del temps d'execució d'AXEngine.", + "type": { + "label": "Tipus" + }, + "model": { + "label": "Configuració del model específic del detector", + "description": "Opcions de configuració del model específic del detector (camí, mida d'entrada, etc.).", + "path": { + "label": "Camí personalitzat del model de detecció d'objectes", + "description": "Camí a un fitxer de model de detecció personalitzat (o plus:// per a models Frigate+)." + }, + "labelmap_path": { + "label": "Mapa d'etiquetes per al detector d'objectes personalitzat", + "description": "Camí a un fitxer de mapa d'etiquetes que assigna classes numèriques a etiquetes de cadena per al detector." + }, + "width": { + "label": "Amplada d'entrada del model de detecció d'objectes", + "description": "Amplada del tensor d'entrada del model en píxels." + }, + "height": { + "label": "Alçada d'entrada del model de detecció d'objectes", + "description": "Alçada del tensor d'entrada del model en píxels." + }, + "labelmap": { + "label": "Personalització del mapa d'etiquetes", + "description": "Sobreescriu o reassigna les entrades per a fusionar-se en el mapa d'etiquetes estàndard." + }, + "attributes_map": { + "label": "Mapa d'etiquetes d'objectes a les seves etiquetes d'atribut", + "description": "Assignació des d'etiquetes d'objectes a etiquetes d'atribut utilitzades per adjuntar metadades (per exemple 'cotxe' -). ['matrícula'])." + }, + "input_tensor": { + "label": "Forma del sensor d'entrada del model", + "description": "Format del sensor esperat pel model: 'nhwc' o 'nchw'." + }, + "input_pixel_format": { + "label": "Format de color del píxel d'entrada del model", + "description": "Espai de color del píxel esperat pel model: 'rgb', 'bgr' o 'yuv'." 
+ }, + "input_dtype": { + "label": "Tipus D d'entrada del model", + "description": "Tipus de dades del tensor d'entrada del model (per exemple 'float32')." + }, + "model_type": { + "label": "Tipus de model de detecció d'objectes", + "description": "Tipus d'arquitectura del model de detector (ssd, yolox, yolonas) utilitzat per alguns detectors per a l'optimització." + } + }, + "model_path": { + "label": "Camí del model específic del detector", + "description": "Camí de fitxer al binari del model de detector si el detector escollit ho requereix." + } + }, + "model": { + "label": "Configuració de model de detector específic", + "description": "Opcions de configuració de model de detector específic (ruta, tamany entrada, etc.).", + "path": { + "label": "Ruta del model de detector d'objectes personalitzat", + "description": "Ruta a l'arxiu del model de detecció personalitzat ( o plus:// per a models Frigate+)." + }, + "labelmap_path": { + "label": "Etiqueta per a detector d'objectes personalitzat", + "description": "Ruta a l'arxiu d'etiqueta que mapeja les classes numériques a etiquetes per al detector." + }, + "width": { + "label": "Amplada d'entrada del model de detecció d'objecte", + "description": "Amplada de l'entrada del model en píxels." + }, + "height": { + "label": "Entrada de l'altura del model de detecció d'objecte", + "description": "Altura de l'entrada del model en píxels." + }, + "labelmap": { + "label": "Personlització d'etiquetes", + "description": "Sobreescriu o remapeja entrades per fusionar a l'estandar d'etiquetes." + }, + "attributes_map": { + "label": "Mapeja d'etiquetes d'objecte a la seva etiqueta", + "description": "Mapeja des de les etiquetes d'objectes als seus atributs usats per anexar metadades (per exemple 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model d'entrada de forma de tensor", + "description": "El format del tensor experat per el model: 'nhwc' o 'nchw'." 
+ }, + "input_pixel_format": { + "label": "Entrada del format de píxel del model", + "description": "Espai-color del píxel experat per el model: 'rgb', 'bgr', o 'yuv'." + }, + "input_dtype": { + "label": "Tipus D entrada del model", + "description": "tipus de dada per al model de tensor (per exemple 'float32')." + }, + "model_type": { + "label": "Tipus de Model de detecció d'objecte", + "description": "Tipus d'arquitectura del model de detector (ssd, yolox, yolonas) usat per l'optimització d'alguns detectors." + } + }, + "model_path": { + "label": "Ruta a model de detector específic", + "description": "Ruta a l'arxiu al model binari de detector si es requerit per al detector seleccionat." } }, "model": { "label": "Model de detecció", "description": "Configuració per a configurar un model de detecció d'objectes personalitzat i la seva forma d'entrada.", "path": { - "label": "Ruta personalitzat del model de detecció d'objectes", + "label": "Ruta del model de detector d'objectes personalitzat", "description": "Ruta a un fitxer de model de detecció personalitzat (o plus:// per a models Frigate+)." }, "labelmap_path": { @@ -1824,7 +1931,7 @@ } }, "genai": { - "label": "Configuració de la IA generadora (proveïdors amb nom).", + "label": "Configuració de la IA generada", "description": "Paràmetres per als proveïdors integrats generatius d'IA utilitzats per generar descripcions d'objectes i resums de revisions.", "api_key": { "label": "Clau API", @@ -1943,8 +2050,8 @@ "label": "Detecció d'objectes", "description": "Configuració del rol de detecció utilitzat per executar la detecció d'objectes i inicialitzar els rastrejadors.", "enabled": { - "label": "Detecció activada", - "description": "Activa o desactiva la detecció d'objectes per a totes les càmeres; es pot sobreescriure per càmera. La detecció s'ha d'activar perquè s'executi el seguiment d'objectes." 
+ "label": "Habilita la detecció d'objectes", + "description": "Activa o desactiva la detecció d'objectes per a totes les càmeres; es pot sobreescriure per càmera." }, "height": { "label": "Detecta l'alçada", @@ -2188,5 +2295,17 @@ "label": "Mostra a la interfície", "description": "Estableix si aquesta càmera és visible a tot arreu a la interfície d'usuari de Frigate. Desactivar això requerirà editar manualment la configuració per tornar a veure aquesta càmera a la interfície d'usuari." } + }, + "profiles": { + "label": "Perfils", + "description": "Definicions de perfil amb nom amigable. Els perfils de la càmera han de fer referència als noms definits aquí.", + "friendly_name": { + "label": "Nom amistós", + "description": "Mostra el nom d'aquest perfil que es mostra a la interfície d'usuari." + } + }, + "active_profile": { + "label": "Perfil actiu", + "description": "Nom de perfil actualment actiu. Només en temps d'execució, no ha persistit en YAML." } } diff --git a/web/public/locales/ca/objects.json b/web/public/locales/ca/objects.json index 06cac2028..456f522ab 100644 --- a/web/public/locales/ca/objects.json +++ b/web/public/locales/ca/objects.json @@ -116,5 +116,10 @@ "nzpost": "NZPost", "postnord": "PostNord", "dpd": "DPD", - "gls": "GLS" + "gls": "GLS", + "canada_post": "Canada Post", + "royal_mail": "Royal Mail", + "school_bus": "Bus escolar", + "skunk": "Mofeta", + "kangaroo": "Cangur" } diff --git a/web/public/locales/ca/views/classificationModel.json b/web/public/locales/ca/views/classificationModel.json index 8c1a24791..e683939e3 100644 --- a/web/public/locales/ca/views/classificationModel.json +++ b/web/public/locales/ca/views/classificationModel.json @@ -12,8 +12,12 @@ }, "toast": { "success": { - "deletedCategory": "Classe suprimida", - "deletedImage": "Imatges suprimides", + "deletedCategory_one": "S'ha suprimit la classe {{count}}", + "deletedCategory_many": "S'han suprimit {{count}} classes", + "deletedCategory_other": "S'han suprimit {{count}} 
classes", + "deletedImage_one": "Imatge eliminada {{count}}", + "deletedImage_many": "S'han suprimit {{count}} imatges", + "deletedImage_other": "S'han suprimit {{count}} imatges", "categorizedImage": "Imatge classificada amb èxit", "trainedModel": "Model entrenat amb èxit.", "trainingModel": "S'ha iniciat amb èxit la formació de models.", @@ -21,7 +25,8 @@ "deletedModel_many": "S'han suprimit correctament els {{count}} models", "deletedModel_other": "S'han suprimit correctament els {{count}} models", "updatedModel": "S'ha actualitzat correctament la configuració del model", - "renamedCategory": "S'ha canviat el nom de la classe a {{name}}" + "renamedCategory": "S'ha canviat el nom de la classe a {{name}}", + "reclassifiedImage": "Imatge reclassificada amb èxit" }, "error": { "deleteImageFailed": "No s'ha pogut suprimir: {{errorMessage}}", @@ -31,7 +36,8 @@ "deleteModelFailed": "No s'ha pogut suprimir el model: {{errorMessage}}", "updateModelFailed": "No s'ha pogut actualitzar el model: {{errorMessage}}", "renameCategoryFailed": "No s'ha pogut canviar el nom de la classe: {{errorMessage}}", - "trainingFailedToStart": "Errar en arrencar l'entrenament del model: {{errorMessage}}" + "trainingFailedToStart": "Errar en arrencar l'entrenament del model: {{errorMessage}}", + "reclassifyFailed": "No s'ha pogut reclassificar la imatge: {{errorMessage}}" } }, "deleteCategory": { @@ -156,8 +162,13 @@ "allImagesRequired_other": "Classifiqueu totes les imatges. Queden {{count}} imatges.", "modelCreated": "El model s'ha creat correctament. Utilitzeu la vista Classificacions recents per a afegir imatges per als estats que falten i, a continuació, entrenar el model.", "missingStatesWarning": { - "title": "Falten exemples d'estat", - "description": "Es recomana seleccionar exemples per a tots els estats per obtenir els millors resultats. Podeu continuar sense seleccionar tots els estats, però el model no serà entrenat fins que tots els estats tinguin imatges. 
Després de continuar, utilitzeu la vista Classificacions recents per classificar imatges per als estats que falten, i després entrenar el model." + "title": "Falten exemples de classe", + "description": "No totes les classes tenen exemples. Proveu de generar nous exemples per a trobar la classe que falta, o continueu i utilitzeu la vista Classificacions recents per a afegir imatges més tard." + }, + "refreshExamples": "Genera nous exemples", + "refreshConfirm": { + "title": "Voleu generar exemples nous?", + "description": "Això generarà un nou conjunt d'imatges i netejarà totes les seleccions, incloses les classes anteriors. Haureu de tornar a seleccionar exemples per a totes les classes." } } }, @@ -189,5 +200,7 @@ "modelNotReady": "El model no está preparat per entrenar", "noChanges": "No hi ha canvis al conjunt de dades des de l'última formació." }, - "none": "Cap" + "none": "Cap", + "reclassifyImageAs": "Reclassifica la imatge com a:", + "reclassifyImage": "Reclassifica la imatge" } diff --git a/web/public/locales/ca/views/events.json b/web/public/locales/ca/views/events.json index 73a9d2b17..afacccbf9 100644 --- a/web/public/locales/ca/views/events.json +++ b/web/public/locales/ca/views/events.json @@ -16,7 +16,9 @@ "description": "Només es poden revisar temes quan s'han activat les gravacions de la càmera." 
} }, - "timeline": "Línia de temps", + "timeline": { + "label": "Línia de temps" + }, "timeline.aria": "Seleccionar línia de temps", "events": { "label": "Esdeveniments", diff --git a/web/public/locales/ca/views/explore.json b/web/public/locales/ca/views/explore.json index ea7258976..a923baa95 100644 --- a/web/public/locales/ca/views/explore.json +++ b/web/public/locales/ca/views/explore.json @@ -172,7 +172,8 @@ "attributes": "Atributs de classificació", "title": { "label": "Títol" - } + }, + "scoreInfo": "Informació de la partitura" }, "searchResult": { "tooltip": "S'ha identificat {{type}} amb una confiança del {{confidence}}%", @@ -238,6 +239,9 @@ "debugReplay": { "label": "Depura la repetició", "aria": "Mostra aquest objecte rastrejat a la vista de reproducció de depuració" + }, + "more": { + "aria": "Més" } }, "noTrackedObjects": "No s'han trobat objectes rastrejats", @@ -245,6 +249,9 @@ "confirmDelete": { "title": "Confirmar la supressió", "desc": "Eliminant aquest objecte seguit borrarà l'snapshot, qualsevol embedding gravat, i qualsevol detall de seguiment. Les imatges gravades d'aquest objecte seguit en l'historial NO seràn eliminades.

Estas segur que vols continuar?" + }, + "toast": { + "error": "S'ha produït un error en suprimir aquest objecte rastrejat: {{errorMessage}}" } }, "fetchingTrackedObjectsFailed": "Error al obtenir objectes rastrejats: {{errorMessage}}", diff --git a/web/public/locales/ca/views/exports.json b/web/public/locales/ca/views/exports.json index 1778cffc4..ccb5366b5 100644 --- a/web/public/locales/ca/views/exports.json +++ b/web/public/locales/ca/views/exports.json @@ -2,7 +2,9 @@ "documentTitle": "Exportar - Frigate", "search": "Buscar", "noExports": "No s'han trobat exportacions", - "deleteExport": "Suprimeix l'exportació", + "deleteExport": { + "label": "Suprimeix l'exportació" + }, "deleteExport.desc": "Estàs segur que vols eliminar {{exportName}}?", "editExport": { "title": "Renombrar exportació", diff --git a/web/public/locales/ca/views/faceLibrary.json b/web/public/locales/ca/views/faceLibrary.json index 069049255..1cc77f1a6 100644 --- a/web/public/locales/ca/views/faceLibrary.json +++ b/web/public/locales/ca/views/faceLibrary.json @@ -78,7 +78,8 @@ "deletedFace_one": "{{count}} rostre suprimit amb èxit.", "deletedFace_many": "{{count}} rostres suprimits amb èxit.", "deletedFace_other": "{{count}} rostres suprimits amb èxit.", - "renamedFace": "Rostre renombrat amb èxit a {{name}}" + "renamedFace": "Rostre renombrat amb èxit a {{name}}", + "reclassifiedFace": "Cara reclassificada amb èxit." 
}, "error": { "uploadingImageFailed": "No s'ha pogut penjar la imatge: {{errorMessage}}", @@ -87,7 +88,8 @@ "deleteNameFailed": "No s'ha pogut suprimir el nom: {{errorMessage}}", "updateFaceScoreFailed": "No s'ha pogut actualitzar la puntuació de rostre: {{errorMessage}}", "addFaceLibraryFailed": "No s'ha pogut establir el nom del rostre: {{errorMessage}}", - "renameFaceFailed": "No s'ha pogut renombrar el rostre: {{errorMessage}}" + "renameFaceFailed": "No s'ha pogut renombrar el rostre: {{errorMessage}}", + "reclassifyFailed": "No s'ha pogut reclassificar la cara: {{errorMessage}}" } }, "nofaces": "No hi han rostres disponibles", @@ -100,5 +102,7 @@ "pixels": "{{area}}px", "trainFace": "Entrenar rostre", "readTheDocs": "Llegir la documentació", - "trainFaceAs": "Entrenar rostre com a:" + "trainFaceAs": "Entrenar rostre com a:", + "reclassifyFaceAs": "Reclassifica la cara com a:", + "reclassifyFace": "Reclassifica la cara" } diff --git a/web/public/locales/ca/views/live.json b/web/public/locales/ca/views/live.json index 94a811d7a..b40f02e35 100644 --- a/web/public/locales/ca/views/live.json +++ b/web/public/locales/ca/views/live.json @@ -12,7 +12,8 @@ "clickMove": { "label": "Fes clic a la imatge per centrar la càmera", "enable": "Habilita clic per moure", - "disable": "Deshabilita clic per moure" + "disable": "Deshabilita clic per moure", + "enableWithZoom": "Activa el clic per moure / arrossegar per ampliar" }, "left": { "label": "Moure la càmera PTZ a l'esquerra" @@ -42,7 +43,9 @@ } } }, - "documentTitle": "Directe - Frigate", + "documentTitle": { + "default": "Live - Frigate" + }, "documentTitle.withCamera": "{{camera}} - Directe - Frigate", "lowBandwidthMode": "Mode de baix ample de banda", "twoWayTalk": { diff --git a/web/public/locales/ca/views/settings.json b/web/public/locales/ca/views/settings.json index f7aad51ba..187132bf8 100644 --- a/web/public/locales/ca/views/settings.json +++ b/web/public/locales/ca/views/settings.json @@ -14,7 +14,8 @@ 
"cameraReview": "Configuració Revisió de Càmeres - Frigate", "globalConfig": "Configuració global - Frigate", "cameraConfig": "Configuració de la càmera - Frigate", - "maintenance": "Manteniment - Frigate" + "maintenance": "Manteniment - Frigate", + "profiles": "Perfils - Frigate" }, "menu": { "ui": "Interfície d'usuari", @@ -86,7 +87,10 @@ "cameraMqtt": "Càmera MQTT", "maintenance": "Manteniment", "mediaSync": "Sincronització multimèdia", - "regionGrid": "Quadrícula de la regió" + "regionGrid": "Quadrícula de la regió", + "uiSettings": "Paràmetres de la IU", + "profiles": "Perfils", + "systemGo2rtcStreams": "go2rtc streams" }, "dialog": { "unsavedChanges": { @@ -99,7 +103,7 @@ "noCamera": "Cap càmera" }, "general": { - "title": "Configuració del perfil", + "title": "Paràmetres de la IU", "liveDashboard": { "title": "Panell en directe", "automaticLiveView": { @@ -179,6 +183,10 @@ "zone": "zona", "motion_mask": "màscara de moviment", "object_mask": "màscara d'objecte" + }, + "revertOverride": { + "title": "Reverteix a la configuració base", + "desc": "Això eliminarà la substitució de perfil per {{type}} {{name}} i tornarà a la configuració base." } }, "zoneName": { @@ -211,6 +219,17 @@ "error": { "mustBeGreaterOrEqualZero": "El temps de merodeig ha de ser mes gran o igual a 0." } + }, + "id": { + "error": { + "mustNotBeEmpty": "L'ID no pot estar buit.", + "alreadyExists": "Ja existeix una màscara amb aquest ID per a aquesta càmera." + } + }, + "name": { + "error": { + "mustNotBeEmpty": "El nom no pot estar buit." + } } }, "zones": { @@ -357,7 +376,10 @@ "title": "Habilitat", "description": "Si aquesta màscara està activada al fitxer de configuració. Si està desactivat, no pot ser habilitat per MQTT. Les màscares desactivades s'ignoren en temps d'execució." 
} - } + }, + "profileBase": "(base)", + "profileOverride": "(sobreescriu)", + "addDisabledProfile": "Afegiu primer a la configuració base i després sobreescriviu-ho al perfil" }, "notification": { "email": { @@ -693,8 +715,8 @@ }, "title": "Configuració d'instantànies", "documentation": "Llegir la documentació", - "desc": "Per a enviar a Frigate+ fa falta que tan la instantània com la instantània clean_copy estiguin habilitades a la configuració.", - "cleanCopyWarning": "Algunes càmeres tenen les captures d'imatge activades però la còpia neta desactivada. Cal habilitar clean_copy a la configuració de captures per poder enviar imatges d’aquestes càmeres a Frigate+." + "desc": "Per a enviar a Frigate+ fa falta que la instantània estigui habilitada a la configuració.", + "cleanCopyWarning": "Algunes càmeres tenen la captura desactivada" }, "modelInfo": { "baseModel": "Model base", @@ -1317,6 +1339,14 @@ "confirmButton": "Suprimeix permanentment", "success": "La càmera {{cameraName}} s'ha suprimit correctament", "error": "No s'ha pogut suprimir la càmera {{cameraName}}" + }, + "profiles": { + "title": "Sobreescriu la càmera de perfil", + "selectLabel": "Seleccioneu el perfil", + "description": "Configura quines càmeres estan habilitades o desactivades quan s'activa un perfil. Les càmeres establertes a «Inherit» mantenen el seu estat base habilitat.", + "inherit": "Hereta", + "enabled": "Habilitat", + "disabled": "Desactivat" } }, "cameraReview": { @@ -1371,6 +1401,9 @@ "value": { "label": "Valor nou", "reset": "Restableix" + }, + "profile": { + "label": "Perfil" } }, "detectionModel": { @@ -1427,7 +1460,9 @@ "review_thumbnails": "Revisa les miniatures", "previews": "Previsualitzacions", "exports": "Exporta", - "recordings": "Enregistraments" + "recordings": "Enregistraments", + "verbose": "Verbose", + "verboseDesc": "Escriu una llista completa de fitxers orfes al disc per revisar-los." 
}, "regionGrid": { "title": "Quadrícula de la regió", @@ -1447,7 +1482,8 @@ }, "camera": { "title": "Configuració de la càmera", - "description": "Aquests paràmetres només s'apliquen a aquesta càmera i substitueixen els paràmetres globals." + "description": "Aquests paràmetres només s'apliquen a aquesta càmera i substitueixen els paràmetres globals.", + "noCameras": "No hi ha càmeres disponibles" }, "advancedSettingsCount": "Configuració avançada ({{count}})", "advancedCount": "Avançat ({{count}})", @@ -1478,7 +1514,35 @@ "manual": "Arguments manuals", "inherit": "Hereta de la configuració de la càmera", "selectPreset": "Selecció de valors predefinits", - "manualPlaceholder": "ntroduïu els arguments FFmpeg" + "manualPlaceholder": "ntroduïu els arguments FFmpeg", + "none": "Cap", + "useGlobalSetting": "Hereta de l'entorn global", + "presetLabels": { + "preset-rpi-64-h264": "Raspberry Pi (H.264)", + "preset-rpi-64-h265": "Raspberry Pi (H.265)", + "preset-vaapi": "VAAPI (Intel/AMD GPU)", + "preset-intel-qsv-h264": "Intel QuickSync (H.264)", + "preset-intel-qsv-h265": "Intel QuickSync (H.265)", + "preset-nvidia": "NVIDIA GPU", + "preset-jetson-h264": "NVIDIA Jetson (H.264)", + "preset-jetson-h265": "NVIDIA Jetson (H.265)", + "preset-rkmpp": "Rockchip RKMPP", + "preset-http-jpeg-generic": "JPEG HTTP (Genèric)", + "preset-http-mjpeg-generic": "HTTP MJPEG (Generic)", + "preset-http-reolink": "HTTP - Reolink càmeres", + "preset-rtmp-generic": "RTMP (Generic)", + "preset-rtsp-generic": "RTSP (Generic)", + "preset-rtsp-restream": "RTSP - Restream de go2rtc", + "preset-rtsp-restream-low-latency": "RTSP - Restream de go2rtc (Latència baixa)", + "preset-rtsp-udp": "RTSP - UDP", + "preset-rtsp-blue-iris": "RTSP - Blue Iris", + "preset-record-generic": "Enregistra (Genèric, sense àudio)", + "preset-record-generic-audio-copy": "Enregistra (Genèric + Copia l'àudio)", + "preset-record-generic-audio-aac": "Enregistra (Genèric + Àudio a AAC)", + "preset-record-mjpeg": "Registre - 
Càmeres MJPEG", + "preset-record-jpeg": "Registre - Càmeres JPEG", + "preset-record-ubiquiti": "Registre - Càmeres Ubiquiti" + } }, "cameraInputs": { "itemTitle": "Flux {{index}}" @@ -1510,7 +1574,8 @@ "genai": "GenAI", "face_recognition": "Reconeixement de cares", "lpr": "Reconeixement de matrícules", - "birdseye": "Birdseye" + "birdseye": "Birdseye", + "masksAndZones": "Màscares / Zones" }, "detect": { "title": "Configuració de detecció" @@ -1522,7 +1587,8 @@ "keyDuplicate": "El nom del detector ja existeix.", "noSchema": "No hi ha esquemes de detector disponibles.", "none": "No s'ha configurat cap instància de detector.", - "add": "Afegeix un detector" + "add": "Afegeix un detector", + "addCustomKey": "Afegeix una clau personalitzada" }, "record": { "title": "Configuració de l'enregistrament" @@ -1575,7 +1641,25 @@ "timestamp_style": { "title": "Configuració de la marca horària" }, - "searchPlaceholder": "Cerca..." + "searchPlaceholder": "Cerca...", + "genaiRoles": { + "options": { + "embeddings": "Incrustació", + "vision": "Visió", + "tools": "Eines" + } + }, + "semanticSearchModel": { + "placeholder": "Selecciona el model…", + "builtIn": "Models integrats", + "genaiProviders": "Proveïdors de GenAI" + }, + "reviewLabels": { + "summary": "{{count}} etiquetes seleccionades", + "empty": "No hi ha etiquetes disponibles", + "allNonAlertDetections": "Totes les activitats no alertes s'inclouran com a deteccions." + }, + "addCustomLabel": "Afegeix una etiqueta personalitzada..." }, "globalConfig": { "title": "Configuració global", @@ -1615,5 +1699,143 @@ "unsavedChanges": "Teniu canvis sense desar", "confirmReset": "Confirma el restabliment", "resetToDefaultDescription": "Això restablirà tots els paràmetres d'aquesta secció als seus valors predeterminats. Aquesta acció no es pot desfer.", - "resetToGlobalDescription": "Això restablirà la configuració d'aquesta secció als valors predeterminats globals. Aquesta acció no es pot desfer." 
+ "resetToGlobalDescription": "Això restablirà la configuració d'aquesta secció als valors predeterminats globals. Aquesta acció no es pot desfer.", + "button": { + "overriddenGlobal": "Sobreescrit (Global)", + "overriddenGlobalTooltip": "Aquesta càmera anul·la la configuració global d'aquesta secció", + "overriddenBaseConfig": "Sobreescrit (Configuració base)", + "overriddenBaseConfigTooltip": "El perfil {{profile}} substitueix la configuració d'aquesta secció" + }, + "profiles": { + "title": "Perfils", + "activeProfile": "Perfil actiu", + "noActiveProfile": "No hi ha un perfil actiu", + "active": "Actiu", + "activated": "S'ha activat el perfil '{{profile}}'", + "activateFailed": "No s'ha pogut establir el perfil", + "deactivated": "Perfil desactivat", + "noProfiles": "No s'ha definit cap perfil.", + "noOverrides": "No hi ha excepcions", + "cameraCount_one": "{{count}} càmera", + "cameraCount_many": "{{count}} càmeres", + "cameraCount_other": "{{count}} càmeres", + "baseConfig": "Configuració base", + "addProfile": "Afegeix un perfil", + "newProfile": "Perfil nou", + "profileNamePlaceholder": "p. ex., Armat, lluny, mode nocturn", + "friendlyNameLabel": "Nom del perfil", + "profileIdLabel": "ID del perfil", + "profileIdDescription": "Identificador intern utilitzat en la configuració i les automatitzacions", + "nameInvalid": "Només es permeten lletres en minúscula, números i guions baixos", + "nameDuplicate": "Ja existeix un perfil amb aquest nom", + "error": { + "mustBeAtLeastTwoCharacters": "Ha de tenir com a mínim 2 caràcters", + "mustNotContainPeriod": "No ha de contenir períodes", + "alreadyExists": "Ja existeix un perfil amb aquest ID" + }, + "renameProfile": "Canvia el nom del perfil", + "renameSuccess": "Perfil reanomenat a '{{profile}}'", + "deleteProfile": "Suprimeix el perfil", + "deleteProfileConfirm": "Voleu suprimir el perfil \"{{profile}}\" de totes les càmeres? 
Això no es pot desfer.", + "deleteSuccess": "S'ha suprimit el perfil '{{profile}}'", + "createSuccess": "S'ha creat el perfil '{{profile}}'", + "removeOverride": "Elimina la sobreescriptura del perfil", + "deleteSection": "Suprimeix les excepcions de secció", + "deleteSectionConfirm": "Voleu eliminar les sobreescriptures de {{section}} del perfil {{profile}} a {{camera}}?", + "deleteSectionSuccess": "S'han suprimit {{section}} sobreescrits per {{profile}}", + "enableSwitch": "Habilita els perfils", + "enabledDescription": "Els perfils estan habilitats. Creeu un perfil nou a continuació, navegueu a una secció de configuració de la càmera per fer els vostres canvis i deseu perquè els canvis tinguin efecte.", + "disabledDescription": "Els perfils permeten definir conjunts de configuracions de càmera amb nom (p. ex., armats, fora, nit) que es poden activar sota demanda.", + "columnCamera": "Càmara", + "columnOverrides": "Sobreescriu el perfil" + }, + "go2rtcStreams": { + "title": "go2rtc Corrents", + "description": "Gestiona les configuracions de flux go2rtc per al restreaming de la càmera. Cada flux té un nom i un o més URL d'origen.", + "addStream": "Afegeix un flux", + "addStreamDesc": "Introduïu un nom per al flux nou. Aquest nom s'utilitzarà per a fer referència al flux en la configuració de la càmera.", + "addUrl": "Afegeix un URL", + "streamName": "Nom del flux", + "streamNamePlaceholder": "p. ex., porta d'entrada", + "streamUrlPlaceholder": "e.g., rtsp://usuari:contrasenya@192.168.1.100/flux", + "deleteStream": "Suprimeix el flux", + "deleteStreamConfirm": "Segur que voleu suprimir el flux \"{{streamName}}\"? Les càmeres que fan referència a aquest flux poden deixar de funcionar.", + "noStreams": "No s'ha configurat cap flux go2rtc. 
Afegeix un flux per començar.", + "validation": { + "nameRequired": "Es requereix el nom del flux", + "nameDuplicate": "Ja existeix un flux amb aquest nom", + "nameInvalid": "El nom del flux només pot contenir lletres, números, guions baixos i guions", + "urlRequired": "Es requereix com a mínim un URL" + }, + "renameStream": "Canvia el nom del flux", + "renameStreamDesc": "Introduïu un nom nou per a aquest flux. El canvi de nom d'un flux pot trencar les càmeres o altres fluxos que el fan referència pel seu nom.", + "newStreamName": "Nom de flux nou", + "ffmpeg": { + "useFfmpegModule": "Usa el mode de compatibilitat (ffmpeg)", + "video": "Vídeo", + "audio": "Àudio", + "hardware": "Acceleració del maquinari", + "videoCopy": "Copia", + "videoH264": "Transcodifica a H.264", + "videoH265": "Transcodifica a H.265", + "videoExclude": "Exclou", + "audioCopy": "Copia", + "audioAac": "Transcodifica a l'AAC", + "audioOpus": "Transcodifica a Opus", + "audioPcmu": "Transcodifica a PCM μ-law", + "audioPcma": "Transcodifica a PCM A-law", + "audioPcm": "Transcodifica a PCM", + "audioMp3": "Transcodifica a MP3", + "audioExclude": "Exclou", + "hardwareNone": "Sense acceleració de hardware", + "hardwareAuto": "Acceleració de hardware automàtica" + } + }, + "timestampPosition": { + "tl": "A dalt a l'esquerra", + "tr": "A dalt a la dreta", + "bl": "Baix a l'esquerra", + "br": "A baix a la dreta" + }, + "onvif": { + "profileAuto": "Automàtic", + "profileLoading": "S'estan carregant perfils..." + }, + "configMessages": { + "review": { + "recordDisabled": "L'enregistrament està desactivat, els elements de revisió no es generaran.", + "detectDisabled": "La detecció d'objectes està desactivada. Els elements de revisió requereixen objectes detectats per categoritzar alertes i deteccions.", + "allNonAlertDetections": "Totes les activitats no alertes s'inclouran com a deteccions." + }, + "audio": { + "noAudioRole": "Cap flux té definit el rol d'àudio. 
Heu d'habilitar el rol d'àudio per a la detecció d'àudio perquè funcioni." + }, + "audioTranscription": { + "audioDetectionDisabled": "La detecció d'àudio no està activada per a aquesta càmera. La transcripció d'àudio requereix que la detecció d'àudio estigui activa." + }, + "detect": { + "fpsGreaterThanFive": "No es recomana establir el detect FPS superior a 5." + }, + "faceRecognition": { + "globalDisabled": "El reconeixement de cares no està habilitat a nivell global. Habilita-ho en la configuració global per al reconeixement facial a nivell de càmera per funcionar.", + "personNotTracked": "El reconeixement de cares requereix que l'objecte 'persona' sigui rastrejat. Assegureu-vos que «persona» estigui a la llista de seguiment d'objectes." + }, + "lpr": { + "globalDisabled": "El reconeixement de la matrícula no està habilitat a nivell global. Habilita-ho en la configuració global per al funcionament de LPR a nivell de càmera.", + "vehicleNotTracked": "El reconeixement de la matrícula requereix que es faci un seguiment del 'cotxe' o de la 'motocicleta'." + }, + "record": { + "noRecordRole": "Cap flux té el rol de registre definit. L'enregistrament no funcionarà." + }, + "birdseye": { + "objectsModeDetectDisabled": "Birdseye està configurat en mode 'objectes', però la detecció d'objectes està desactivada per a aquesta càmera. La càmera no apareixerà a Birdseye." + }, + "snapshots": { + "detectDisabled": "La detecció d'objectes està desactivada. Les instantànies es generen a partir d'objectes rastrejats i no es crearan." + }, + "detectors": { + "mixedTypes": "Tots els detectors han d'utilitzar el mateix tipus. Elimina els detectors existents per utilitzar un tipus diferent.", + "mixedTypesSuggestion": "Tots els detectors han d'utilitzar el mateix tipus. Suprimiu detectors existents o seleccioneu {{type}}." 
+ } + } } diff --git a/web/public/locales/ca/views/system.json b/web/public/locales/ca/views/system.json index 933eff0b3..22ecd1fa8 100644 --- a/web/public/locales/ca/views/system.json +++ b/web/public/locales/ca/views/system.json @@ -59,7 +59,9 @@ "count": "{{count}} missatges", "expanded": { "payload": "Payload" - } + }, + "count_one": "{{count}} missatge", + "count_other": "{{count}} missatges" } }, "general": { @@ -110,7 +112,8 @@ "description": "Aquest és un error conegut en les eines d'informació de les estadístiques de GPU d'Intel (intel.gpu.top) on es trencarà i retornarà repetidament un ús de GPU del 0% fins i tot en els casos en què l'acceleració del maquinari i la detecció d'objectes s'executen correctament a la (i)GPU. Això no és un error de Frigate. Podeu reiniciar l'amfitrió per a corregir temporalment el problema i confirmar que la GPU funciona correctament. Això no afecta el rendiment." }, "gpuTemperature": "Temperatura de la GPU", - "npuTemperature": "Temperatura NPU" + "npuTemperature": "Temperatura NPU", + "gpuCompute": "Càlcul / Codificació per GPU" }, "otherProcesses": { "title": "Altres processos", @@ -170,7 +173,8 @@ "cameraFramesPerSecond": "{{camName}} fotogrames per segon", "cameraDetectionsPerSecond": "{{camName}} deteccions per segon", "overallSkippedDetectionsPerSecond": "Nombre total de deteccions descartades per segon", - "cameraSkippedDetectionsPerSecond": "Nombre de deteccions descartades per segon a {{camName}}" + "cameraSkippedDetectionsPerSecond": "Nombre de deteccions descartades per segon a {{camName}}", + "cameraGpu": "{{camName}} GPU" }, "info": { "codec": "Còdec:", diff --git a/web/public/locales/cs/views/classificationModel.json b/web/public/locales/cs/views/classificationModel.json index 910f0cdaf..e770a1bb3 100644 --- a/web/public/locales/cs/views/classificationModel.json +++ b/web/public/locales/cs/views/classificationModel.json @@ -23,11 +23,15 @@ }, "toast": { "success": { - "deletedImage": "Smazat obrázky", + 
"deletedImage_one": "Smazat obrázky", + "deletedImage_few": "", + "deletedImage_other": "", "deletedModel_one": "Úspěšně odstraněný {{count}} model", "deletedModel_few": "Úspěšně odstraněné {{count}} modely", "deletedModel_other": "Úspěšně odstraněných {{count}} modelů", - "deletedCategory": "Smazat třídu", + "deletedCategory_one": "Smazat třídu", + "deletedCategory_few": "", + "deletedCategory_other": "", "categorizedImage": "Obrázek úspěšně klasifikován", "trainedModel": "Úspěšně vytrénovaný model.", "trainingModel": "Trénování modelu bylo úspěšně zahájeno.", diff --git a/web/public/locales/da/views/classificationModel.json b/web/public/locales/da/views/classificationModel.json index 3193dbb59..25d1704fc 100644 --- a/web/public/locales/da/views/classificationModel.json +++ b/web/public/locales/da/views/classificationModel.json @@ -26,8 +26,10 @@ }, "toast": { "success": { - "deletedCategory": "Slettet kategori", - "deletedImage": "Slettede billeder", + "deletedCategory_one": "Slettet kategori", + "deletedCategory_other": "", + "deletedImage_one": "Slettede billeder", + "deletedImage_other": "", "deletedModel_one": "{{count}} model er nu slettet", "deletedModel_other": "{{count}} modeller er nu slettet", "categorizedImage": "Billedet er nu kategoriseret", diff --git a/web/public/locales/de/common.json b/web/public/locales/de/common.json index be5132067..8924da381 100644 --- a/web/public/locales/de/common.json +++ b/web/public/locales/de/common.json @@ -134,7 +134,8 @@ "resetToDefault": "Auf Werkseinstellungen zurücksetzten", "saveAll": "Alle speichern", "savingAll": "Alle werden gespeichert…", - "undoAll": "Alle rückgängig" + "undoAll": "Alle rückgängig", + "retry": "Wiederholen" }, "label": { "back": "Zurück", @@ -248,7 +249,8 @@ "export": "Exportieren", "classification": "Klassifizierung", "actions": "Aktion", - "chat": "Chat" + "chat": "Chat", + "profiles": "Profile" }, "unit": { "speed": { @@ -275,7 +277,8 @@ "title": "Speichern der Konfigurationsänderungen 
gescheitert: {{errorMessage}}", "noMessage": "Speichern der Konfigurationsänderungen gescheitert" }, - "title": "Speichern" + "title": "Speichern", + "success": "Die Konfigurationsänderungen wurden erfolgreich gespeichert." } }, "role": { @@ -319,5 +322,7 @@ "two": "{{0}} und {{1}}", "many": "{{items}}, und {{last}}", "separatorWithSpace": ", " - } + }, + "no_items": "Keine Artikel", + "validation_errors": "Validierungsfehler" } diff --git a/web/public/locales/de/components/camera.json b/web/public/locales/de/components/camera.json index 32874bab6..e9f39cb8e 100644 --- a/web/public/locales/de/components/camera.json +++ b/web/public/locales/de/components/camera.json @@ -82,6 +82,7 @@ "mask": "Maske", "motion": "Bewegung", "regions": "Regionen", - "boundingBox": "Begrenzungsrechteck" + "boundingBox": "Begrenzungsrechteck", + "paths": "Pfad" } } diff --git a/web/public/locales/de/config/cameras.json b/web/public/locales/de/config/cameras.json index cbd5bca5c..9a0ab8b17 100644 --- a/web/public/locales/de/config/cameras.json +++ b/web/public/locales/de/config/cameras.json @@ -79,8 +79,8 @@ "label": "Objekterkennung", "description": "Einstellungen für die Erkennungs-/Detektionsrolle, die zum Ausführen der Objekterkennung und zum Initialisieren von Trackern verwendet wird.", "enabled": { - "label": "Erkennung aktiviert", - "description": "Aktivieren oder deaktivieren Sie die Objekterkennung für diese Kamera. Die Erkennung muss aktiviert sein, damit die Objektverfolgung ausgeführt werden kann." + "label": "Objekterkennung aktiviert", + "description": "Aktivieren oder deaktivieren Sie die Objekterkennung für diese Kamera." }, "height": { "label": "Höhe erkennen", @@ -140,6 +140,31 @@ "enabled": { "label": "Bild senden", "description": "Aktivieren Sie für diese Kamera die Veröffentlichung von Bild-Snapshots für Objekte in MQTT-Themen." 
+ }, + "description": "Einstellungen für die Veröffentlichung von Bildern über MQTT.", + "timestamp": { + "label": "Zeitstempel hinzufügen", + "description": "Füge einen Zeitstempel auf Bilder ein, die über MQTT veröffentlicht werden." + }, + "bounding_box": { + "label": "Begrenzungsrahmen hinzufügen", + "description": "Zeichne Begrenzungsrahmen auf Bilder, die über MQTT veröffentlicht werden." + }, + "crop": { + "label": "Bild zuschneiden", + "description": "Bilder, die über MQTT veröffentlicht werden, werden auf die Begrenzungsrahmen der erkannten Objekte zugeschnitten." + }, + "height": { + "label": "Bildhöhe", + "description": "Höhe (in Pixeln) zur Größenanpassung von über MQTT veröffentlichten Bildern." + }, + "required_zones": { + "label": "Erforderliche Zonen", + "description": "Zonen, die ein Objekt betreten muss, damit ein MQTT-Bild veröffentlicht wird." + }, + "quality": { + "label": "JPEG-Qualität", + "description": "JPEG-Qualität für über MQTT veröffentlichte Bilder (0–100)." } }, "face_recognition": { @@ -157,7 +182,8 @@ "notifications": { "label": "Benachrichtigung", "enabled": { - "label": "Benachrichtigungen aktivieren" + "label": "Benachrichtigungen aktivieren", + "description": "Benachrichtigungen für diese Kamera aktivieren oder deaktivieren." }, "email": { "label": "Benachrichtigungs-E-Mail", @@ -170,7 +196,8 @@ "enabled_in_config": { "label": "Ursprüngliche Meldungen geben an", "description": "Gibt an, ob Benachrichtigungen in der ursprünglichen statischen Konfiguration aktiviert waren." - } + }, + "description": "Einstellungen zum Aktivieren und Verwalten von Benachrichtigungen für diese Kamera." }, "ffmpeg": { "label": "FFmpeg", @@ -293,7 +320,7 @@ }, "skip_motion_threshold": { "label": "Schwellenwert für Bewegungsüberspringen", - "description": "Wenn sich mehr als dieser Anteil des Bildes in einem einzelnen Frame ändert, gibt der Detektor keine Bewegungsfelder zurück und kalibriert sich sofort neu. 
Dies kann CPU-Leistung sparen und Fehlalarme bei Blitzschlag, Gewittern usw. reduzieren, aber auch echte Ereignisse übersehen, wie z. B. eine PTZ-Kamera, die ein Objekt automatisch verfolgt. Der Kompromiss besteht darin, entweder einige Megabyte an Aufzeichnungen zu verlieren oder ein paar kurze Clips zu überprüfen. Bereich 0,0 bis 1,0." + "description": "Wenn sich mehr als dieser Anteil des Bildes in einem einzelnen Frame ändert, gibt der Detektor keine Bewegungsfelder zurück und kalibriert sich sofort neu. Dies kann CPU-Leistung sparen und Fehlalarme bei Blitzschlag, Gewittern usw. reduzieren, aber auch echte Ereignisse übersehen, wie z. B. eine PTZ-Kamera, die ein Objekt automatisch verfolgt. Der Kompromiss besteht darin, entweder einige Megabyte an Aufzeichnungen zu verlieren oder ein paar kurze Clips zu überprüfen. Leer lassen um diese Funktion zu deaktivieren." }, "improve_contrast": { "label": "Kontrast verbessern", @@ -554,7 +581,7 @@ }, "detections": { "label": "Konfiguration der Erkennungen", - "description": "Einstellungen zum Erstellen von Erkennungsereignissen (ohne Alarm) und zur Festlegung ihrer Aufbewahrungsdauer.", + "description": "Einstellungen, die festlegen, bei welchen verfolgten Objekten Erkennungen (ohne Alarm) generiert werden und wie lange diese Erkennungen gespeichert bleiben.", "enabled": { "label": "Erkennung aktivieren", "description": "Erkennungsereignisse für diese Kamera aktivieren oder deaktivieren." @@ -620,11 +647,77 @@ "onvif": { "autotracking": { "required_zones": { - "label": "Erforderliche Zonen" + "label": "Erforderliche Zonen", + "description": "Objekte müssen in eine dieser Zonen eintreten, bevor die automatische Verfolgung beginnt." }, "movement_weights": { - "description": "Diese Kalibrierungswerte werden automatisch durch die Kamerakalibrierung generiert. Bitte nicht manuell ändern." + "description": "Diese Kalibrierungswerte werden automatisch durch die Kamerakalibrierung generiert. 
Bitte nicht manuell ändern.", + "label": "Bewegungsgewichte" + }, + "label": "Automatische Verfolgung", + "description": "Bewegliche Objekte automatisch verfolgen und sie mithilfe von PTZ-Kamerabewegungen im Bildausschnitt zentriert halten.", + "enabled": { + "label": "Automatische Verfolgung aktivieren", + "description": "Aktivieren oder deaktivieren Sie die automatische PTZ-Kamera-Verfolgung erkannter Objekte." + }, + "calibrate_on_startup": { + "label": "Beim Start kalibrieren", + "description": "Messen Sie die Drehzahlen der PTZ-Motoren beim Start, um die Nachführgenauigkeit zu verbessern. Frigate aktualisiert die Konfiguration nach der Kalibrierung mit den Bewegungsgewichten." + }, + "zooming": { + "label": "Zoom-Modus", + "description": "Zoomverhalten steuern: deaktiviert (nur Schwenken/Neigen), absolut (am besten kompatibel) oder relativ (gleichzeitiges Schwenken/Neigen/Zoomen)." + }, + "zoom_factor": { + "label": "Zoomfaktor", + "description": "Steuert den Zoomfaktor bei verfolgten Objekten. Bei niedrigeren Werten bleibt mehr von der Szene im Bild; bei höheren Werten wird näher herangezoomt, wobei jedoch die Verfolgung verloren gehen kann. Werte zwischen 0,1 und 0,75." + }, + "track": { + "label": "Verfolgte Objekte", + "description": "Liste der Objekttypen, die das automatische Tracking auslösen sollen." + }, + "return_preset": { + "label": "Voreinstellung setzen", + "description": "Der in der Kamera-Firmware konfigurierte ONVIF-Voreinstellungsname, zu dem nach Beendigung der Verfolgung zurückgekehrt werden soll." + }, + "timeout": { + "label": "Zeitüberschreitung bei der Rückgabe", + "description": "Warte nach dem Verlust der Verfolgung so viele Sekunden, bevor die Kamera in die voreingestellte Position zurückkehrt." + }, + "enabled_in_config": { + "label": "Ursprünglicher Autotrack-Status", + "description": "Internes Feld zur Erfassung, ob die automatische Nachführung in der Konfiguration aktiviert wurde." 
} + }, + "label": "ONVIF", + "description": "ONVIF-Verbindung und Einstellungen für die automatische PTZ-Verfolgung dieser Kamera.", + "host": { + "label": "ONVIF Host", + "description": "Host (und optional Schema) für den ONVIF-Dienst dieser Kamera." + }, + "port": { + "label": "ONVIF Port", + "description": "Portnummer für den ONVIF-Dienst." + }, + "user": { + "label": "ONVIF-Benutzername", + "description": "Benutzername für die ONVIF-Authentifizierung; bei einigen Geräten ist für ONVIF ein Admin-Benutzer erforderlich." + }, + "password": { + "label": "ONVIF-Passwort", + "description": "Passwort für die ONVIF-Authentifizierung." + }, + "tls_insecure": { + "label": "TLS-Überprüfung deaktivieren", + "description": "TLS-Überprüfung überspringen und Digest-Authentifizierung für ONVIF deaktivieren (unsicher; nur in sicheren Netzwerken verwenden)." + }, + "ignore_time_mismatch": { + "label": "Zeitabweichung ignorieren", + "description": "Ignoriere Zeitunterschiede zwischen Kamera und Frigate-Server bei der ONVIF-Kommunikation." + }, + "profile": { + "label": "ONVIF Profile", + "description": "Spezifisches ONVIF-Medienprofil für die PTZ-Steuerung, das anhand eines Tokens oder Namens ausgewählt wird. Ist kein Profil festgelegt, wird automatisch das erste Profil mit gültiger PTZ-Konfiguration ausgewählt." } }, "semantic_search": { @@ -660,13 +753,22 @@ } }, "ui": { - "label": "Kamera UI" + "label": "Kamera UI", + "description": "Legen Sie die Reihenfolge und Sichtbarkeit dieser Kamera in der Benutzeroberfläche fest. Die Reihenfolge wirkt sich auf das Standard-Dashboard aus. Für eine detailliertere Steuerung verwenden Sie Kameragruppen.", + "order": { + "label": "UI Reihenfolge", + "description": "Numerische Reihenfolge, nach der die Kamera in der Benutzeroberfläche sortiert wird (Standard-Dashboard und Listen); höhere Zahlen erscheinen später." 
+ }, + "dashboard": { + "label": "In der Benutzeroberfläche anzeigen", + "description": "Schalte ein, ob diese Kamera überall in der Benutzeroberfläche von „Frigate“ sichtbar ist. Wenn du diese Option deaktivierst, musst du die Konfiguration manuell bearbeiten, um diese Kamera wieder in der Benutzeroberfläche anzuzeigen." + } }, "snapshots": { "label": "Schnappschüsse", - "description": "Einstellungen für gespeicherte JPEG-Schnappschüsse von verfolgten Objekten für diese Kamera.", + "description": "Einstellungen für API-generierte Momentaufnahmen der erfassten Objekte für diese Kamera.", "enabled": { - "label": "Schnappschüsse aktiviert", + "label": "Schnappschüsse aktivieren", "description": "Das Speichern von Momentaufnahmen für diese Kamera aktivieren oder deaktivieren." }, "clean_copy": { @@ -675,11 +777,173 @@ }, "timestamp": { "label": "Zeitstempel-Einblendung", - "description": "Füge den gespeicherten Momentaufnahmen einen Zeitstempel hinzu." + "description": "Füge einen Zeitstempel auf die von der API abgerufenen Momentaufnahmen ein." }, "bounding_box": { "label": "Einblendung der Begrenzungsrahmen", - "description": "Zeichne Begrenzungsrahmen für verfolgte Objekte auf gespeicherten Momentaufnahmen." + "description": "Zeichne Begrenzungsrahmen für verfolgte Objekte auf Momentaufnahmen aus der API." + }, + "crop": { + "label": "Ertragsübersicht", + "description": "Schnappschüsse aus der API auf die Begrenzungsrahmen der erkannten Objekte zuschneiden." + }, + "required_zones": { + "label": "Erforderliche Zonen", + "description": "Bereiche, die ein Objekt betreten muss, damit ein Schnappschuss gespeichert wird." + }, + "height": { + "label": "Höhe der Momentaufnahme", + "description": "Höhe (Pixel), auf die Schnappschüsse über die API skaliert werden sollen; leer lassen, um die Originalgröße beizubehalten." 
+ }, + "retain": { + "label": "Aufbewahrungsdauer von Snapshots", + "description": "Aufbewahrungseinstellungen für Snapshots, einschließlich Standarddauer in Tagen und objektspezifischer Überschreibungen.", + "default": { + "label": "Standard-Aufbewahrungsfrist", + "description": "Standardmäßige Anzahl von Tagen, für die Snapshots aufbewahrt werden." + }, + "mode": { + "label": "Speichermodus", + "description": "Speichermodus: „all“ (alle Segmente speichern), „motion“ (Segmente mit Bewegung speichern) oder „active_objects“ (Segmente mit aktiven Objekten speichern)." + }, + "objects": { + "label": "Objektaufbewahrung", + "description": "Objektbezogene Überschreibungen für die Aufbewahrungsdauer von Snapshots." + } + }, + "quality": { + "label": "Qualität der Momentaufnahme", + "description": "Codierungsqualität für gespeicherte Momentaufnahmen (0–100)." } + }, + "timestamp_style": { + "label": "Format für Zeitstempel", + "description": "Gestaltungsmöglichkeiten für Zeitstempel im Feed, die auf Aufzeichnungen und Momentaufnahmen angewendet werden.", + "position": { + "label": "Position des Zeitstempels", + "description": "Position des Zeitstempels auf dem Bild (tl/tr/bl/br)." + }, + "format": { + "label": "Zeitstempelformat", + "description": "Datums- und Uhrzeitformatzeichenfolge für Zeitstempel (Python-Datums- und Uhrzeitformatcodes)." + }, + "color": { + "label": "Farbe des Zeitstempels", + "description": "RGB-Farbwerte für den Zeitstempeltext (alle Werte zwischen 0 und 255).", + "red": { + "label": "Rot", + "description": "Rotwert (0–255) für die Farbe des Zeitstempels." + }, + "green": { + "label": "Grün", + "description": "Grünanteil (0–255) für die Farbe des Zeitstempels." + }, + "blue": { + "label": "Blau", + "description": "Blauer Farbanteil (0–255) für die Farbe des Zeitstempels." + } + }, + "thickness": { + "label": "Stärke der Zeitmarke", + "description": "Linienstärke des Zeitstempeltextes." 
+ }, + "effect": { + "label": "Zeitstempeleffekt", + "description": "Visuelle Darstellung des Zeitstempeltextes (keine, durchgehend, Schatten)." + } + }, + "best_image_timeout": { + "label": "Optimale Zeitüberschreitung für Bilder", + "description": "Wie lange soll man auf das Bild mit dem höchsten Konfidenzwert warten?" + }, + "type": { + "label": "Kameratyp", + "description": "Kameratyp" + }, + "webui_url": { + "label": "URL der Kamera", + "description": "URL, um die Kamera direkt von der Systemseite aus aufzurufen" + }, + "profiles": { + "label": "Profile", + "description": "Benannte Konfigurationsprofile mit teilweisen Überschreibungen, die zur Laufzeit aktiviert werden können." + }, + "zones": { + "label": "Zonen", + "description": "Mit Zonen können Sie einen bestimmten Bereich des Bildausschnitts festlegen, um zu bestimmen, ob sich ein Objekt innerhalb dieses Bereichs befindet oder nicht.", + "friendly_name": { + "label": "Zonen Name", + "description": "Ein benutzerfreundlicher Name für die Zone, der in der Benutzeroberfläche von Frigate angezeigt wird. Wenn kein Name festgelegt ist, wird eine formatierte Version des Zonennamens verwendet." + }, + "enabled": { + "label": "Aktiviert", + "description": "Diese Zone aktivieren oder deaktivieren. Deaktivierte Zonen werden zur Laufzeit ignoriert." + }, + "enabled_in_config": { + "label": "Behalten Sie den ursprünglichen Zustand der Zone im Blick." + }, + "filters": { + "label": "Zonenfilter", + "description": "Filter, die auf Objekte innerhalb dieser Zone angewendet werden sollen. Dienen dazu, Fehlalarme zu reduzieren oder einzuschränken, welche Objekte als in der Zone vorhanden gelten.", + "min_area": { + "label": "Mindestfläche des Objekts", + "description": "Mindestfläche der Begrenzungsbox (in Pixeln oder Prozent), die für diesen Objekttyp erforderlich ist. Kann als Pixelwert (Ganzzahl) oder als Prozentwert (Gleitkomma zwischen 0,000001 und 0,99) angegeben werden." 
+ }, + "max_area": { + "label": "Maximale Objektfläche", + "description": "Maximal zulässige Fläche der Begrenzungsbox (in Pixeln oder Prozent) für diesen Objekttyp. Kann als Pixelwert (Ganzzahl) oder als Prozentwert (Gleitkomma zwischen 0,000001 und 0,99) angegeben werden." + }, + "min_ratio": { + "label": "Mindestseitenverhältnis", + "description": "Erforderliches Mindestverhältnis von Breite zu Höhe, damit die Begrenzungsbox die Voraussetzungen erfüllt." + }, + "max_ratio": { + "label": "Maximales Seitenverhältnis", + "description": "Maximales Seitenverhältnis: Das maximal zulässige Verhältnis von Breite zu Höhe, damit die Begrenzungsbox die Anforderungen erfüllt.Maximales Seitenverhältnis: Das maximal zulässige Verhältnis von Breite zu Höhe, damit die Begrenzungsbox die Anforderungen erfüllt." + }, + "threshold": { + "label": "Konfidenzschwelle", + "description": "Durchschnittlicher Schwellenwert für die Erkennungssicherheit, der erforderlich ist, damit das Objekt als echtes Positiv gewertet wird." + }, + "min_score": { + "label": "Mindestvertrauen", + "description": "Erforderliche Mindestzuverlässigkeit der Einzelbilderkennung, damit das Objekt gezählt wird." + }, + "mask": { + "label": "Filtermaske", + "description": "Polygonkoordinaten, die festlegen, wo dieser Filter innerhalb des Bildausschnitts angewendet wird." + }, + "raw_mask": { + "label": "Rohmaske" + } + }, + "coordinates": { + "label": "Koordinaten", + "description": "Polygonkoordinaten, die den Bereich der Zone definieren. Dies kann eine durch Kommas getrennte Zeichenfolge oder eine Liste von Koordinatenzeichenfolgen sein. Die Koordinaten sollten relativ (0–1) oder absolut (veraltet) sein." + }, + "distances": { + "label": "Entfernungen in der realen Welt", + "description": "Optionale reale Entfernungen für jede Seite des Zonenvierecks, die für Geschwindigkeits- oder Entfernungsberechnungen verwendet werden. Bei Angabe müssen genau 4 Werte angegeben werden." 
+ }, + "inertia": { + "label": "Trägheit", + "description": "Anzahl der aufeinanderfolgenden Bilder, in denen ein Objekt in der Zone erkannt werden muss, bevor es als vorhanden gilt. Dies hilft dabei, vorübergehende Erkennungen herauszufiltern." + }, + "loitering_time": { + "label": "Verzögerungszeit in Sekunden", + "description": "Anzahl der Sekunden, die sich ein Objekt in der Zone aufhalten muss, damit es als „Herumlungern“ gewertet wird. Setzen Sie den Wert auf 0, um die Erkennung von Herumlungern zu deaktivieren." + }, + "speed_threshold": { + "label": "Mindestgeschwindigkeit", + "description": "Mindestgeschwindigkeit (in realen Einheiten, sofern Entfernungen festgelegt sind), die erforderlich ist, damit ein Objekt als in der Zone vorhanden gilt. Wird für geschwindigkeitsbasierte Zonenauslöser verwendet." + }, + "objects": { + "label": "Auslöseobjekte", + "description": "Liste der Objekttypen (aus labelmap), die diese Zone auslösen können. Kann eine Zeichenkette oder eine Liste von Zeichenketten sein. Ist das Feld leer, werden alle Objekte berücksichtigt." + } + }, + "enabled_in_config": { + "label": "Ursprünglicher Zustand der Kamera", + "description": "Behalten Sie den ursprünglichen Zustand der Kamera." } } diff --git a/web/public/locales/de/config/global.json b/web/public/locales/de/config/global.json index 8f9a9660e..b7758bfea 100644 --- a/web/public/locales/de/config/global.json +++ b/web/public/locales/de/config/global.json @@ -10,7 +10,8 @@ "audio": { "label": "Audioereignisse", "enabled": { - "label": "Aktivieren der Audioerkennung" + "label": "Aktivieren der Audioerkennung", + "description": "Aktivieren oder deaktivieren Sie die Erkennung von Audioereignissen für alle Kameras; diese Einstellung kann für jede Kamera individuell überschrieben werden." 
}, "min_volume": { "label": "Mindestlautstärke", @@ -35,7 +36,8 @@ "num_threads": { "label": "Erkennungsthreads", "description": "Anzahl der Threads, die für die Audioerkennungsverarbeitung verwendet werden sollen." - } + }, + "description": "Einstellungen für die audiobasierte Ereigniserkennung für alle Kameras; können für jede Kamera individuell überschrieben werden." }, "environment_vars": { "label": "Umgebungsvariablen", @@ -109,7 +111,20 @@ "description": "Aktivieren Sie die Live-Transkription für Audio, sobald es empfangen wird." }, "enabled": { - "label": "Audio-Transkription aktivieren" + "label": "Audio-Transkription aktivieren", + "description": "Automatische Audio-Transkription für alle Kameras aktivieren oder deaktivieren; kann für jede Kamera einzeln überschrieben werden." + }, + "language": { + "label": "Transkriptsprache", + "description": "Für die Transkription/Übersetzung verwendeter Sprachcode (z. B. „en“ für Englisch). Eine Liste der unterstützten Sprachcodes finden Sie unter https://whisper-api.com/docs/languages/." + }, + "device": { + "label": "Transkriptionsgerät", + "description": "Geräteschlüssel (CPU/GPU), auf dem das Transkriptionsmodell ausgeführt werden soll. Derzeit werden für die Transkription nur NVIDIA-CUDA-GPUs unterstützt." + }, + "model_size": { + "label": "Modellgröße", + "description": "Modellgröße für die Transkription von Audioereignissen im Offline-Modus." } }, "birdseye": { @@ -126,6 +141,42 @@ "order": { "label": "Position", "description": "Numerische Position, die Reihenfolge der Kamera im Birdseye-Layout steuert." + }, + "restream": { + "label": "Restream RTSP", + "description": "Leiten Sie den Birdseye-Ausgang als RTSP-Feed weiter; wenn Sie diese Option aktivieren, läuft Birdseye ununterbrochen weiter." + }, + "width": { + "label": "Breite", + "description": "Ausgabebreite (Pixel) des zusammengesetzten Birdseye-Bildes." 
+ }, + "height": { + "label": "Höhe", + "description": "Ausgabehöhe (in Pixeln) des zusammengesetzten Birdseye-Bildes." + }, + "quality": { + "label": "Codierungsqualität", + "description": "Codierungsqualität für den Birdseye-MPEG-1-Feed (1 = höchste Qualität, 31 = niedrigste Qualität)." + }, + "inactivity_threshold": { + "label": "Schwellenwert für Inaktivität", + "description": "Sekunden der Inaktivität, nach denen eine Kamera nicht mehr in Birdseye angezeigt wird." + }, + "layout": { + "label": "Layout", + "description": "Layoutoptionen für die Birdseye-Komposition.", + "scaling_factor": { + "label": "Skalierungsfaktor", + "description": "Vom Layout-Rechner verwendeter Skalierungsfaktor (Bereich 1,0 bis 5,0)." + }, + "max_cameras": { + "label": "Max. Anzahl Kameras", + "description": "Maximale Anzahl der Kameras, die gleichzeitig in Birdseye angezeigt werden können; es werden die neuesten Kameras angezeigt." + } + }, + "idle_heartbeat_fps": { + "label": "FPS im Leerlauf", + "description": "Bilder pro Sekunde, um das zuletzt erstellte Birdseye-Bild im Leerlauf erneut zu senden; auf 0 setzen, um die Funktion zu deaktivieren." } }, "database": { @@ -140,7 +191,8 @@ "label": "Objekterkennung", "description": "Einstellungen für die Erkennungs-/Detektionsrolle, die zum Ausführen der Objekterkennung und zum Initialisieren von Trackern verwendet wird.", "enabled": { - "label": "Erkennung aktiviert" + "label": "Objekterkennung aktiviert", + "description": "Objekterkennung für alle Kameras aktivieren oder deaktivieren; kann für jede Kamera einzeln überschrieben werden." }, "height": { "label": "Höhe erkennen", @@ -258,11 +310,45 @@ "face_recognition": { "label": "Gesichtserkennung", "enabled": { - "label": "Gesichtserkennung aktivieren" + "label": "Gesichtserkennung aktivieren", + "description": "Gesichtserkennung für alle Kameras aktivieren oder deaktivieren; kann für jede Kamera einzeln überschrieben werden." 
}, "min_area": { "label": "Mindestfläche der Stirnseite", "description": "Mindestfläche (Pixel) eines erkannten Gesichtsrahmens, die für einen Erkennungsversuch erforderlich ist." + }, + "description": "Einstellungen für die Gesichtserkennung und -identifizierung für alle Kameras; können für jede Kamera individuell angepasst werden.", + "model_size": { + "label": "Modellgröße", + "description": "Zu verwendende Modellgröße für Gesichts-Embeddings (klein/groß); bei größeren Modellen ist möglicherweise eine GPU erforderlich." + }, + "unknown_score": { + "label": "Unbekannter Schwellenwert", + "description": "Abstandsschwelle, unterhalb derer ein Gesicht als potenzielle Übereinstimmung angesehen wird (höher = strenger)." + }, + "detection_threshold": { + "label": "Erkennungsschwelle", + "description": "Mindestvertrauensgrad, der erforderlich ist, damit eine Gesichtserkennung als gültig angesehen wird." + }, + "recognition_threshold": { + "label": "Erkennungsschwelle", + "description": "Schwellenwert für den Abstand bei der Gesichts-Einbettung, ab dem zwei Gesichter als übereinstimmend gelten." + }, + "min_faces": { + "label": "Mindestens Gesichter", + "description": "Mindestanzahl an Gesichtserkennungen, die erforderlich sind, bevor einer Person ein erkanntes Unterlabel zugewiesen wird." + }, + "save_attempts": { + "label": "Speicherungen", + "description": "Anzahl der Gesichtserkennungsversuche, die für die Benutzeroberfläche zur aktuellen Erkennung gespeichert werden sollen." + }, + "blur_confidence_filter": { + "label": "Weichzeichnungsfilter", + "description": "Passen Sie die Konfidenzwerte anhand der Bildunschärfe an, um Fehlalarme bei Gesichtern von schlechter Qualität zu reduzieren." + }, + "device": { + "label": "Gerät", + "description": "Dies ist eine Übersteuerung, um ein bestimmtes Gerät anzusprechen. 
Weitere Informationen finden Sie unter https://onnxruntime.ai/docs/execution-providers/" } }, "notifications": { @@ -426,7 +512,8 @@ "quality": { "label": "Live Qualität", "description": "Kodierungsqualität für den jsmpeg-Stream (1 = höchst, 31 = niedrigst)." - } + }, + "description": "Einstellungen zur Steuerung der Auflösung und Qualität des jsmpeg-Livestreams. Dies hat keine Auswirkungen auf weitergeleitete Kameras, die go2rtc für die Live-Ansicht verwenden." }, "telemetry": { "label": "Telemetrie", @@ -464,7 +551,8 @@ "label": "Kennzeichenerkennung", "description": "Einstellungen für die Kennzeichenerkennung, einschließlich Erkennungsschwellen, Formatierung und bekannte Kennzeichen.", "enabled": { - "label": "LPR aktivieren" + "label": "LPR aktivieren", + "description": "Die Kennzeichenerkennung für alle Kameras aktivieren oder deaktivieren; die Einstellung kann für jede Kamera individuell überschrieben werden." }, "expire_time": { "label": "Sekunden bis zum Ablauf", @@ -477,12 +565,59 @@ "enhancement": { "label": "Verbesserungsgrad", "description": "Verstärkungsstufe (0-10) zur Anwendung auf Plattenaufnahmen vor der OCR; höhere Werte führen nicht immer zu besseren Ergebnissen, Stufen über 5 funktionieren möglicherweise nur bei Nachtaufnahmen und sollten mit Vorsicht verwendet werden." + }, + "model_size": { + "label": "Modellgröße", + "description": "Für die Texterkennung verwendete Modellgröße. Die meisten Benutzer sollten „klein“ wählen." + }, + "detection_threshold": { + "label": "Erkennungsschwelle", + "description": "Schwellenwert für die Erkennungssicherheit, ab dem die OCR-Erkennung für ein verdächtiges Kennzeichen gestartet wird." + }, + "recognition_threshold": { + "label": "Erkennungsschwelle", + "description": "Schwellwert für die Erkennungssicherheit, der erforderlich ist, damit der erkannte Text des Kennzeichens als Unterbezeichnung hinzugefügt wird." 
+ }, + "min_plate_length": { + "label": "Mindestplattenlänge", + "description": "Mindestanzahl an Zeichen, die ein erkanntes Kennzeichen enthalten muss, um als gültig zu gelten." + }, + "format": { + "label": "Regulärer Ausdruck für das Plattenformat", + "description": "Optionaler regulärer Ausdruck zur Überprüfung der erkannten Kennzeichenfolgen auf Übereinstimmung mit einem erwarteten Format." + }, + "match_distance": { + "label": "Entfernung", + "description": "Anzahl der zulässigen Zeichenabweichungen beim Vergleich erkannter Kennzeichen mit bekannten Kennzeichen." + }, + "known_plates": { + "label": "Bekannte Schilder", + "description": "Liste der Kennzeichen oder regulären Ausdrücke, die besonders überwacht oder gemeldet werden sollen." + }, + "debug_save_plates": { + "label": "Debug-Platten speichern", + "description": "Speichern Sie Ausschnitte aus den Plattenbildern zur Fehlerbehebung bei der LPR-Leistung." + }, + "device": { + "label": "Gerät", + "description": "Dies ist eine Übersteuerung, um ein bestimmtes Gerät anzusprechen. Weitere Informationen finden Sie unter https://onnxruntime.ai/docs/execution-providers/" + }, + "replace_rules": { + "label": "Ersatzregeln", + "description": "Reguläre Ausdrücke, die zur Normalisierung der erkannten Kennzeichen vor dem Abgleich verwendet werden.", + "pattern": { + "label": "Regex-Muster" + }, + "replacement": { + "label": "Ersetzungs String" + } } }, "motion": { "label": "Bewegungserkennung", "enabled": { - "label": "Bewegungserkennung aktivieren" + "label": "Bewegungserkennung aktivieren", + "description": "Bewegungserkennung für alle Kameras aktivieren oder deaktivieren; kann für jede Kamera einzeln überschrieben werden." 
}, "threshold": { "label": "Bewegungsschwelle", @@ -494,7 +629,7 @@ }, "skip_motion_threshold": { "label": "Schwellenwert für Bewegungsüberspringen", - "description": "Wenn sich mehr als dieser Anteil des Bildes in einem einzelnen Frame ändert, gibt der Detektor keine Bewegungsfelder zurück und kalibriert sich sofort neu. Dies kann CPU-Leistung sparen und Fehlalarme bei Blitzschlag, Gewittern usw. reduzieren, aber auch echte Ereignisse übersehen, wie z. B. eine PTZ-Kamera, die ein Objekt automatisch verfolgt. Der Kompromiss besteht darin, entweder einige Megabyte an Aufzeichnungen zu verlieren oder ein paar kurze Clips zu überprüfen. Bereich 0,0 bis 1,0." + "description": "Wenn sich mehr als dieser Anteil des Bildes in einem einzelnen Frame ändert, gibt der Detektor keine Bewegungsfelder zurück und kalibriert sich sofort neu. Dies kann CPU-Leistung sparen und Fehlalarme bei Blitzschlag, Gewittern usw. reduzieren, aber auch echte Ereignisse übersehen, wie z. B. eine PTZ-Kamera, die ein Objekt automatisch verfolgt. Der Kompromiss besteht darin, entweder einige Megabyte an Aufzeichnungen zu verlieren oder ein paar kurze Clips zu überprüfen. Leer lassen um diese Funktion zu deaktivieren." }, "improve_contrast": { "label": "Kontrast verbessern", @@ -530,7 +665,8 @@ }, "raw_mask": { "label": "Maskierung" - } + }, + "description": "Die Standard-Einstellungen für die Bewegungserkennung gelten für alle Kameras, sofern sie nicht für einzelne Kameras überschrieben werden." }, "tls": { "label": "TLS", @@ -568,7 +704,7 @@ "label": "Detektor-Hardware", "description": "Konfiguration für Objektdetektoren (CPU, GPU, ONNX-Backends) und alle detektorspezifischen Modelleinstellungen.", "type": { - "label": "Detektortyp", + "label": "Type", "description": "Art des für die Objekterkennung zu verwendenden Detektors (z. B. „cpu“, „edgetpu“, „openvino“)." 
}, "cpu": { @@ -690,7 +826,7 @@ }, "api_timeout": { "label": "DeepStack-API-Zeitlimit (in Sekunden)", - "description": "Maximum time allowed for a DeepStack API request." + "description": "Maximal zulässige Zeit für eine DeepStack-API-Anfrage." }, "api_key": { "label": "DeepStack-API-Schlüssel (falls erforderlich)", @@ -887,6 +1023,12 @@ "input_dtype": { "label": "Modell-Eingangs-D-Typ" } + }, + "label": "RKNN", + "description": "RKNN-Detektor für Rockchip-NPUs; führt kompilierte RKNN-Modelle auf Rockchip-Hardware aus.", + "num_cores": { + "label": "Anzahl der zu verwendenden NPU-Kerne.", + "description": "Die Anzahl der zu verwendenden NPU-Kerne (0 für automatische Einstellung)." } }, "memryx": { @@ -930,6 +1072,110 @@ "label": "Standard-Pixel-Farbformat", "description": "Vom Modell erwarteter Pixel-Farbraum: „rgb“, „bgr“ oder „yuv“." } + }, + "device": { + "label": "Geräte Pfad", + "description": "Das für die MemryX-Inferenz zu verwendende Gerät (z. B. „PCIe“)." + } + }, + "model": { + "label": "Detektorspezifische Modellkonfiguration", + "description": "Detektorspezifische Optionen zur Modellkonfiguration (Pfad, Eingabegröße usw.). Detektorspezifische Modellkonfiguration.", + "path": { + "label": "Pfad zum benutzerdefinierten Objekterkennungsmodell", + "description": "Pfad zu einer benutzerdefinierten Erkennungsmodelldatei (oder plus:// für Frigate+-Modelle)." + }, + "labelmap_path": { + "label": "Label-Karte für benutzerdefinierten Objektdetektor", + "description": "Pfad zu einer Labelmap-Datei, die numerische Klassen dem Detektor als Zeichenfolgenbezeichnungen zuordnet." + }, + "width": { + "label": "Eingabebreite des Objekterkennungsmodells", + "description": "Breite des Modell-Eingabetensors in Pixeln." + }, + "height": { + "label": "Eingabehöhe für das Objekterkennungsmodell", + "description": "Höhe des Modell-Eingabetensors in Pixeln." 
+ }, + "labelmap": { + "label": "Anpassung der Labelmap", + "description": "Überschreibt oder ordnet Einträge neu zu, um sie in die Standard-Labelmap zu integrieren." + }, + "attributes_map": { + "label": "Zuordnung der Objektbezeichnungen zu ihren Attributbezeichnungen", + "description": "Zuordnung von Objektbezeichnungen zu Attributbezeichnungen, die zum Hinzufügen von Metadaten verwendet werden (zum Beispiel „Auto“ -> [„Kennzeichen“])." + }, + "input_tensor": { + "label": "Form des Modell-Eingabetensors", + "description": "Vom Modell erwartetes Tensor-Format: „nhwc“ oder „nchw“." + }, + "input_pixel_format": { + "label": "Standard-Pixel-Farbformat", + "description": "Vom Modell erwarteter Pixel-Farbraum: „rgb“, „bgr“ oder „yuv“." + }, + "input_dtype": { + "label": "Modell-Eingangs-D-Typ", + "description": "Datentyp des Modell-Eingabetensors (z. B. „float32“)." + }, + "model_type": { + "label": "Typ des Objekterkennungsmodells", + "description": "Typ der Detektor-Modellarchitektur (ssd, yolox, yolonas), der von einigen Detektoren zur Optimierung verwendet wird." + } + }, + "model_path": { + "label": "Detektorspezifischer Modellpfad", + "description": "Dateipfad zur Binärdatei des Detektormodells, falls dies für den ausgewählten Detektor erforderlich ist." + }, + "axengine": { + "label": "AXEngine NPU", + "description": "AXERA AX650N/AX8850N NPU-Detektor, der kompilierte .axmodel-Dateien über die AXEngine-Laufzeitumgebung ausführt." + }, + "onnx": { + "label": "ONNX", + "description": "ONNX-Detektor zum Ausführen von ONNX-Modellen; nutzt verfügbare Beschleunigungs-Backends (CUDA/ROCm/OpenVINO), sofern vorhanden.", + "device": { + "label": "Gerätetyp", + "description": "Das für die ONNX-Inferenz zu verwendende Gerät (z. B. „AUTO“, „CPU“, „GPU“)." 
+ } + }, + "openvino": { + "label": "OpenVINO", + "description": "OpenVINO-Detektor für AMD- und Intel-CPUs, Intel-GPUs und Intel-VPU-Hardware.", + "device": { + "label": "Geräte Type", + "description": "Das für die OpenVINO-Inferenz zu verwendende Gerät (z. B. „CPU“, „GPU“, „NPU“)." + } + }, + "synaptics": { + "label": "Synaptics", + "description": "Synaptics-NPU-Detektor für Modelle im .synap-Format unter Verwendung des Synap SDK auf Synaptics-Hardware." + }, + "teflon_tfl": { + "label": "Teflon", + "description": "Teflon-Delegate-Detektor für TFLite unter Verwendung der Mesa-Teflon-Delegate-Bibliothek zur Beschleunigung der Inferenz auf unterstützten GPUs." + }, + "tensorrt": { + "label": "TensorRT", + "description": "TensorRT-Detektor für Nvidia Jetson-Geräte unter Verwendung serialisierter TensorRT-Engines zur Beschleunigung der Inferenz.", + "device": { + "label": "GPU-Geräteindex", + "description": "Der zu verwendende GPU-Geräteindex." + } + }, + "zmq": { + "label": "ZMQ IPC", + "description": "ZMQ-IPC-Detektor, der die Inferenz über einen ZeroMQ-IPC-Endpunkt an einen externen Prozess auslagert.", + "endpoint": { + "label": "ZMQ IPC Endpunkt", + "description": "Der ZMQ-Endpunkt, mit dem eine Verbindung hergestellt werden soll." + }, + "request_timeout_ms": { + "label": "ZMQ-Anfrage-Timeout in Millisekunden", + "description": "Zeitlimit für ZMQ-Anfragen in Millisekunden." + }, + "linger_ms": { + "label": "Verweilzeit des ZMQ-Sockets in Millisekunden", + "description": "Verweilzeit des Sockets in Millisekunden." } } }, @@ -937,7 +1183,8 @@ "label": "Objekte", "description": "Standardeinstellungen für die Objektverfolgung, einschließlich der zu verfolgenden Labels und Filter pro Objekt.", "track": { - "label": "Zu verfolgende Objekte" + "label": "Zu verfolgende Objekte", + "description": "Liste der Objektbezeichnungen, die von allen Kameras verfolgt werden sollen; kann für jede Kamera individuell überschrieben werden." 
}, "filters": { "label": "Objektfilter", @@ -1033,7 +1280,8 @@ "record": { "label": "Aufnahme", "enabled": { - "label": "Aufnahme aktivieren" + "label": "Aufnahme aktivieren", + "description": "Aufzeichnung für alle Kameras aktivieren oder deaktivieren; kann für jede Kamera einzeln überschrieben werden." }, "expire_interval": { "label": "Bereinigungsintervall festlegen", @@ -1122,7 +1370,8 @@ "enabled_in_config": { "label": "Ursprünglicher Aufnahmestatus", "description": "Gibt an, ob die Aufzeichnung in der ursprünglichen statischen Konfiguration aktiviert war." - } + }, + "description": "Die Einstellungen für Aufzeichnung und Speicherung gelten für alle Kameras, sofern sie nicht für einzelne Kameras überschrieben werden." }, "review": { "label": "Rezension", @@ -1130,7 +1379,8 @@ "label": "Benachrichtigungseinstellungen", "description": "Einstellungen dazu, bei welchen überwachten Objekten Warnmeldungen generiert werden und wie lange diese aufbewahrt werden.", "enabled": { - "label": "Benachrichtigungen aktivieren" + "label": "Benachrichtigungen aktivieren", + "description": "Die Erzeugung von Warnmeldungen für alle Kameras aktivieren oder deaktivieren; diese Einstellung kann für jede Kamera individuell überschrieben werden." }, "labels": { "label": "Warnhinweise", @@ -1151,9 +1401,10 @@ }, "detections": { "label": "Konfiguration der Erkennungen", - "description": "Einstellungen zum Erstellen von Erkennungsereignissen (ohne Alarm) und zur Festlegung ihrer Aufbewahrungsdauer.", + "description": "Einstellungen, die festlegen, bei welchen verfolgten Objekten Erkennungen (ohne Alarm) generiert werden und wie lange diese Erkennungen gespeichert bleiben.", "enabled": { - "label": "Erkennung aktivieren" + "label": "Erkennung aktivieren", + "description": "Erkennungsereignisse für alle Kameras aktivieren oder deaktivieren; kann für jede Kamera einzeln überschrieben werden." 
}, "labels": { "label": "Kennzeichnungen zur Erkennung", @@ -1211,16 +1462,83 @@ "label": "Aufforderung zum Aktivitätskontext", "description": "Eine benutzerdefinierte Eingabeaufforderung, die beschreibt, was als verdächtiges Verhalten gilt und was nicht, um den Zusammenfassungen der generativen KI einen Kontext zu geben." } - } + }, + "description": "Einstellungen, die Benachrichtigungen, Erkennungen und GenAI-Zusammenfassungen steuern, die von der Benutzeroberfläche und dem Speicher verwendet werden." }, "onvif": { "autotracking": { "required_zones": { - "label": "Erforderliche Zonen" + "label": "Erforderliche Zonen", + "description": "Objekte müssen in eine dieser Zonen eintreten, bevor die automatische Verfolgung beginnt." }, "movement_weights": { - "description": "Diese Kalibrierungswerte werden automatisch durch die Kamerakalibrierung generiert. Bitte nicht manuell ändern." + "description": "Diese Kalibrierungswerte werden automatisch durch die Kamerakalibrierung generiert. Bitte nicht manuell ändern.", + "label": "Bewegungsgewichte" + }, + "label": "Automatische Verfolgung", + "description": "Bewegliche Objekte automatisch verfolgen und sie mithilfe von PTZ-Kamerabewegungen im Bildausschnitt zentriert halten.", + "enabled": { + "label": "Automatische Verfolgung aktivieren", + "description": "Aktivieren oder deaktivieren Sie die automatische PTZ-Kamera-Verfolgung erkannter Objekte." + }, + "calibrate_on_startup": { + "label": "Beim Start kalibrieren", + "description": "Messen Sie die Drehzahlen der PTZ-Motoren beim Start, um die Nachführgenauigkeit zu verbessern. Frigate aktualisiert die Konfiguration nach der Kalibrierung mit den Bewegungsgewichten." + }, + "zooming": { + "label": "Zoom-Modus", + "description": "Zoomverhalten steuern: deaktiviert (nur Schwenken/Neigen), absolut (am besten kompatibel) oder relativ (gleichzeitiges Schwenken/Neigen/Zoomen)." 
+ }, + "zoom_factor": { + "label": "Zoomfaktor", + "description": "Steuert den Zoomfaktor bei verfolgten Objekten. Bei niedrigeren Werten bleibt mehr von der Szene im Bild; bei höheren Werten wird näher herangezoomt, wobei jedoch die Verfolgung verloren gehen kann. Werte zwischen 0,1 und 0,75." + }, + "track": { + "label": "Verfolgte Objekte", + "description": "Liste der Objekttypen, die das automatische Tracking auslösen sollen." + }, + "return_preset": { + "label": "Voreinstellung setzen", + "description": "Der in der Kamera-Firmware konfigurierte ONVIF-Voreinstellungsname, zu dem nach Beendigung der Verfolgung zurückgekehrt werden soll." + }, + "timeout": { + "label": "Zeitüberschreitung bei der Rückgabe", + "description": "Warte nach dem Verlust der Verfolgung so viele Sekunden, bevor die Kamera in die voreingestellte Position zurückkehrt." + }, + "enabled_in_config": { + "label": "Ursprünglicher Autotrack-Status", + "description": "Internes Feld zur Erfassung, ob die automatische Nachführung in der Konfiguration aktiviert wurde." } + }, + "label": "ONVIF", + "description": "ONVIF-Verbindung und Einstellungen für die automatische PTZ-Verfolgung dieser Kamera.", + "host": { + "label": "ONVIF Host", + "description": "Host (und optional Schema) für den ONVIF-Dienst dieser Kamera." + }, + "port": { + "label": "ONVIF Port", + "description": "Portnummer für den ONVIF-Dienst." + }, + "user": { + "label": "ONVIF-Benutzername", + "description": "Benutzername für die ONVIF-Authentifizierung; bei einigen Geräten ist für ONVIF ein Admin-Benutzer erforderlich." + }, + "password": { + "label": "ONVIF-Passwort", + "description": "Passwort für die ONVIF-Authentifizierung." + }, + "tls_insecure": { + "label": "TLS-Überprüfung deaktivieren", + "description": "TLS-Überprüfung überspringen und Digest-Authentifizierung für ONVIF deaktivieren (unsicher; nur in sicheren Netzwerken verwenden)." 
+ }, + "ignore_time_mismatch": { + "label": "Zeitabweichung ignorieren", + "description": "Ignoriere Zeitunterschiede zwischen Kamera und Frigate-Server bei der ONVIF-Kommunikation." + }, + "profile": { + "label": "ONVIF Profile", + "description": "Spezifisches ONVIF-Medienprofil für die PTZ-Steuerung, das anhand eines Tokens oder Namens ausgewählt wird. Ist kein Profil festgelegt, wird automatisch das erste Profil mit gültiger PTZ-Konfiguration ausgewählt." } }, "semantic_search": { @@ -1252,12 +1570,34 @@ "label": "Trigger-Aktionen", "description": "Liste der Aktionen, die ausgeführt werden sollen, wenn der Trigger ausgelöst wird (Benachrichtigung, Unterbezeichnung, Attribut)." } + }, + "description": "Einstellungen für die semantische Suche, die Objekt-Embeddings erstellt und abfragt, um ähnliche Elemente zu finden.", + "enabled": { + "label": "Semantische Suche aktivieren", + "description": "Aktivieren oder deaktivieren Sie die semantische Suchfunktion." + }, + "reindex": { + "label": "Beim Start neu indizieren", + "description": "Lösen Sie eine vollständige Neuindizierung der historisch erfassten Objekte in der Embedding-Datenbank aus." + }, + "model": { + "label": "Semantisches Suchmodell oder Name des GenAI-Anbieters", + "description": "Das für die semantische Suche zu verwendende Einbettungsmodell (z. B. „jinav1“) oder der Name eines GenAI-Anbieters mit der Rolle „Einbettungen“." + }, + "model_size": { + "label": "Modellgröße", + "description": "Wählen Sie die Modellgröße aus; „small“ läuft auf der CPU, während „large“ in der Regel eine GPU erfordert." + }, + "device": { + "label": "Gerät", + "description": "Dies ist eine Übersteuerung, um ein bestimmtes Gerät anzusprechen. 
Weitere Informationen finden Sie unter https://onnxruntime.ai/docs/execution-providers/" } }, "snapshots": { "label": "Schnappschüsse", "enabled": { - "label": "Schnappschüsse aktiviert" + "label": "Schnappschüsse aktivieren", + "description": "Das Speichern von Momentaufnahmen für alle Kameras aktivieren oder deaktivieren; diese Einstellung kann für jede Kamera individuell überschrieben werden." }, "clean_copy": { "label": "Saubere Kopie speichern", @@ -1265,11 +1605,292 @@ }, "timestamp": { "label": "Zeitstempel-Einblendung", - "description": "Füge den gespeicherten Momentaufnahmen einen Zeitstempel hinzu." + "description": "Füge einen Zeitstempel auf die von der API abgerufenen Momentaufnahmen ein." }, "bounding_box": { "label": "Einblendung der Begrenzungsrahmen", - "description": "Zeichne Begrenzungsrahmen für verfolgte Objekte auf gespeicherten Momentaufnahmen." + "description": "Zeichne Begrenzungsrahmen für verfolgte Objekte auf Momentaufnahmen aus der API." + }, + "crop": { + "label": "Zuschneiden", + "description": "Schnappschüsse aus der API auf die Begrenzungsrahmen der erkannten Objekte zuschneiden." + }, + "required_zones": { + "label": "Erforderliche Zonen", + "description": "Bereiche, die ein Objekt betreten muss, damit ein Schnappschuss gespeichert wird." + }, + "height": { + "label": "Höhe der Momentaufnahme", + "description": "Höhe (Pixel), auf die Schnappschüsse über die API skaliert werden sollen; leer lassen, um die Originalgröße beizubehalten." + }, + "retain": { + "label": "Aufbewahrungsdauer von Snapshots", + "description": "Aufbewahrungseinstellungen für Snapshots, einschließlich Standarddauer in Tagen und objektspezifischer Überschreibungen.", + "default": { + "label": "Standard-Aufbewahrungsfrist", + "description": "Standardmäßige Anzahl von Tagen, für die Snapshots aufbewahrt werden." 
+ }, + "mode": { + "label": "Speichermodus", + "description": "Speichermodus: „all“ (alle Segmente speichern), „motion“ (Segmente mit Bewegung speichern) oder „active_objects“ (Segmente mit aktiven Objekten speichern)." + }, + "objects": { + "label": "Objektaufbewahrung", + "description": "Objektbezogene Überschreibungen für die Aufbewahrungsdauer von Snapshots." + } + }, + "quality": { + "label": "Qualität der Momentaufnahme", + "description": "Codierungsqualität für gespeicherte Momentaufnahmen (0–100)." + }, + "description": "Einstellungen für API-generierte Momentaufnahmen von verfolgten Objekten für alle Kameras; können für jede Kamera individuell überschrieben werden." + }, + "model": { + "label": "Erkennungsmodell", + "description": "Einstellungen zur Konfiguration eines benutzerdefinierten Objekterkennungsmodells und seiner Eingabeform.", + "path": { + "label": "Pfad zum benutzerdefinierten Objekterkennungsmodell", + "description": "Pfad zu einer benutzerdefinierten Erkennungsmodelldatei (oder plus:// für Frigate+-Modelle)." + }, + "labelmap_path": { + "label": "Label-Karte für benutzerdefinierten Objektdetektor", + "description": "Pfad zu einer Labelmap-Datei, die numerische Klassen dem Detektor als Zeichenfolgenbezeichnungen zuordnet." + }, + "width": { + "label": "Eingabebreite des Objekterkennungsmodells", + "description": "Breite des Modell-Eingabetensors in Pixeln." + }, + "height": { + "label": "Eingabehöhe für das Objekterkennungsmodell", + "description": "Höhe des Modell-Eingabetensors in Pixeln." + }, + "labelmap": { + "label": "Anpassung der Labelmap", + "description": "Überschreibt oder ordnet Einträge neu zu, um sie in die Standard-Labelmap zu integrieren." + }, + "attributes_map": { + "label": "Zuordnung der Objektbezeichnungen zu ihren Attributbezeichnungen", + "description": "Zuordnung von Objektbezeichnungen zu Attributbezeichnungen, die zum Hinzufügen von Metadaten verwendet werden (zum Beispiel „Auto“ -> [„Kennzeichen“])." 
+ }, + "input_tensor": { + "label": "Form des Modell-Eingabetensors", + "description": "Vom Modell erwartetes Tensor-Format: „nhwc“ oder „nchw“." + }, + "input_pixel_format": { + "label": "Standard-Pixel-Farbformat", + "description": "Vom Modell erwarteter Pixel-Farbraum: „rgb“, „bgr“ oder „yuv“." + }, + "input_dtype": { + "label": "Modell-Eingangs-D-Typ", + "description": "Datentyp des Modell-Eingabetensors (z. B. „float32“)." + }, + "model_type": { + "label": "Typ des Objekterkennungsmodells", + "description": "Typ der Detektor-Modellarchitektur (ssd, yolox, yolonas), der von einigen Detektoren zur Optimierung verwendet wird." + } + }, + "genai": { + "label": "Konfiguration generativer KI", + "description": "Einstellungen für integrierte Anbieter generativer KI, die zur Erstellung von Objektbeschreibungen und Zusammenfassungen von Rezensionen verwendet werden.", + "api_key": { + "label": "API Schlüssel", + "description": "Von einigen Anbietern wird ein API-Schlüssel benötigt (kann auch über Umgebungsvariablen festgelegt werden)." + }, + "base_url": { + "label": "Base URL", + "description": "Basis-URL für selbst gehostete oder kompatible Anbieter (z. B. eine Ollama-Instanz)." + }, + "model": { + "label": "Model", + "description": "Das vom Anbieter bereitzustellende Modell zur Erstellung von Beschreibungen oder Zusammenfassungen." + }, + "provider": { + "label": "Anbieter", + "description": "Der zu verwendende GenAI-Anbieter (z. B.: Ollama, Gemini, OpenAI)." + }, + "roles": { + "label": "Rollen", + "description": "GenAI-Rollen (Tools, Vision, Einbettungen); ein Anbieter pro Rolle." + }, + "provider_options": { + "label": "Anbieter Optionen", + "description": "Zusätzliche anbieterspezifische Optionen, die an den GenAI-Client übergeben werden sollen." + }, + "runtime_options": { + "label": "Laufzeitoptionen", + "description": "Laufzeitoptionen, die bei jedem Inferenzaufruf an den Anbieter übergeben werden." 
+ } + }, + "timestamp_style": { + "label": "Format für Zeitstempel", + "position": { + "label": "Position des Zeitstempels", + "description": "Position des Zeitstempels auf dem Bild (tl/tr/bl/br)." + }, + "format": { + "label": "Zeitstempelformat", + "description": "Datums- und Uhrzeitformatzeichenfolge für Zeitstempel (Python-Datums- und Uhrzeitformatcodes)." + }, + "color": { + "label": "Farbe des Zeitstempels", + "description": "RGB-Farbwerte für den Zeitstempeltext (alle Werte zwischen 0 und 255).", + "red": { + "label": "Rot", + "description": "Rotwert (0–255) für die Farbe des Zeitstempels." + }, + "green": { + "label": "Grün", + "description": "Grünanteil (0–255) für die Farbe des Zeitstempels." + }, + "blue": { + "label": "Blau", + "description": "Blauer Farbanteil (0–255) für die Farbe des Zeitstempels." + } + }, + "thickness": { + "label": "Stärke der Zeitmarke", + "description": "Linienstärke des Zeitstempeltextes." + }, + "effect": { + "label": "Zeitstempeleffekt", + "description": "Visuelle Darstellung des Zeitstempeltextes (keine, durchgehend, Schatten)." + }, + "description": "Gestaltungsoptionen für Zeitstempel im Feed, die auf die Debug-Ansicht und Snapshots angewendet werden." + }, + "profiles": { + "label": "Profile", + "description": "Benannte Profildefinitionen mit aussagekräftigen Namen. Kameraprofile müssen auf die hier definierten Namen verweisen.", + "friendly_name": { + "label": "Anzeigename", + "description": "Anzeigename für dieses Profil, der in der Benutzeroberfläche angezeigt wird." 
+ } + }, + "classification": { + "label": "Objektklassifizierung", + "description": "Einstellungen für Klassifizierungsmodelle, die zur Verfeinerung von Objektbezeichnungen oder zur Zustandsklassifizierung verwendet werden.", + "bird": { + "label": "Konfiguration der Vogelklassifizierung", + "description": "Einstellungen speziell für Modelle zur Klassifizierung von Vögeln.", + "enabled": { + "label": "Vogelklassifizierung", + "description": "Vogelklassifizierung aktivieren oder deaktivieren." + }, + "threshold": { + "label": "Mindestpunktzahl", + "description": "Mindestpunktzahl, die erforderlich ist, um eine Vogelklassifizierung zu akzeptieren." + } + }, + "custom": { + "label": "Benutzerdefinierte Klassifizierungsmodelle", + "description": "Konfiguration für benutzerdefinierte Klassifizierungsmodelle, die zur Objekt- oder Zustandserkennung verwendet werden.", + "enabled": { + "label": "Modell aktivieren", + "description": "Das benutzerdefinierte Klassifizierungsmodell aktivieren oder deaktivieren." + }, + "name": { + "label": "Modellname", + "description": "Bezeichner für das zu verwendende benutzerdefinierte Klassifizierungsmodell." + }, + "threshold": { + "label": "Punktschwelle", + "description": "Punktschwelle, die zur Änderung des Klassifizierungsstatus herangezogen wird." + }, + "save_attempts": { + "label": "Speicherungen", + "description": "Wie viele Klassifizierungsversuche sollen für die Benutzeroberfläche „Letzte Klassifizierungen“ gespeichert werden?" + }, + "object_config": { + "objects": { + "label": "Objekte klassifizieren", + "description": "Liste der Objekttypen, für die eine Objektklassifizierung durchgeführt werden soll." + }, + "classification_type": { + "label": "Klassifizierungstyp", + "description": "Verwendeter Klassifizierungstyp: „sub_label“ (fügt „sub_label“ hinzu) oder andere unterstützte Typen." 
+ } + }, + "state_config": { + "cameras": { + "label": "Klassifizierungskameras", + "description": "Bildausschnitt und Einstellungen pro Kamera für die Klassifizierung des Laufzustands.", + "crop": { + "label": "Klassifizierungsfeld", + "description": "Zuschneidekoordinaten, die für die Klassifizierung mit dieser Kamera verwendet werden sollen." + } + }, + "motion": { + "description": "Falls zutreffend, führe die Klassifizierung durch, sobald innerhalb des angegebenen Ausschnitts eine Bewegung erkannt wird.", + "label": "Bei Bewegung ausführen" + }, + "interval": { + "label": "Klassifizierungsintervall", + "description": "Intervall (in Sekunden) zwischen den regelmäßigen Klassifizierungsläufen für die Zustandsklassifizierung." + } + } + } + }, + "camera_groups": { + "label": "Kameragruppen", + "description": "Konfiguration für benannte Kameragruppen, die zur Organisation der Kameras in der Benutzeroberfläche verwendet werden.", + "cameras": { + "label": "Kameraübersicht", + "description": "Liste der in dieser Gruppe enthaltenen Kameramodelle." + }, + "icon": { + "label": "Gruppensymbol", + "description": "Symbol, das in der Benutzeroberfläche die Kameragruppe darstellt." + }, + "order": { + "label": "Sortierreihenfolge", + "description": "Numerische Reihenfolge, nach der die Kameragruppen in der Benutzeroberfläche sortiert werden; höhere Zahlen erscheinen später." + } + }, + "active_profile": { + "label": "Aktives Profil", + "description": "Name des derzeit aktiven Profils. Nur zur Laufzeit gültig, wird nicht in YAML gespeichert." + }, + "camera_mqtt": { + "label": "MQTT", + "description": "Einstellungen für die Veröffentlichung von Bildern über MQTT.", + "enabled": { + "label": "Bild senden", + "description": "Aktivieren Sie für diese Kamera die Veröffentlichung von Bild-Snapshots für Objekte an MQTT-Themen." + }, + "timestamp": { + "label": "Zeitstempel hinzufügen", + "description": "Füge einen Zeitstempel auf Bilder ein, die über MQTT veröffentlicht werden." 
+ }, + "bounding_box": { + "label": "Begrenzungsrahmen hinzufügen", + "description": "Zeichne Begrenzungsrahmen auf Bilder, die über MQTT veröffentlicht werden." + }, + "crop": { + "label": "Bild zuschneiden", + "description": "Bilder, die über MQTT veröffentlicht werden, werden auf die Begrenzungsrahmen der erkannten Objekte zugeschnitten." + }, + "height": { + "label": "Bildhöhe", + "description": "Höhe (in Pixeln) zur Größenanpassung von über MQTT veröffentlichten Bildern." + }, + "required_zones": { + "label": "Benötigte Zonen", + "description": "Zonen, die ein Objekt betreten muss, damit ein MQTT-Bild veröffentlicht wird." + }, + "quality": { + "label": "JPEG Qualität", + "description": "JPEG Qualität für über MQTT veröffentlichte Bilder (0–100)." + } + }, + "camera_ui": { + "label": "Kamera UI", + "description": "Die Reihenfolge und Sichtbarkeit dieser Kamera wird in der UI angezeigt. Die Reihenfolge wirkt sich auf das Standard-Dashboard aus. Für eine feinere Kontrolle verwenden Sie Kamera-Gruppen.", + "order": { + "label": "UI-Reihenfolge", + "description": "Numerische Reihenfolge, nach der die Kamera in der Benutzeroberfläche sortiert wird (Standard-Dashboard und Listen); höhere Zahlen erscheinen später." + }, + "dashboard": { + "label": "In der Benutzeroberfläche anzeigen", + "description": "Schalte ein, ob diese Kamera überall in der Benutzeroberfläche von „Frigate“ sichtbar ist. Wenn du diese Option deaktivierst, musst du die Konfiguration manuell bearbeiten, um diese Kamera wieder in der Benutzeroberfläche anzuzeigen." 
} } } diff --git a/web/public/locales/de/objects.json b/web/public/locales/de/objects.json index f3fdbd370..ae767c61d 100644 --- a/web/public/locales/de/objects.json +++ b/web/public/locales/de/objects.json @@ -116,5 +116,10 @@ "desk": "Schreibtisch", "raccoon": "Waschbär", "rabbit": "Kaninchen", - "gls": "GLS" + "gls": "GLS", + "canada_post": "Kanada Post", + "royal_mail": "Royal-Mail", + "school_bus": "Schulbus", + "skunk": "Stinktier", + "kangaroo": "Känguruh" } diff --git a/web/public/locales/de/views/classificationModel.json b/web/public/locales/de/views/classificationModel.json index 2de77e73e..4b55ff230 100644 --- a/web/public/locales/de/views/classificationModel.json +++ b/web/public/locales/de/views/classificationModel.json @@ -23,15 +23,18 @@ }, "toast": { "success": { - "deletedCategory": "Klasse gelöscht", - "deletedImage": "Bilder gelöscht", + "deletedCategory_one": "Klasse gelöscht", + "deletedCategory_other": "Klassen {{count}} gelöscht", + "deletedImage_one": "{{count}} Bild gelöscht", + "deletedImage_other": "{{count}} Bilder gelöscht", "deletedModel_one": "{{count}} Modell erfolgreich gelöscht", "deletedModel_other": "{{count}} Modelle erfolgreich gelöscht", "categorizedImage": "Erfolgreich klassifizierte Bilder", "trainedModel": "Modell erfolgreich trainiert.", "trainingModel": "Modelltraining erfolgreich gestartet.", "updatedModel": "Modellkonfiguration erfolgreich aktualisiert", - "renamedCategory": "Klasse erfolgreich in {{name}} umbenannt" + "renamedCategory": "Klasse erfolgreich in {{name}} umbenannt", + "reclassifiedImage": "Erfolgreich neu klassifiziertes Bild" }, "error": { "deleteImageFailed": "Löschen fehlgeschlagen: {{errorMessage}}", @@ -41,7 +44,8 @@ "updateModelFailed": "Aktualisierung des Modells fehlgeschlagen: {{errorMessage}}", "renameCategoryFailed": "Umbenennung der Klasse fehlgeschlagen: {{errorMessage}}", "categorizeFailed": "Bildkategorisierung fehlgeschlagen: {{errorMessage}}", - "trainingFailed": "Modelltraining 
fehlgeschlagen. Details sind in den Frigate-Protokollen zu finden." + "trainingFailed": "Modelltraining fehlgeschlagen. Details sind in den Frigate-Protokollen zu finden.", + "reclassifyFailed": "Die Neuklassifizierung des Bildes ist fehlgeschlagen: {{errorMessage}}" } }, "deleteCategory": { @@ -179,10 +183,17 @@ "generateSuccess": "Erfolgreich generierte Beispielbilder", "modelCreated": "Modell erfolgreich erstellt. Verwenden Sie die Ansicht „Aktuelle Klassifizierungen“, um Bilder für fehlende Zustände hinzuzufügen und trainieren Sie dann das Modell erneut.", "missingStatesWarning": { - "title": "Beispiele für fehlende Zustände", - "description": "Es wird empfohlen für alle Zustände Beispiele auszuwählen. Das Modell wird erst trainiert, wenn für alle Zustände Bilder vorhanden sind. Fahren Sie fort und verwenden Sie die Ansicht „Aktuelle Klassifizierungen“, um Bilder für die fehlenden Zustände zu klassifizieren. Trainieren Sie anschließend das Modell." + "title": "Beispiele für fehlende Klassen", + "description": "Nicht alle Klassen enthalten Beispiele. Versuchen Sie, neue Beispiele zu generieren, um die fehlende Klasse zu finden, oder fahren Sie fort und fügen Sie Bilder später über die Ansicht „Letzte Klassifizierungen“ hinzu." + }, + "refreshExamples": "Neue Beispiele erstellen", + "refreshConfirm": { + "title": "Neue Beispiele erstellen?", + "description": "Dadurch wird eine neue Reihe von Bildern generiert und alle Auswahlen, einschließlich aller bisherigen Klassen, werden gelöscht. Sie müssen für alle Klassen erneut Beispiele auswählen." 
} } }, - "none": "Keiner" + "none": "Keiner", + "reclassifyImageAs": "Bild neu klassifizieren als:", + "reclassifyImage": "Bild neu klassifizieren" } diff --git a/web/public/locales/de/views/events.json b/web/public/locales/de/views/events.json index 3a6629099..589a6e1a1 100644 --- a/web/public/locales/de/views/events.json +++ b/web/public/locales/de/views/events.json @@ -14,7 +14,9 @@ "description": "Überprüfungselemente können nur für eine Kamera erstellt werden, wenn Aufzeichnungen für diese Kamera aktiviert sind." } }, - "timeline": "Zeitleiste", + "timeline": { + "label": "Zeitleiste" + }, "timeline.aria": "Zeitleiste auswählen", "events": { "label": "Ereignisse", diff --git a/web/public/locales/de/views/explore.json b/web/public/locales/de/views/explore.json index 093001bb3..5ca822d74 100644 --- a/web/public/locales/de/views/explore.json +++ b/web/public/locales/de/views/explore.json @@ -82,7 +82,8 @@ "attributes": "Klassifizierungsattribute", "title": { "label": "Titel" - } + }, + "scoreInfo": "Punkte Info" }, "documentTitle": "Erkunde - Frigate", "generativeAI": "Generative KI", @@ -225,12 +226,18 @@ "debugReplay": { "label": "Debug-Wiedergabe", "aria": "Dieses verfolgte Objekt in der Debug-Wiedergabeansicht anzeigen" + }, + "more": { + "aria": "mehr" } }, "dialog": { "confirmDelete": { "title": "Löschen bestätigen", "desc": "Beim Löschen dieses verfolgten Objekts werden der Schnappschuss, alle gespeicherten Einbettungen und alle zugehörigen Verfolgungsdetails entfernt. Aufgezeichnetes Filmmaterial dieses verfolgten Objekts in der Verlaufsansicht wird NICHT gelöscht.

Sind Sie sicher, dass Sie fortfahren möchten?" + }, + "toast": { + "error": "Fehler beim Löschen dieses verfolgten Objekts: {{errorMessage}}" } }, "searchResult": { diff --git a/web/public/locales/de/views/exports.json b/web/public/locales/de/views/exports.json index 0d2ea5c2b..26d3eae16 100644 --- a/web/public/locales/de/views/exports.json +++ b/web/public/locales/de/views/exports.json @@ -1,5 +1,7 @@ { - "deleteExport": "Export löschen", + "deleteExport": { + "label": "Export löschen" + }, "editExport": { "title": "Export umbenennen", "desc": "Gib einen neuen Namen für diesen Export an.", diff --git a/web/public/locales/de/views/faceLibrary.json b/web/public/locales/de/views/faceLibrary.json index f99cfa792..d9269fd0e 100644 --- a/web/public/locales/de/views/faceLibrary.json +++ b/web/public/locales/de/views/faceLibrary.json @@ -67,7 +67,8 @@ "addFaceLibrary": "{{name}} wurde erfolgreich in die Gesichtsbibliothek aufgenommen!", "trainedFace": "Gesicht erfolgreich trainiert.", "updatedFaceScore": "Gesichtsbewertung erfolgreich auf {{name}} ({{score}}) aktualisiert.", - "renamedFace": "Gesicht erfolgreich in {{name}} umbenannt" + "renamedFace": "Gesicht erfolgreich in {{name}} umbenannt", + "reclassifiedFace": "Gesicht erfolgreich neu klassifiziert." }, "error": { "deleteFaceFailed": "Das Löschen ist fehlgeschlagen: {{errorMessage}}", @@ -76,7 +77,8 @@ "trainFailed": "Ausbildung fehlgeschlagen: {{errorMessage}}", "updateFaceScoreFailed": "Aktualisierung der Gesichtsbewertung fehlgeschlagen: {{errorMessage}}", "deleteNameFailed": "Name kann nicht gelöscht werden: {{errorMessage}}", - "renameFaceFailed": "Gesicht konnte nicht umbenannt werden: {{errorMessage}}" + "renameFaceFailed": "Gesicht konnte nicht umbenannt werden: {{errorMessage}}", + "reclassifyFailed": "Die Gesichtsbewertung ist fehlgeschlagen: {{errorMessage}}" } }, "steps": { @@ -98,5 +100,7 @@ "desc_other": "Bist du sicher, dass du {{count}} Gesichter löschen möchtest? 
Diese Aktion kann nicht rückgängig gemacht werden." }, "nofaces": "Keine Gesichter verfügbar", - "pixels": "{{area}}px" + "pixels": "{{area}}px", + "reclassifyFaceAs": "Gesicht neu klassifizieren als:", + "reclassifyFace": "Gesicht neu klassifizieren" } diff --git a/web/public/locales/de/views/live.json b/web/public/locales/de/views/live.json index eec43861a..854886b36 100644 --- a/web/public/locales/de/views/live.json +++ b/web/public/locales/de/views/live.json @@ -13,7 +13,8 @@ "clickMove": { "disable": "Bewegen per Klick deaktivieren", "enable": "Bewegen per Klick aktivieren", - "label": "Zum Zentrieren der Kamera ins Bild klicken" + "label": "Zum Zentrieren der Kamera ins Bild klicken", + "enableWithZoom": "Ermögliche Bewegung durch auswählen / Vergrößern durch ziehen" }, "up": { "label": "PTZ-Kamera nach oben bewegen" @@ -51,7 +52,9 @@ } } }, - "documentTitle": "Live - Frigate", + "documentTitle": { + "default": "Live - Frigate" + }, "documentTitle.withCamera": "{{camera}} - Live - Frigate", "muteCameras": { "disable": "Stumm aller Kameras aufheben", diff --git a/web/public/locales/de/views/settings.json b/web/public/locales/de/views/settings.json index 1e4c32d75..81606f16e 100644 --- a/web/public/locales/de/views/settings.json +++ b/web/public/locales/de/views/settings.json @@ -5,7 +5,7 @@ "camera": "Kameraeinstellungen - Frigate", "masksAndZones": "Masken- und Zoneneditor – Frigate", "object": "Debug - Frigate", - "general": "Profil-Einstellungen - Frigate", + "general": "Benutzeroberflächen-Einstellungen - Frigate", "frigatePlus": "Frigate+ Einstellungen – Frigate", "classification": "Klassifizierungseinstellungen – Frigate", "motionTuner": "Bewegungserkennungs-Optimierer – Frigate", @@ -15,7 +15,8 @@ "cameraReview": "Kameraeinstellungen prüfen - Frigate", "globalConfig": "Grundeinstellungen - Frigate", "cameraConfig": "Kameraeinstellungen - Frigate", - "maintenance": "Wartung - Frigate" + "maintenance": "Wartung - Frigate", + "profiles": "Profile - 
Frigate" }, "menu": { "ui": "Benutzeroberfläche", @@ -87,7 +88,11 @@ "cameraTimestampStyle": "Zeitstempel Stil", "cameraMqtt": "Kamera mqtt", "mediaSync": "Medien-Synchronisierung", - "regionGrid": "Regionsraster" + "regionGrid": "Regionsraster", + "uiSettings": "Benutzeroberfläche Einstellung", + "profiles": "Profile", + "systemGo2rtcStreams": "go2rtc-streams", + "maintenance": "Wartung" }, "dialog": { "unsavedChanges": { @@ -100,7 +105,7 @@ "noCamera": "Keine Kamera" }, "general": { - "title": "Profileinstellungen", + "title": "Benutzeroberflächen Einstellungen", "liveDashboard": { "title": "Live Übersicht", "playAlertVideos": { @@ -346,6 +351,17 @@ "error": { "mustBeGreaterOrEqualTo": "Der Geschwindigkeitsschwellwert muss größer oder gleich 0,1 sein." } + }, + "id": { + "error": { + "mustNotBeEmpty": "Die ID darf nicht leer sein.", + "alreadyExists": "Für diese Kamera existiert bereits eine Maske mit dieser ID." + } + }, + "name": { + "error": { + "mustNotBeEmpty": "Das Feld „Name“ darf nicht leer sein." + } } }, "toast": { @@ -410,6 +426,10 @@ "loiteringTime": { "desc": "Legt eine Mindestzeit in Sekunden fest, die das Objekt in dem Bereich sein muss, damit es aktiviert wird. Standard: 0", "title": "Verweilzeit" + }, + "enabled": { + "title": "Aktiviert", + "description": "Ob diese Zone in der Konfigurationsdatei aktiv und aktiviert ist. Ist sie deaktiviert, kann sie nicht über MQTT aktiviert werden. Deaktivierte Zonen werden zur Laufzeit ignoriert." } }, "motionMasks": { @@ -438,7 +458,13 @@ }, "point_one": "{{count}} Punkt", "point_other": "{{count}} Punkte", - "label": "Bewegungsmaske" + "label": "Bewegungsmaske", + "defaultName": "Bewegungsmaske {{number}}", + "name": { + "title": "Name", + "description": "Ein optionaler beschreibender Name für diese Bewegungsmaske.", + "placeholder": "Gib einen Namen ein..." 
+ } }, "restart_required": "Neustart erforderlich (Maske/Zone hat sich geändert)", "objectMasks": { @@ -464,10 +490,24 @@ "title": "Objekte", "desc": "Der Objekttyp, für den diese Objektmaske gilt.", "allObjectTypes": "Alle Objekttypen" + }, + "name": { + "title": "Name", + "description": "Ein optionaler beschreibender Name für diese Objektmaske.", + "placeholder": "Gib einen Namen ein..." } }, "motionMaskLabel": "Bewegungsmaske {{number}}", - "objectMaskLabel": "Objektmaske {{number}}" + "objectMaskLabel": "Objektmaske {{number}}", + "disabledInConfig": "Der Eintrag ist in der Konfigurationsdatei deaktiviert", + "profileBase": "(Base)", + "profileOverride": "(Überschreiben)", + "masks": { + "enabled": { + "title": "Aktiviert", + "description": "Ob diese Maske in der Konfigurationsdatei aktiviert ist. Ist sie deaktiviert, kann sie nicht über MQTT aktiviert werden. Deaktivierte Masken werden zur Laufzeit ignoriert." + } + } }, "debug": { "objectShapeFilterDrawing": { @@ -733,8 +773,8 @@ "plusLink": "Lese mehr zu Frigate+" }, "snapshotConfig": { - "desc": "Für die Übermittlung an Frigate+ muss in der Konfiguration sowohl Snapshots als auch clean_copy-Snapshots aktiviert sein.", - "cleanCopyWarning": "Einige Kameras haben Snapshots aktiviert aber clean copy deaktiviert. Aktiviere clean_copy in der Snapshot Konfiguration um Bilder an Frigate+ zu senden.", + "desc": "Für die Übermittlung an Frigate+ müssen Snapshots in Ihrer Konfiguration aktiviert sein.", + "cleanCopyWarning": "Bei einigen Kameras ist die Schnappschussfunktion deaktiviert", "documentation": "die Dokumentation lesen", "table": { "camera": "Kamera", @@ -765,14 +805,21 @@ "success": "Frigate+ Einstellungen wurden gespeichert. Starte Frigate neu um Änderungen anzuwenden." 
}, "restart_required": "Neustart erforderlich (Frigate+ Model geändert)", - "unsavedChanges": "Nicht gespeicherte Änderungen an den Frigate+-Einstellungen" + "unsavedChanges": "Nicht gespeicherte Änderungen an den Frigate+-Einstellungen", + "description": "Frigate+ ist ein Abonnementdienst, der Ihnen Zugriff auf zusätzliche Funktionen und Möglichkeiten für Ihre Frigate-Instanz bietet, darunter die Möglichkeit, benutzerdefinierte Objekterkennungsmodelle zu verwenden, die auf Ihren eigenen Daten trainiert wurden. Hier können Sie Ihre Frigate+-Modelleinstellungen verwalten.", + "cardTitles": { + "api": "API", + "currentModel": "Aktuelles Modell", + "otherModels": "Anderes Modell", + "configuration": "Konfiguration" + } }, "enrichments": { "birdClassification": { "title": "Vogelerkennung", "desc": "Die Vogelerkennung identifiziert Vögelarten mithilfe eines quantisierten Tensorflowmodells. Wenn eine Vogelart erkannt wird, wird ihr Name als sub_label hinzugefügt. Diese Informationen sind in der Benutzeroberfläche, in Filtern und in Benachrichtigungen enthalten." }, - "title": "Anreicherungseinstellungen", + "title": "Verfeinerungseinstellungen", "unsavedChanges": "Ungesicherte geänderte Verbesserungseinstellungen", "semanticSearch": { "reindexNow": { @@ -1293,7 +1340,12 @@ "backToSettings": "Zurück zu Kameraeinstellungen", "streams": { "title": "Kameras aktivieren / deaktivieren", - "desc": "Deaktiviere eine Kamera vorübergehend, bis Frigate neu gestartet wird. Deaktivierung einer Kamera stoppt die Verarbeitung der Streams dieser Kamera durch Frigate vollständig. Erkennung, Aufzeichnung und Debugging sind dann nicht mehr verfügbar.
Hinweis: Dies deaktiviert nicht die go2rtc restreams." + "desc": "Deaktiviere eine Kamera vorübergehend, bis Frigate neu gestartet wird. Deaktivierung einer Kamera stoppt die Verarbeitung der Streams dieser Kamera durch Frigate vollständig. Erkennung, Aufzeichnung und Debugging sind dann nicht mehr verfügbar.
Hinweis: Dies deaktiviert nicht die go2rtc restreams.", + "enableLabel": "Aktivierte Kameras", + "enableDesc": "Eine aktivierte Kamera vorübergehend deaktivieren, bis Frigate neu gestartet wird. Durch das Deaktivieren einer Kamera wird die Verarbeitung der Streams dieser Kamera durch Frigate vollständig unterbrochen. Erkennung, Aufzeichnung und Fehlerbehebung stehen dann nicht mehr zur Verfügung.
Hinweis: go2rtc-Restreams werden dadurch nicht deaktiviert.",
+ "disableLabel": "Deaktivierte Kameras",
+ "disableDesc": "Aktivieren Sie eine Kamera, die derzeit in der Benutzeroberfläche nicht sichtbar und in der Konfiguration deaktiviert ist. Nach der Aktivierung ist ein Neustart von Frigate erforderlich.",
+ "enableSuccess": "{{cameraName}} wurde in der Konfiguration aktiviert. Starte Frigate neu, um die Änderungen zu übernehmen."
},
"cameraConfig": {
"add": "Kamera hinzufügen",
@@ -1335,6 +1387,14 @@
"confirmButton": "Dauerhaft löschen",
"success": "Die Kamera {{cameraName}} wurde erfolgreich gelöscht",
"error": "Das Löschen der Kamera {{cameraName}} ist fehlgeschlagen"
+ },
+ "profiles": {
+ "title": "Profilkameraumschaltungen",
+ "selectLabel": "Profil auswählen",
+ "description": "Legen Sie fest, welche Kameras bei der Aktivierung eines Profils aktiviert oder deaktiviert werden sollen. Kameras, für die „Übernehmen“ eingestellt ist, behalten ihren ursprünglichen Aktivierungsstatus bei.",
+ "inherit": "Übernehmen",
+ "enabled": "Aktiviert",
+ "disabled": "Deaktiviert"
}
},
"cameraReview": {
@@ -1389,6 +1449,433 @@
"value": {
"label": "Neuer Wert",
"reset": "Zurücksetzen"
+ },
+ "profile": {
+ "label": "Profil"
+ }
+ },
+ "button": {
+ "overriddenGlobalTooltip": "Diese Kamera überschreibt globale Konfigurationseinstellungen in diesem Abschnitt",
+ "overriddenBaseConfig": "Überschrieben (Basiskonfiguration)",
+ "overriddenBaseConfigTooltip": "Das {{profile}}-Profil überschreibt Konfigurationseinstellungen in diesem Abschnitt",
+ "overriddenGlobal": "Überschrieben (Global)"
+ },
+ "timestampPosition": {
+ "tl": "Oben links",
+ "tr": "Oben rechts",
+ "bl": "Unten links",
+ "br": "Unten rechts"
+ },
+ "detectionModel": {
+ "plusActive": {
+ "title": "Verwaltung von Frigate+-Modellen",
+ "label": "Aktuelle Modellquelle",
+ "description": "Auf diesem Rechner läuft ein Frigate+-Modell. 
Wählen Sie Ihr Modell in den Frigate+-Einstellungen aus oder ändern Sie es.", + "goToFrigatePlus": "Zu den Frigate+-Einstellungen gehen", + "showModelForm": "Ein Modell manuell konfigurieren" + } + }, + "maintenance": { + "title": "Wartung", + "sync": { + "title": "Medien-Synchronisierung", + "desc": "Frigate bereinigt Medien regelmäßig nach einem festgelegten Zeitplan entsprechend Ihrer Konfiguration zur Aufbewahrungsdauer. Es ist normal, dass während der Ausführung von Frigate einige verwaiste Dateien angezeigt werden. Nutzen Sie diese Funktion, um verwaiste Mediendateien von der Festplatte zu entfernen, auf die in der Datenbank nicht mehr verwiesen wird.", + "started": "Die Mediensynchronisierung wurde gestartet.", + "alreadyRunning": "Ein Synchronisierungsauftrag wird bereits ausgeführt", + "error": "Die Synchronisierung konnte nicht gestartet werden", + "currentStatus": "Status", + "jobId": "Job ID", + "startTime": "Startzeit", + "endTime": "Endzeit", + "statusLabel": "Status", + "results": "Ergebnisse", + "errorLabel": "Fehler", + "mediaTypes": "Medientypen", + "allMedia": "Alle Medien", + "dryRun": "Probelauf", + "dryRunEnabled": "Es werden keine Dateien gelöscht", + "dryRunDisabled": "Die Dateien werden gelöscht", + "force": "Zwingen", + "forceDesc": "Die Sicherheitsschwelle umgehen und die Synchronisierung abschließen, selbst wenn mehr als 50 % der Dateien gelöscht würden.", + "verbose": "Ausführlich", + "verboseDesc": "Erstelle eine vollständige Liste der verwaisten Dateien auf der Festplatte zur Überprüfung.", + "running": "Synchronisierung läuft...", + "start": "Synchronisierung starten", + "inProgress": "Die Synchronisierung läuft. 
Diese Seite ist deaktiviert.",
+ "status": {
+ "queued": "In der Warteschlange",
+ "running": "läuft",
+ "completed": "Abgeschlossen",
+ "failed": "Fehlgeschlagen",
+ "notRunning": "läuft nicht"
+ },
+ "resultsFields": {
+ "filesChecked": "Dateien geprüft",
+ "orphansFound": "Datenleichen gefunden",
+ "orphansDeleted": "Datenleichen gelöscht",
+ "aborted": "Abgebrochen. Die Löschung würde den Sicherheitsgrenzwert überschreiten.",
+ "error": "Fehler",
+ "totals": "Gesamt"
+ },
+ "event_snapshots": "Momentaufnahmen von verfolgten Objekten",
+ "event_thumbnails": "Miniaturansichten der verfolgten Objekte",
+ "review_thumbnails": "Vorschau-Miniaturansichten",
+ "previews": "Vorschau",
+ "exports": "Exporte",
+ "recordings": "Aufnahmen"
+ },
+ "regionGrid": {
+ "title": "Regionsraster",
+ "desc": "Das Erfassungsraster ist ein optimiertes Modell, das lernt, wo Objekte unterschiedlicher Größe typischerweise im Sichtfeld der einzelnen Kameras auftreten. Frigate nutzt diese Daten, um die Größe der Erfassungsbereiche effizient anzupassen. Das Raster wird im Laufe der Zeit automatisch aus den Daten der verfolgten Objekte erstellt.",
+ "clear": "Regionsraster löschen",
+ "clearConfirmTitle": "Raster der Region löschen",
+ "clearConfirmDesc": "Es wird nicht empfohlen, das Regionsraster zu löschen, es sei denn, Sie haben kürzlich die Größe Ihres Detektormodells geändert oder die physische Position Ihrer Kamera angepasst und haben Probleme bei der Objektverfolgung. Das Raster wird im Laufe der Zeit automatisch neu aufgebaut, sobald Objekte verfolgt werden. 
Damit die Änderungen wirksam werden, ist ein Neustart von Frigate erforderlich.", + "clearSuccess": "Das Regionsraster wurde erfolgreich gelöscht", + "clearError": "Das Löschen des Regionsrasters ist fehlgeschlagen", + "restartRequired": "Ein Neustart ist erforderlich, damit die Änderungen am regionalen Netz wirksam werden" + } + }, + "configForm": { + "global": { + "title": "Globale Einstellung", + "description": "Diese Einstellungen gelten für alle Kameras, sofern sie nicht in den kameraspezifischen Einstellungen überschrieben werden." + }, + "camera": { + "title": "Kamera Einstellung", + "description": "Diese Einstellungen gelten nur für diese Kamera und haben Vorrang vor den allgemeinen Einstellungen.", + "noCameras": "Keine Kameras verfügbar" + }, + "advancedSettingsCount": "Erweiterte Einstellungen ({{count}})", + "advancedCount": "Fortgeschritten ({{count}})", + "showAdvanced": "Erweiterte Einstellungen anzeigen", + "tabs": { + "sharedDefaults": "Gemeinsame Standardeinstellungen", + "system": "System", + "integrations": "Integrationen" + }, + "additionalProperties": { + "keyLabel": "Schlüssel", + "valueLabel": "Wert", + "keyPlaceholder": "Neuer Schlüssel", + "remove": "Entfernen" + }, + "timezone": { + "defaultOption": "Zeitzone des Browsers verwenden" + }, + "roleMap": { + "empty": "Keine Rollenzuordnungen", + "roleLabel": "Rolle", + "groupsLabel": "Gruppe", + "addMapping": "Rollenzuordnung hinzufügen", + "remove": "Entfernen" + }, + "ffmpegArgs": { + "preset": "Voreinstellung", + "manual": "Manuelle Argumente", + "inherit": "Von den Kameraeinstellungen übernehmen", + "none": "Keine", + "useGlobalSetting": "Von der globalen Einstellung übernehmen", + "selectPreset": "Voreinstellung auswählen", + "manualPlaceholder": "FFmpeg-Argumente eingeben", + "presetLabels": { + "preset-rpi-64-h264": "Raspberry Pi (H.264)", + "preset-rpi-64-h265": "Raspberry Pi (H.265)", + "preset-vaapi": "VAAPI (Intel/AMD GPU)", + "preset-intel-qsv-h264": "Intel QuickSync (H.264)", + 
"preset-intel-qsv-h265": "Intel QuickSync (H.265)",
+ "preset-nvidia": "NVIDIA GPU",
+ "preset-jetson-h264": "NVIDIA Jetson (H.264)",
+ "preset-jetson-h265": "NVIDIA Jetson (H.265)",
+ "preset-rkmpp": "Rockchip RKMPP",
+ "preset-http-jpeg-generic": "HTTP JPEG (Generic)",
+ "preset-http-mjpeg-generic": "HTTP MJPEG (Generic)",
+ "preset-http-reolink": "HTTP - Reolink Cameras",
+ "preset-rtmp-generic": "RTMP (Generic)",
+ "preset-rtsp-generic": "RTSP (Generic)",
+ "preset-rtsp-restream": "RTSP - Restream von go2rtc",
+ "preset-rtsp-restream-low-latency": "RTSP - Restream von go2rtc (geringe Latenz)",
+ "preset-rtsp-udp": "RTSP - UDP",
+ "preset-rtsp-blue-iris": "RTSP - Blue Iris",
+ "preset-record-generic": "Aufnahme (allgemein, ohne Ton)",
+ "preset-record-generic-audio-copy": "Aufnahme (Allgemein + Audio kopieren)",
+ "preset-record-generic-audio-aac": "Aufnahme (Allgemein + Audio in AAC)",
+ "preset-record-mjpeg": "Aufzeichnung – MJPEG-Kameras",
+ "preset-record-jpeg": "Aufnahme – JPEG-Kameras",
+ "preset-record-ubiquiti": "Aufzeichnung – Ubiquiti-Kameras"
+ }
+ },
+ "cameraInputs": {
+ "itemTitle": "Stream {{index}}"
+ },
+ "restartRequiredField": "Neustart erforderlich",
+ "restartRequiredFooter": "Konfiguration geändert – Neustart erforderlich",
+ "sections": {
+ "detect": "Erkennung",
+ "record": "Aufnahme",
+ "snapshots": "Schnappschüsse",
+ "motion": "Bewegung",
+ "objects": "Objekte",
+ "review": "Überprüfung",
+ "audio": "Audio",
+ "notifications": "Benachrichtigungen",
+ "live": "Live-Ansicht",
+ "timestamp_style": "Zeitstempel",
+ "mqtt": "MQTT",
+ "database": "Datenbank",
+ "telemetry": "Telemetrie",
+ "auth": "Authentifizierung",
+ "tls": "TLS",
+ "proxy": "Proxy",
+ "go2rtc": "go2rtc",
+ "ffmpeg": "FFmpeg",
+ "detectors": "Detektoren",
+ "model": "Modell",
+ "semantic_search": "Semantische Suche",
+ "genai": "GenAI",
+ "face_recognition": "Gesichtserkennung",
+ "lpr": "Kennzeichenerkennung",
+ "birdseye": "Birdseye",
+ 
"masksAndZones": "Masken / Zonen" + }, + "detect": { + "title": "Erkennungseinstellungen" + }, + "detectors": { + "title": "Erkennungseinstellungen", + "singleType": "Es ist nur ein {{type}}-Detektor zulässig.", + "keyRequired": "Der Name des Detektors ist erforderlich.", + "keyDuplicate": "Der Name des Detektors ist bereits vorhanden.", + "noSchema": "Es sind keine Detektorschemata verfügbar.", + "none": "Es sind keine Detektorinstanzen konfiguriert.", + "add": "Detektor hinzufügen" + }, + "record": { + "title": "Aufnahmeeinstellungen" + }, + "snapshots": { + "title": "Einstellungen für Momentaufnahmen" + }, + "motion": { + "title": "Bewegungseinstellungen" + }, + "objects": { + "title": "Objekteinstellungen" + }, + "audioLabels": { + "summary": "{{count}} Audio-Labels ausgewählt", + "empty": "Es sind keine Audio-Bezeichnungen verfügbar" + }, + "objectLabels": { + "summary": "{{count}} Objekttypen ausgewählt", + "empty": "Es sind keine Objektbeschriftungen verfügbar" + }, + "reviewLabels": { + "summary": "{{count}} Etiketten ausgewählt", + "empty": "Keine Beschriftungen verfügbar", + "allNonAlertDetections": "Alle Aktivitäten, die keine Warnmeldungen auslösen, werden als Erkennungen erfasst." 
+ }, + "filters": { + "objectFieldLabel": "{{field}} für {{label}}" + }, + "zoneNames": { + "summary": "{{count}} ausgewählt", + "empty": "Keine Zonen verfügbar" + }, + "inputRoles": { + "summary": "{{count}} Rollen ausgewählt", + "empty": "Es sind keine Rollen verfügbar", + "options": { + "detect": "Erkennen", + "record": "Aufnahme", + "audio": "Audio" + } + }, + "genaiRoles": { + "options": { + "embeddings": "Einbetten", + "vision": "Vision", + "tools": "Werkzeuge" + } + }, + "semanticSearchModel": { + "placeholder": "Modell auswählen…", + "builtIn": "Vorbereitete Modelle", + "genaiProviders": "GenAI Anbieter" + }, + "review": { + "title": "Einstellungen überprüfen" + }, + "audio": { + "title": "Audioeinstellungen" + }, + "notifications": { + "title": "Benachrichtigungseinstellungen" + }, + "live": { + "title": "Einstellungen für die Live-Ansicht" + }, + "timestamp_style": { + "title": "Einstellungen für Zeitstempel" + }, + "searchPlaceholder": "Suche...", + "addCustomLabel": "Benutzerdefiniertes Etikett hinzufügen..." + }, + "globalConfig": { + "title": "Globale Konfiguration", + "description": "Konfigurieren Sie globale Einstellungen, die für alle Kameras gelten, sofern sie nicht überschrieben werden.", + "toast": { + "success": "Die globalen Einstellungen wurden erfolgreich gespeichert", + "error": "Das Speichern der globalen Einstellungen ist fehlgeschlagen", + "validationError": "Validierung fehlgeschlagen" + } + }, + "cameraConfig": { + "title": "Kamerakonfiguration", + "description": "Konfigurieren Sie die Einstellungen für einzelne Kameras. 
Diese Einstellungen haben Vorrang vor den globalen Standardeinstellungen.", + "overriddenBadge": "Überschrieben", + "resetToGlobal": "Auf globale Einstellungen zurücksetzen", + "toast": { + "success": "Die Kameraeinstellungen wurden erfolgreich gespeichert", + "error": "Das Speichern der Kameraeinstellungen ist fehlgeschlagen" + } + }, + "toast": { + "success": "Einstellungen erfolgreich gespeichert", + "applied": "Einstellungen wurden erfolgreich übernommen", + "successRestartRequired": "Die Einstellungen wurden erfolgreich gespeichert. Starte Frigate neu, um die Änderungen zu übernehmen.", + "error": "Das Speichern der Einstellungen ist fehlgeschlagen", + "validationError": "Validierung fehlgeschlagen: {{message}}", + "resetSuccess": "Auf globale Standardeinstellungen zurückgesetzt", + "resetError": "Das Zurücksetzen der Einstellungen ist fehlgeschlagen", + "saveAllSuccess_one": "{{count}} Abschnitt wurde erfolgreich gespeichert.", + "saveAllSuccess_other": "Alle {{count}} Abschnitte wurden erfolgreich gespeichert.", + "saveAllPartial_one": "{{successCount}} von {{totalCount}} Abschnitt wurde gespeichert. {{failCount}} sind fehlgeschlagen.", + "saveAllPartial_other": "{{successCount}} von {{totalCount}} Abschnitten wurden gespeichert. {{failCount}} sind fehlgeschlagen.", + "saveAllFailure": "Es konnten nicht alle Abschnitte gespeichert werden." 
+ }, + "profiles": { + "title": "Profile", + "activeProfile": "Aktive Profile", + "noActiveProfile": "Kein aktives Profil", + "active": "Aktiv", + "activated": "Profil „{{profile}}“ aktiviert", + "activateFailed": "Das Profil konnte nicht eingerichtet werden", + "deactivated": "Profil deaktiviert", + "noProfiles": "Es sind keine Profile definiert.", + "noOverrides": "Keine Überschreibungen", + "cameraCount_one": "{{count}} Kamera", + "cameraCount_other": "{{count}} Kameras", + "columnCamera": "Kamera", + "columnOverrides": "Profilüberschreibungen", + "baseConfig": "Basis Konfiguration", + "addProfile": "Profil hinzufügen", + "newProfile": "Neues Profil", + "profileNamePlaceholder": "z. B. „Scharf“, „Abwesend“, „Nachtmodus“", + "friendlyNameLabel": "Profilname", + "profileIdLabel": "Profile-ID", + "profileIdDescription": "Interne Kennung, die in der Konfiguration und in Automatisierungen verwendet wird", + "nameInvalid": "Es sind nur Kleinbuchstaben, Zahlen und Unterstriche zulässig", + "nameDuplicate": "Ein Profil mit diesem Namen existiert bereits", + "error": { + "mustBeAtLeastTwoCharacters": "Muss mindestens 2 Zeichen lang sein", + "mustNotContainPeriod": "Darf keine Punkte enthalten", + "alreadyExists": "Ein Profil mit dieser ID existiert bereits" + }, + "renameProfile": "Profil umbenennen", + "renameSuccess": "Profil in „{{profile}}“ umbenannt", + "deleteProfile": "Profil löschen", + "deleteProfileConfirm": "Profil „{{profile}}“ von allen Kameras löschen? 
Dieser Vorgang kann nicht rückgängig gemacht werden.", + "deleteSuccess": "Profil „{{profile}}“ gelöscht", + "createSuccess": "Profil „{{profile}}“ erstellt", + "removeOverride": "Profil-Überschreibung aufheben", + "deleteSection": "Abschnittsüberschreibungen löschen", + "deleteSectionConfirm": "Die Überschreibungen von {{section}} für das Profil {{profile}} auf {{camera}} entfernen?", + "deleteSectionSuccess": "Die Überschreibungen von {{section}} für {{profile}} wurden entfernt", + "enableSwitch": "Profile aktivieren", + "enabledDescription": "Profile sind aktiviert. Erstellen Sie unten ein neues Profil, navigieren Sie zum Abschnitt „Kamera-Konfiguration“, um Ihre Änderungen vorzunehmen, und speichern Sie diese, damit sie wirksam werden.", + "disabledDescription": "Mit Profilen können Sie benannte Gruppen von Kamera-Konfigurationsänderungen (z. B. „aktiviert“, „abwesend“, „Nacht“) definieren, die bei Bedarf aktiviert werden können." + }, + "unsavedChanges": "Sie haben noch nicht gespeicherte Änderungen", + "confirmReset": "Zurücksetzen bestätigen", + "resetToDefaultDescription": "Dadurch werden alle Einstellungen in diesem Abschnitt auf ihre Standardwerte zurückgesetzt. Dieser Vorgang kann nicht rückgängig gemacht werden.", + "resetToGlobalDescription": "Dadurch werden die Einstellungen in diesem Abschnitt auf die globalen Standardwerte zurückgesetzt. Dieser Vorgang kann nicht rückgängig gemacht werden.", + "go2rtcStreams": { + "title": "go2rtc-Streams", + "description": "Verwalten Sie die go2rtc-Stream-Konfigurationen für das Restreaming von Kamerabildern. Jeder Stream verfügt über einen Namen und eine oder mehrere Quell-URLs.", + "addStream": "Stream hinzufügen", + "addStreamDesc": "Geben Sie einen Namen für den neuen Stream ein. 
Dieser Name wird verwendet, um in Ihrer Kamerakonfiguration auf den Stream zu verweisen.", + "addUrl": "URL hinzufügen", + "streamName": "Stream-Name", + "streamNamePlaceholder": "z.B., Vordertür", + "streamUrlPlaceholder": "z.B., rtsp://user:pass@192.168.1.100/stream", + "deleteStream": "Stream löschen", + "deleteStreamConfirm": "Möchten Sie den Stream „{{streamName}}“ wirklich löschen? Kameras, die auf diesen Stream verweisen, funktionieren möglicherweise nicht mehr.", + "noStreams": "Es sind keine go2rtc-Streams konfiguriert. Füge einen Stream hinzu, um loszulegen.", + "validation": { + "nameRequired": "Der Name des Streams ist erforderlich", + "nameDuplicate": "Ein Stream mit diesem Namen existiert bereits", + "nameInvalid": "Der Name des Streams darf nur Buchstaben, Zahlen, Unterstriche und Bindestriche enthalten", + "urlRequired": "Es ist mindestens eine URL erforderlich" + }, + "renameStream": "Stream umbenennen", + "renameStreamDesc": "Geben Sie einen neuen Namen für diesen Stream ein. Das Umbenennen eines Streams kann dazu führen, dass Kameras oder andere Streams, die namentlich darauf verweisen, nicht mehr funktionieren.", + "newStreamName": "Neuer Stream-Name", + "ffmpeg": { + "useFfmpegModule": "Kompatibilitätsmodus verwenden (ffmpeg)", + "video": "Video", + "audio": "Audio", + "hardware": "Hardwarebeschleunigung", + "videoCopy": "Kopieren", + "videoH264": "Transcode zu H.264", + "videoH265": "Transcode zu H.265", + "videoExclude": "Ausschließen", + "audioCopy": "Kopieren", + "audioAac": "Transcode zu AAC", + "audioOpus": "Transcode zu Opus", + "audioPcmu": "Transcode zu PCM μ-law", + "audioPcma": "Transcode zu PCM A-law", + "audioPcm": "Transcode zu PCM", + "audioMp3": "Transcode zu MP3", + "audioExclude": "Ausschließen", + "hardwareNone": "Keine Hardwarebeschleunigung", + "hardwareAuto": "Automatische Hardwarebeschleunigung" + } + }, + "onvif": { + "profileAuto": "Auto", + "profileLoading": "Profile werden geladen..." 
+ }, + "configMessages": { + "review": { + "recordDisabled": "Aufnahme ist deaktiviert, Überprüfungspunkt konnte nicht erstellt werden.", + "detectDisabled": "Die Objekterkennung ist deaktiviert. Für die Überprüfung von Elementen müssen Objekte erkannt werden, um Warnmeldungen und Erkennungen zu kategorisieren.", + "allNonAlertDetections": "Alle Aktivitäten, die keine Warnmeldungen auslösen, werden als Erkennungen erfasst." + }, + "audio": { + "noAudioRole": "Für keinen Stream ist die Audio-Rolle definiert. Sie müssen die Audio-Rolle aktivieren, damit die Audioerkennung funktioniert." + }, + "audioTranscription": { + "audioDetectionDisabled": "Die Audioerkennung ist für diese Kamera nicht aktiviert. Für die Audio-Transkription muss die Audioerkennung aktiviert sein." + }, + "detect": { + "fpsGreaterThanFive": "Es wird nicht empfohlen, den Wert für die FPS-Erkennung auf mehr als 5 einzustellen." + }, + "faceRecognition": { + "globalDisabled": "Die Gesichtserkennung ist auf globaler Ebene nicht aktiviert. Aktivieren Sie sie in den globalen Einstellungen, damit die Gesichtserkennung auf Kameraebene funktioniert.", + "personNotTracked": "Für die Gesichtserkennung muss das Objekt „person“ verfolgt werden. Stellen Sie sicher, dass „person“ in der Objektverfolgungsliste enthalten ist." + }, + "lpr": { + "globalDisabled": "Die Kennzeichenerkennung ist auf globaler Ebene nicht aktiviert. Aktivieren Sie sie in den globalen Einstellungen, damit die Kennzeichenerkennung auf Kameraebene funktioniert.", + "vehicleNotTracked": "Für die Kennzeichenerkennung muss entweder ein „Pkw“ oder ein „Motorrad“ erfasst werden." + }, + "record": { + "noRecordRole": "Für keinen Stream ist die Rolle „Record“ definiert. Die Aufzeichnung funktioniert nicht." + }, + "birdseye": { + "objectsModeDetectDisabled": "Birdseye ist auf den Modus „Objekte“ eingestellt, doch die Objekterkennung ist für diese Kamera deaktiviert. Die Kamera wird in Birdseye nicht angezeigt." 
+ }, + "snapshots": { + "detectDisabled": "Die Objekterkennung ist deaktiviert. Es werden keine Momentaufnahmen von verfolgten Objekten erstellt." } } } diff --git a/web/public/locales/de/views/system.json b/web/public/locales/de/views/system.json index 050b77f5f..3b41b03b7 100644 --- a/web/public/locales/de/views/system.json +++ b/web/public/locales/de/views/system.json @@ -38,7 +38,8 @@ "description": "Dies ist ein bekannter Fehler in den GPU-Statistik-Tools von Intel (intel_gpu_top), bei dem das Tool ausfällt und wiederholt eine GPU-Auslastung von 0 % anzeigt, selbst wenn die Hardwarebeschleunigung und die Objekterkennung auf der (i)GPU korrekt funktionieren. Dies ist kein Fehler von Frigate. Du kannst den Host neu starten, um das Problem vorübergehend zu beheben und zu prüfen, ob die GPU korrekt funktioniert. Dies hat keine Auswirkungen auf die Leistung." }, "gpuTemperature": "GPU Temperatur", - "npuTemperature": "NPU Temperatur" + "npuTemperature": "NPU Temperatur", + "gpuCompute": "GPU Compute / Encode" }, "title": "Allgemein", "detector": { @@ -119,9 +120,11 @@ "empty": "Noch keine Nachrichten erfasst", "count": "{{count}} Nachrichten", "expanded": { - "payload": "Nutzlast" + "payload": "Nutzinhalt" }, - "resume": "fortsetzen" + "resume": "fortsetzen", + "count_one": "{{count}} Nachricht", + "count_other": "{{count}} Nachrichten" } }, "metrics": "Systemmetriken", @@ -187,7 +190,8 @@ "cameraDetect": "{{camName}} Erkennung", "cameraFramesPerSecond": "{{camName}} Bilder pro Sekunde", "cameraDetectionsPerSecond": "{{camName}} Erkennungen pro Sekunde", - "cameraSkippedDetectionsPerSecond": "{{camName}} übersprungene Erkennungen pro Sekunde" + "cameraSkippedDetectionsPerSecond": "{{camName}} übersprungene Erkennungen pro Sekunde", + "cameraGpu": "{{camName}} GPU" }, "title": "Kameras", "framesAndDetections": "Bilder / Erkennungen", diff --git a/web/public/locales/en/config/cameras.json b/web/public/locales/en/config/cameras.json index ebe775504..1b524c347 100644 
--- a/web/public/locales/en/config/cameras.json +++ b/web/public/locales/en/config/cameras.json @@ -529,7 +529,7 @@ }, "detections": { "label": "Detections config", - "description": "Settings for creating detection events (non-alert) and how long to keep them.", + "description": "Settings for which tracked objects generate detections (non-alert) and how detections are retained.", "enabled": { "label": "Enable detections", "description": "Enable or disable detection events for this camera." @@ -787,6 +787,10 @@ "label": "Disable TLS verify", "description": "Skip TLS verification and disable digest auth for ONVIF (unsafe; use in safe networks only)." }, + "profile": { + "label": "ONVIF profile", + "description": "Specific ONVIF media profile to use for PTZ control, matched by token or name. If not set, the first profile with valid PTZ configuration is selected automatically." + }, "autotracking": { "label": "Autotracking", "description": "Automatically track moving objects and keep them centered in the frame using PTZ camera movements.", diff --git a/web/public/locales/en/config/global.json b/web/public/locales/en/config/global.json index 8587ec263..69c77fad1 100644 --- a/web/public/locales/en/config/global.json +++ b/web/public/locales/en/config/global.json @@ -293,7 +293,7 @@ "label": "Detector specific model configuration", "description": "Detector-specific model configuration options (path, input size, etc.).", "path": { - "label": "Custom Object detection model path", + "label": "Custom object detector model path", "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." }, "labelmap_path": { @@ -466,7 +466,7 @@ "label": "Detection model", "description": "Settings to configure a custom object detection model and its input shape.", "path": { - "label": "Custom Object detection model path", + "label": "Custom object detector model path", "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." 
}, "labelmap_path": { @@ -527,7 +527,7 @@ }, "roles": { "label": "Roles", - "description": "GenAI roles (tools, vision, embeddings); one provider per role." + "description": "GenAI roles (chat, descriptions, embeddings); one provider per role." }, "provider_options": { "label": "Provider options", @@ -752,7 +752,7 @@ }, "live": { "label": "Live playback", - "description": "Settings used by the Web UI to control live stream resolution and quality.", + "description": "Settings to control the jsmpeg live stream resolution and quality. This does not affect restreamed cameras that use go2rtc for live view.", "streams": { "label": "Live stream names", "description": "Mapping of configured stream names to restream/go2rtc names used for live playback." @@ -1044,7 +1044,7 @@ }, "detections": { "label": "Detections config", - "description": "Settings for creating detection events (non-alert) and how long to keep them.", + "description": "Settings for which tracked objects generate detections (non-alert) and how detections are retained.", "enabled": { "label": "Enable detections", "description": "Enable or disable detection events for all cameras; can be overridden per-camera." @@ -1536,6 +1536,10 @@ "label": "Disable TLS verify", "description": "Skip TLS verification and disable digest auth for ONVIF (unsafe; use in safe networks only)." }, + "profile": { + "label": "ONVIF profile", + "description": "Specific ONVIF media profile to use for PTZ control, matched by token or name. If not set, the first profile with valid PTZ configuration is selected automatically." 
+ }, "autotracking": { "label": "Autotracking", "description": "Automatically track moving objects and keep them centered in the frame using PTZ camera movements.", diff --git a/web/public/locales/en/views/chat.json b/web/public/locales/en/views/chat.json index ec9e65e6e..ca0520d88 100644 --- a/web/public/locales/en/views/chat.json +++ b/web/public/locales/en/views/chat.json @@ -1,4 +1,5 @@ { + "documentTitle": "Chat - Frigate", "title": "Frigate Chat", "subtitle": "Your AI assistant for camera management and insights", "placeholder": "Ask anything...", @@ -15,10 +16,14 @@ "suggested_requests": "Try asking:", "starting_requests": { "show_recent_events": "Show recent events", - "show_camera_status": "Show camera status" + "show_camera_status": "Show camera status", + "recap": "What happened while I was away?", + "watch_camera": "Watch a camera for activity" }, "starting_requests_prompts": { "show_recent_events": "Show me the recent events from the last hour", - "show_camera_status": "What is the current status of my cameras?" 
+ "show_camera_status": "What is the current status of my cameras?", + "recap": "What happened while I was away?", + "watch_camera": "Watch the front door and let me know if anyone shows up" } } diff --git a/web/public/locales/en/views/live.json b/web/public/locales/en/views/live.json index 878470187..37e6b15db 100644 --- a/web/public/locales/en/views/live.json +++ b/web/public/locales/en/views/live.json @@ -17,6 +17,7 @@ "clickMove": { "label": "Click in the frame to center the camera", "enable": "Enable click to move", + "enableWithZoom": "Enable click to move / drag to zoom", "disable": "Disable click to move" }, "left": { diff --git a/web/public/locales/en/views/settings.json b/web/public/locales/en/views/settings.json index 42de28d52..a151e9ca9 100644 --- a/web/public/locales/en/views/settings.json +++ b/web/public/locales/en/views/settings.json @@ -539,6 +539,7 @@ }, "restart_required": "Restart required (masks/zones changed)", "disabledInConfig": "Item is disabled in the config file", + "addDisabledProfile": "Add to the base config first, then override in the profile", "profileBase": "(base)", "profileOverride": "(override)", "toast": { @@ -613,6 +614,10 @@ "desc": "Are you sure you want to delete the {{type}} {{name}}?", "success": "{{name}} has been deleted." }, + "revertOverride": { + "title": "Revert to Base Config", + "desc": "This will remove the profile override for the {{type}} {{name}} and revert to the base configuration." + }, "error": { "mustBeFinished": "Polygon drawing must be finished before saving." 
} @@ -825,6 +830,12 @@ "area": "Area" } }, + "timestampPosition": { + "tl": "Top left", + "tr": "Top right", + "bl": "Bottom left", + "br": "Bottom right" + }, "users": { "title": "Users", "management": { @@ -1342,7 +1353,22 @@ "preset-nvidia": "NVIDIA GPU", "preset-jetson-h264": "NVIDIA Jetson (H.264)", "preset-jetson-h265": "NVIDIA Jetson (H.265)", - "preset-rkmpp": "Rockchip RKMPP" + "preset-rkmpp": "Rockchip RKMPP", + "preset-http-jpeg-generic": "HTTP JPEG (Generic)", + "preset-http-mjpeg-generic": "HTTP MJPEG (Generic)", + "preset-http-reolink": "HTTP - Reolink Cameras", + "preset-rtmp-generic": "RTMP (Generic)", + "preset-rtsp-generic": "RTSP (Generic)", + "preset-rtsp-restream": "RTSP - Restream from go2rtc", + "preset-rtsp-restream-low-latency": "RTSP - Restream from go2rtc (Low Latency)", + "preset-rtsp-udp": "RTSP - UDP", + "preset-rtsp-blue-iris": "RTSP - Blue Iris", + "preset-record-generic": "Record (Generic, no audio)", + "preset-record-generic-audio-copy": "Record (Generic + Copy Audio)", + "preset-record-generic-audio-aac": "Record (Generic + Audio to AAC)", + "preset-record-mjpeg": "Record - MJPEG Cameras", + "preset-record-jpeg": "Record - JPEG Cameras", + "preset-record-ubiquiti": "Record - Ubiquiti Cameras" } }, "cameraInputs": { @@ -1388,7 +1414,8 @@ "keyDuplicate": "Detector name already exists.", "noSchema": "No detector schemas are available.", "none": "No detector instances configured.", - "add": "Add detector" + "add": "Add detector", + "addCustomKey": "Add custom key" }, "record": { "title": "Recording Settings" @@ -1410,6 +1437,10 @@ "summary": "{{count}} object types selected", "empty": "No object labels available" }, + "reviewLabels": { + "summary": "{{count}} labels selected", + "empty": "No labels available" + }, "filters": { "objectFieldLabel": "{{field}} for {{label}}" }, @@ -1453,7 +1484,13 @@ "timestamp_style": { "title": "Timestamp Settings" }, - "searchPlaceholder": "Search..." 
+ "searchPlaceholder": "Search...", + "addCustomLabel": "Add custom label...", + "genaiModel": { + "placeholder": "Select model…", + "search": "Search models…", + "noModels": "No models available" + } }, "globalConfig": { "title": "Global Configuration", @@ -1500,6 +1537,8 @@ "noOverrides": "No overrides", "cameraCount_one": "{{count}} camera", "cameraCount_other": "{{count}} cameras", + "columnCamera": "Camera", + "columnOverrides": "Profile Overrides", "baseConfig": "Base Config", "addProfile": "Add Profile", "newProfile": "New Profile", @@ -1573,5 +1612,46 @@ "hardwareNone": "No hardware acceleration", "hardwareAuto": "Automatic hardware acceleration" } + }, + "onvif": { + "profileAuto": "Auto", + "profileLoading": "Loading profiles..." + }, + "configMessages": { + "review": { + "recordDisabled": "Recording is disabled, review items will not be generated.", + "detectDisabled": "Object detection is disabled. Review items require detected objects to categorize alerts and detections.", + "allNonAlertDetections": "All non-alert activity will be included as detections." + }, + "audio": { + "noAudioRole": "No streams have the audio role defined. You must enable the audio role for audio detection to function." + }, + "audioTranscription": { + "audioDetectionDisabled": "Audio detection is not enabled for this camera. Audio transcription requires audio detection to be active." + }, + "detect": { + "fpsGreaterThanFive": "Setting the detect FPS higher than 5 is not recommended." + }, + "faceRecognition": { + "globalDisabled": "Face recognition is not enabled at the global level. Enable it in global settings for camera-level face recognition to function.", + "personNotTracked": "Face recognition requires the 'person' object to be tracked. Ensure 'person' is in the object tracking list." + }, + "lpr": { + "globalDisabled": "License plate recognition is not enabled at the global level. 
Enable it in global settings for camera-level LPR to function.", + "vehicleNotTracked": "License plate recognition requires 'car' or 'motorcycle' to be tracked." + }, + "record": { + "noRecordRole": "No streams have the record role defined. Recording will not function." + }, + "birdseye": { + "objectsModeDetectDisabled": "Birdseye is set to 'objects' mode, but object detection is disabled for this camera. The camera will not appear in Birdseye." + }, + "snapshots": { + "detectDisabled": "Object detection is disabled. Snapshots are generated from tracked objects and will not be created." + }, + "detectors": { + "mixedTypes": "All detectors must use the same type. Remove existing detectors to use a different type.", + "mixedTypesSuggestion": "All detectors must use the same type. Remove existing detectors or select {{type}}." + } } } diff --git a/web/public/locales/en/views/system.json b/web/public/locales/en/views/system.json index 0e3d6a35e..6c3f37f71 100644 --- a/web/public/locales/en/views/system.json +++ b/web/public/locales/en/views/system.json @@ -78,6 +78,7 @@ "gpuUsage": "GPU Usage", "gpuMemory": "GPU Memory", "gpuEncoder": "GPU Encoder", + "gpuCompute": "GPU Compute / Encode", "gpuDecoder": "GPU Decoder", "gpuTemperature": "GPU Temperature", "gpuInfo": { @@ -188,6 +189,7 @@ "cameraFfmpeg": "{{camName}} FFmpeg", "cameraCapture": "{{camName}} capture", "cameraDetect": "{{camName}} detect", + "cameraGpu": "{{camName}} GPU", "cameraFramesPerSecond": "{{camName}} frames per second", "cameraDetectionsPerSecond": "{{camName}} detections per second", "cameraSkippedDetectionsPerSecond": "{{camName}} skipped detections per second" diff --git a/web/public/locales/es/components/camera.json b/web/public/locales/es/components/camera.json index 69605875e..05bca2742 100644 --- a/web/public/locales/es/components/camera.json +++ b/web/public/locales/es/components/camera.json @@ -82,6 +82,7 @@ "motion": "Movimiento", "regions": "Regiones", "boundingBox": "Caja delimitadora", 
- "mask": "Máscara" + "mask": "Máscara", + "paths": "Trayectorias" } } diff --git a/web/public/locales/es/config/cameras.json b/web/public/locales/es/config/cameras.json index 39dff6712..aeb608371 100644 --- a/web/public/locales/es/config/cameras.json +++ b/web/public/locales/es/config/cameras.json @@ -19,12 +19,88 @@ "description": "Cantidad de segundos sin el tipo de audio configurado antes de que finalice el evento de audio." }, "min_volume": { - "label": "Volumen mínimo" + "label": "Volumen mínimo", + "description": "Umbral mínimo de volumen RMS requerido para ejecutar la detección de audio; los valores más bajos aumentan la sensibilidad (p. ej., 200 alta, 500 media, 1000 baja)." + }, + "listen": { + "label": "Tipos de escucha", + "description": "Lista de tipos de eventos de audio a detectar (por ejemplo: ladrido, alarma de incendios, grito, voz, alarido)." + }, + "filters": { + "label": "Filtros de audio", + "description": "Ajustes de filtrado por tipo de audio, como umbrales de confianza utilizados para reducir los falsos positivos." + }, + "enabled_in_config": { + "description": "Indica si la detección de audio estaba habilitada originalmente en el archivo de configuración estática.", + "label": "Estado original del audio" + }, + "num_threads": { + "label": "Hilos de detección" } }, "friendly_name": { "label": "Nombre descriptivo", "description": "Nombre descriptivo de la cámara utilizado en la interfaz de usuario de Frigate" }, - "label": "Configuración de Cámara" + "label": "Configuración de Cámara", + "onvif": { + "profile": { + "label": "Perfil ONVIF" + } + }, + "zones": { + "distances": { + "label": "Distancias reales" + }, + "coordinates": { + "description": "Coordenadas del polígono que definen el área de la zona. Puede ser una cadena separada por comas o una lista de cadenas de coordenadas. 
Las coordenadas deben ser relativas (0-1) o absolutas (heredadas).", + "label": "Coordenadas" + }, + "filters": { + "raw_mask": { + "label": "Máscara en bruto" + }, + "mask": { + "description": "Coordenadas del polígono que definen dónde se aplica este filtro dentro del fotograma.", + "label": "Máscara de filtro" + }, + "min_score": { + "description": "Confianza mínima en un solo fotograma requerida para que el objeto sea contabilizado.", + "label": "Confianza mínima" + }, + "threshold": { + "description": "Umbral de confianza promedio requerido para que el objeto sea considerado un positivo real.", + "label": "Umbral de confianza" + }, + "max_ratio": { + "description": "Relación máxima de ancho/alto permitida para que el cuadro delimitador califique.", + "label": "Relación de aspecto máxima" + }, + "min_ratio": { + "description": "Relación mínima de ancho/alto requerida para que el cuadro delimitador califique.", + "label": "Relación de aspecto mínima" + }, + "max_area": { + "description": "Área máxima del cuadro delimitador (píxeles o porcentaje) permitida para este tipo de objeto. Puede expresarse en píxeles (entero) o como porcentaje (decimal entre 0,000001 y 0,99).", + "label": "Área máxima del objeto" + } + } + }, + "objects": { + "raw_mask": { + "label": "Máscara en bruto" + }, + "genai": { + "label": "Configuración de objetos GenAI", + "description": "Opciones de GenAI para describir objetos rastreados y enviar fotogramas para su generación.", + "enabled": { + "label": "Activar GenAI", + "description": "Activar por defecto la generación de descripciones de GenAI para los objetos rastreados." + }, + "use_snapshot": { + "label": "Usar instantáneas", + "description": "Usar instantáneas de objetos en lugar de miniaturas para la generación de descripciones de GenAI." 
+ } + } + } } diff --git a/web/public/locales/es/config/global.json b/web/public/locales/es/config/global.json index c0940c717..53cdd0aa6 100644 --- a/web/public/locales/es/config/global.json +++ b/web/public/locales/es/config/global.json @@ -33,11 +33,80 @@ "description": "Cantidad de segundos sin el tipo de audio configurado antes de que finalice el evento de audio." }, "min_volume": { - "label": "Volumen mínimo" + "label": "Volumen mínimo", + "description": "Umbral mínimo de volumen RMS requerido para ejecutar la detección de audio; los valores más bajos aumentan la sensibilidad (p. ej., 200 alta, 500 media, 1000 baja)." + }, + "listen": { + "label": "Tipos de escucha", + "description": "Lista de tipos de eventos de audio a detectar (por ejemplo: ladrido, alarma de incendios, grito, voz, alarido)." + }, + "filters": { + "label": "Filtros de audio", + "description": "Ajustes de filtrado por tipo de audio, como umbrales de confianza utilizados para reducir los falsos positivos." + }, + "enabled_in_config": { + "description": "Indica si la detección de audio estaba habilitada originalmente en el archivo de configuración estática.", + "label": "Estado original del audio" + }, + "num_threads": { + "label": "Hilos de detección" } }, "auth": { "label": "Autenticación", - "description": "Configuración relacionada con la autenticación y la sesión, incluidas las opciones de cookies y límite de peticiones." + "description": "Configuración relacionada con la autenticación y la sesión, incluidas las opciones de cookies y límite de peticiones.", + "enabled": { + "label": "Activar autenticación", + "description": "Activar la autenticación nativa para la interfaz de Frigate." + }, + "reset_admin_password": { + "label": "Restablecer contraseña de administrador", + "description": "Si se activa, restablece la contraseña del administrador al iniciar y muestra la nueva contraseña en los registros." 
+ }, + "cookie_name": { + "description": "Nombre de la cookie utilizada para almacenar el token JWT para la autenticación nativa.", + "label": "Nombre de la cookie JWT" + }, + "cookie_secure": { + "label": "Flag de cookie segura", + "description": "Establece el flag de seguridad en la cookie de autenticación; debe ser 'true' cuando se utilice TLS." + } + }, + "onvif": { + "profile": { + "label": "Perfil ONVIF" + } + }, + "objects": { + "raw_mask": { + "label": "Máscara en bruto" + }, + "genai": { + "label": "Configuración de objetos GenAI", + "description": "Opciones de GenAI para describir objetos rastreados y enviar fotogramas para su generación.", + "enabled": { + "label": "Activar GenAI", + "description": "Activar por defecto la generación de descripciones de GenAI para los objetos rastreados." + }, + "use_snapshot": { + "label": "Usar instantáneas", + "description": "Usar instantáneas de objetos en lugar de miniaturas para la generación de descripciones de GenAI." + } + } + }, + "detectors": { + "deepstack": { + "description": "Detector DeepStack/CodeProject.AI que envía imágenes a una API HTTP remota de DeepStack para la inferencia. No recomendado.", + "api_url": { + "description": "La URL de la API de DeepStack." + }, + "api_timeout": { + "label": "Tiempo de espera de la API de DeepStack (en segundos)", + "description": "Tiempo máximo permitido para una solicitud a la API de DeepStack." 
+ }, + "api_key": { + "label": "Clave de API de DeepStack (si es necesaria)" + } + } } } diff --git a/web/public/locales/es/config/groups.json b/web/public/locales/es/config/groups.json index 4e09f2a51..d6b2b9d81 100644 --- a/web/public/locales/es/config/groups.json +++ b/web/public/locales/es/config/groups.json @@ -39,6 +39,26 @@ "global": { "resolution": "Resolución Global", "tracking": "Seguimiento Global" + }, + "cameras": { + "resolution": "Resolución", + "tracking": "Seguimiento" + } + }, + "objects": { + "global": { + "tracking": "Seguimiento global", + "filtering": "Filtrado global" + }, + "cameras": { + "filtering": "Filtrado", + "tracking": "Seguimiento" + } + }, + "record": { + "global": { + "retention": "Retención global", + "events": "Eventos globales" } } } diff --git a/web/public/locales/es/config/validation.json b/web/public/locales/es/config/validation.json index 3b595ea86..faf7032f8 100644 --- a/web/public/locales/es/config/validation.json +++ b/web/public/locales/es/config/validation.json @@ -12,5 +12,20 @@ "type": "Tipo de valor no válido", "enum": "Debe ser uno de los valores permitidos", "const": "El valor no coincide con la constante esperada", - "uniqueItems": "Todos los objetos deben ser únicos" + "uniqueItems": "Todos los objetos deben ser únicos", + "format": "Formato no válido", + "additionalProperties": "No se permite una propiedad desconocida", + "oneOf": "Debe coincidir exactamente con uno de los esquemas permitidos", + "ffmpeg": { + "inputs": { + "rolesUnique": "Cada rol solo puede asignarse a un flujo de entrada.", + "detectRequired": "Al menos un flujo de entrada debe tener asignado el rol 'detect'." + } + }, + "anyOf": "Debe coincidir con al menos uno de los esquemas permitidos", + "proxy": { + "header_map": { + "roleHeaderRequired": "Se requiere el encabezado de rol cuando hay mapeos de roles configurados." 
+ } + } } diff --git a/web/public/locales/es/views/classificationModel.json b/web/public/locales/es/views/classificationModel.json index f70c69bf1..ee6fc5ed1 100644 --- a/web/public/locales/es/views/classificationModel.json +++ b/web/public/locales/es/views/classificationModel.json @@ -12,8 +12,12 @@ }, "toast": { "success": { - "deletedCategory": "Clase Borrada", - "deletedImage": "Imágenes Borradas", + "deletedCategory_one": "Clase Borrada", + "deletedCategory_many": "Clases Borradas", + "deletedCategory_other": "Clases Borradas", + "deletedImage_one": "Imagen Borrada", + "deletedImage_many": "Imágenes Borradas", + "deletedImage_other": "Imágenes Borradas", + "deletedModel_one": "Borrado con éxito {{count}} modelo", "deletedModel_many": "Borrados con éxito {{count}} modelos", "deletedModel_other": "Borrados con éxito {{count}} modelos", @@ -21,7 +25,8 @@ "trainedModel": "Modelo entrenado correctamente.", "trainingModel": "Entrenamiento del modelo iniciado correctamente.", "updatedModel": "Configuración del modelo actualizada correctamente", - "renamedCategory": "Clase renombrada correctamente a {{name}}" + "renamedCategory": "Clase renombrada correctamente a {{name}}", + "reclassifiedImage": "Imagen reclasificada con éxito" }, "error": { "deleteImageFailed": "Fallo al borrar: {{errorMessage}}", @@ -31,7 +36,8 @@ "trainingFailed": "El entrenamiento del modelo ha fallado. Revisa los registros de Frigate para más detalles.", "updateModelFailed": "Fallo al actualizar modelo: {{errorMessage}}", "trainingFailedToStart": "No se pudo iniciar el entrenamiento del modelo: {{errorMessage}}", - "renameCategoryFailed": "Falló el renombrado de la clase: {{errorMessage}}" + "renameCategoryFailed": "Falló el renombrado de la clase: {{errorMessage}}", + "reclassifyFailed": "Error al reclasificar la imagen: {{errorMessage}}" } }, "deleteCategory": { @@ -144,7 +150,12 @@ }, "allImagesRequired_one": "Por favor clasifique todas las imágenes. Queda {{count}} imagen.", "allImagesRequired_many": "Por favor clasifique todas las imágenes. 
Quedan {{count}} imágenes.", - "allImagesRequired_other": "Por favor clasifique todas las imágenes. Quedan {{count}} imágenes." + "allImagesRequired_other": "Por favor clasifique todas las imágenes. Quedan {{count}} imágenes.", + "refreshConfirm": { + "description": "Esta acción generará un nuevo conjunto de imágenes y eliminará todas las selecciones, incluidas las clases anteriores. Deberás volver a seleccionar ejemplos para todas las clases.", + "title": "¿Generar nuevos ejemplos?" + }, + "refreshExamples": "Generar nuevos ejemplos" }, "title": "Crear nueva Clasificación" }, @@ -188,5 +199,7 @@ "description": "Cree un modelo personalizado para monitorear y clasificar los cambios de estado en áreas específicas de la cámara.", "buttonText": "Crear modelo de estado" } - } + }, + "reclassifyImage": "Reclasificar imagen", + "reclassifyImageAs": "Reclasificar imagen como:" } diff --git a/web/public/locales/es/views/events.json b/web/public/locales/es/views/events.json index d13daff60..f2bdab0e9 100644 --- a/web/public/locales/es/views/events.json +++ b/web/public/locales/es/views/events.json @@ -15,7 +15,9 @@ "description": "Solo se pueden crear elementos de revisión para una cámara cuando las grabaciones están habilitadas para esa cámara." 
} }, - "timeline": "Línea de tiempo", + "timeline": { + "label": "Línea de tiempo" + }, "timeline.aria": "Seleccionar línea de tiempo", "events": { "label": "Eventos", diff --git a/web/public/locales/es/views/explore.json b/web/public/locales/es/views/explore.json index f8f61ce83..ded5ca91f 100644 --- a/web/public/locales/es/views/explore.json +++ b/web/public/locales/es/views/explore.json @@ -112,7 +112,8 @@ "attributes": "Atributos de clasificación", "title": { "label": "Título" - } + }, + "scoreInfo": "Información de confianza" }, "documentTitle": "Explorar - Frigate", "trackedObjectDetails": "Detalles del objeto rastreado", @@ -222,12 +223,18 @@ }, "hideObjectDetails": { "label": "Ocultar la ruta del objeto" + }, + "more": { + "aria": "Más" } }, "dialog": { "confirmDelete": { "title": "Confirmar eliminación", "desc": "Al eliminar este objeto rastreado, se eliminan la instantánea, las incrustaciones guardadas y las entradas de detalles de seguimiento asociadas. Las grabaciones de este objeto rastreado en la vista Historial NO se eliminarán.

¿Seguro que desea continuar?" + }, + "toast": { + "error": "Error al eliminar este objeto rastreado: {{errorMessage}}" } }, "noTrackedObjects": "No se encontraron objetos rastreados", diff --git a/web/public/locales/es/views/exports.json b/web/public/locales/es/views/exports.json index 15be4164d..1099d45c8 100644 --- a/web/public/locales/es/views/exports.json +++ b/web/public/locales/es/views/exports.json @@ -2,7 +2,9 @@ "search": "Búsqueda", "documentTitle": "Exportar - Frigate", "noExports": "No se encontraron exportaciones", - "deleteExport": "Eliminar exportación", + "deleteExport": { + "label": "Eliminar exportación" + }, "editExport": { "desc": "Introduce un nuevo nombre para esta exportación.", "saveExport": "Guardar exportación", @@ -25,5 +27,12 @@ "headings": { "cases": "Casos", "uncategorizedExports": "Exportaciones sin categorizar" + }, + "caseDialog": { + "title": "Añadir al caso", + "newCaseOption": "Crear nuevo caso", + "nameLabel": "Nombre del caso", + "description": "Elige un caso existente o crea uno nuevo.", + "selectLabel": "Caso" } } diff --git a/web/public/locales/es/views/faceLibrary.json b/web/public/locales/es/views/faceLibrary.json index faca37408..f923082da 100644 --- a/web/public/locales/es/views/faceLibrary.json +++ b/web/public/locales/es/views/faceLibrary.json @@ -66,7 +66,8 @@ "deletedFace_many": "{{count}} rostros eliminados con éxito.", "deletedFace_other": "{{count}} rostros eliminados con éxito.", "uploadedImage": "Imagen subida con éxito.", - "renamedFace": "Rostro renombrado con éxito a {{name}}" + "renamedFace": "Rostro renombrado con éxito a {{name}}", + "reclassifiedFace": "Rostro reclasificado con éxito." 
}, "error": { "uploadingImageFailed": "No se pudo subir la imagen: {{errorMessage}}", @@ -75,7 +76,8 @@ "deleteNameFailed": "No se pudo eliminar el nombre: {{errorMessage}}", "trainFailed": "No se pudo entrenar: {{errorMessage}}", "updateFaceScoreFailed": "No se pudo actualizar la puntuación del rostro: {{errorMessage}}", - "renameFaceFailed": "No se pudo renombrar el rostro: {{errorMessage}}" + "renameFaceFailed": "No se pudo renombrar el rostro: {{errorMessage}}", + "reclassifyFailed": "Error al reclasificar el rostro: {{errorMessage}}" } }, "readTheDocs": "Leer la documentación", @@ -101,5 +103,7 @@ }, "collections": "Colecciones", "nofaces": "No hay rostros disponibles", - "pixels": "{{area}}px" + "pixels": "{{area}}px", + "reclassifyFace": "Reclasificar rostro", + "reclassifyFaceAs": "Reclasificar rostro como:" } diff --git a/web/public/locales/es/views/live.json b/web/public/locales/es/views/live.json index ce7c46ad5..fa473384a 100644 --- a/web/public/locales/es/views/live.json +++ b/web/public/locales/es/views/live.json @@ -1,5 +1,7 @@ { - "documentTitle": "Directo - Frigate", + "documentTitle": { + "default": "En vivo - Frigate" + }, "documentTitle.withCamera": "{{camera}} - Directo - Frigate", "twoWayTalk": { "enable": "Habilitar conversación bidireccional", @@ -14,7 +16,8 @@ "clickMove": { "label": "Haz clic en el marco para centrar la cámara", "enable": "Habilitar clic para mover", - "disable": "Deshabilitar clic para mover" + "disable": "Deshabilitar clic para mover", + "enableWithZoom": "Activar clic para mover / arrastrar para hacer zoom" }, "up": { "label": "Mover la cámara PTZ hacia arriba" diff --git a/web/public/locales/es/views/settings.json b/web/public/locales/es/views/settings.json index 141abb957..c6157a750 100644 --- a/web/public/locales/es/views/settings.json +++ b/web/public/locales/es/views/settings.json @@ -7,7 +7,7 @@ "camera": "Configuración de cámara - Frigate", "motionTuner": "Ajuste de movimiento - Frigate", "classification": 
"Configuración de clasificación - Frigate", - "general": "Configuración de Interfaz de Usuario - Frigate", + "general": "Configuración de la interfaz - Frigate", "frigatePlus": "Configuración de Frigate+ - Frigate", "notifications": "Configuración de Notificaciones - Frigate", "enrichments": "Configuración de Análisis Avanzado - Frigate", @@ -15,7 +15,8 @@ "cameraReview": "Revisar Configuración de Cámaras - Frigate", "globalConfig": "Configuración Global - Frigate", "cameraConfig": "Configuración de Cámara - Frigate", - "maintenance": "Mantenimiento - Frigate" + "maintenance": "Mantenimiento - Frigate", + "profiles": "Perfiles - Frigate" }, "menu": { "cameras": "Configuración de Cámara", @@ -293,6 +294,11 @@ "error": { "mustBeGreaterOrEqualTo": "El umbral de velocidad debe ser mayor o igual a 0,1." } + }, + "name": { + "error": { + "mustNotBeEmpty": "El nombre no puede estar vacío." + } } }, "zones": { @@ -1234,5 +1240,25 @@ "success": "Se ha guardado la configuración de la clasificación de revisión. Reinicie Frigate para aplicar los cambios." } } + }, + "button": { + "overriddenGlobal": "Sobrescrito (Global)", + "overriddenBaseConfigTooltip": "El perfil {{profile}} sobrescribe los ajustes de configuración de esta sección", + "overriddenGlobalTooltip": "Esta cámara sobrescribe los ajustes de configuración global en esta sección", + "overriddenBaseConfig": "Sobrescrito (Configuración Base)" + }, + "onvif": { + "profileLoading": "Cargando perfiles..." 
+ }, + "maintenance": { + "sync": { + "verboseDesc": "Escribe una lista completa de archivos huérfanos en el disco para su revisión.", + "verbose": "Detallado" + } + }, + "configForm": { + "camera": { + "noCameras": "No hay cámaras disponibles" + } } } diff --git a/web/public/locales/es/views/system.json b/web/public/locales/es/views/system.json index c2c3e39e8..6c211a77c 100644 --- a/web/public/locales/es/views/system.json +++ b/web/public/locales/es/views/system.json @@ -37,7 +37,18 @@ "label": "Mensajes", "pause": "Pausar", "resume": "Continuar", - "clear": "Limpiar" + "clear": "Limpiar", + "filter": { + "all": "Todos los temas", + "topics": "Temas", + "events": "Eventos", + "reviews": "Revisiones", + "face_recognition": "Reconocimiento facial", + "camera_activity": "Actividad de cámara", + "classification": "Clasificación" + }, + "count_other": "{{count}} mensajes", + "count_one": "{{count}} mensaje" } }, "title": "Sistema", diff --git a/web/public/locales/et/common.json b/web/public/locales/et/common.json index b1a715095..d066f8514 100644 --- a/web/public/locales/et/common.json +++ b/web/public/locales/et/common.json @@ -180,7 +180,8 @@ "faceLibrary": "Näoteek", "classification": "Klassifikatsioon", "chat": "Vestlus", - "actions": "Tegevused" + "actions": "Tegevused", + "profiles": "Profiilid" }, "unit": { "speed": { @@ -247,7 +248,8 @@ "resetToGlobal": "Lähtesta üldiseks väärtusteks", "savingAll": "Salvestan kõiki…", "undoAll": "Pööra kõik tegevused tagasi", - "applying": "Võtan kasutusele…" + "applying": "Võtan kasutusele…", + "retry": "Proovi uuesti" }, "label": { "back": "Mine tagasi", @@ -274,7 +276,8 @@ "error": { "title": "Seadistuste muudatuste salvestamine ei õnnestunud: {{errorMessage}}", "noMessage": "Seadistuste muudatuste salvestamine ei õnnestunud" - } + }, + "success": "Seadistuste muudatuste salvestamine õnnestus." 
} }, "role": { @@ -309,5 +312,7 @@ "readTheDocumentation": "Loe dokumentatsiooni ja juhendit", "information": { "pixels": "{{area}} px" - } + }, + "no_items": "Objekte pole", + "validation_errors": "Valideerimise vead" } diff --git a/web/public/locales/et/components/camera.json b/web/public/locales/et/components/camera.json index e5df620ec..5c467f81c 100644 --- a/web/public/locales/et/components/camera.json +++ b/web/public/locales/et/components/camera.json @@ -81,6 +81,7 @@ "zones": "Tsoonid", "mask": "Mask", "motion": "Liikumine", - "regions": "Alad" + "regions": "Alad", + "paths": "Asukohad" } } diff --git a/web/public/locales/et/config/cameras.json b/web/public/locales/et/config/cameras.json index 0967ef424..c2ff153fa 100644 --- a/web/public/locales/et/config/cameras.json +++ b/web/public/locales/et/config/cameras.json @@ -1 +1,6 @@ -{} +{ + "name": { + "label": "Kaamera nimi", + "description": "Kaamera nimi on nõutav" + } +} diff --git a/web/public/locales/et/objects.json b/web/public/locales/et/objects.json index 19830deaf..5cd7398b3 100644 --- a/web/public/locales/et/objects.json +++ b/web/public/locales/et/objects.json @@ -116,5 +116,10 @@ "nzpost": "NZPost-i sõiduk", "postnord": "PostNordi sõiduk", "gls": "GLS-i sõiduk", - "dpd": "DPD sõiduk" + "dpd": "DPD sõiduk", + "canada_post": "Canada Post", + "royal_mail": "Royal Mail", + "school_bus": "Koolibuss", + "skunk": "Vinukloom (skunk)", + "kangaroo": "Känguru" } diff --git a/web/public/locales/et/views/exports.json b/web/public/locales/et/views/exports.json index 667e5240f..ed9a37978 100644 --- a/web/public/locales/et/views/exports.json +++ b/web/public/locales/et/views/exports.json @@ -2,7 +2,9 @@ "documentTitle": "Eksport Frigate'ist", "search": "Otsi", "noExports": "Eksporditud sisu ei leidu", - "deleteExport": "Kustuta eksporditud sisu", + "deleteExport": { + "label": "Kustuta eksporditud sisu" + }, "deleteExport.desc": "Kas sa oled kindel et soovid „{{exportName}}“ kustutada?", "editExport": { "title": 
"Muuda eksporditud sisu nime", diff --git a/web/public/locales/et/views/live.json b/web/public/locales/et/views/live.json index 7a40fce90..891568c4d 100644 --- a/web/public/locales/et/views/live.json +++ b/web/public/locales/et/views/live.json @@ -133,7 +133,11 @@ "label": "Esita taustal", "desc": "Kasuta seda valikut, kui tahad voogedastuse jätkumist ka siis, kui pildivaade on peidetud." }, - "debugView": "Veaotsinguvaade" + "debugView": "Veaotsinguvaade", + "showStats": { + "label": "Näita statistikat", + "desc": "Selle eelistuse puhul näidatakse voogedastuse statistikat kaamerapildi peal." + } }, "noCameras": { "buttonText": "Lisa kaamera", diff --git a/web/public/locales/et/views/settings.json b/web/public/locales/et/views/settings.json index 6b262ab0a..a5b2c7670 100644 --- a/web/public/locales/et/views/settings.json +++ b/web/public/locales/et/views/settings.json @@ -329,7 +329,8 @@ "roles": "Rollid", "notifications": "Teavitused", "frigateplus": "Frigate+", - "cameraReview": "Ülevaatamine" + "cameraReview": "Ülevaatamine", + "profiles": "Profiilid" }, "dialog": { "unsavedChanges": { diff --git a/web/public/locales/fa/views/classificationModel.json b/web/public/locales/fa/views/classificationModel.json index 8fa0371a6..5bb59eaba 100644 --- a/web/public/locales/fa/views/classificationModel.json +++ b/web/public/locales/fa/views/classificationModel.json @@ -11,8 +11,10 @@ }, "toast": { "success": { - "deletedCategory": "کلاس حذف شده", - "deletedImage": "عکس های حذف شده", + "deletedCategory_one": "کلاس حذف شده", + "deletedCategory_other": "", + "deletedImage_one": "عکس های حذف شده", + "deletedImage_other": "", "categorizedImage": "تصویر طبقه بندی شده", "trainedModel": "مدل آموزش دیده شده.", "trainingModel": "آموزش دادن مدل با موفقیت شروع شد.", diff --git a/web/public/locales/fr/common.json b/web/public/locales/fr/common.json index ab8f4177b..2ba13dd18 100644 --- a/web/public/locales/fr/common.json +++ b/web/public/locales/fr/common.json @@ -102,7 +102,7 @@ 
"close": "Fermer", "copy": "Copier", "back": "Retour", - "history": "Chronologie", + "history": "Historique", "pictureInPicture": "Image dans l'image", "twoWayTalk": "Conversation bidirectionnelle", "off": "OFF", @@ -243,7 +243,8 @@ "uiPlayground": "Bac à sable de l'interface", "faceLibrary": "Bibliothèque de visages", "languages": "Langues", - "classification": "Classification" + "classification": "Classification", + "profiles": "Profils" }, "toast": { "save": { diff --git a/web/public/locales/fr/components/icons.json b/web/public/locales/fr/components/icons.json index fd5f1f8f6..f713f2f52 100644 --- a/web/public/locales/fr/components/icons.json +++ b/web/public/locales/fr/components/icons.json @@ -1,8 +1,8 @@ { "iconPicker": { "search": { - "placeholder": "Rechercher une icône" + "placeholder": "Rechercher une icône…" }, - "selectIcon": "Sélectionnez une icône." + "selectIcon": "Sélectionnez une icône" } } diff --git a/web/public/locales/fr/config/global.json b/web/public/locales/fr/config/global.json index 58cd06664..b3dd9d23f 100644 --- a/web/public/locales/fr/config/global.json +++ b/web/public/locales/fr/config/global.json @@ -16,11 +16,11 @@ "description": "Contrôle la verbosité par défaut des journaux et les exceptions de niveau par composant.", "default": { "label": "Niveau de journalisation", - "description": "Verbosité de l'ensemble des journaux par défaut (débogage, information, avertissement, erreur)" + "description": "Verbosité de l'ensemble des journaux par défaut (débogage, information, avertissement, erreur)." }, "logs": { "label": "Niveau de journalisation par processus", - "description": "Outrepasser le niveau de journalisation par composant pour augmenter ou diminuer la verbosité pour des modules spécifiques" + "description": "Personnaliser le niveau de journalisation par composant pour augmenter ou diminuer la verbosité pour des modules spécifiques." 
} }, "auth": { @@ -29,7 +29,7 @@ "label": "Activer l'authentification", "description": "Active l'authentification native de l'interface de Frigate." }, - "description": "Authentification et réglages en rapport avec la session incluant les options concernant les cookies et limite de taux.", + "description": "Paramètres d'authentification et de session, y compris les options relatives aux cookies et à la limitation du débit.", "reset_admin_password": { "label": "réinitialiser le mot de passe administrateur", "description": "Si vrai, réinitialise le mot de passe utilisateur administrateur au démarrage et écrit le nouveau mot de passe dans les journaux." @@ -48,10 +48,34 @@ }, "refresh_time": { "label": "Fenêtre de rafraichissement de session", - "description": "Lorsqu'une session est à moins de ce nombre de secondes de son expiration, actualisez là pour lui redonner sa durée complète." + "description": "Lorsqu'une session est à moins de ce nombre de secondes d'expirer, rétablissez-la à sa durée entière." }, "failed_login_rate_limit": { - "label": "Limite de connexions échouées" + "label": "Limite de connexions échouées", + "description": "Règles limitant la fréquence des tentatives ratées d'authentification afin de réduire les attaques de type \"brute-force\"." + }, + "trusted_proxies": { + "label": "Mandataire de confiance", + "description": "Liste des IP de mandataire de confiance quand il faut déterminer l'IP pour limiter le taux." + }, + "hash_iterations": { + "label": "Itérations de hachage", + "description": "Nombre d'itérations PBKDF2-SHA256 à utiliser quand les mots de passe utilisateur sont hachés." + }, + "roles": { + "label": "Correspondance des rôles", + "description": "Correspondance de rôles vers la liste des caméras. Une liste vide donne l'accès totale à toutes les caméras pour ce rôle." 
+ }, + "admin_first_time_login": { + "label": "Drapeau admin première fois", + "description": "Si activé, l'interface peut afficher un lien d'aide sur la page d'identification des utilisateurs indiquant comment se connecter après une réinitialisation du mot de passe administrateur. " + } + }, + "database": { + "label": "Base de donnée", + "description": "Réglages concernant la base de donnée SQLite utilisé par Frigate pour stocker les objets suivis et enregistrer les métadonnées.", + "path": { + "label": "Chemin vers la base de donnée" } } } diff --git a/web/public/locales/fr/config/groups.json b/web/public/locales/fr/config/groups.json index 2178bea8a..2d1e6c039 100644 --- a/web/public/locales/fr/config/groups.json +++ b/web/public/locales/fr/config/groups.json @@ -1,7 +1,7 @@ { "audio": { "global": { - "detection": "Détection globale", + "detection": "Détection générale", "sensitivity": "Sensibilité globale" }, "cameras": { @@ -23,7 +23,51 @@ "algorithm": "Algorithme global" }, "cameras": { - "sensitivity": "Sensibilité" + "sensitivity": "Sensibilité", + "algorithm": "Algorithme" + } + }, + "snapshots": { + "global": { + "display": "Affichage Global" + }, + "cameras": { + "display": "Affichage" + } + }, + "detect": { + "global": { + "resolution": "Résolution globale", + "tracking": "Suivi global" + }, + "cameras": { + "resolution": "Résolution", + "tracking": "Suivi" + } + }, + "objects": { + "global": { + "tracking": "Suivi Global", + "filtering": "Filtrage Global" + }, + "cameras": { + "tracking": "Suivi", + "filtering": "Filtrage" + } + }, + "record": { + "global": { + "retention": "Rétention Globale", + "events": "Événements globaux" + }, + "cameras": { + "retention": "Rétention", + "events": "Événements" + } + }, + "ffmpeg": { + "cameras": { + "cameraFfmpeg": "Arguments FFmpeg spécifiques aux caméras" } } } diff --git a/web/public/locales/fr/config/validation.json b/web/public/locales/fr/config/validation.json index 64891267a..aa4acd887 100644 --- 
a/web/public/locales/fr/config/validation.json +++ b/web/public/locales/fr/config/validation.json @@ -1,5 +1,5 @@ { - "minimum": "Doit être au minimum {{limit}}", + "minimum": "Doit être au moins de {{limit}}", "maximum": "Ne doit pas dépasser {{limit}}", "exclusiveMinimum": "Doit être supérieur à {{limit}}", "exclusiveMaximum": "Doit être inférieur à {{limit}}", @@ -7,5 +7,26 @@ "maxLength": "Doit contenir au maximum {{limit}} caractère(s)", "minItems": "Doit contenir au moins {{limit}} élément(s)", "maxItems": "Doit contenir au maximum {{limit}} élément(s)", - "pattern": "Format incorrect" + "pattern": "Format incorrect", + "required": "Ce champ est requis", + "type": "Type de valeur incorrect", + "enum": "Doit être une des valeurs autorisées", + "const": "La valeur ne correspond pas à la constante attendu", + "uniqueItems": "Tous les éléments doivent être uniques", + "format": "Format invalide", + "additionalProperties": "Une propriété inconnue est interdite", + "oneOf": "Doit correspondre exactement à un des schémas autorisés", + "anyOf": "Doit correspondre à au moins un des schémas autorisés", + "proxy": { + "header_map": { + "roleHeaderRequired": "L'entête de rôle est nécessaire quand la cartographie des rôles est configurée." + } + }, + "ffmpeg": { + "inputs": { + "rolesUnique": "Chaque rôle ne peut être assigné qu'à un flux d'entrée.", + "detectRequired": "Au moins un flux d'entrée doit être assigné au rôle 'detect' (détection).", + "hwaccelDetectOnly": "Seulement le flux d'entrée avec le rôle de détection peut définir des arguments pour l'accélération matérielle." 
+ } + } } diff --git a/web/public/locales/fr/views/classificationModel.json b/web/public/locales/fr/views/classificationModel.json index 0926f4cd6..df090b3cc 100644 --- a/web/public/locales/fr/views/classificationModel.json +++ b/web/public/locales/fr/views/classificationModel.json @@ -12,8 +12,12 @@ }, "toast": { "success": { - "deletedCategory": "Classe supprimée", - "deletedImage": "Images supprimées", + "deletedCategory_one": "{{count}} classe supprimée", + "deletedCategory_many": "{{count}} classes supprimées", + "deletedCategory_other": "{{count}} classes supprimées", + "deletedImage_one": "{{count}} image supprimée", + "deletedImage_many": "{{count}} images supprimées", + "deletedImage_other": "{{count}} images supprimées", "categorizedImage": "Image classifiée avec succès", "trainedModel": "Modèle entraîné avec succès.", "trainingModel": "L'entraînement du modèle a démarré avec succès.", @@ -21,7 +25,8 @@ "deletedModel_many": "{{count}} modèles supprimés avec succès", "deletedModel_other": "{{count}} modèles supprimés avec succès", "updatedModel": "Configuration du modèle mise à jour avec succès", - "renamedCategory": "Classe renommée en {{name}} avec succès" + "renamedCategory": "Classe renommée en {{name}} avec succès", + "reclassifiedImage": "Image reclassifiée avec succès" }, "error": { "deleteImageFailed": "Échec de la suppression : {{errorMessage}}", @@ -31,7 +36,8 @@ "deleteModelFailed": "Impossible de supprimer le modèle : {{errorMessage}}", "updateModelFailed": "Impossible de mettre à jour le modèle : {{errorMessage}}", "renameCategoryFailed": "Impossible de renommer la classe : {{errorMessage}}", - "trainingFailedToStart": "Impossible de démarrer l'entraînement du modèle : {{errorMessage}}" + "trainingFailedToStart": "Impossible de démarrer l'entraînement du modèle : {{errorMessage}}", + "reclassifyFailed": "Échec de reclassification de l'image : {{errorMessage}}" } }, "deleteCategory": { diff --git a/web/public/locales/fr/views/events.json 
b/web/public/locales/fr/views/events.json index 6baaf9b93..e5fb25113 100644 --- a/web/public/locales/fr/views/events.json +++ b/web/public/locales/fr/views/events.json @@ -15,7 +15,9 @@ "description": "Les activités ne peuvent être générées pour une caméra que si l'enregistrement est activé pour celle-ci." } }, - "timeline": "Chronologie", + "timeline": { + "label": "Chronologie" + }, "events": { "label": "Événements", "aria": "Sélectionner les événements", diff --git a/web/public/locales/fr/views/exports.json b/web/public/locales/fr/views/exports.json index fae8186f3..9e26a27e5 100644 --- a/web/public/locales/fr/views/exports.json +++ b/web/public/locales/fr/views/exports.json @@ -2,7 +2,9 @@ "documentTitle": "Exports - Frigate", "search": "Rechercher", "noExports": "Aucune exportation trouvée", - "deleteExport": "Supprimer l'exportation", + "deleteExport": { + "label": "Supprimer l'exportation" + }, "deleteExport.desc": "Êtes-vous sûr de vouloir supprimer {{exportName}} ?", "editExport": { "title": "Renommer l'exportation", diff --git a/web/public/locales/fr/views/faceLibrary.json b/web/public/locales/fr/views/faceLibrary.json index 4389786cd..83138d7ec 100644 --- a/web/public/locales/fr/views/faceLibrary.json +++ b/web/public/locales/fr/views/faceLibrary.json @@ -1,7 +1,7 @@ { "description": { "addFace": "Ajoutez une nouvelle collection à la bibliothèque de visages en téléversant votre première image.", - "placeholder": "Saisissez un nom pour cette collection.", + "placeholder": "Saisissez un nom pour cette collection", "invalidName": "Nom invalide. Les noms ne peuvent contenir que des lettres, des chiffres, des espaces, des apostrophes, des traits de soulignement et des tirets.", "nameCannotContainHash": "Le nom ne peut pas contenir le caractère #." 
}, diff --git a/web/public/locales/fr/views/live.json b/web/public/locales/fr/views/live.json index 935a96bc6..fc9d6f3a4 100644 --- a/web/public/locales/fr/views/live.json +++ b/web/public/locales/fr/views/live.json @@ -1,5 +1,7 @@ { - "documentTitle": "Direct - Frigate", + "documentTitle": { + "default": "Direct - Frigate" + }, "lowBandwidthMode": "Mode bande passante faible", "documentTitle.withCamera": "{{camera}} - Direct - Frigate", "twoWayTalk": { @@ -15,7 +17,8 @@ "clickMove": { "label": "Cliquez dans le cadre pour centrer la caméra", "enable": "Activer le clic pour déplacer", - "disable": "Désactiver le clic pour déplacer" + "disable": "Désactiver le clic pour déplacer", + "enableWithZoom": "Activer le clic pour déplacer / faire glisser vers le zoom" }, "left": { "label": "Déplacer la caméra PTZ vers la gauche" diff --git a/web/public/locales/fr/views/settings.json b/web/public/locales/fr/views/settings.json index 635e46fcc..c9b3ccb87 100644 --- a/web/public/locales/fr/views/settings.json +++ b/web/public/locales/fr/views/settings.json @@ -5,7 +5,7 @@ "camera": "Paramètres des caméras - Frigate", "classification": "Paramètres de classification - Frigate", "motionTuner": "Réglage de la détection de mouvement - Frigate", - "general": "Paramètres du profil - Frigate", + "general": "Paramètres de l'interface - Frigate", "masksAndZones": "Éditeur de masques et de zones - Frigate", "object": "Débogage - Frigate", "frigatePlus": "Paramètres Frigate+ - Frigate", @@ -15,7 +15,8 @@ "cameraReview": "Paramètres des activités caméra - Frigate", "globalConfig": "Configuration globale - Frigate", "cameraConfig": "Configuration de la caméra - Frigate", - "maintenance": "Maintenance - Frigate" + "maintenance": "Maintenance - Frigate", + "profiles": "Profils - Frigate" }, "menu": { "ui": "Interface utilisateur", @@ -86,7 +87,9 @@ "cameraUi": "Interface de la caméra", "cameraTimestampStyle": "Style d'horodatage", "cameraMqtt": "MQTT de la caméra", - "maintenance": 
"Maintenance" + "maintenance": "Maintenance", + "uiSettings": "Paramètres IU", + "profiles": "Profils" }, "dialog": { "unsavedChanges": { @@ -99,7 +102,7 @@ "noCamera": "Aucune caméra" }, "general": { - "title": "Paramètres du profil", + "title": "Paramètres d'interface", "liveDashboard": { "title": "Tableau de bord en direct", "automaticLiveView": { @@ -222,14 +225,14 @@ "title": "Paramètres Frigate+", "snapshotConfig": { "documentation": "Lire la documentation", - "desc": "La soumission à Frigate+ nécessite à la fois que les instantanés et les instantanés clean_copy soient activés dans votre configuration.", + "desc": "La soumission à Frigate+ nécessite que les instantanés soient activés dans votre configuration.", "title": "Configuration des instantanés", "table": { "snapshots": "Instantanés", "camera": "Caméra", "cleanCopySnapshots": "Instantanés clean_copy" }, - "cleanCopyWarning": "Certaines caméras ont des instantanés activés, mais la copie propre est désactivée. Vous devez activer clean_copy dans votre configuration d'instantanés pour pouvoir envoyer les images de ces caméras à Frigate+." 
+ "cleanCopyWarning": "Certaines caméras ont les instantanés désactivés" }, "modelInfo": { "baseModel": "Modèle de base", @@ -568,7 +571,7 @@ } }, "restart_required": "Redémarrage requis (masques/zones changés)", - "objectMaskLabel": "Masque d'objet {{number}} ({{label}})", + "objectMaskLabel": "Masque d'objet {{number}}", "motionMaskLabel": "Masque de mouvement {{number}}" }, "motionDetectionTuner": { @@ -1390,5 +1393,17 @@ "label": "Nouvelle valeur", "reset": "Réinitialiser" } + }, + "button": { + "overriddenBaseConfigTooltip": "Le profil {{profile}} remplace les paramètres de configuration dans cette section", + "overriddenGlobalTooltip": "Cette caméra remplace les paramètres de la configuration globale dans cette section", + "overriddenGlobal": "Remplacé (Global)", + "overriddenBaseConfig": "Remplacée (Configuration de base)" + }, + "maintenance": { + "title": "Maintenance", + "sync": { + "title": "Synchronisation du Média" + } } } diff --git a/web/public/locales/fr/views/system.json b/web/public/locales/fr/views/system.json index 0963b0f7b..f29b87170 100644 --- a/web/public/locales/fr/views/system.json +++ b/web/public/locales/fr/views/system.json @@ -34,6 +34,33 @@ "fetchingLogsFailed": "Erreur lors de la récupération des logs : {{errorMessage}}", "whileStreamingLogs": "Erreur lors de la diffusion des logs : {{errorMessage}}" } + }, + "websocket": { + "label": "Messages", + "pause": "Pause", + "resume": "Reprendre", + "clear": "Effacer", + "filter": { + "all": "Tous les sujets", + "topics": "Sujets", + "events": "Évènements", + "reviews": "Revues", + "classification": "Classification", + "face_recognition": "Reconnaissance Faciale", + "lpr": "LAPI", + "camera_activity": "Activités de la caméra", + "system": "Système", + "camera": "Caméra", + "all_cameras": "Toutes les caméras", + "cameras_count_one": "{{count}} Caméra", + "cameras_count_other": "{{count}} Caméras" + }, + "empty": "Aucun message capturé jusque là", + "count_one": "{{count}} message", + 
"count_other": "{{count}} messages", + "expanded": { + "payload": "Charge utile" + } } }, "general": { diff --git a/web/public/locales/he/views/classificationModel.json b/web/public/locales/he/views/classificationModel.json index 0e965eb74..ea08b0e74 100644 --- a/web/public/locales/he/views/classificationModel.json +++ b/web/public/locales/he/views/classificationModel.json @@ -23,8 +23,12 @@ }, "toast": { "success": { - "deletedCategory": "הקטגוריה נמחקה", - "deletedImage": "התמונות נמחקו", + "deletedCategory_one": "הקטגוריה נמחקה", + "deletedCategory_two": "", + "deletedCategory_other": "", + "deletedImage_one": "התמונות נמחקו", + "deletedImage_two": "", + "deletedImage_other": "", "deletedModel_one": "נמחק בהצלחה {{count}} מודל", "deletedModel_two": "נמחקו בהצלחה {{count}} מודלים", "deletedModel_other": "", diff --git a/web/public/locales/hr/views/classificationModel.json b/web/public/locales/hr/views/classificationModel.json index 97bfff234..b61defad9 100644 --- a/web/public/locales/hr/views/classificationModel.json +++ b/web/public/locales/hr/views/classificationModel.json @@ -23,8 +23,12 @@ }, "toast": { "success": { - "deletedImage": "Obrisane slike", - "deletedCategory": "Izbrisana Klasa", + "deletedImage_one": "Obrisane slike", + "deletedImage_few": "", + "deletedImage_other": "", + "deletedCategory_one": "Izbrisana Klasa", + "deletedCategory_few": "", + "deletedCategory_other": "", "deletedModel_one": "Uspješno izbrisan {{count}} model", "deletedModel_few": "Uspješno izbrisana {{count}} modela", "deletedModel_other": "Uspješno izbrisano {{count}} modela", diff --git a/web/public/locales/hu/views/classificationModel.json b/web/public/locales/hu/views/classificationModel.json index 494ff7c06..1c26b80cc 100644 --- a/web/public/locales/hu/views/classificationModel.json +++ b/web/public/locales/hu/views/classificationModel.json @@ -12,11 +12,13 @@ }, "toast": { "success": { - "deletedImage": "Törölt képek", + "deletedImage_one": "Törölt képek", + 
"deletedImage_other": "", "deletedModel_one": "Sikeresen törölve {{count}} modell", "deletedModel_other": "Sikeresen törölve {{count}} modell", "categorizedImage": "A kép sikeresen osztályozva", - "deletedCategory": "Osztály törlése", + "deletedCategory_one": "Osztály törlése", + "deletedCategory_other": "", "trainedModel": "Sikeresen betanított modell.", "trainingModel": "A modell tanítás sikeresen megkezdődött.", "updatedModel": "Modellkonfiguráció sikeresen frissítve", diff --git a/web/public/locales/id/views/classificationModel.json b/web/public/locales/id/views/classificationModel.json index 6724a3b26..55ca9051d 100644 --- a/web/public/locales/id/views/classificationModel.json +++ b/web/public/locales/id/views/classificationModel.json @@ -23,8 +23,8 @@ }, "toast": { "success": { - "deletedCategory": "Class Dihapus", - "deletedImage": "Image dihapus", + "deletedCategory_other": "Class Dihapus", + "deletedImage_other": "Image dihapus", "deletedModel_other": "Berhasil menghapus {{count}} model", "categorizedImage": "Berhasil Mengklasifikasikan Gambar", "trainedModel": "Berhasil melatih model.", diff --git a/web/public/locales/is/components/auth.json b/web/public/locales/is/components/auth.json index 0967ef424..077e14853 100644 --- a/web/public/locales/is/components/auth.json +++ b/web/public/locales/is/components/auth.json @@ -1 +1,5 @@ -{} +{ + "form": { + "user": "Notandanafn" + } +} diff --git a/web/public/locales/is/components/dialog.json b/web/public/locales/is/components/dialog.json index 0967ef424..d6a23f570 100644 --- a/web/public/locales/is/components/dialog.json +++ b/web/public/locales/is/components/dialog.json @@ -1 +1,5 @@ -{} +{ + "restart": { + "title": "Ert þú viss um að þú viljir endurræsa Frigate?" 
+ } +} diff --git a/web/public/locales/is/components/filter.json b/web/public/locales/is/components/filter.json index 0967ef424..3066802c6 100644 --- a/web/public/locales/is/components/filter.json +++ b/web/public/locales/is/components/filter.json @@ -1 +1,3 @@ -{} +{ + "filter": "Sía" +} diff --git a/web/public/locales/is/components/icons.json b/web/public/locales/is/components/icons.json index 0967ef424..1ff5ba9f8 100644 --- a/web/public/locales/is/components/icons.json +++ b/web/public/locales/is/components/icons.json @@ -1 +1,5 @@ -{} +{ + "iconPicker": { + "selectIcon": "Veldu tákn" + } +} diff --git a/web/public/locales/is/components/input.json b/web/public/locales/is/components/input.json index 0967ef424..392bb4342 100644 --- a/web/public/locales/is/components/input.json +++ b/web/public/locales/is/components/input.json @@ -1 +1,7 @@ -{} +{ + "button": { + "downloadVideo": { + "label": "Hala niður myndbandi" + } + } +} diff --git a/web/public/locales/is/views/configEditor.json b/web/public/locales/is/views/configEditor.json index 0967ef424..14f2f84d5 100644 --- a/web/public/locales/is/views/configEditor.json +++ b/web/public/locales/is/views/configEditor.json @@ -1 +1,3 @@ -{} +{ + "documentTitle": "Stillingastjórn - Frigate" +} diff --git a/web/public/locales/is/views/events.json b/web/public/locales/is/views/events.json index 0967ef424..6d1ed90af 100644 --- a/web/public/locales/is/views/events.json +++ b/web/public/locales/is/views/events.json @@ -1 +1,3 @@ -{} +{ + "alerts": "Atvik" +} diff --git a/web/public/locales/is/views/recording.json b/web/public/locales/is/views/recording.json index 0967ef424..3066802c6 100644 --- a/web/public/locales/is/views/recording.json +++ b/web/public/locales/is/views/recording.json @@ -1 +1,3 @@ -{} +{ + "filter": "Sía" +} diff --git a/web/public/locales/it/common.json b/web/public/locales/it/common.json index 7a7edb48c..4067fe4fc 100644 --- a/web/public/locales/it/common.json +++ b/web/public/locales/it/common.json @@ 
-139,7 +139,9 @@ "resetToDefault": "Ripristina impostazioni predefinite", "saveAll": "Salva tutto", "savingAll": "Salvataggio di tutto…", - "undoAll": "Annulla tutto" + "undoAll": "Annulla tutto", + "applying": "Applica…", + "retry": "Riprova" }, "unit": { "speed": { @@ -271,7 +273,9 @@ "withSystem": "Sistema", "faceLibrary": "Raccolta volti", "classification": "Classificazione", - "chat": "Chat" + "chat": "Chat", + "profiles": "Profili", + "actions": "Azioni" }, "pagination": { "next": { @@ -308,7 +312,8 @@ "title": "Impossibile salvare le modifiche alla configurazione: {{errorMessage}}", "noMessage": "Impossibile salvare le modifiche alla configurazione" }, - "title": "Salva" + "title": "Salva", + "success": "Modifiche alla configurazione salvate correttamente." } }, "selectItem": "Seleziona {{item}}", @@ -324,5 +329,7 @@ "field": { "optional": "Opzionale", "internalID": "L'ID interno che Frigate utilizza nella configurazione e nel database" - } + }, + "no_items": "Nessun elemento", + "validation_errors": "Errori di convalida" } diff --git a/web/public/locales/it/components/camera.json b/web/public/locales/it/components/camera.json index a681de1a5..29ee897f3 100644 --- a/web/public/locales/it/components/camera.json +++ b/web/public/locales/it/components/camera.json @@ -82,6 +82,7 @@ "zones": "Zone", "mask": "Maschera", "motion": "Movimento", - "regions": "Regioni" + "regions": "Regioni", + "paths": "Percorsi" } } diff --git a/web/public/locales/it/config/cameras.json b/web/public/locales/it/config/cameras.json index 0967ef424..491b69052 100644 --- a/web/public/locales/it/config/cameras.json +++ b/web/public/locales/it/config/cameras.json @@ -1 +1,31 @@ -{} +{ + "label": "Configurazione telecamera", + "name": { + "label": "Nome telecamera", + "description": "Il nome della telecamera è necessario" + }, + "friendly_name": { + "description": "Nome amichevole della telecamera utilizzato nell'interfaccia utente di Frigate", + "label": "Nome amichevole" + }, + 
"enabled": { + "label": "Abilitato", + "description": "Abilitato" + }, + "audio": { + "label": "Eventi audio", + "description": "Impostazioni per il rilevamento di eventi audio per questa telecamera.", + "enabled": { + "label": "Abilita il rilevamento audio", + "description": "Abilita o disabilita il rilevamento degli eventi audio per questa telecamera." + }, + "min_volume": { + "label": "Volume minimo" + } + }, + "ffmpeg": { + "path": { + "label": "Percorso FFmpeg" + } + } +} diff --git a/web/public/locales/it/config/global.json b/web/public/locales/it/config/global.json index 0967ef424..dbd4f3ec6 100644 --- a/web/public/locales/it/config/global.json +++ b/web/public/locales/it/config/global.json @@ -1 +1,51 @@ -{} +{ + "safe_mode": { + "label": "Modalità sicura", + "description": "Quando abilitata, avvia Frigate in modalità sicura con funzionalità ridotte per la risoluzione dei problemi." + }, + "environment_vars": { + "label": "Variabili d'ambiente", + "description": "Coppie chiave/valore di variabili d'ambiente da impostare per il processo Frigate in Home Assistant OS. Gli utenti non HAOS devono utilizzare la configurazione delle variabili d'ambiente di Docker." + }, + "version": { + "label": "Versione configurazione attuale", + "description": "Versione numerica o stringa della configurazione attiva per facilitare il rilevamento di migrazioni o modifiche di formato." + }, + "audio": { + "label": "Eventi audio", + "enabled": { + "label": "Abilita il rilevamento audio" + }, + "min_volume": { + "label": "Volume minimo" + } + }, + "logger": { + "description": "Consente di controllare il livello di dettaglio predefinito dei registri e le opzioni di sovrascrittura per ciascun componente.", + "default": { + "label": "Livello di registrazione", + "description": "Livello di dettaglio predefinito del registro globale (debug, info, warning, error)." 
+ }, + "logs": { + "label": "Livello di registro per processo", + "description": "Opzioni di sovrsacrittura del livello di registro per ciascun componente, per aumentare o diminuire il livello di dettaglio dei singoli moduli." + } + }, + "auth": { + "label": "Autenticazione", + "description": "Impostazioni di autenticazione e relative alla sessione, incluse le opzioni relative ai cookie e al limite di frequenza.", + "enabled": { + "label": "Abilita autenticazione", + "description": "Abilita l'autenticazione nativa per l'interfaccia utente di Frigate." + }, + "reset_admin_password": { + "label": "Reimposta la password di amministratore", + "description": "Se la condizione è vera, reimposta la password dell'utente amministratore all'avvio e stampa la nuova password nei registri." + } + }, + "ffmpeg": { + "path": { + "label": "Percorso FFmpeg" + } + } +} diff --git a/web/public/locales/it/config/groups.json b/web/public/locales/it/config/groups.json index 0967ef424..72164c31d 100644 --- a/web/public/locales/it/config/groups.json +++ b/web/public/locales/it/config/groups.json @@ -1 +1,73 @@ -{} +{ + "audio": { + "global": { + "detection": "Rilevamento globale", + "sensitivity": "Sensibilità globale" + }, + "cameras": { + "detection": "Rilevamento", + "sensitivity": "Sensibilità" + } + }, + "timestamp_style": { + "global": { + "appearance": "Aspetto globale" + }, + "cameras": { + "appearance": "Aspetto" + } + }, + "motion": { + "global": { + "algorithm": "Algoritmo globale", + "sensitivity": "Sensibilità globale" + }, + "cameras": { + "sensitivity": "Sensibilità", + "algorithm": "Algoritmo" + } + }, + "snapshots": { + "global": { + "display": "Visualizzazione globale" + }, + "cameras": { + "display": "Visualizzazione" + } + }, + "detect": { + "global": { + "tracking": "Tracciamento globale", + "resolution": "Risoluzione globale" + }, + "cameras": { + "resolution": "Risoluzione", + "tracking": "Tracciamento" + } + }, + "objects": { + "global": { + "tracking": 
"Tracciamento globale", + "filtering": "Filtro globale" + }, + "cameras": { + "filtering": "Filtro", + "tracking": "Tracciamento" + } + }, + "record": { + "global": { + "events": "Eventi globali", + "retention": "Conservazione globale" + }, + "cameras": { + "events": "Eventi", + "retention": "Conservazione" + } + }, + "ffmpeg": { + "cameras": { + "cameraFfmpeg": "Parametri FFmpeg specifici per la telecamera" + } + } +} diff --git a/web/public/locales/it/config/validation.json b/web/public/locales/it/config/validation.json index 0967ef424..a37fcd3c7 100644 --- a/web/public/locales/it/config/validation.json +++ b/web/public/locales/it/config/validation.json @@ -1 +1,8 @@ -{} +{ + "minimum": "Deve essere almeno {{limit}}", + "maximum": "Deve essere al massimo {{limit}}", + "exclusiveMinimum": "Deve essere maggiore di {{limit}}", + "exclusiveMaximum": "Deve essere minore di {{limit}}", + "minLength": "Deve essere almeno {{limit}} carattere(i)", + "maxLength": "Deve essere al massimo {{limit}} carattere(i)" +} diff --git a/web/public/locales/it/objects.json b/web/public/locales/it/objects.json index a512b0021..069acd07b 100644 --- a/web/public/locales/it/objects.json +++ b/web/public/locales/it/objects.json @@ -116,5 +116,10 @@ "an_post": "An Post", "purolator": "Purolator", "gls": "GLS", - "dpd": "DPD" + "dpd": "DPD", + "canada_post": "Canada Post", + "royal_mail": "Royal Mail", + "school_bus": "Autobus scolastico", + "skunk": "Puzzola", + "kangaroo": "Canguro" } diff --git a/web/public/locales/it/views/classificationModel.json b/web/public/locales/it/views/classificationModel.json index a35a39172..c5f0f7539 100644 --- a/web/public/locales/it/views/classificationModel.json +++ b/web/public/locales/it/views/classificationModel.json @@ -12,8 +12,12 @@ }, "toast": { "success": { - "deletedCategory": "Classe eliminata", - "deletedImage": "Immagini eliminate", + "deletedCategory_one": "{{count}} classe eliminata", + "deletedCategory_many": "{{count}} classi eliminate", + 
"deletedCategory_other": "{{count}} classi eliminate", + "deletedImage_one": "{{count}} immagine eliminata", + "deletedImage_many": "{{count}} immagini eliminate", + "deletedImage_other": "{{count}} immagini eliminate", "categorizedImage": "Immagine classificata con successo", "trainedModel": "Modello addestrato con successo.", "trainingModel": "Avviato con successo l'addestramento del modello.", @@ -21,7 +25,8 @@ "deletedModel_many": "Eliminati con successo {{count}} modelli", "deletedModel_other": "Eliminati con successo {{count}} modelli", "updatedModel": "Configurazione del modello aggiornata correttamente", - "renamedCategory": "Classe rinominata correttamente in {{name}}" + "renamedCategory": "Classe rinominata correttamente in {{name}}", + "reclassifiedImage": "Immagine riclassificata con successo" }, "error": { "deleteImageFailed": "Impossibile eliminare: {{errorMessage}}", @@ -31,7 +36,8 @@ "deleteModelFailed": "Impossibile eliminare il modello: {{errorMessage}}", "updateModelFailed": "Impossibile aggiornare il modello: {{errorMessage}}", "trainingFailedToStart": "Impossibile avviare l'addestramento del modello: {{errorMessage}}", - "renameCategoryFailed": "Impossibile rinominare la classe: {{errorMessage}}" + "renameCategoryFailed": "Impossibile rinominare la classe: {{errorMessage}}", + "reclassifyFailed": "Impossibile riclassificare l'immagine: {{errorMessage}}" } }, "deleteCategory": { @@ -156,8 +162,13 @@ "allImagesRequired_other": "Classifica tutte le immagini. Rimangono {{count}} immagini.", "modelCreated": "Modello creato correttamente. Utilizza la vista Classificazioni recenti per aggiungere immagini per gli stati mancanti, quindi addestrare il modello.", "missingStatesWarning": { - "title": "Esempi di stati mancanti", - "description": "Per ottenere risultati ottimali, si consiglia di selezionare esempi per tutti gli stati. 
È possibile continuare senza selezionare tutti gli stati, ma il modello non verrà addestrato finché tutti gli stati non avranno immagini. Dopo aver continuato, utilizza la vista Classificazioni recenti per classificare le immagini per gli stati mancanti, quindi addestra il modello." + "title": "Esempi di classi mancanti", + "description": "Non tutte le classi hanno esempi. Prova a generare nuovi esempi per trovare la classe mancante oppure continua e usa la vista Classificazioni recenti per aggiungere immagini in seguito." + }, + "refreshExamples": "Genera nuovi esempi", + "refreshConfirm": { + "title": "Generare nuovi esempi?", + "description": "Questo genererà una nuova serie di immagini e cancellerà tutte le selezioni, comprese le classi precedenti. Dovrai riselezionare gli esempi per tutte le classi." } } }, @@ -189,5 +200,7 @@ "noNewImages": "Nessuna nuova immagine da addestrare. Classifica prima più immagini nel database.", "noChanges": "Nessuna modifica al database dall'ultimo addestramento." 
}, - "none": "Nessuno" + "none": "Nessuno", + "reclassifyImageAs": "Riclassifica immagine come:", + "reclassifyImage": "Riclassifica immagine" } diff --git a/web/public/locales/it/views/explore.json b/web/public/locales/it/views/explore.json index 498e09465..7cb9b4b80 100644 --- a/web/public/locales/it/views/explore.json +++ b/web/public/locales/it/views/explore.json @@ -113,7 +113,8 @@ "attributes": "Attributi di classificazione", "title": { "label": "Titolo" - } + }, + "scoreInfo": "Informazioni sul punteggio" }, "objectLifecycle": { "annotationSettings": { @@ -221,12 +222,22 @@ "downloadCleanSnapshot": { "label": "Scarica istantanea pulita", "aria": "Scarica istantanea pulita" + }, + "debugReplay": { + "label": "Riproduzione di correzione", + "aria": "Visualizza questo oggetto tracciato nella vista di riproduzione di correzione" + }, + "more": { + "aria": "Altri" } }, "dialog": { "confirmDelete": { "desc": "L'eliminazione di questo oggetto tracciato rimuove l'istantanea, eventuali incorporamenti salvati e tutte le voci associate ai dettagli di tracciamento. Il filmato registrato di questo oggetto tracciato nella vista Storico NON verrà eliminato.

Vuoi davvero procedere?", "title": "Conferma eliminazione" + }, + "toast": { + "error": "Errore durante l'eliminazione di questo oggetto tracciato: {{errorMessage}}" } }, "trackedObjectDetails": "Dettagli dell'oggetto tracciato", diff --git a/web/public/locales/it/views/exports.json b/web/public/locales/it/views/exports.json index 232ac8254..63bebbefa 100644 --- a/web/public/locales/it/views/exports.json +++ b/web/public/locales/it/views/exports.json @@ -2,7 +2,9 @@ "documentTitle": "Esporta - Frigate", "search": "Cerca", "noExports": "Nessuna esportazione trovata", - "deleteExport": "Elimina esportazione", + "deleteExport": { + "label": "Elimina esportazione" + }, "deleteExport.desc": "Sei sicuro di voler eliminare {{exportName}}?", "editExport": { "desc": "Inserisci un nuovo nome per questa esportazione.", diff --git a/web/public/locales/it/views/faceLibrary.json b/web/public/locales/it/views/faceLibrary.json index 7ffd4dc48..12d640aa8 100644 --- a/web/public/locales/it/views/faceLibrary.json +++ b/web/public/locales/it/views/faceLibrary.json @@ -43,7 +43,8 @@ "updatedFaceScore": "Punteggio del volto aggiornato con successo a {{name}} ({{score}}).", "uploadedImage": "Immagine caricata correttamente.", "addFaceLibrary": "{{name}} è stato aggiunto con successo alla Libreria dei Volti!", - "renamedFace": "Rinominato correttamente il volto in {{name}}" + "renamedFace": "Rinominato correttamente il volto in {{name}}", + "reclassifiedFace": "Volto riclassificato con successo." 
}, "error": { "addFaceLibraryFailed": "Impossibile impostare il nome del volto: {{errorMessage}}", @@ -52,7 +53,8 @@ "trainFailed": "Impossibile addestrare: {{errorMessage}}", "updateFaceScoreFailed": "Impossibile aggiornare il punteggio del volto: {{errorMessage}}", "deleteNameFailed": "Impossibile eliminare il nome: {{errorMessage}}", - "renameFaceFailed": "Impossibile rinominare il volto: {{errorMessage}}" + "renameFaceFailed": "Impossibile rinominare il volto: {{errorMessage}}", + "reclassifyFailed": "Impossibile riclassificare il volto: {{errorMessage}}" } }, "imageEntry": { @@ -101,5 +103,7 @@ "desc_other": "Vuoi davvero eliminare {{count}} volti? Questa azione non può essere annullata." }, "nofaces": "Nessun volto disponibile", - "pixels": "{{area}}px" + "pixels": "{{area}}px", + "reclassifyFaceAs": "Riclassifica il volto come:", + "reclassifyFace": "Riclassifica il volto" } diff --git a/web/public/locales/it/views/live.json b/web/public/locales/it/views/live.json index 42a5264cc..7aa3302c9 100644 --- a/web/public/locales/it/views/live.json +++ b/web/public/locales/it/views/live.json @@ -1,5 +1,7 @@ { - "documentTitle": "Dal vivo - Frigate", + "documentTitle": { + "default": "In diretta - Frigate" + }, "documentTitle.withCamera": "{{camera}} - Dal vivo - Frigate", "lowBandwidthMode": "Modalità a bassa larghezza di banda", "twoWayTalk": { @@ -35,7 +37,7 @@ "autotracking": "Tracciamento automatico", "title": "Impostazioni di {{camera}}", "cameraEnabled": "Telecamera abilitata", - "objectDetection": "Rilevamento di oggetti", + "objectDetection": "Rilevamento oggetti", "recording": "Registrazione", "audioDetection": "Rilevamento audio", "transcription": "Trascrizione audio" @@ -54,8 +56,9 @@ "move": { "clickMove": { "enable": "Abilita clic per spostare", - "disable": "Disabilita il clic per spostare", - "label": "Fai clic nella cornice per centrare la telecamera" + "disable": "Disabilita clic per spostare", + "label": "Fai clic nella cornice per centrare la 
telecamera", + "enableWithZoom": "Abilita clic per muovere / trascina per ingrandire" }, "left": { "label": "Sposta la telecamera PTZ a sinistra" @@ -191,7 +194,7 @@ } }, "snapshot": { - "takeSnapshot": "Scarica l'istantanea attuale", + "takeSnapshot": "Scarica istantanea attuale", "noVideoSource": "Nessuna sorgente video disponibile per l'istantanea.", "captureFailed": "Impossibile catturare l'istantanea.", "downloadStarted": "Scaricamento istantanea avviato." diff --git a/web/public/locales/it/views/settings.json b/web/public/locales/it/views/settings.json index d6bf3715f..38951855e 100644 --- a/web/public/locales/it/views/settings.json +++ b/web/public/locales/it/views/settings.json @@ -16,7 +16,7 @@ }, "frigatePlus": { "snapshotConfig": { - "cleanCopyWarning": "Alcune telecamere hanno le istantanee abilitate ma la copia pulita disabilitata. È necessario abilitare clean_copy nella configurazione delle istantanee per poter inviare le immagini da queste telecamere a Frigate+.", + "cleanCopyWarning": "Alcune telecamere hanno la funzione di istantanea disabilitata", "table": { "snapshots": "Istantanee", "camera": "Telecamera", @@ -297,7 +297,7 @@ }, "restart_required": "Riavvio richiesto (maschere/zone modificate)", "motionMaskLabel": "Maschera di movimento {{number}}", - "objectMaskLabel": "Maschera di oggetto {{number}} ({{label}})" + "objectMaskLabel": "Maschera di oggetto {{number}}" }, "cameraSetting": { "camera": "Telecamera", @@ -389,7 +389,8 @@ "triggers": "Inneschi", "roles": "Ruoli", "cameraManagement": "Gestione", - "cameraReview": "Rivedi" + "cameraReview": "Rivedi", + "profiles": "Profili" }, "users": { "dialog": { diff --git a/web/public/locales/ja/config/cameras.json b/web/public/locales/ja/config/cameras.json index 0967ef424..8c5cb3254 100644 --- a/web/public/locales/ja/config/cameras.json +++ b/web/public/locales/ja/config/cameras.json @@ -1 +1,22 @@ -{} +{ + "label": "カメラ設定", + "name": { + "label": "カメラ名" + }, + "enabled": { + "label": "有効", + 
"description": "有効" + }, + "audio": { + "label": "音声イベント", + "enabled": { + "label": "音声検知を有効化" + }, + "min_volume": { + "label": "最小ボリューム" + }, + "filters": { + "label": "音声フィルタ" + } + } +} diff --git a/web/public/locales/ja/config/global.json b/web/public/locales/ja/config/global.json index 0967ef424..2073a59d8 100644 --- a/web/public/locales/ja/config/global.json +++ b/web/public/locales/ja/config/global.json @@ -1 +1,41 @@ -{} +{ + "safe_mode": { + "label": "セーフモード", + "description": "有効にすると、トラブルシューティングのため機能を制限したセーフモードでFrigateを起動します。" + }, + "environment_vars": { + "label": "環境変数" + }, + "audio": { + "label": "音声イベント", + "enabled": { + "label": "音声検知を有効化" + }, + "min_volume": { + "label": "最小ボリューム" + }, + "filters": { + "label": "音声フィルタ" + } + }, + "logger": { + "default": { + "label": "ログレベル" + }, + "logs": { + "label": "プロセス毎のログレベル" + } + }, + "auth": { + "label": "認証", + "enabled": { + "label": "認証を有効化" + }, + "reset_admin_password": { + "label": "adminパスワードをリセット" + } + }, + "version": { + "label": "現在の設定バージョン" + } +} diff --git a/web/public/locales/ja/config/groups.json b/web/public/locales/ja/config/groups.json index 0967ef424..7d0053948 100644 --- a/web/public/locales/ja/config/groups.json +++ b/web/public/locales/ja/config/groups.json @@ -1 +1,48 @@ -{} +{ + "audio": { + "global": { + "sensitivity": "グローバル感度", + "detection": "グローバル検出" + }, + "cameras": { + "detection": "検知", + "sensitivity": "感度" + } + }, + "timestamp_style": { + "cameras": { + "appearance": "外観" + } + }, + "motion": { + "cameras": { + "sensitivity": "感度", + "algorithm": "アルゴリズム" + } + }, + "detect": { + "global": { + "resolution": "グローバル解像度", + "tracking": "グローバルトラッキング" + }, + "cameras": { + "resolution": "解像度", + "tracking": "トラッキング" + } + }, + "objects": { + "global": { + "tracking": "グローバルトラッキング", + "filtering": "グローバルフィルタ" + }, + "cameras": { + "tracking": "トラッキング", + "filtering": "フィルタ" + } + }, + "record": { + "global": { + "events": "グローバルイベント" + } + } +} diff --git 
a/web/public/locales/ja/config/validation.json b/web/public/locales/ja/config/validation.json index 0967ef424..5b67869a7 100644 --- a/web/public/locales/ja/config/validation.json +++ b/web/public/locales/ja/config/validation.json @@ -1 +1,6 @@ -{} +{ + "pattern": "無効なフォーマット", + "required": "この項目は必須です", + "type": "無効な値タイプ", + "format": "無効なフォーマット" +} diff --git a/web/public/locales/ja/views/classificationModel.json b/web/public/locales/ja/views/classificationModel.json index e16f1fce5..180135390 100644 --- a/web/public/locales/ja/views/classificationModel.json +++ b/web/public/locales/ja/views/classificationModel.json @@ -12,11 +12,11 @@ }, "toast": { "success": { - "deletedImage": "削除された画像", + "deletedImage_other": "削除された画像", "categorizedImage": "画像の分類に成功しました", "trainedModel": "モデルを正常に学習させました。", "trainingModel": "モデルのトレーニングを正常に開始しました。", - "deletedCategory": "クラスを削除しました", + "deletedCategory_other": "クラスを削除しました", "deletedModel_other": "{{count}} 件のモデルを削除しました", "updatedModel": "モデル設定を更新しました", "renamedCategory": "クラス名を {{name}} に変更しました" diff --git a/web/public/locales/ja/views/exports.json b/web/public/locales/ja/views/exports.json index 3e8ce14d4..b32c8c62f 100644 --- a/web/public/locales/ja/views/exports.json +++ b/web/public/locales/ja/views/exports.json @@ -1,5 +1,5 @@ { - "documentTitle": "書き出し - Frigate", + "documentTitle": "エクスポート - Frigate", "noExports": "書き出しは見つかりません", "search": "検索", "deleteExport": "書き出しを削除", diff --git a/web/public/locales/ja/views/recording.json b/web/public/locales/ja/views/recording.json index 7d76d191f..e505c1302 100644 --- a/web/public/locales/ja/views/recording.json +++ b/web/public/locales/ja/views/recording.json @@ -1,7 +1,7 @@ { "filter": "フィルター", "calendar": "カレンダー", - "export": "書き出し", + "export": "エクスポート", "filters": "フィルター", "toast": { "error": { diff --git a/web/public/locales/ja/views/settings.json b/web/public/locales/ja/views/settings.json index 1e9f5cc52..324fec964 100644 --- a/web/public/locales/ja/views/settings.json +++ 
b/web/public/locales/ja/views/settings.json @@ -11,7 +11,9 @@ "frigatePlus": "Frigate+ 設定 - Frigate", "notifications": "通知設定 - Frigate", "cameraManagement": "カメラ設定 - Frigate", - "cameraReview": "カメラレビュー設定 - Frigate" + "cameraReview": "カメラレビュー設定 - Frigate", + "maintenance": "メンテナンス - Frigate", + "profiles": "プロファイル - Frigate" }, "menu": { "ui": "UI", @@ -26,7 +28,10 @@ "frigateplus": "Frigate+", "cameraManagement": "管理", "cameraReview": "レビュー", - "roles": "区分" + "roles": "区分", + "general": "一般", + "globalConfig": "グローバル設定", + "system": "システム" }, "dialog": { "unsavedChanges": { diff --git a/web/public/locales/ja/views/system.json b/web/public/locales/ja/views/system.json index b0694039d..d3f8f88a7 100644 --- a/web/public/locales/ja/views/system.json +++ b/web/public/locales/ja/views/system.json @@ -1,6 +1,6 @@ { "documentTitle": { - "cameras": "カメラ統計 - Frigate", + "cameras": "カメラ統計情報 - Frigate", "general": "一般統計 - Frigate", "storage": "ストレージ統計 - Frigate", "enrichments": "高度解析統計 - Frigate", @@ -33,6 +33,17 @@ "fetchingLogsFailed": "ログの取得エラー: {{errorMessage}}", "whileStreamingLogs": "ログのストリーミング中にエラー: {{errorMessage}}" } + }, + "websocket": { + "label": "メッセージ", + "pause": "一時停止", + "resume": "再開", + "clear": "クリア", + "filter": { + "events": "イベント", + "classification": "分類", + "face_recognition": "顔認識" + } } }, "general": { diff --git a/web/public/locales/ko/audio.json b/web/public/locales/ko/audio.json index 812746c7c..dac93a5c7 100644 --- a/web/public/locales/ko/audio.json +++ b/web/public/locales/ko/audio.json @@ -3,13 +3,13 @@ "snoring": "코골이", "singing": "노래", "yell": "비명", - "speech": "말소리", + "speech": "음성", "babbling": "옹알이", "bicycle": "자전거", "a_capella": "아카펠라", "accelerating": "가속", "accordion": "아코디언", - "acoustic_guitar": "어쿠스틱 기타", + "acoustic_guitar": "통기타", "car": "차량", "motorcycle": "원동기", "bus": "버스", @@ -17,7 +17,7 @@ "boat": "보트", "bird": "새", "cat": "고양이", - "dog": "강아지", + "dog": "개", "horse": "말", "sheep": "양", "skateboard": "스케이트보드", @@ -32,7 
+32,7 @@ "toothbrush": "칫솔", "vehicle": "탈 것", "animal": "동물", - "bark": "개", + "bark": "짖는 소리", "goat": "염소", "bellow": "포효", "whoop": "환성", @@ -68,5 +68,105 @@ "gargling": "가글", "stomach_rumble": "배 꼬르륵", "burping": "트림", - "camera": "카메라" + "camera": "카메라", + "hiccup": "딸꾹질", + "fart": "방귀", + "hands": "손", + "finger_snapping": "손가락 튕기기", + "clapping": "박수", + "heartbeat": "심장 박동", + "heart_murmur": "심장 잡음", + "cheering": "환호", + "applause": "환호", + "chatter": "수다", + "crowd": "군중", + "children_playing": "놀고 있는 아이들", + "pets": "반려동물", + "yip": "깽깽거림", + "howl": "하울링", + "bow_wow": "짖는 소리", + "growling": "으르렁거림", + "whimper_dog": "낑낑거림", + "purr": "가르릉거림", + "meow": "야옹", + "hiss": "하악질", + "caterwaul": "발정기 울음", + "livestock": "가축", + "clip_clop": "딸깍딸깍", + "neigh": "말 울음소리", + "cattle": "소", + "moo": "음메", + "cowbell": "워낭 소리", + "pig": "돼지", + "oink": "꿀꿀거림", + "bleat": "메에", + "fowl": "새", + "chicken": "닭", + "cluck": "닭 울음소리", + "cock_a_doodle_doo": "꼬꼬댁", + "turkey": "칠면조", + "gobble": "칠면조 울음소리", + "duck": "오리", + "quack": "오리 울음소리", + "goose": "거위", + "honk": "거위 울음소리", + "wild_animals": "야생 동물", + "roaring_cats": "맹수 포효", + "roar": "포효", + "chirp": "새 울음소리", + "squawk": "지저귐", + "pigeon": "비둘기", + "coo": "비둘기 울음소리", + "crow": "까마귀", + "caw": "까마귀 울음소리", + "owl": "부엉이", + "hoot": "부엉이 울음소리", + "flapping_wings": "날갯짓", + "bicycle_bell": "자전거 벨", + "tuning_fork": "소리굽쇠", + "chime": "차임벨", + "wind_chime": "풍경", + "harmonica": "하모니카", + "steel_guitar": "스틸 기타", + "tapping": "두드림", + "strum": "기타 스트로크", + "banjo": "밴조", + "sitar": "시타르", + "mandolin": "만돌린", + "zither": "지더", + "ukulele": "우쿨렐레", + "lawn_mower": "잔디깎이", + "chainsaw": "전기톱", + "medium_engine": "중형 엔진", + "heavy_engine": "대형 엔진", + "engine_knocking": "엔진 노킹", + "engine_starting": "엔진 시동", + "idling": "공회전", + "alarm": "알람", + "telephone": "전화", + "telephone_bell_ringing": "전화 소리", + "ringtone": "벨소리", + "telephone_dialing": "전화 다이얼", + "dial_tone": "발신음", + "cash_register": "금전등록기", + "printer": 
"프린터", + "single-lens_reflex_camera": "카메라 셔터", + "tools": "도구들", + "hammer": "망치", + "jackhammer": "착암기", + "sawing": "톱질", + "filing": "연마", + "sanding": "사포질", + "power_tool": "전동 도구", + "drill": "드릴", + "explosion": "폭발", + "gunshot": "총소리", + "machine_gun": "기관총", + "fusillade": "연속 총성", + "artillery_fire": "포격", + "cap_gun": "화약 총", + "fireworks": "불꽃놀이", + "firecracker": "폭죽", + "car_alarm": "차량 경보", + "power_windows": "전동 창문" } diff --git a/web/public/locales/ko/common.json b/web/public/locales/ko/common.json index 103e54c1f..80293f4f0 100644 --- a/web/public/locales/ko/common.json +++ b/web/public/locales/ko/common.json @@ -184,7 +184,8 @@ "faceLibrary": "얼굴 라이브러리", "classification": "분류", "chat": "채팅", - "actions": "작업" + "actions": "작업", + "profiles": "프로필" }, "unit": { "speed": { @@ -260,7 +261,8 @@ "saveAll": "모두 저장", "savingAll": "모두 저장 중. …", "undoAll": "모두 실행 취소", - "applying": "적용 중…" + "applying": "적용 중…", + "retry": "재시도" }, "toast": { "copyUrlToClipboard": "클립보드에 URL이 복사되었습니다.", @@ -269,7 +271,8 @@ "error": { "title": "설정 저장 실패: {{errorMessage}}", "noMessage": "설정 저장이 실패했습니다" - } + }, + "success": "설정 변경이 성공적으로 저장되었습니다." 
} }, "role": { @@ -302,5 +305,7 @@ "field": { "optional": "선택", "internalID": "Frigate 내부 ID는 구성 및 데이터베이스에서 사용됩니다" - } + }, + "no_items": "내역 없음", + "validation_errors": "검증 오류" } diff --git a/web/public/locales/ko/objects.json b/web/public/locales/ko/objects.json index e3506b15d..da5ca783b 100644 --- a/web/public/locales/ko/objects.json +++ b/web/public/locales/ko/objects.json @@ -15,7 +15,7 @@ "bench": "벤치", "bird": "새", "cat": "고양이", - "dog": "강아지", + "dog": "개", "horse": "말", "sheep": "양", "cow": "소", @@ -93,7 +93,7 @@ "squirrel": "다람쥐", "deer": "사슴", "animal": "동물", - "bark": "개", + "bark": "짖는 소리", "fox": "여우", "goat": "염소", "rabbit": "토끼", diff --git a/web/public/locales/ko/views/settings.json b/web/public/locales/ko/views/settings.json index 969d92d08..c17eaa7fd 100644 --- a/web/public/locales/ko/views/settings.json +++ b/web/public/locales/ko/views/settings.json @@ -112,7 +112,8 @@ "cameraTimestampStyle": "타임스탬프 스타일", "cameraMqtt": "카메라 MQTT", "mediaSync": "미디어 동기화", - "regionGrid": "영역 격자" + "regionGrid": "영역 격자", + "profiles": "프로필" }, "dialog": { "unsavedChanges": { diff --git a/web/public/locales/ko/views/system.json b/web/public/locales/ko/views/system.json index dc6609496..063137064 100644 --- a/web/public/locales/ko/views/system.json +++ b/web/public/locales/ko/views/system.json @@ -7,7 +7,8 @@ "logs": { "frigate": "Frigate 로그 -Frigate", "go2rtc": "Go2RTC 로그 - Frigate", - "nginx": "Nginx 로그 - Frigate" + "nginx": "Nginx 로그 - Frigate", + "websocket": "메세지 로그 - Frigate" } }, "title": "시스템", @@ -33,6 +34,29 @@ "fetchingLogsFailed": "로그 가져오기 오류: {{errorMessage}}", "whileStreamingLogs": "스크리밍 로그 중 오류: {{errorMessage}}" } + }, + "websocket": { + "label": "메세지", + "pause": "일시중지", + "resume": "재개", + "clear": "비우기", + "filter": { + "all": "전체 항목", + "topics": "항목", + "events": "이벤트", + "reviews": "리뷰", + "classification": "분류", + "face_recognition": "얼굴 인식", + "lpr": "번호판 인식", + "system": "시스템", + "camera": "카메라", + "all_cameras": "모든 카메라", + 
"cameras_count_one": "{{count}} 카메라", + "cameras_count_other": "{{count}} 카메라" + }, + "empty": "수신된 메시지 없음", + "count_one": "{{count}} 메세지", + "count_other": "{{count}} 메세지" } }, "general": { diff --git a/web/public/locales/lt/views/classificationModel.json b/web/public/locales/lt/views/classificationModel.json index fdebdf21a..878ae22b8 100644 --- a/web/public/locales/lt/views/classificationModel.json +++ b/web/public/locales/lt/views/classificationModel.json @@ -12,8 +12,12 @@ }, "toast": { "success": { - "deletedCategory": "Ištrinta Klasę", - "deletedImage": "Ištrinti Nuotraukas", + "deletedCategory_one": "Ištrinta Klasę", + "deletedCategory_few": "", + "deletedCategory_other": "", + "deletedImage_one": "Ištrinti Nuotraukas", + "deletedImage_few": "", + "deletedImage_other": "", "categorizedImage": "Sekmingai Klasifikuotas Nuotrauka", "trainedModel": "Modelis sėkmingai apmokytas.", "trainingModel": "Sėkmingai pradėtas modelio apmokymas.", diff --git a/web/public/locales/nb-NO/common.json b/web/public/locales/nb-NO/common.json index a614cced1..921ddc77b 100644 --- a/web/public/locales/nb-NO/common.json +++ b/web/public/locales/nb-NO/common.json @@ -123,7 +123,19 @@ "export": "Eksporter", "deleteNow": "Slett nå", "next": "Neste", - "continue": "Fortsett" + "continue": "Fortsett", + "add": "Legg til", + "undo": "Angre", + "copiedToClipboard": "Kopiert til utklippstavlen", + "modified": "Modifisert", + "saveAll": "Lagre alt", + "savingAll": "Lagrer alt…", + "undoAll": "Angre alt", + "applying": "Bruker…", + "overridden": "Overstyrt", + "resetToGlobal": "Tilbakestill til global", + "resetToDefault": "Tilbakestill til standard", + "retry": "Prøv igjen" }, "menu": { "help": "Hjelp", @@ -226,7 +238,10 @@ "default": "Standard", "highcontrast": "Høy kontrast" }, - "classification": "Klassifisering" + "classification": "Klassifisering", + "profiles": "Profiler", + "chat": "Chat", + "actions": "Handlinger" }, "pagination": { "next": { @@ -274,7 +289,8 @@ "error": { "title": 
"Kunne ikke lagre endringer i konfigurasjonen: {{errorMessage}}", "noMessage": "Kunne ikke lagre endringer i konfigurasjonen" - } + }, + "success": "Konfigurasjonsendringer lagret." } }, "role": { @@ -306,5 +322,7 @@ "two": "{{0}} og {{1}}", "many": "{{items}}, og {{last}}", "separatorWithSpace": ", " - } + }, + "validation_errors": "Valideringsfeil", + "no_items": "Ingen elementer" } diff --git a/web/public/locales/nb-NO/components/camera.json b/web/public/locales/nb-NO/components/camera.json index 750e09e63..601da4bc1 100644 --- a/web/public/locales/nb-NO/components/camera.json +++ b/web/public/locales/nb-NO/components/camera.json @@ -77,11 +77,12 @@ "showOptions": "Vis alternativer", "hideOptions": "Skjul alternativer" }, - "boundingBox": "Avgrensningsboks", + "boundingBox": "Markeringsramme", "timestamp": "Tidsstempel", "zones": "Soner", "mask": "Maske", "motion": "Bevegelse", - "regions": "Regioner" + "regions": "Regioner", + "paths": "Stier" } } diff --git a/web/public/locales/nb-NO/components/dialog.json b/web/public/locales/nb-NO/components/dialog.json index fb9bb312d..6f38ca424 100644 --- a/web/public/locales/nb-NO/components/dialog.json +++ b/web/public/locales/nb-NO/components/dialog.json @@ -6,7 +6,8 @@ "title": "Frigate starter på nytt", "button": "Tving omlasting nå", "content": "Denne siden vil lastes inn på nytt om {{countdown}} sekunder." - } + }, + "description": "Dette vil stoppe Frigate et øyeblikk mens det starter på nytt." 
}, "explore": { "plus": { @@ -73,7 +74,11 @@ }, "select": "Velg", "export": "Eksporter", - "selectOrExport": "Velg eller eksporter" + "selectOrExport": "Velg eller eksporter", + "case": { + "label": "Sak", + "placeholder": "Velg en sak" + } }, "streaming": { "label": "Strøm", diff --git a/web/public/locales/nb-NO/config/cameras.json b/web/public/locales/nb-NO/config/cameras.json index 0967ef424..ef94b6f35 100644 --- a/web/public/locales/nb-NO/config/cameras.json +++ b/web/public/locales/nb-NO/config/cameras.json @@ -1 +1,945 @@ -{} +{ + "mqtt": { + "label": "MQTT", + "bounding_box": { + "description": "Tegn markeringsrammer på bilder som publiseres over MQTT.", + "label": "Legg til markeringsramme" + }, + "crop": { + "description": "Beskjær bilder publisert til MQTT til det detekterte objektets markeringsramme.", + "label": "Beskjær bilde" + }, + "description": "Innstillinger for bilde-publisering via MQTT.", + "enabled": { + "description": "Aktiver publisering av stillbilder for objekter til MQTT-emner for dette kameraet.", + "label": "Send bilde" + }, + "height": { + "description": "Høyde (piksler) for bilder som publiseres over MQTT.", + "label": "Bildehøyde" + }, + "quality": { + "description": "JPEG-kvalitet for bilder publisert til MQTT (0-100).", + "label": "JPEG-kvalitet" + }, + "required_zones": { + "description": "Soner et objekt må tre inn i for at et MQTT-bilde skal publiseres.", + "label": "Påkrevde soner" + }, + "timestamp": { + "description": "Legg et tidsstempel over bilder som publiseres til MQTT.", + "label": "Legg til tidsstempel" + } + }, + "notifications": { + "label": "Varslinger", + "enabled": { + "label": "Aktiver varslinger", + "description": "Aktiver eller deaktiver varslinger for dette kameraet." + }, + "email": { + "label": "E-postadresse for varsling", + "description": "E-postadresse som brukes for push-varslinger eller som kreves av visse varslingstjenester." 
+ }, + "cooldown": { + "label": "Nedkjølingsperiode", + "description": "Nedkjøling (sekunder) mellom varslinger for å unngå å spamme mottakere." + }, + "enabled_in_config": { + "label": "Opprinnelig varslingsstatus", + "description": "Indikerer om varslinger var aktivert i den opprinnelige statiske konfigurasjonen." + }, + "description": "Innstillinger for å aktivere og kontrollere varslinger for dette kameraet." + }, + "audio": { + "label": "Lydhendelser", + "enabled": { + "label": "Aktiver lyddeteksjon", + "description": "Aktiver eller deaktiver deteksjon av lydhendelser for dette kameraet." + }, + "max_not_heard": { + "label": "Tidsavbrudd for avslutning", + "description": "Antall sekunder uten den konfigurerte lydtypen før lydhendelsen avsluttes." + }, + "min_volume": { + "label": "Minimumsvolum", + "description": "Minimum terskel for RMS-volum som kreves for å kjøre lyddeteksjon; lavere verdier øker følsomheten (f.eks. 200 høy, 500 middels, 1000 lav)." + }, + "listen": { + "label": "Lyttetyper", + "description": "Liste over typer lydhendelser som skal detekteres (f.eks. bjeff, brannalarm, skrik, tale, rop)." + }, + "filters": { + "label": "Lydfiltre", + "description": "Filterinnstillinger per lydtype, som konfidensterskler for å redusere falske positive." + }, + "enabled_in_config": { + "label": "Opprinnelig lydstatus", + "description": "Indikerer om lyddeteksjon opprinnelig var aktivert i den statiske konfigurasjonsfilen." + }, + "num_threads": { + "label": "Deteksjonstråder", + "description": "Antall tråder som skal brukes til prosessering av lyddeteksjon." + }, + "description": "Innstillinger for lydbasert hendelsesdeteksjon for dette kameraet." + }, + "birdseye": { + "label": "Fugleperspektiv", + "description": "Innstillinger for Fugleperspektiv (Birdseye) som setter sammen flere kamerastrømmer til ett felles oppsett.", + "enabled": { + "label": "Aktiver Fugleperspektiv", + "description": "Aktiver eller deaktiver funksjonen for Fugleperspektiv." 
+ }, + "mode": { + "label": "Sporingsmodus", + "description": "Modus for å inkludere kameraer i Fugleperspektiv: 'objects', 'motion' eller 'continuous'." + }, + "order": { + "label": "Posisjon", + "description": "Numerisk posisjon som kontrollerer kameraenes rekkefølge i Fugleperspektiv-oppsettet." + } + }, + "detect": { + "label": "Objektdeteksjon", + "description": "Innstillinger for deteksjonsrollen brukt til å kjøre objektdeteksjon og starte sporing (trackere).", + "enabled": { + "label": "Aktiver objektdeteksjon", + "description": "Aktiver eller deaktiver objektdeteksjon for dette kameraet." + }, + "height": { + "label": "Deteksjonshøyde", + "description": "Høyde (piksler) på bilder brukt for deteksjonsstrømmen; la stå tom for å bruke strømmens opprinnelige oppløsning." + }, + "width": { + "label": "Deteksjonsbredde", + "description": "Bredde (piksler) på bilder brukt for deteksjonsstrømmen; la stå tom for å bruke strømmens opprinnelige oppløsning." + }, + "fps": { + "label": "Deteksjons-FPS", + "description": "Ønsket antall bilder per sekund (FPS) for deteksjon; lavere verdier reduserer CPU-bruk (anbefalt verdi er 5, sett kun høyere – maks 10 – ved sporing av objekter i svært høy fart)." + }, + "min_initialized": { + "label": "Minimum initialiseringsbilder", + "description": "Antall påfølgende deteksjonstreff som kreves før et sporet objekt opprettes. Øk for å redusere falske initialiseringer. Standardverdi er FPS delt på 2." + }, + "max_disappeared": { + "label": "Maks bilder borte", + "description": "Antall bilder uten deteksjon før et sporet objekt anses som borte." + }, + "stationary": { + "label": "Konfigurasjon for stasjonære objekter", + "description": "Innstillinger for å detektere og håndtere objekter som forblir i ro over en viss tid.", + "interval": { + "label": "Intervall for stasjonære objekter", + "description": "Hvor ofte (i antall bilder) det skal kjøres en deteksjonssjekk for å bekrefte et stasjonært objekt." 
+ }, + "threshold": { + "label": "Terskel for stasjonære objekter", + "description": "Antall bilder uten posisjonsendring som kreves for å markere et objekt som stasjonært." + }, + "max_frames": { + "label": "Maks antall bilder", + "description": "Begrenser hvor lenge stasjonære objekter spores før de forkastes.", + "default": { + "label": "Standard maks bilder", + "description": "Standard maksimalt antall bilder et stasjonært objekt spores før det stoppes." + }, + "objects": { + "label": "Maks bilder per objekt", + "description": "Overstyringer per objekttype for maksimalt antall bilder stasjonære objekter skal spores." + } + }, + "classifier": { + "label": "Aktiver visuell klassifiserer", + "description": "Bruk en visuell klassifiserer for å detektere reelt stasjonære objekter selv når markeringsrammene \"skjelver\" (jitter)." + } + }, + "annotation_offset": { + "label": "Forskyvning av annotering", + "description": "Millisekunder for å forskyve deteksjonsannoteringer for bedre samsvar mellom markeringsrammer på tidslinjen og opptakene; kan være positiv eller negativ." + } + }, + "ffmpeg": { + "label": "FFmpeg", + "description": "FFmpeg-innstillinger, inkludert sti til binærfil, argumenter, alternativer for maskinvareakselerasjon og utdata-argumenter per rolle.", + "path": { + "label": "FFmpeg-sti", + "description": "Sti til FFmpeg-binærfilen som skal brukes, eller et versjonsalias (\"5.0\" eller \"7.0\")." + }, + "global_args": { + "label": "Globale FFmpeg-argumenter", + "description": "Globale argumenter som sendes til FFmpeg-prosesser." + }, + "hwaccel_args": { + "label": "Argumenter for maskinvareakselerasjon", + "description": "Argumenter for maskinvareakselerasjon i FFmpeg. Leverandørspesifikke forhåndsinnstillinger anbefales." + }, + "input_args": { + "label": "Inndata-argumenter", + "description": "Inndata-argumenter som brukes på FFmpeg-innstrømmer." 
+ }, + "output_args": { + "label": "Utdata-argumenter", + "description": "Standard utdata-argumenter brukt for ulike FFmpeg-roller som deteksjon og opptak.", + "detect": { + "label": "Utdata-argumenter for deteksjon", + "description": "Standard utdata-argumenter for strømmer med deteksjonsrolle." + }, + "record": { + "label": "Utdata-argumenter for opptak", + "description": "Standard utdata-argumenter for strømmer med opptaksrolle." + } + }, + "retry_interval": { + "label": "FFmpeg-forsøksintervall", + "description": "Sekunder å vente før man prøver å koble til en kamerastrøm på nytt etter feil. Standard er 10." + }, + "apple_compatibility": { + "label": "Apple-kompatibilitet", + "description": "Aktiver HEVC-tagging for bedre kompatibilitet med Apple-avspillere ved opptak i H.265." + }, + "gpu": { + "label": "GPU-indeks", + "description": "Standard GPU-indeks som brukes til maskinvareakselerasjon hvis tilgjengelig." + }, + "inputs": { + "label": "Kamerainndata", + "description": "Liste over definisjoner for inndatastrømmer (stier og roller) for dette kameraet.", + "path": { + "label": "Inndatasti", + "description": "URL eller sti for kameraets inndatastrøm." + }, + "roles": { + "label": "Inndataroller", + "description": "Roller for denne inndatastrømmen." + }, + "global_args": { + "label": "Globale FFmpeg-argumenter", + "description": "Globale FFmpeg-argumenter for denne inndatastrømmen." + }, + "hwaccel_args": { + "label": "Argumenter for maskinvareakselerasjon", + "description": "Argumenter for maskinvareakselerasjon for denne inndatastrømmen." + }, + "input_args": { + "label": "Inndata-argumenter", + "description": "Inndata-argumenter spesifisert for denne strømmen." + } + } + }, + "live": { + "label": "Direkteavspilling", + "streams": { + "label": "Navn på direktestrømmer", + "description": "Kobling mellom konfigurerte strøm-navn og restream/go2rtc-navn brukt for direkteavspilling." 
+ }, + "height": { + "label": "Direktevisningshøyde", + "description": "Høyde (piksler) for jsmpeg-direktestrømmen i web-grensesnittet; må være <= høyden på deteksjonsstrømmen." + }, + "quality": { + "label": "Direktevisningskvalitet", + "description": "Kodingskvalitet for jsmpeg-strømmen (1 høyest, 31 lavest)." + }, + "description": "Innstillinger brukt av web-grensesnittet for valg av direktestrøm, oppløsning og kvalitet." + }, + "motion": { + "label": "Bevegelsesdeteksjon", + "enabled": { + "label": "Aktiver bevegelsesdeteksjon", + "description": "Aktiver eller deaktiver bevegelsesdeteksjon for dette kameraet." + }, + "threshold": { + "label": "Terskel for bevegelse", + "description": "Terskel for pikselendring brukt av bevegelsesdetektoren; høyere verdier reduserer følsomheten (intervall 1–255)." + }, + "lightning_threshold": { + "label": "Terskel for lyn/lysglimt", + "description": "Terskel for å oppdage og ignorere korte lysglimt (lavere er mer følsom, verdier mellom 0,3 og 1,0). Dette stopper ikke bevegelsesdeteksjon helt; det fører bare til at detektoren slutter å analysere flere bilder når terskelen er nådd. Bevegelsesbaserte opptak blir fortsatt laget under slike hendelser." + }, + "skip_motion_threshold": { + "label": "Terskel for å hoppe over bevegelse", + "description": "Hvis satt til en verdi mellom 0,0 og 1,0, og mer enn denne andelen av bildet endres i ett enkelt bilde, vil detektoren ikke returnere noen bevegelsesbokser og kalibrere på nytt umiddelbart. Dette kan spare CPU og redusere falske positive under lyn, storm, osv., men kan gå glipp av ekte hendelser som at et PTZ-kamera autosporer et objekt. Avveiningen står mellom å miste noen megabyte med opptak mot å måtte se gjennom et par korte klipp. La stå tom (None) for å deaktivere denne funksjonen." + }, + "improve_contrast": { + "label": "Forbedre kontrast", + "description": "Bruk kontrastforbedring på bilder før bevegelsesanalyse for å hjelpe deteksjonen." 
+ }, + "contour_area": { + "label": "Konturområde", + "description": "Minimum konturområde i piksler som kreves for at en bevegelseskontur skal telles med." + }, + "delta_alpha": { + "label": "Delta alfa", + "description": "Alfa-blandingsfaktor brukt i bildedifferensiering for bevegelsesberegning." + }, + "frame_alpha": { + "label": "Bilde-alfa", + "description": "Alfa-verdi brukt ved sammenfletting av bilder for forhåndsbehandling av bevegelse." + }, + "frame_height": { + "label": "Bildehøyde", + "description": "Høyde i piksler som bildene skal skaleres til ved beregning av bevegelse." + }, + "mask": { + "label": "Maskekoordinater", + "description": "Sorterte x,y-koordinater som definerer polygonet for bevegelsesmasken brukt til å inkludere/ekskludere områder." + }, + "mqtt_off_delay": { + "label": "MQTT-av-forsinkelse", + "description": "Sekunder å vente etter siste bevegelse før en MQTT 'av'-status publiseres." + }, + "enabled_in_config": { + "label": "Opprinnelig bevegelsesstatus", + "description": "Indikerer om bevegelsesdeteksjon var aktivert i den opprinnelige statiske konfigurasjonen." + }, + "raw_mask": { + "label": "Råmaske" + }, + "description": "Standardinnstillinger for bevegelsesdeteksjon for dette kameraet." + }, + "objects": { + "label": "Objekter", + "description": "Standardinnstillinger for objektsporing, inkludert hvilke etiketter som skal spores og filtre per objekt.", + "track": { + "label": "Objekter som skal spores", + "description": "Liste over objektetiketter som skal spores for dette kameraet." + }, + "filters": { + "label": "Objektfiltre", + "description": "Filtre som brukes på detekterte objekter for å redusere falske positive (område, forhold, konfidens).", + "min_area": { + "label": "Minimum objektområde", + "description": "Minimum areal for markeringsramme (piksler eller prosent) som kreves for denne objekttypen. Kan oppgis i piksler (heltall) eller prosent (desimaltall mellom 0,000001 og 0,99)." 
+ }, + "max_area": { + "label": "Maksimum objektområde", + "description": "Maksimalt areal for markeringsramme (piksler eller prosent) tillatt for denne objekttypen." + }, + "min_ratio": { + "label": "Minimum størrelsesforhold", + "description": "Minimum forhold mellom bredde og høyde som kreves for at markeringsrammen skal kvalifisere." + }, + "max_ratio": { + "label": "Maksimum størrelsesforhold", + "description": "Maksimalt forhold mellom bredde og høyde tillatt for at markeringsrammen skal kvalifisere." + }, + "threshold": { + "label": "Konfidensterskel", + "description": "Gjennomsnittlig terskel for deteksjonskonfidens som kreves for at objektet skal anses som en ekte positiv." + }, + "min_score": { + "label": "Minimum konfidens", + "description": "Minimum deteksjonskonfidens for et enkeltbilde som kreves for at objektet skal telles med." + }, + "mask": { + "label": "Filtermaske", + "description": "Polygonkoordinater som definerer hvor dette filteret gjelder innenfor bildet." + }, + "raw_mask": { + "label": "Råmaske" + } + }, + "mask": { + "label": "Objektmaske", + "description": "Maskepolygon brukt for å forhindre objektdeteksjon i spesifiserte områder." + }, + "raw_mask": { + "label": "Råmaske" + }, + "genai": { + "label": "GenAI-objektkonfigurasjon", + "description": "GenAI-alternativer for å beskrive sporede objekter og sende bilder til generering.", + "enabled": { + "label": "Aktiver GenAI", + "description": "Aktiver GenAI-generering av beskrivelser for sporede objekter som standard." + }, + "use_snapshot": { + "label": "Bruk stillbilder", + "description": "Bruk stillbilder av objekter i stedet for miniatyrbilder for GenAI-beskrivelsesgenerering." + }, + "prompt": { + "label": "Ledetekst for bildetekst", + "description": "Standardmal for ledetekst brukt ved generering av beskrivelser med GenAI." 
+ }, + "object_prompts": { + "label": "Objektspesifikke ledetekster", + "description": "Ledetekster per objekt for å tilpasse GenAI-resultater for spesifikke etiketter." + }, + "objects": { + "label": "GenAI-objekter", + "description": "Liste over objektetiketter som skal sendes til GenAI som standard." + }, + "required_zones": { + "label": "Påkrevde soner", + "description": "Soner som må entres for at objekter skal kvalifisere for GenAI-beskrivelsesgenerering." + }, + "debug_save_thumbnails": { + "label": "Lagre miniatyrbilder", + "description": "Lagre miniatyrbilder sendt til GenAI for feilsøking og inspeksjon." + }, + "send_triggers": { + "label": "GenAI-utløsere", + "description": "Definerer når bilder skal sendes til GenAI (ved slutt, etter oppdateringer, osv.).", + "tracked_object_end": { + "label": "Send ved avslutning", + "description": "Send en forespørsel til GenAI når det sporede objektet avsluttes." + }, + "after_significant_updates": { + "label": "Tidlig GenAI-utløser", + "description": "Send en forespørsel til GenAI etter et spesifisert antall signifikante oppdateringer for det sporede objektet." + } + }, + "enabled_in_config": { + "label": "Opprinnelig GenAI-status", + "description": "Indikerer om GenAI var aktivert i den opprinnelige statiske konfigurasjonen." + } + } + }, + "record": { + "label": "Opptak", + "enabled": { + "label": "Aktiver opptak", + "description": "Aktiver eller deaktiver opptak for dette kameraet." + }, + "expire_interval": { + "label": "Intervall for opprydding av opptak", + "description": "Minutter mellom hver opprydding som fjerner foreldede opptakssegmenter." + }, + "continuous": { + "label": "Kontinuerlig bevaring", + "description": "Antall dager opptak skal bevares uavhengig av sporede objekter eller bevegelse.", + "days": { + "label": "Bevaringsdager", + "description": "Dager opptak skal bevares." 
+ } + }, + "motion": { + "label": "Bevaring ved bevegelse", + "description": "Antall dager opptak utløst av bevegelse skal bevares uavhengig av sporede objekter.", + "days": { + "label": "Bevaringsdager", + "description": "Dager opptak skal bevares." + } + }, + "detections": { + "label": "Bevaring ved deteksjon", + "description": "Innstillinger for bevaring av opptak for deteksjonshendelser, inkludert varighet for forhånds-/etteropptak.", + "pre_capture": { + "label": "Sekunder forhåndsopptak", + "description": "Antall sekunder før deteksjonshendelsen som skal inkluderes i opptaket." + }, + "post_capture": { + "label": "Sekunder etteropptak", + "description": "Antall sekunder etter deteksjonshendelsen som skal inkluderes i opptaket." + }, + "retain": { + "label": "Hendelsesbevaring", + "description": "Bevaringsinnstillinger for opptak av deteksjonshendelser.", + "days": { + "label": "Bevaringsdager", + "description": "Antall dager opptak av deteksjonshendelser skal bevares." + }, + "mode": { + "label": "Bevaringsmodus", + "description": "Modus for bevaring: all (alle), motion (bevegelse) eller active_objects (aktive objekter)." + } + } + }, + "alerts": { + "label": "Bevaring av varsler", + "description": "Innstillinger for bevaring av opptak for varslingshendelser, inkludert varighet for forhånds-/etteropptak.", + "pre_capture": { + "label": "Sekunder forhåndsopptak", + "description": "Antall sekunder før deteksjonshendelsen som skal inkluderes i opptaket." + }, + "post_capture": { + "label": "Sekunder etteropptak", + "description": "Antall sekunder etter deteksjonshendelsen som skal inkluderes i opptaket." + }, + "retain": { + "label": "Hendelsesbevaring", + "description": "Bevaringsinnstillinger for opptak av deteksjonshendelser.", + "days": { + "label": "Bevaringsdager", + "description": "Antall dager opptak av deteksjonshendelser skal bevares." 
+ }, + "mode": { + "label": "Bevaringsmodus", + "description": "Modus for bevaring: all (lagre alle segmenter), motion (lagre segmenter med bevegelse) eller active_objects (lagre segmenter med aktive objekter)." + } + } + }, + "export": { + "label": "Konfigurasjon for eksport", + "description": "Innstillinger som brukes ved eksport av opptak, som for eksempel tidsforløp (timelapse) og maskinvareakselerasjon.", + "hwaccel_args": { + "label": "Argumenter for maskinvareakselerasjon ved eksport", + "description": "Argumenter for maskinvareakselerasjon som skal brukes ved eksport og transkoding." + } + }, + "preview": { + "label": "Konfigurasjon for forhåndsvisning", + "description": "Innstillinger som kontrollerer kvaliteten på forhåndsvisninger av opptak i grensesnittet.", + "quality": { + "label": "Kvalitet på forhåndsvisning", + "description": "Kvalitetsnivå for forhåndsvisning (very_low, low, medium, high, very_high)." + } + }, + "enabled_in_config": { + "label": "Opprinnelig opptaksstatus", + "description": "Indikerer om opptak var aktivert i den opprinnelige statiske konfigurasjonen." + }, + "description": "Innstillinger for opptak og bevaring for dette kameraet." + }, + "review": { + "label": "Inspeksjon", + "alerts": { + "label": "Konfigurasjon for varsler", + "description": "Innstillinger for hvilke sporede objekter som genererer varsler og hvordan disse bevares.", + "enabled": { + "label": "Aktiver varsler", + "description": "Aktiver eller deaktiver generering av varsler for dette kameraet." + }, + "labels": { + "label": "Varslingsetiketter", + "description": "Liste over objektetiketter som kvalifiserer som varsler (for eksempel: bil, person)." + }, + "required_zones": { + "label": "Påkrevde soner", + "description": "Soner et objekt må tre inn i for å anses som et varsel; la stå tom for å tillate alle soner." 
+ }, + "enabled_in_config": { + "label": "Opprinnelig varslingsstatus", + "description": "Registrerer om varsler opprinnelig var aktivert i den statiske konfigurasjonen." + }, + "cutoff_time": { + "label": "Avskjæringstid for varsler", + "description": "Sekunder å vente etter at varslingsutløsende aktivitet har opphørt før et varsel avsluttes." + } + }, + "detections": { + "label": "Konfigurasjon for deteksjoner", + "description": "Innstillinger for hvilke sporede objekter som genererer deteksjoner (ikke-varsler) og hvordan disse bevares.", + "enabled": { + "label": "Aktiver deteksjoner", + "description": "Aktiver eller deaktiver deteksjonshendelser for dette kameraet." + }, + "labels": { + "label": "Deteksjonsetiketter", + "description": "Liste over objektetiketter som kvalifiserer som deteksjonshendelser." + }, + "required_zones": { + "label": "Påkrevde soner", + "description": "Soner et objekt må tre inn i for å anses som en deteksjon; la stå tom for å tillate alle soner." + }, + "cutoff_time": { + "label": "Avskjæringstid for deteksjoner", + "description": "Sekunder å vente etter at deteksjonsutløsende aktivitet har opphørt før en deteksjon avsluttes." + }, + "enabled_in_config": { + "label": "Opprinnelig deteksjonsstatus", + "description": "Registrerer om deteksjoner opprinnelig var aktivert i den statiske konfigurasjonen." + } + }, + "genai": { + "label": "GenAI-konfigurasjon", + "description": "Kontrollerer bruk av generativ AI for å produsere beskrivelser og sammendrag av inspeksjonselementer.", + "enabled": { + "label": "Aktiver GenAI-beskrivelser", + "description": "Aktiver eller deaktiver GenAI-genererte beskrivelser og sammendrag for inspeksjonselementer." + }, + "alerts": { + "label": "Aktiver GenAI for varsler", + "description": "Bruk GenAI til å generere beskrivelser for varslingselementer." + }, + "detections": { + "label": "Aktiver GenAI for deteksjoner", + "description": "Bruk GenAI til å generere beskrivelser for deteksjonselementer." 
+ }, + "image_source": { + "label": "Bildekilde for inspeksjon", + "description": "Kilde for bilder sendt til GenAI ('preview' eller 'recordings'); 'recordings' bruker bilder med høyere kvalitet, men flere tokens." + }, + "additional_concerns": { + "label": "Tilleggshensyn", + "description": "En liste over tilleggshensyn eller notater GenAI bør vurdere ved evaluering av aktivitet på dette kameraet." + }, + "debug_save_thumbnails": { + "label": "Lagre miniatyrbilder", + "description": "Lagre miniatyrbilder som sendes til GenAI-leverandøren for feilsøking og inspeksjon." + }, + "enabled_in_config": { + "label": "Opprinnelig GenAI-status", + "description": "Registrerer om GenAI-inspeksjon opprinnelig var aktivert i den statiske konfigurasjonen." + }, + "preferred_language": { + "label": "Foretrukket språk", + "description": "Foretrukket språk som skal etterspørres fra GenAI-leverandøren for genererte svar." + }, + "activity_context_prompt": { + "label": "Ledetekst for aktivitetskontekst", + "description": "Egendefinert ledetekst som beskriver hva som er og ikke er mistenkelig aktivitet for å gi kontekst til GenAI-sammendrag." + } + }, + "description": "Innstillinger for varsler, deteksjoner og GenAI-sammendrag for dette kameraet." + }, + "snapshots": { + "label": "Stillbilder", + "enabled": { + "label": "Aktiver stillbilder", + "description": "Aktiver eller deaktiver lagring av stillbilder for dette kameraet." + }, + "timestamp": { + "label": "Tidsstempel-overlegg", + "description": "Legg et tidsstempel over stillbilder fra API-et." + }, + "bounding_box": { + "label": "Overlegg for markeringsramme", + "description": "Tegn markeringsrammer for sporede objekter på stillbilder fra API-et." + }, + "crop": { + "label": "Beskjær stillbilde", + "description": "Beskjær stillbilder fra API-et til det detekterte objektets markeringsramme." + }, + "required_zones": { + "label": "Påkrevde soner", + "description": "Soner et objekt må tre inn i for at et stillbilde skal lagres." 
+ }, + "height": { + "label": "Høyde på stillbilde", + "description": "Høyde (piksler) som stillbilder fra API-et skal skaleres til; la stå tom for å beholde opprinnelig størrelse." + }, + "retain": { + "label": "Bevaring av stillbilder", + "description": "Bevaringsinnstillinger for stillbilder, inkludert standard antall dager og overstyringer per objekt.", + "default": { + "label": "Standard bevaring", + "description": "Standard antall dager stillbilder skal bevares." + }, + "mode": { + "label": "Bevaringsmodus", + "description": "Modus for bevaring: all (lagre alle segmenter), motion (lagre segmenter med bevegelse) eller active_objects (lagre segmenter med aktive objekter)." + }, + "objects": { + "label": "Objektbevaring", + "description": "Overstyringer per objekt for antall dager stillbilder skal bevares." + } + }, + "quality": { + "label": "Kvalitet på stillbilde", + "description": "Kodingskvalitet for lagrede stillbilder (0-100)." + }, + "description": "Innstillinger for API-genererte stillbilder av sporede objekter for dette kameraet." + }, + "timestamp_style": { + "label": "Stil for tidsstempel", + "position": { + "label": "Posisjon for tidsstempel", + "description": "Posisjonen til tidsstempelet på bildet (tl/tr/bl/br)." + }, + "format": { + "label": "Format for tidsstempel", + "description": "Formatstreng for dato og tid brukt for tidsstempler (Python datetime-formatkoder)." + }, + "color": { + "label": "Farge på tidsstempel", + "description": "RGB-fargeverdier for tidsstempelteksten (alle verdier 0-255).", + "red": { + "label": "Rød", + "description": "Rød komponent (0-255) for tidsstempelfarge." + }, + "green": { + "label": "Grønn", + "description": "Grønn komponent (0-255) for tidsstempelfarge." + }, + "blue": { + "label": "Blå", + "description": "Blå komponent (0-255) for tidsstempelfarge." + } + }, + "thickness": { + "label": "Tykkelse på tidsstempel", + "description": "Linjetykkelsen på tidsstempelteksten." 
+ }, + "effect": { + "label": "Effekt for tidsstempel", + "description": "Visuell effekt for tidsstempelteksten (none, solid, shadow)." + }, + "description": "Stilalternativer for tidsstempler i strømmen, brukt på opptak og stillbilder." + }, + "audio_transcription": { + "label": "Lydtranskripsjon", + "description": "Innstillinger for tale- og lydtranskripsjon i sanntid, brukt for hendelser og teksting.", + "live_enabled": { + "label": "Sanntidstranskripsjon", + "description": "Aktiver løpende transkripsjon av lyd etter hvert som den mottas." + }, + "enabled": { + "description": "Aktiver eller deaktiver manuelt utløst transkripsjon av lydhendelser.", + "label": "Aktiver transkripsjon" + }, + "enabled_in_config": { + "label": "Opprinnelig transkripsjonsstatus" + } + }, + "semantic_search": { + "label": "Semantisk søk", + "triggers": { + "label": "Utløsere", + "description": "Handlinger og kriterier for kameraspesifikke utløsere for semantisk søk.", + "friendly_name": { + "label": "Visningsnavn", + "description": "Valgfritt visningsnavn for denne utløseren i grensesnittet." + }, + "enabled": { + "label": "Aktiver denne utløseren", + "description": "Aktiver eller deaktiver denne utløseren for semantisk søk." + }, + "type": { + "label": "Utløsertype", + "description": "Type utløser: 'miniatyrbilde' (match mot bilde) eller 'beskrivelse' (match mot tekst)." + }, + "data": { + "label": "Utløserinnhold", + "description": "Tekstfrase eller miniatyrbilde-ID som skal matches mot sporede objekter." + }, + "threshold": { + "label": "Utløser-terskel", + "description": "Minimum likhetsscore (0-1) som kreves for å aktivere denne utløseren." + }, + "actions": { + "label": "Utløserhandlinger", + "description": "Liste over handlinger som skal utføres når utløseren matches (varsling, underetikett, egenskap)." + } + }, + "description": "Innstillinger for semantisk søk som bygger og søker i objekt-embeddings for å finne lignende elementer." 
+ }, + "face_recognition": { + "label": "Ansiktsgjenkjenning", + "enabled": { + "label": "Aktiver ansiktsgjenkjenning", + "description": "Aktiver eller deaktiver ansiktsgjenkjenning." + }, + "min_area": { + "label": "Minimum ansiktsareal", + "description": "Minimum areal (piksler) for en ansiktsboks før gjenkjenning forsøkes." + }, + "description": "Innstillinger for ansiktsdeteksjon og gjenkjenning for dette kameraet." + }, + "lpr": { + "label": "Gjenkjenning av kjennemerker", + "description": "Innstillinger for gjenkjenning av kjennemerker, inkludert deteksjonsterskler og kjente kjennemerker.", + "enabled": { + "label": "Aktiver kjennemerkegjenkjenning", + "description": "Aktiver eller deaktiver kjennemerkegjenkjenning på dette kameraet." + }, + "min_area": { + "label": "Minimum areal for kjennemerke", + "description": "Minimum areal (piksler) for et kjennemerke før gjenkjenning forsøkes." + }, + "enhancement": { + "label": "Forbedringsnivå", + "description": "Forbedringsnivå (0-10) som brukes på kjennemerkebeskjæringer før OCR; høyere verdier forbedrer ikke alltid resultatet, nivåer over 5 fungerer ofte kun på nattbilder og bør brukes med forsiktighet." + }, + "expire_time": { + "label": "Utløpstid (sekunder)", + "description": "Tid i sekunder før et ukjent kjennemerke foreldes fra sporingen (kun for dedikerte LPR-kameraer)." + } + }, + "profiles": { + "label": "Profiler", + "description": "Navngitte konfigurasjonsprofiler med delvise overstyringer som kan aktiveres i kjøretid." + }, + "onvif": { + "label": "ONVIF", + "description": "ONVIF-tilkobling og innstillinger for PTZ-autosporing for dette kameraet.", + "host": { + "label": "ONVIF-vert", + "description": "Vert (og valgfritt skjema) for ONVIF-tjenesten for dette kameraet." + }, + "port": { + "label": "ONVIF-port", + "description": "Portnummer for ONVIF-tjenesten." + }, + "user": { + "label": "ONVIF-brukernavn", + "description": "Brukernavn for ONVIF-autentisering; enkelte enheter krever admin-bruker for ONVIF." 
+ },
+ "password": {
+ "label": "ONVIF-passord",
+ "description": "Passord for ONVIF-autentisering."
+ },
+ "tls_insecure": {
+ "label": "Deaktiver TLS-verifisering",
+ "description": "Hopp over TLS-verifisering og deaktiver digest-autentisering for ONVIF (usikre; bruk kun i trygge nettverk)."
+ },
+ "profile": {
+ "label": "ONVIF-profil",
+ "description": "Spesifikk ONVIF-medieprofil for PTZ-kontroll. Hvis ikke satt, velges den første profilen med gyldig PTZ-konfigurasjon automatisk."
+ },
+ "autotracking": {
+ "label": "Autosporing",
+ "description": "Spor bevegelige objekter automatisk og hold dem sentrert ved bruk av PTZ-bevegelser.",
+ "enabled": {
+ "label": "Aktiver autosporing",
+ "description": "Aktiver eller deaktiver automatisk PTZ-sporing av detekterte objekter."
+ },
+ "calibrate_on_startup": {
+ "label": "Kalibrer ved start",
+ "description": "Mål PTZ-motorhastigheter ved oppstart for å forbedre sporingsnøyaktighet. Frigate vil oppdatere konfigurasjonen etter kalibrering."
+ },
+ "zooming": {
+ "label": "Zoom-modus",
+ "description": "Kontroller zoom-oppførsel: deaktivert, absolutt (mest kompatibel) eller relativ."
+ },
+ "zoom_factor": {
+ "label": "Zoom-faktor",
+ "description": "Kontrollerer zoom-nivå på sporede objekter. Lavere verdier gir mer oversikt; høyere verdier zoomer tettere inn. Verdier mellom 0.1 og 0.75."
+ },
+ "track": {
+ "label": "Sporede objekter",
+ "description": "Liste over objekttyper som skal utløse autosporing."
+ },
+ "required_zones": {
+ "label": "Påkrevde soner",
+ "description": "Objekter må tre inn i en av disse sonene før autosporing starter."
+ },
+ "return_preset": {
+ "label": "Forhåndsinnstilling for retur",
+ "description": "Navn på ONVIF-forhåndsinnstilling kameraet skal returnere til når sporingen avsluttes."
+ },
+ "timeout": {
+ "label": "Tidsavbrudd for retur",
+ "description": "Antall sekunder å vente etter mistet sporing før kameraet returnerer til forhåndsinnstilt posisjon." 
+ },
+ "movement_weights": {
+ "label": "Bevegelsesvekting",
+ "description": "Kalibreringsverdier som genereres automatisk. Ikke endre manuelt."
+ },
+ "enabled_in_config": {
+ "label": "Opprinnelig autosporingsstatus",
+ "description": "Internt felt for å spore om autosporing var aktivert i konfigurasjonen."
+ }
+ },
+ "ignore_time_mismatch": {
+ "label": "Ignorer tidsavvik",
+ "description": "Ignorer forskjeller i tidssynkronisering mellom kamera og server ved ONVIF-kommunikasjon."
+ }
+ },
+ "best_image_timeout": {
+ "description": "Hvor lenge man skal vente på bildet med høyest konfidensscore.",
+ "label": "Tidsavbrudd for beste bilde"
+ },
+ "enabled": {
+ "description": "Aktivert",
+ "label": "Aktivert"
+ },
+ "enabled_in_config": {
+ "description": "Bevar opprinnelig status for kameraet.",
+ "label": "Opprinnelig kamerastatus"
+ },
+ "friendly_name": {
+ "description": "Kameraets visningsnavn i Frigate-grensesnittet",
+ "label": "Visningsnavn"
+ },
+ "label": "Kamerakonfigurasjon",
+ "name": {
+ "description": "Kameranavn er påkrevd",
+ "label": "Kameranavn"
+ },
+ "type": {
+ "description": "Kameratype",
+ "label": "Kameratype"
+ },
+ "ui": {
+ "dashboard": {
+ "description": "Velg om dette kameraet skal være synlig i Frigate-grensesnittet. Deaktivering krever manuell redigering av konfigurasjonen for å vise kameraet igjen.",
+ "label": "Vis i grensesnitt"
+ },
+ "description": "Sortering og synlighet for kameraet i grensesnittet. Påvirker standard dashbord. For mer detaljert kontroll, bruk kameragrupper.",
+ "label": "Brukergrensesnitt for kamera",
+ "order": {
+ "description": "Numerisk rekkefølge for sortering av kameraet i grensesnittet; høyere tall vises senere.",
+ "label": "Rekkefølge i UI"
+ }
+ },
+ "webui_url": {
+ "description": "URL for å besøke kameraet direkte fra systemsiden",
+ "label": "Kamera-URL"
+ },
+ "zones": {
+ "coordinates": {
+ "label": "Koordinater",
+ "description": "Polygonkoordinater som definerer soneområdet. 
Kan være en kommaseparert streng eller en liste med koordinatstrenger. Koordinater bør være relative (0–1) eller absolutte (legacy)."
+ },
+ "description": "Soner lar deg definere spesifikke områder i bildet for å avgjøre om et objekt befinner seg i et bestemt område.",
+ "distances": {
+ "label": "Faktiske avstander",
+ "description": "Valgfrie faktiske avstander for hver side av sonens firkant, brukt til beregning av hastighet eller avstand. Må ha nøyaktig 4 verdier hvis spesifisert."
+ },
+ "enabled": {
+ "description": "Aktiver eller deaktiver denne sonen. Deaktiverte soner ignoreres i kjøretid.",
+ "label": "Aktivert"
+ },
+ "enabled_in_config": {
+ "label": "Bevar opprinnelig status for sonen."
+ },
+ "filters": {
+ "description": "Filtre for objekter i denne sonen. Brukes for å redusere falske positive eller begrense hvilke objekter som regnes som tilstede.",
+ "label": "Sonefiltre",
+ "mask": {
+ "description": "Polygonkoordinater som definerer hvor dette filteret gjelder innenfor bildet.",
+ "label": "Filtermaske"
+ },
+ "max_area": {
+ "label": "Maksimum objektområde",
+ "description": "Maksimalt areal for markeringsramme (piksler eller prosent) tillatt for denne objekttypen. Kan oppgis i piksler (heltall) eller prosent (desimaltall mellom 0,000001 og 0,99)."
+ },
+ "max_ratio": {
+ "description": "Maksimalt forhold mellom bredde og høyde tillatt for at markeringsrammen skal kvalifisere.",
+ "label": "Maksimum størrelsesforhold"
+ },
+ "min_area": {
+ "label": "Minimum objektområde",
+ "description": "Minimum areal for markeringsramme (piksler eller prosent) som kreves for denne objekttypen." 
+ }, + "min_ratio": { + "description": "Minimum forhold mellom bredde og høyde som kreves for at markeringsrammen skal kvalifisere.", + "label": "Minimum størrelsesforhold" + }, + "min_score": { + "description": "Minimum deteksjonskonfidens for et enkeltbilde som kreves for at objektet skal telles med.", + "label": "Minimum konfidens" + }, + "raw_mask": { + "label": "Råmaske" + }, + "threshold": { + "description": "Gjennomsnittlig terskel for deteksjonskonfidens som kreves for at objektet skal anses som en ekte positiv.", + "label": "Konfidensterskel" + } + }, + "friendly_name": { + "description": "Et brukervennlig navn på sonen som vises i grensesnittet. Hvis ikke satt, brukes en formatert versjon av sonenavnet.", + "label": "Sonenavn" + }, + "inertia": { + "description": "Antall påfølgende bilder et objekt må detekteres i sonen før det regnes som tilstede. Hjelper med å filtrere ut kortvarige feildeteksjoner.", + "label": "Treghetsbilder" + }, + "label": "Soner", + "loitering_time": { + "description": "Antall sekunder et objekt må oppholde seg i sonen for å bli regnet som uønsket opphold (\"loitering\"). Sett til 0 for å deaktivere.", + "label": "Oppholdssekunder" + }, + "objects": { + "label": "Utløsende objekter", + "description": "Liste over objekttyper (fra etikettkartet) som kan utløse denne sonen. Kan være en enkeltstreng eller en liste med strenger. Hvis feltet er tomt, blir alle objekter vurdert." + }, + "speed_threshold": { + "label": "Minimum hastighet", + "description": "Minimumshastighet (i faktiske enheter hvis avstander er satt) som kreves for at et objekt skal regnes som tilstede i sonen. Brukes for hastighets-baserte soneutløsere." 
+ } + } +} diff --git a/web/public/locales/nb-NO/config/global.json b/web/public/locales/nb-NO/config/global.json index 0967ef424..d12306320 100644 --- a/web/public/locales/nb-NO/config/global.json +++ b/web/public/locales/nb-NO/config/global.json @@ -1 +1,1592 @@ -{} +{ + "version": { + "label": "Nåværende konfigurasjonsversjon", + "description": "Numerisk eller tekstbasert versjon av den aktive konfigurasjonen for å hjelpe med å oppdage migreringer eller formatendringer." + }, + "safe_mode": { + "label": "Trygg modus", + "description": "Når aktivert, start Frigate i trygg modus med reduserte funksjoner for feilsøking." + }, + "environment_vars": { + "label": "Miljøvariabler", + "description": "Nøkkel-/verdipar for miljøvariabler som skal settes for Frigate-prosessen i Home Assistant OS. Brukere uten HAOS må bruke miljøvariabelkonfigurasjon i Docker i stedet." + }, + "logger": { + "label": "Logging", + "description": "Kontrollerer standard loggdetaljnivå og overstyringer av loggnivå per komponent.", + "default": { + "label": "Loggnivå", + "description": "Standard globale loggedetaljer (debug, info, warning, error)." + }, + "logs": { + "label": "Loggnivå per prosess", + "description": "Overstyringer av loggnivå per komponent for å øke eller redusere detaljrikdommen for spesifikke moduler." + } + }, + "auth": { + "label": "Autentisering", + "description": "Innstillinger for autentisering og økter, inkludert alternativer for informasjonskapsler (cookies) og hastighetsbegrensning.", + "enabled": { + "label": "Aktiver autentisering", + "description": "Aktiver innebygd autentisering for Frigate-grensesnittet." + }, + "reset_admin_password": { + "label": "Nullstill admin-passord", + "description": "Hvis sann, nullstill admin-brukerens passord ved oppstart og skriv ut det nye passordet i loggen." 
+ }, + "cookie_name": { + "label": "Navn på JWT-informasjonskapsel", + "description": "Navnet på informasjonskapselen som brukes til å lagre JWT-tokenet for innebygd autentisering." + }, + "cookie_secure": { + "label": "Flagg for sikker informasjonskapsel", + "description": "Sett \"secure\"-flagget på autentiseringskapselen; bør være sann ved bruk av TLS." + }, + "session_length": { + "label": "Øktvarighet", + "description": "Varighet på økten i sekunder for JWT-baserte økter." + }, + "refresh_time": { + "label": "Vindu for øktfornyelse", + "description": "Når en økt har så mange sekunder igjen før den utløper, fornyes den til full lengde." + }, + "failed_login_rate_limit": { + "label": "Begrensninger for mislykkede pålogginger", + "description": "Regler for hastighetsbegrensning for mislykkede påloggingsforsøk for å redusere brute-force-angrep." + }, + "trusted_proxies": { + "label": "Betrodde proxyer", + "description": "Liste over betrodde proxy-IP-er som brukes ved fastsettelse av klient-IP for hastighetsbegrensning." + }, + "hash_iterations": { + "label": "Hash-iterasjoner", + "description": "Antall PBKDF2-SHA256-iterasjoner som skal brukes ved hashing av brukerpassord." + }, + "roles": { + "label": "Rolletilordninger", + "description": "Tilordne roller til kameralister. En tom liste gir tilgang til alle kameraer for rollen." + }, + "admin_first_time_login": { + "label": "Flagg for førstegangs admin-innlogging", + "description": "Når sann, kan grensesnittet vise en hjelpelenke på påloggingssiden som informerer brukere om hvordan de logger inn etter en nullstilling av admin-passordet. " + } + }, + "database": { + "label": "Database", + "description": "Innstillinger for SQLite-databasen som brukes av Frigate til å lagre sporede objekter og metadata for opptak.", + "path": { + "label": "Sti til database", + "description": "Sti i filsystemet der Frigates SQLite-databasefil vil bli lagret." 
+ } + }, + "go2rtc": { + "label": "go2rtc", + "description": "Innstillinger for den integrerte go2rtc-tjenesten for videreformidling og oversettelse av direktestrømmer." + }, + "mqtt": { + "label": "MQTT", + "description": "Innstillinger for tilkobling og publisering av telemetri, stillbilder og hendelsesdetaljer til en MQTT-megler.", + "enabled": { + "label": "Aktiver MQTT", + "description": "Aktiver eller deaktiver MQTT-integrasjon for status, hendelser og stillbilder." + }, + "host": { + "label": "MQTT-vert", + "description": "Vertsnavn eller IP-adresse til MQTT-megleren." + }, + "port": { + "label": "MQTT-port", + "description": "Port til MQTT-megleren (vanligvis 1883 for vanlig MQTT)." + }, + "topic_prefix": { + "label": "Emne-prefiks", + "description": "MQTT-emneprefiks for alle Frigate-emner; må være unikt hvis man kjører flere instanser." + }, + "client_id": { + "label": "Klient-ID", + "description": "Klientidentifikator som brukes ved tilkobling til MQTT-megleren; bør være unik per instans." + }, + "stats_interval": { + "label": "Statistikkintervall", + "description": "Intervall i sekunder for publisering av system- og kamerastatistikk til MQTT." + }, + "user": { + "label": "MQTT-brukernavn", + "description": "Valgfritt MQTT-brukernavn; kan oppgis via miljøvariabler eller hemmeligheter (secrets)." + }, + "password": { + "label": "MQTT-passord", + "description": "Valgfritt MQTT-passord; kan oppgis via miljøvariabler eller hemmeligheter (secrets)." + }, + "tls_ca_certs": { + "label": "TLS CA-sertifikater", + "description": "Sti til CA-sertifikat for TLS-tilkoblinger til megleren (for selvsignerte sertifikater)." + }, + "tls_client_cert": { + "label": "Klientsertifikat", + "description": "Sti til klientsertifikat for gjensidig TLS-autentisering; ikke sett brukernavn/passord når klientsertifikater brukes." + }, + "tls_client_key": { + "label": "Klientnøkkel", + "description": "Sti til privat nøkkel for klientsertifikatet." 
+ }, + "tls_insecure": { + "label": "Usikker TLS", + "description": "Tillat usikre TLS-tilkoblinger ved å hoppe over verifisering av vertsnavn (ikke anbefalt)." + }, + "qos": { + "label": "MQTT QoS", + "description": "Quality of Service-nivå for MQTT-publiseringer/abonnementer (0, 1 eller 2)." + } + }, + "notifications": { + "label": "Varslinger", + "description": "Innstillinger for å aktivere og kontrollere varslinger for alle kameraer; kan overstyres per kamera.", + "enabled": { + "label": "Aktiver varslinger", + "description": "Aktiver eller deaktiver varslinger for alle kameraer; kan overstyres per kamera." + }, + "email": { + "label": "E-postadresse for varsling", + "description": "E-postadresse som brukes for push-varslinger eller som kreves av visse varslingstjenester." + }, + "cooldown": { + "label": "Nedkjølingsperiode", + "description": "Nedkjøling (sekunder) mellom varslinger for å unngå å spamme mottakere." + }, + "enabled_in_config": { + "label": "Opprinnelig varslingsstatus", + "description": "Indikerer om varslinger var aktivert i den opprinnelige statiske konfigurasjonen." + } + }, + "networking": { + "label": "Nettverk", + "description": "Nettverksrelaterte innstillinger, som aktivering av IPv6 for Frigate-endepunkter.", + "ipv6": { + "label": "IPv6-konfigurasjon", + "description": "IPv6-spesifikke innstillinger for Frigates nettverkstjenester.", + "enabled": { + "label": "Aktiver IPv6", + "description": "Aktiver IPv6-støtte for Frigate-tjenester (API og brukergrensesnitt) der det er aktuelt." + } + }, + "listen": { + "label": "Konfigurasjon for lytteporter", + "description": "Konfigurasjon for interne og eksterne lytteporter. Dette er for avanserte brukere. For de fleste brukstilfeller anbefales det å endre port-seksjonen i din Docker compose-fil i stedet.", + "internal": { + "label": "Intern port", + "description": "Intern lytteport for Frigate (standard 5000)." 
+ }, + "external": { + "label": "Ekstern port", + "description": "Ekstern lytteport for Frigate (standard 8971)." + } + } + }, + "proxy": { + "label": "Proxy", + "description": "Innstillinger for å integrere Frigate bak en reverse proxy som sender videre hoder for autentiserte brukere.", + "header_map": { + "label": "Tilordning av hoder (Header mapping)", + "description": "Tilordne innkommende proxy-hoder til Frigates bruker- og rollefelt for proxy-basert autentisering.", + "user": { + "label": "Bruker-hode (User header)", + "description": "Hode (header) som inneholder det autentiserte brukernavnet fra oppstrøms proxy." + }, + "role": { + "label": "Rolle-hode (Role header)", + "description": "Hode (header) som inneholder den autentiserte brukerens rolle eller grupper fra oppstrøms proxy." + }, + "role_map": { + "label": "Rolletilordning", + "description": "Tilordne gruppeverdier fra oppstrøms proxy til Frigate-roller (for eksempel tilordne admin-grupper til admin-rollen)." + } + }, + "logout_url": { + "label": "Utloggings-URL", + "description": "URL som brukere skal videresendes til ved utlogging via proxyen." + }, + "auth_secret": { + "label": "Proxy-hemmelighet", + "description": "Valgfri hemmelighet som sjekkes mot X-Proxy-Secret-hodet for å verifisere betrodde proxyer." + }, + "default_role": { + "label": "Standardrolle", + "description": "Standardrolle tildelt proxy-autentiserte brukere når ingen rolletilordning gjelder (admin eller viewer)." + }, + "separator": { + "label": "Skilletegn", + "description": "Tegn som brukes til å dele opp flere verdier i proxy-hoder." + } + }, + "telemetry": { + "label": "Telemetri", + "description": "Alternativer for systemtelemetri og statistikk, inkludert overvåking av GPU og nettverksbåndbredde.", + "network_interfaces": { + "label": "Nettverksgrensesnitt", + "description": "Liste over prefikser for navn på nettverksgrensesnitt som skal overvåkes for båndbreddestatistikk." 
+ }, + "stats": { + "label": "Systemstatistikk", + "description": "Alternativer for å aktivere/deaktivere innsamling av ulike system- og GPU-statistikker.", + "amd_gpu_stats": { + "label": "AMD GPU-statistikk", + "description": "Aktiver innsamling av AMD GPU-statistikk hvis en AMD GPU er til stede." + }, + "intel_gpu_stats": { + "label": "Intel GPU-statistikk", + "description": "Aktiver innsamling av Intel GPU-statistikk hvis en Intel GPU er til stede." + }, + "network_bandwidth": { + "label": "Nettverksbåndbredde", + "description": "Aktiver overvåking av nettverksbåndbredde per prosess for kamera-ffmpeg-prosesser og detektorer." + }, + "intel_gpu_device": { + "label": "SR-IOV-enhet", + "description": "Enhetsidentifikator som brukes når Intel-GPU-er behandles som SR-IOV for å korrigere GPU-statistikk." + } + }, + "version_check": { + "label": "Versjonskontroll", + "description": "Aktiver en utgående sjekk for å oppdage om en nyere versjon av Frigate er tilgjengelig." + } + }, + "tls": { + "label": "TLS", + "description": "TLS-innstillinger for Frigates web-endepunkter (port 8971).", + "enabled": { + "label": "Aktiver TLS", + "description": "Aktiver TLS for Frigates web-grensesnitt og API på den konfigurerte TLS-porten." + } + }, + "ui": { + "label": "Brukergrensesnitt", + "description": "Innstillinger for brukergrensesnitt, som tidssone, formatering av tid/dato og enheter.", + "timezone": { + "label": "Tidssone", + "description": "Valgfri tidssone som skal vises i grensesnittet (standard er nettleserens lokale tid)." + }, + "time_format": { + "label": "Tidsformat", + "description": "Tidsformat som skal brukes i grensesnittet (nettleser, 12-timers eller 24-timers)." + }, + "date_style": { + "label": "Datostil", + "description": "Datostil som skal brukes i grensesnittet (full, lang, middels, kort)." + }, + "time_style": { + "label": "Tidsstil", + "description": "Tidsstil som skal brukes i grensesnittet (full, lang, middels, kort)." 
+ }, + "unit_system": { + "label": "Enhetssystem", + "description": "Enhetssystem for visning (metrisk eller imperisk) brukt i grensesnittet og MQTT." + } + }, + "detectors": { + "label": "Maskinvare for detektor", + "description": "Konfigurasjon for objektdetektorer (CPU, GPU, ONNX-bakender) og eventuelle detektorspesifikke modellinnstillinger.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detektorspesifikk modellkonfigurasjon", + "description": "Detektorspesifikke konfigurasjonsalternativer for modell (sti, inndatastørrelse, osv.).", + "path": { + "label": "Sti til egendefinert objektdetektormodell", + "description": "Sti til en egendefinert deteksjonsmodellfil (eller plus:// for Frigate+-modeller)." + }, + "labelmap_path": { + "label": "Etikettkart (labelmap) for egendefinert detektor", + "description": "Sti til en labelmap-fil som kobler numeriske klasser til tekstetiketter for detektoren." + }, + "width": { + "label": "Inndatabredde for deteksjonsmodell", + "description": "Bredden på modellens inndata-tensor i piksler." + }, + "height": { + "label": "Inndatahøyde for deteksjonsmodell", + "description": "Høyden på modellens inndata-tensor i piksler." + }, + "labelmap": { + "label": "Tilpasning av etikettkart (labelmap)", + "description": "Overstyringer eller tilordninger som skal flettes inn i standard etikettkart." + }, + "attributes_map": { + "label": "Kartlegging av objektetiketter til deres egenskaper", + "description": "Kobling fra objektetiketter til egenskapsetiketter brukt for metadata (f.eks. 'bil' -> ['kjennemerke'])." + }, + "input_tensor": { + "label": "Modellens inndata-tensorform", + "description": "Tensorformatet som forventes av modellen: 'nhwc' eller 'nchw'." + }, + "input_pixel_format": { + "label": "Pikselformat for modellens inndata", + "description": "Pikselfargerom som forventes av modellen: 'rgb', 'bgr' eller 'yuv'." 
+ },
+ "input_dtype": {
+ "label": "Datatype for modellens inndata",
+ "description": "Datatypen til modellens inndata-tensor (for eksempel 'float32')."
+ },
+ "model_type": {
+ "label": "Type objektdeteksjonsmodell",
+ "description": "Arkitekturtype for detektormodellen (ssd, yolox, yolonas) brukt av enkelte detektorer for optimalisering."
+ }
+ },
+ "model_path": {
+ "label": "Detektorspesifikk modellsti",
+ "description": "Filsti til detektormodellens binærfil hvis påkrevd av valgt detektor."
+ },
+ "axengine": {
+ "label": "AXEngine NPU",
+ "description": "AXERA AX650N/AX8850N NPU-detektor som kjører kompilerte .axmodel-filer via AXEngine-miljøet."
+ },
+ "cpu": {
+ "label": "CPU",
+ "description": "CPU TFLite-detektor som kjører TensorFlow Lite-modeller på vertens CPU uten maskinvareakselerasjon. Ikke anbefalt.",
+ "num_threads": {
+ "label": "Antall deteksjonstråder",
+ "description": "Antallet tråder som brukes til CPU-basert inferens."
+ }
+ },
+ "deepstack": {
+ "label": "DeepStack",
+ "description": "DeepStack/CodeProject.AI-detektor som sender bilder til et eksternt DeepStack HTTP-API for inferens. Ikke anbefalt.",
+ "api_url": {
+ "label": "URL for DeepStack-API",
+ "description": "URL-adressen til DeepStack-API-et."
+ },
+ "api_timeout": {
+ "label": "Tidsavbrudd for DeepStack-API (i sekunder)",
+ "description": "Maksimal tid tillatt for en forespørsel til DeepStack-API-et."
+ },
+ "api_key": {
+ "label": "DeepStack-API-nøkkel (hvis påkrevd)",
+ "description": "Valgfri API-nøkkel for autentiserte DeepStack-tjenester."
+ }
+ },
+ "degirum": {
+ "label": "DeGirum",
+ "description": "DeGirum-detektor for kjøring av modeller via DeGirum-skyen eller lokale inferens-tjenester.",
+ "location": {
+ "label": "Inferenslokasjon",
+ "description": "Lokasjonen til DeGirum-inferensmotoren (f.eks. '@cloud', '127.0.0.1')."
+ },
+ "zoo": {
+ "label": "Modell-zoo",
+ "description": "Sti eller URL til DeGirum modell-zoo." 
+ }, + "token": { + "label": "DeGirum-skytoken", + "description": "Token for tilgang til DeGirum-skyen." + } + }, + "edgetpu": { + "label": "EdgeTPU", + "description": "EdgeTPU-detektor som kjører TensorFlow Lite-modeller kompilert for Coral EdgeTPU ved bruk av EdgeTPU-delegaten.", + "device": { + "label": "Enhetstype", + "description": "Enheten som skal brukes til EdgeTPU-inferens (f.eks. 'usb', 'pci')." + } + }, + "hailo8l": { + "label": "Hailo-8/Hailo-8L", + "description": "Hailo-8/Hailo-8L-detektor som bruker HEF-modeller og HailoRT SDK for inferens på Hailo-maskinvare.", + "device": { + "label": "Enhetstype", + "description": "Enheten som skal brukes til Hailo-inferens (f.eks. 'PCIe', 'M.2')." + } + }, + "memryx": { + "label": "MemryX", + "description": "MemryX MX3-detektor som kjører kompilerte DFP-modeller på MemryX-akseleratorer.", + "device": { + "label": "Enhetssti", + "description": "Enheten som skal brukes for MemryX-inferens (f.eks. 'PCIe')." + } + }, + "onnx": { + "label": "ONNX", + "description": "ONNX-detektor for kjøring av ONNX-modeller; bruker tilgjengelige akselerasjonsbakender (CUDA/ROCm/OpenVINO) når de er tilgjengelige.", + "device": { + "label": "Enhetstype", + "description": "Enheten som skal brukes for ONNX-inferens (f.eks. 'AUTO', 'CPU', 'GPU')." + } + }, + "openvino": { + "label": "OpenVINO", + "description": "OpenVINO-detektor for AMD- og Intel-CPU-er, Intel-GPU-er og Intel VPU-maskinvare.", + "device": { + "label": "Enhetstype", + "description": "Enheten som skal brukes for OpenVINO-inferens (f.eks. 'CPU', 'GPU', 'NPU')." + } + }, + "rknn": { + "label": "RKNN", + "description": "RKNN-detektor for Rockchip NPU-er; kjører kompilerte RKNN-modeller på Rockchip-maskinvare.", + "num_cores": { + "label": "Antall NPU-kjerner som skal brukes.", + "description": "Antall NPU-kjerner som skal brukes (0 for auto)." 
+ }
+ },
+ "synaptics": {
+ "label": "Synaptics",
+ "description": "Synaptics NPU-detektor for modeller i .synap-format ved bruk av Synap SDK på Synaptics-maskinvare."
+ },
+ "teflon_tfl": {
+ "label": "Teflon",
+ "description": "Teflon-delegatdetektor for TFLite som bruker Mesa Teflon-delegatbiblioteket for å akselerere inferens på støttede GPU-er."
+ },
+ "tensorrt": {
+ "label": "TensorRT",
+ "description": "TensorRT-detektor for Nvidia Jetson-enheter som bruker serialiserte TensorRT-motorer for akselerert inferens.",
+ "device": {
+ "label": "GPU-enhetsindeks",
+ "description": "GPU-enhetsindeksen som skal brukes."
+ }
+ },
+ "zmq": {
+ "label": "ZMQ IPC",
+ "description": "ZMQ IPC-detektor som flytter inferensprosesser til en ekstern prosess via et ZeroMQ IPC-endepunkt.",
+ "endpoint": {
+ "label": "ZMQ IPC-endepunkt",
+ "description": "ZMQ-endepunktet som skal tilkobles."
+ },
+ "request_timeout_ms": {
+ "label": "Tidsavbrudd for ZMQ-forespørsel i millisekunder",
+ "description": "Tidsavbrudd for ZMQ-forespørsler i millisekunder."
+ },
+ "linger_ms": {
+ "label": "ZMQ-sokkel ventetid i millisekunder",
+ "description": "Sokkelens ventetid i millisekunder."
+ }
+ }
+ },
+ "model": {
+ "label": "Deteksjonsmodell",
+ "description": "Innstillinger for å konfigurere en egendefinert objektdeteksjonsmodell og dens inndataform.",
+ "path": {
+ "label": "Sti til egendefinert objektdeteksjonsmodell",
+ "description": "Sti til en egendefinert deteksjonsmodellfil (eller plus:// for Frigate+-modeller)."
+ },
+ "labelmap_path": {
+ "label": "Etikettkart (labelmap) for egendefinert detektor",
+ "description": "Sti til en labelmap-fil som kobler numeriske klasser til tekstetiketter for detektoren."
+ },
+ "width": {
+ "label": "Inndatabredde for deteksjonsmodell",
+ "description": "Bredden på modellens inndata-tensor i piksler."
+ },
+ "height": {
+ "label": "Inndatahøyde for deteksjonsmodell",
+ "description": "Høyden på modellens inndata-tensor i piksler." 
+ }, + "labelmap": { + "label": "Tilpasning av etikettkart (labelmap)", + "description": "Overstyringer eller tilordninger som skal flettes inn i standard etikettkart." + }, + "attributes_map": { + "label": "Kartlegging av objektetiketter til deres egenskaper", + "description": "Kobling fra objektetiketter til egenskapsetiketter brukt for metadata (f.eks. 'bil' -> ['kjennemerke'])." + }, + "input_tensor": { + "label": "Modellens inndata-tensorform", + "description": "Tensorformatet som forventes av modellen: 'nhwc' eller 'nchw'." + }, + "input_pixel_format": { + "label": "Pikselformat for modellens inndata", + "description": "Pikselfargerom som forventes av modellen: 'rgb', 'bgr' eller 'yuv'." + }, + "input_dtype": { + "label": "Datatype for modellens inndata", + "description": "Datatypen til modellens inndata-tensor (for eksempel 'float32')." + }, + "model_type": { + "label": "Type objektdeteksjonsmodell", + "description": "Arkitekturtype for detektormodellen (ssd, yolox, yolonas) brukt av enkelte detektorer for optimalisering." + } + }, + "genai": { + "label": "Konfigurasjon for generativ AI", + "description": "Innstillinger for integrerte generativ AI-leverandører brukt til å generere objektbeskrivelser og inspeksjonssammendrag.", + "api_key": { + "label": "API-nøkkel", + "description": "API-nøkkel som kreves av enkelte leverandører (kan også settes via miljøvariabler)." + }, + "base_url": { + "label": "Basis-URL", + "description": "Basis-URL for selvhostede eller kompatible leverandører (for eksempel en Ollama-instans)." + }, + "model": { + "label": "Modell", + "description": "Modellen som skal brukes fra leverandøren for å generere beskrivelser eller sammendrag." + }, + "provider": { + "label": "Leverandør", + "description": "GenAI-leverandøren som skal brukes (for eksempel: ollama, gemini, openai)." + }, + "roles": { + "label": "Roller", + "description": "GenAI-roller (verktøy, bildeforståelse/syn, vektorrepresentasjoner); én leverandør per rolle." 
+ },
+ "provider_options": {
+ "label": "Leverandøralternativer",
+ "description": "Ekstra leverandørspesifikke alternativer som sendes til GenAI-klienten."
+ },
+ "runtime_options": {
+ "label": "Kjøretidsalternativer",
+ "description": "Kjøretidsalternativer som sendes til leverandøren for hver inferensforespørsel."
+ }
+ },
+ "audio": {
+ "label": "Lydhendelser",
+ "description": "Innstillinger for lydbasert hendelsesdeteksjon for alle kameraer; kan overstyres per kamera.",
+ "enabled": {
+ "label": "Aktiver lyddeteksjon",
+ "description": "Aktiver eller deaktiver lydhendelsesdeteksjon for alle kameraer; kan overstyres per kamera."
+ },
+ "max_not_heard": {
+ "label": "Tidsavbrudd for avslutning",
+ "description": "Antall sekunder uten den konfigurerte lydtypen før lydhendelsen avsluttes."
+ },
+ "min_volume": {
+ "label": "Minimumsvolum",
+ "description": "Minimum terskel for RMS-volum som kreves for å kjøre lyddeteksjon; lavere verdier øker følsomheten (f.eks. 200 høy, 500 middels, 1000 lav)."
+ },
+ "listen": {
+ "label": "Lyttetyper",
+ "description": "Liste over typer lydhendelser som skal detekteres (f.eks. bjeff, brannalarm, skrik, tale, rop)."
+ },
+ "filters": {
+ "label": "Lydfiltre",
+ "description": "Filterinnstillinger per lydtype, som konfidensterskler for å redusere falske positive."
+ },
+ "enabled_in_config": {
+ "label": "Opprinnelig lydstatus",
+ "description": "Indikerer om lyddeteksjon opprinnelig var aktivert i den statiske konfigurasjonsfilen."
+ },
+ "num_threads": {
+ "label": "Deteksjonstråder",
+ "description": "Antall tråder som skal brukes til prosessering av lyddeteksjon."
+ }
+ },
+ "birdseye": {
+ "label": "Fugleperspektiv",
+ "description": "Innstillinger for Fugleperspektiv (Birdseye) som setter sammen flere kamerastrømmer til ett felles oppsett.",
+ "enabled": {
+ "label": "Aktiver Fugleperspektiv",
+ "description": "Aktiver eller deaktiver funksjonen for Fugleperspektiv." 
+ }, + "mode": { + "label": "Sporingsmodus", + "description": "Modus for å inkludere kameraer i Fugleperspektiv: 'objects', 'motion' eller 'continuous'." + }, + "restream": { + "label": "Videreformidle RTSP", + "description": "Videreformidle Fugleperspektiv-utdataen som en RTSP-strøm; aktivering av dette vil holde Fugleperspektiv kjørende kontinuerlig." + }, + "width": { + "label": "Bredde", + "description": "Utgangsbredde (piksler) for det sammensatte Fugleperspektiv-bildet." + }, + "height": { + "label": "Høyde", + "description": "Utgangshøyde (piksler) for det sammensatte Fugleperspektiv-bildet." + }, + "quality": { + "label": "Kodingskvalitet", + "description": "Kodingskvalitet for mpeg1-strømmen i Fugleperspektiv (1 høyest kvalitet, 31 lavest)." + }, + "inactivity_threshold": { + "label": "Terskel for inaktivitet", + "description": "Sekunder med inaktivitet før et kamera slutter å vises i Fugleperspektiv." + }, + "layout": { + "label": "Oppsett", + "description": "Oppsettsalternativer for Fugleperspektiv-sammensetningen.", + "scaling_factor": { + "label": "Skaleringsfaktor", + "description": "Skaleringsfaktor brukt av oppsettskalkulatoren (intervall 1.0 til 5.0)." + }, + "max_cameras": { + "label": "Maks antall kameraer", + "description": "Maksimalt antall kameraer som vises samtidig i Fugleperspektiv; viser de nyeste kameraene." + } + }, + "idle_heartbeat_fps": { + "label": "FPS for hjerteslag ved inaktivitet", + "description": "Bilder per sekund (FPS) for å sende det siste sammensatte Fugleperspektiv-bildet på nytt ved inaktivitet; sett til 0 for å deaktivere." + }, + "order": { + "label": "Posisjon", + "description": "Numerisk posisjon som kontrollerer kameraenes rekkefølge i Fugleperspektiv-oppsettet." 
+ } + }, + "detect": { + "label": "Objektdeteksjon", + "description": "Innstillinger for deteksjonsrollen brukt til å kjøre objektdeteksjon og starte sporing (trackere).", + "enabled": { + "label": "Aktiver objektdeteksjon", + "description": "Aktiver eller deaktiver objektdeteksjon for alle kameraer; kan overstyres per kamera." + }, + "height": { + "label": "Deteksjonshøyde", + "description": "Høyde (piksler) på bilder brukt for deteksjonsstrømmen; la stå tom for å bruke strømmens opprinnelige oppløsning." + }, + "width": { + "label": "Deteksjonsbredde", + "description": "Bredde (piksler) på bilder brukt for deteksjonsstrømmen; la stå tom for å bruke strømmens opprinnelige oppløsning." + }, + "fps": { + "label": "Deteksjons-FPS", + "description": "Ønsket antall bilder per sekund (FPS) for deteksjon; lavere verdier reduserer CPU-bruk (anbefalt verdi er 5, sett kun høyere – maks 10 – ved sporing av objekter i svært høy fart)." + }, + "min_initialized": { + "label": "Minimum initialiseringsbilder", + "description": "Antall påfølgende deteksjonstreff som kreves før et sporet objekt opprettes. Øk for å redusere falske initialiseringer. Standardverdi er FPS delt på 2." + }, + "max_disappeared": { + "label": "Maks bilder borte", + "description": "Antall bilder uten deteksjon før et sporet objekt anses som borte." + }, + "stationary": { + "label": "Konfigurasjon for stasjonære objekter", + "description": "Innstillinger for å detektere og håndtere objekter som forblir i ro over en viss tid.", + "interval": { + "label": "Intervall for stasjonære objekter", + "description": "Hvor ofte (i antall bilder) det skal kjøres en deteksjonssjekk for å bekrefte et stasjonært objekt." + }, + "threshold": { + "label": "Terskel for stasjonære objekter", + "description": "Antall bilder uten posisjonsendring som kreves for å markere et objekt som stasjonært." 
+ }, + "max_frames": { + "label": "Maks antall bilder", + "description": "Begrenser hvor lenge stasjonære objekter spores før de forkastes.", + "default": { + "label": "Standard maks bilder", + "description": "Standard maksimalt antall bilder et stasjonært objekt spores før det stoppes." + }, + "objects": { + "label": "Maks bilder per objekt", + "description": "Overstyringer per objekttype for maksimalt antall bilder stasjonære objekter skal spores." + } + }, + "classifier": { + "label": "Aktiver visuell klassifiserer", + "description": "Bruk en visuell klassifiserer for å detektere reelt stasjonære objekter selv når markeringsrammene \"skjelver\" (jitter)." + } + }, + "annotation_offset": { + "label": "Forskyvning av annotering", + "description": "Millisekunder for å forskyve deteksjonsannoteringer for bedre samsvar mellom markeringsrammer på tidslinjen og opptakene; kan være positiv eller negativ." + } + }, + "ffmpeg": { + "label": "FFmpeg", + "description": "FFmpeg-innstillinger, inkludert sti til binærfil, argumenter, alternativer for maskinvareakselerasjon og utdata-argumenter per rolle.", + "path": { + "label": "FFmpeg-sti", + "description": "Sti til FFmpeg-binærfilen som skal brukes, eller et versjonsalias (\"5.0\" eller \"7.0\")." + }, + "global_args": { + "label": "Globale FFmpeg-argumenter", + "description": "Globale argumenter som sendes til FFmpeg-prosesser." + }, + "hwaccel_args": { + "label": "Argumenter for maskinvareakselerasjon", + "description": "Argumenter for maskinvareakselerasjon i FFmpeg. Leverandørspesifikke forhåndsinnstillinger anbefales." + }, + "input_args": { + "label": "Inndata-argumenter", + "description": "Inndata-argumenter som brukes på FFmpeg-innstrømmer." 
+ }, + "output_args": { + "label": "Utdata-argumenter", + "description": "Standard utdata-argumenter brukt for ulike FFmpeg-roller som deteksjon og opptak.", + "detect": { + "label": "Utdata-argumenter for deteksjon", + "description": "Standard utdata-argumenter for strømmer med deteksjonsrolle." + }, + "record": { + "label": "Utdata-argumenter for opptak", + "description": "Standard utdata-argumenter for strømmer med opptaksrolle." + } + }, + "retry_interval": { + "label": "FFmpeg-forsøksintervall", + "description": "Sekunder å vente før man prøver å koble til en kamerastrøm på nytt etter feil. Standard er 10." + }, + "apple_compatibility": { + "label": "Apple-kompatibilitet", + "description": "Aktiver HEVC-tagging for bedre kompatibilitet med Apple-avspillere ved opptak i H.265." + }, + "gpu": { + "label": "GPU-indeks", + "description": "Standard GPU-indeks som brukes til maskinvareakselerasjon hvis tilgjengelig." + }, + "inputs": { + "label": "Kamerainndata", + "description": "Liste over definisjoner for inndatastrømmer (stier og roller) for dette kameraet.", + "path": { + "label": "Inndatasti", + "description": "URL eller sti for kameraets inndatastrøm." + }, + "roles": { + "label": "Inndataroller", + "description": "Roller for denne inndatastrømmen." + }, + "global_args": { + "label": "Globale FFmpeg-argumenter", + "description": "Globale FFmpeg-argumenter for denne inndatastrømmen." + }, + "hwaccel_args": { + "label": "Argumenter for maskinvareakselerasjon", + "description": "Argumenter for maskinvareakselerasjon for denne inndatastrømmen." + }, + "input_args": { + "label": "Inndata-argumenter", + "description": "Inndata-argumenter spesifisert for denne strømmen." + } + } + }, + "live": { + "label": "Direkteavspilling", + "description": "Innstillinger for å kontrollere oppløsning og kvalitet på jsmpeg-direktestrømmer. 
Dette påvirker ikke kameraer som bruker go2rtc for direktevisning.", + "streams": { + "label": "Navn på direktestrømmer", + "description": "Kobling mellom konfigurerte strøm-navn og restream/go2rtc-navn brukt for direkteavspilling." + }, + "height": { + "label": "Direktevisningshøyde", + "description": "Høyde (piksler) for jsmpeg-direktestrømmen i web-grensesnittet; må være <= høyden på deteksjonsstrømmen." + }, + "quality": { + "label": "Direktevisningskvalitet", + "description": "Kodingskvalitet for jsmpeg-strømmen (1 høyest, 31 lavest)." + } + }, + "motion": { + "label": "Bevegelsesdeteksjon", + "description": "Standardinnstillinger for bevegelsesdeteksjon som gjelder for kameraer med mindre de overstyres per kamera.", + "enabled": { + "label": "Aktiver bevegelsesdeteksjon", + "description": "Aktiver eller deaktiver bevegelsesdeteksjon for alle kameraer; kan overstyres per kamera." + }, + "threshold": { + "label": "Terskel for bevegelse", + "description": "Terskel for pikselendring brukt av bevegelsesdetektoren; høyere verdier reduserer følsomheten (intervall 1–255)." + }, + "lightning_threshold": { + "label": "Terskel for lyn/lysglimt", + "description": "Terskel for å oppdage og ignorere korte lysglimt (lavere er mer følsom, verdier mellom 0,3 og 1,0). Dette stopper ikke bevegelsesdeteksjon helt; det fører bare til at detektoren slutter å analysere flere bilder når terskelen er nådd. Bevegelsesbaserte opptak blir fortsatt laget under slike hendelser." + }, + "skip_motion_threshold": { + "label": "Terskel for å hoppe over bevegelse", + "description": "Hvis satt til en verdi mellom 0,0 og 1,0, og mer enn denne andelen av bildet endres i ett enkelt bilde, vil detektoren ikke returnere noen bevegelsesbokser og kalibrere på nytt umiddelbart. Dette kan spare CPU og redusere falske positive under lyn, storm, osv., men kan gå glipp av ekte hendelser som at et PTZ-kamera autosporer et objekt. 
Avveiningen står mellom å miste noen megabyte med opptak mot å måtte se gjennom et par korte klipp. La stå tom (None) for å deaktivere denne funksjonen." + }, + "improve_contrast": { + "label": "Forbedre kontrast", + "description": "Bruk kontrastforbedring på bilder før bevegelsesanalyse for å hjelpe deteksjonen." + }, + "contour_area": { + "label": "Konturområde", + "description": "Minimum konturområde i piksler som kreves for at en bevegelseskontur skal telles med." + }, + "delta_alpha": { + "label": "Delta alfa", + "description": "Alfa-blandingsfaktor brukt i bildedifferensiering for bevegelsesberegning." + }, + "frame_alpha": { + "label": "Bilde-alfa", + "description": "Alfa-verdi brukt ved sammenfletting av bilder for forhåndsbehandling av bevegelse." + }, + "frame_height": { + "label": "Bildehøyde", + "description": "Høyde i piksler som bildene skal skaleres til ved beregning av bevegelse." + }, + "mask": { + "label": "Maskekoordinater", + "description": "Sorterte x,y-koordinater som definerer polygonet for bevegelsesmasken brukt til å inkludere/ekskludere områder." + }, + "mqtt_off_delay": { + "label": "MQTT-av-forsinkelse", + "description": "Sekunder å vente etter siste bevegelse før en MQTT 'av'-status publiseres." + }, + "enabled_in_config": { + "label": "Opprinnelig bevegelsesstatus", + "description": "Indikerer om bevegelsesdeteksjon var aktivert i den opprinnelige statiske konfigurasjonen." + }, + "raw_mask": { + "label": "Råmaske" + } + }, + "objects": { + "label": "Objekter", + "description": "Standardinnstillinger for objektsporing, inkludert hvilke etiketter som skal spores og filtre per objekt.", + "track": { + "label": "Objekter som skal spores", + "description": "Liste over objektetiketter som skal spores for alle kameraer; kan overstyres per kamera." 
+ }, + "filters": { + "label": "Objektfiltre", + "description": "Filtre som brukes på detekterte objekter for å redusere falske positive (område, forhold, konfidens).", + "min_area": { + "label": "Minimum objektområde", + "description": "Minimum areal for markeringsramme (piksler eller prosent) som kreves for denne objekttypen. Kan oppgis i piksler (heltall) eller prosent (desimaltall mellom 0,000001 og 0,99)." + }, + "max_area": { + "label": "Maksimum objektområde", + "description": "Maksimalt areal for markeringsramme (piksler eller prosent) tillatt for denne objekttypen." + }, + "min_ratio": { + "label": "Minimum størrelsesforhold", + "description": "Minimum forhold mellom bredde og høyde som kreves for at markeringsrammen skal kvalifisere." + }, + "max_ratio": { + "label": "Maksimum størrelsesforhold", + "description": "Maksimalt forhold mellom bredde og høyde tillatt for at markeringsrammen skal kvalifisere." + }, + "threshold": { + "label": "Konfidensterskel", + "description": "Gjennomsnittlig terskel for deteksjonskonfidens som kreves for at objektet skal anses som en ekte positiv." + }, + "min_score": { + "label": "Minimum konfidens", + "description": "Minimum deteksjonskonfidens for et enkeltbilde som kreves for at objektet skal telles med." + }, + "mask": { + "label": "Filtermaske", + "description": "Polygonkoordinater som definerer hvor dette filteret gjelder innenfor bildet." + }, + "raw_mask": { + "label": "Råmaske" + } + }, + "mask": { + "label": "Objektmaske", + "description": "Maskepolygon brukt for å forhindre objektdeteksjon i spesifiserte områder." + }, + "raw_mask": { + "label": "Råmaske" + }, + "genai": { + "label": "GenAI-objektkonfigurasjon", + "description": "GenAI-alternativer for å beskrive sporede objekter og sende bilder til generering.", + "enabled": { + "label": "Aktiver GenAI", + "description": "Aktiver GenAI-generering av beskrivelser for sporede objekter som standard." 
+ }, + "use_snapshot": { + "label": "Bruk stillbilder", + "description": "Bruk stillbilder av objekter i stedet for miniatyrbilder for GenAI-beskrivelsesgenerering." + }, + "prompt": { + "label": "Ledetekst for bildetekst", + "description": "Standardmal for ledetekst brukt ved generering av beskrivelser med GenAI." + }, + "object_prompts": { + "label": "Objektspesifikke ledetekster", + "description": "Ledetekster per objekt for å tilpasse GenAI-resultater for spesifikke etiketter." + }, + "objects": { + "label": "GenAI-objekter", + "description": "Liste over objektetiketter som skal sendes til GenAI som standard." + }, + "required_zones": { + "label": "Påkrevde soner", + "description": "Soner som må entres for at objekter skal kvalifisere for GenAI-beskrivelsesgenerering." + }, + "debug_save_thumbnails": { + "label": "Lagre miniatyrbilder", + "description": "Lagre miniatyrbilder sendt til GenAI for feilsøking og inspeksjon." + }, + "send_triggers": { + "label": "GenAI-utløsere", + "description": "Definerer når bilder skal sendes til GenAI (ved slutt, etter oppdateringer, osv.).", + "tracked_object_end": { + "label": "Send ved avslutning", + "description": "Send en forespørsel til GenAI når det sporede objektet avsluttes." + }, + "after_significant_updates": { + "label": "Tidlig GenAI-utløser", + "description": "Send en forespørsel til GenAI etter et spesifisert antall signifikante oppdateringer for det sporede objektet." + } + }, + "enabled_in_config": { + "label": "Opprinnelig GenAI-status", + "description": "Indikerer om GenAI var aktivert i den opprinnelige statiske konfigurasjonen." + } + } + }, + "record": { + "label": "Opptak", + "description": "Innstillinger for opptak og bevaring (retention) som gjelder for kameraer med mindre de overstyres per kamera.", + "enabled": { + "label": "Aktiver opptak", + "description": "Aktiver eller deaktiver opptak for alle kameraer; kan overstyres per kamera." 
+ }, + "expire_interval": { + "label": "Intervall for opprydding av opptak", + "description": "Minutter mellom hver opprydding som fjerner foreldede opptakssegmenter." + }, + "continuous": { + "label": "Kontinuerlig bevaring", + "description": "Antall dager opptak skal bevares uavhengig av sporede objekter eller bevegelse.", + "days": { + "label": "Bevaringsdager", + "description": "Dager opptak skal bevares." + } + }, + "motion": { + "label": "Bevaring ved bevegelse", + "description": "Antall dager opptak utløst av bevegelse skal bevares uavhengig av sporede objekter.", + "days": { + "label": "Bevaringsdager", + "description": "Dager opptak skal bevares." + } + }, + "detections": { + "label": "Bevaring ved deteksjon", + "description": "Innstillinger for bevaring av opptak for deteksjonshendelser, inkludert varighet for forhånds-/etteropptak.", + "pre_capture": { + "label": "Sekunder forhåndsopptak", + "description": "Antall sekunder før deteksjonshendelsen som skal inkluderes i opptaket." + }, + "post_capture": { + "label": "Sekunder etteropptak", + "description": "Antall sekunder etter deteksjonshendelsen som skal inkluderes i opptaket." + }, + "retain": { + "label": "Hendelsesbevaring", + "description": "Bevaringsinnstillinger for opptak av deteksjonshendelser.", + "days": { + "label": "Bevaringsdager", + "description": "Antall dager opptak av deteksjonshendelser skal bevares." + }, + "mode": { + "label": "Bevaringsmodus", + "description": "Modus for bevaring: all (alle), motion (bevegelse) eller active_objects (aktive objekter)." + } + } + }, + "alerts": { + "label": "Bevaring av varsler", + "description": "Innstillinger for bevaring av opptak for varslingshendelser, inkludert varighet for forhånds-/etteropptak.", + "pre_capture": { + "label": "Sekunder forhåndsopptak", + "description": "Antall sekunder før deteksjonshendelsen som skal inkluderes i opptaket." 
+ }, + "post_capture": { + "label": "Sekunder etteropptak", + "description": "Antall sekunder etter deteksjonshendelsen som skal inkluderes i opptaket." + }, + "retain": { + "label": "Hendelsesbevaring", + "description": "Bevaringsinnstillinger for opptak av deteksjonshendelser.", + "days": { + "label": "Bevaringsdager", + "description": "Antall dager opptak av deteksjonshendelser skal bevares." + }, + "mode": { + "label": "Bevaringsmodus", + "description": "Modus for bevaring: all (lagre alle segmenter), motion (lagre segmenter med bevegelse) eller active_objects (lagre segmenter med aktive objekter)." + } + } + }, + "export": { + "label": "Konfigurasjon for eksport", + "description": "Innstillinger som brukes ved eksport av opptak, som for eksempel tidsforløp (timelapse) og maskinvareakselerasjon.", + "hwaccel_args": { + "label": "Argumenter for maskinvareakselerasjon ved eksport", + "description": "Argumenter for maskinvareakselerasjon som skal brukes ved eksport og transkoding." + } + }, + "preview": { + "label": "Konfigurasjon for forhåndsvisning", + "description": "Innstillinger som kontrollerer kvaliteten på forhåndsvisninger av opptak i grensesnittet.", + "quality": { + "label": "Kvalitet på forhåndsvisning", + "description": "Kvalitetsnivå for forhåndsvisning (very_low, low, medium, high, very_high)." + } + }, + "enabled_in_config": { + "label": "Opprinnelig opptaksstatus", + "description": "Indikerer om opptak var aktivert i den opprinnelige statiske konfigurasjonen." + } + }, + "review": { + "label": "Inspeksjon", + "description": "Innstillinger som kontrollerer varsler, deteksjoner og GenAI-sammendrag brukt av grensesnittet og lagring.", + "alerts": { + "label": "Konfigurasjon for varsler", + "description": "Innstillinger for hvilke sporede objekter som genererer varsler og hvordan disse bevares.", + "enabled": { + "label": "Aktiver varsler", + "description": "Aktiver eller deaktiver generering av varsler for alle kameraer; kan overstyres per kamera." 
+ }, + "labels": { + "label": "Varslingsetiketter", + "description": "Liste over objektetiketter som kvalifiserer som varsler (for eksempel: bil, person)." + }, + "required_zones": { + "label": "Påkrevde soner", + "description": "Soner et objekt må tre inn i for å anses som et varsel; la stå tom for å tillate alle soner." + }, + "enabled_in_config": { + "label": "Opprinnelig varslingsstatus", + "description": "Registrerer om varsler opprinnelig var aktivert i den statiske konfigurasjonen." + }, + "cutoff_time": { + "label": "Avskjæringstid for varsler", + "description": "Sekunder å vente etter at varslingsutløsende aktivitet har opphørt før et varsel avsluttes." + } + }, + "detections": { + "label": "Konfigurasjon for deteksjoner", + "description": "Innstillinger for hvilke sporede objekter som genererer deteksjoner (ikke-varsler) og hvordan disse bevares.", + "enabled": { + "label": "Aktiver deteksjoner", + "description": "Aktiver eller deaktiver deteksjonshendelser for alle kameraer; kan overstyres per kamera." + }, + "labels": { + "label": "Deteksjonsetiketter", + "description": "Liste over objektetiketter som kvalifiserer som deteksjonshendelser." + }, + "required_zones": { + "label": "Påkrevde soner", + "description": "Soner et objekt må tre inn i for å anses som en deteksjon; la stå tom for å tillate alle soner." + }, + "cutoff_time": { + "label": "Avskjæringstid for deteksjoner", + "description": "Sekunder å vente etter at deteksjonsutløsende aktivitet har opphørt før en deteksjon avsluttes." + }, + "enabled_in_config": { + "label": "Opprinnelig deteksjonsstatus", + "description": "Registrerer om deteksjoner opprinnelig var aktivert i den statiske konfigurasjonen." 
+ } + }, + "genai": { + "label": "GenAI-konfigurasjon", + "description": "Kontrollerer bruk av generativ AI for å produsere beskrivelser og sammendrag av inspeksjonselementer.", + "enabled": { + "label": "Aktiver GenAI-beskrivelser", + "description": "Aktiver eller deaktiver GenAI-genererte beskrivelser og sammendrag for inspeksjonselementer." + }, + "alerts": { + "label": "Aktiver GenAI for varsler", + "description": "Bruk GenAI til å generere beskrivelser for varslingselementer." + }, + "detections": { + "label": "Aktiver GenAI for deteksjoner", + "description": "Bruk GenAI til å generere beskrivelser for deteksjonselementer." + }, + "image_source": { + "label": "Bildekilde for inspeksjon", + "description": "Kilde for bilder sendt til GenAI ('preview' eller 'recordings'); 'recordings' bruker bilder med høyere kvalitet, men flere tokens." + }, + "additional_concerns": { + "label": "Tilleggshensyn", + "description": "En liste over tilleggshensyn eller notater GenAI bør vurdere ved evaluering av aktivitet på dette kameraet." + }, + "debug_save_thumbnails": { + "label": "Lagre miniatyrbilder", + "description": "Lagre miniatyrbilder som sendes til GenAI-leverandøren for feilsøking og inspeksjon." + }, + "enabled_in_config": { + "label": "Opprinnelig GenAI-status", + "description": "Registrerer om GenAI-inspeksjon opprinnelig var aktivert i den statiske konfigurasjonen." + }, + "preferred_language": { + "label": "Foretrukket språk", + "description": "Foretrukket språk som skal etterspørres fra GenAI-leverandøren for genererte svar." + }, + "activity_context_prompt": { + "label": "Ledetekst for aktivitetskontekst", + "description": "Egendefinert ledetekst som beskriver hva som er og ikke er mistenkelig aktivitet for å gi kontekst til GenAI-sammendrag." 
+ } + } + }, + "snapshots": { + "label": "Stillbilder", + "description": "Innstillinger for API-genererte stillbilder av sporede objekter for alle kameraer; kan overstyres per kamera.", + "enabled": { + "label": "Aktiver stillbilder", + "description": "Aktiver eller deaktiver lagring av stillbilder for alle kameraer; kan overstyres per kamera." + }, + "timestamp": { + "label": "Tidsstempel-overlegg", + "description": "Legg et tidsstempel over stillbilder fra API-et." + }, + "bounding_box": { + "label": "Overlegg for markeringsramme", + "description": "Tegn markeringsrammer for sporede objekter på stillbilder fra API-et." + }, + "crop": { + "label": "Beskjær stillbilde", + "description": "Beskjær stillbilder fra API-et til det detekterte objektets markeringsramme." + }, + "required_zones": { + "label": "Påkrevde soner", + "description": "Soner et objekt må tre inn i for at et stillbilde skal lagres." + }, + "height": { + "label": "Høyde på stillbilde", + "description": "Høyde (piksler) som stillbilder fra API-et skal skaleres til; la stå tom for å beholde opprinnelig størrelse." + }, + "retain": { + "label": "Bevaring av stillbilder", + "description": "Bevaringsinnstillinger for stillbilder, inkludert standard antall dager og overstyringer per objekt.", + "default": { + "label": "Standard bevaring", + "description": "Standard antall dager stillbilder skal bevares." + }, + "mode": { + "label": "Bevaringsmodus", + "description": "Modus for bevaring: all (lagre alle segmenter), motion (lagre segmenter med bevegelse) eller active_objects (lagre segmenter med aktive objekter)." + }, + "objects": { + "label": "Objektbevaring", + "description": "Overstyringer per objekt for antall dager stillbilder skal bevares." + } + }, + "quality": { + "label": "Kvalitet på stillbilde", + "description": "Kodingskvalitet for lagrede stillbilder (0-100)." 
+ } + }, + "timestamp_style": { + "label": "Stil for tidsstempel", + "description": "Stilalternativer for tidsstempler i strømmen, brukt i testvisning (debug) og stillbilder.", + "position": { + "label": "Posisjon for tidsstempel", + "description": "Posisjonen til tidsstempelet på bildet (tl/tr/bl/br)." + }, + "format": { + "label": "Format for tidsstempel", + "description": "Formatstreng for dato og tid brukt for tidsstempler (Python datetime-formatkoder)." + }, + "color": { + "label": "Farge på tidsstempel", + "description": "RGB-fargeverdier for tidsstempelteksten (alle verdier 0-255).", + "red": { + "label": "Rød", + "description": "Rød komponent (0-255) for tidsstempelfarge." + }, + "green": { + "label": "Grønn", + "description": "Grønn komponent (0-255) for tidsstempelfarge." + }, + "blue": { + "label": "Blå", + "description": "Blå komponent (0-255) for tidsstempelfarge." + } + }, + "thickness": { + "label": "Tykkelse på tidsstempel", + "description": "Linjetykkelsen på tidsstempelteksten." + }, + "effect": { + "label": "Effekt for tidsstempel", + "description": "Visuell effekt for tidsstempelteksten (none, solid, shadow)." + } + }, + "audio_transcription": { + "label": "Lydtranskripsjon", + "description": "Innstillinger for tale- og lydtranskripsjon i sanntid, brukt for hendelser og teksting.", + "enabled": { + "label": "Aktiver lydtranskripsjon", + "description": "Aktiver eller deaktiver automatisk lydtranskripsjon for alle kameraer; kan overstyres per kamera." + }, + "language": { + "label": "Språk for transkripsjon", + "description": "Språkkode som brukes for transkripsjon/oversettelse (f.eks. 'no' for norsk)." + }, + "device": { + "label": "Enhet for transkripsjon", + "description": "Enhet (CPU/GPU) som transkripsjonsmodellen skal kjøre på." + }, + "model_size": { + "label": "Modellstørrelse", + "description": "Modellstørrelse som skal brukes for transkripsjon av lydhendelser lokalt." 
+ }, + "live_enabled": { + "label": "Sanntidstranskripsjon", + "description": "Aktiver løpende transkripsjon av lyd etter hvert som den mottas." + } + }, + "classification": { + "label": "Objektklassifisering", + "description": "Innstillinger for klassifiseringsmodeller brukt til å forbedre objektetiketter eller statusklassifisering.", + "bird": { + "label": "Konfigurasjon for fugleklassifisering", + "description": "Spesifikke innstillinger for modeller for fugleklassifisering.", + "enabled": { + "label": "Fugleklassifisering", + "description": "Aktiver eller deaktiver fugleklassifisering." + }, + "threshold": { + "label": "Minimumsscore", + "description": "Minimum klassifiseringsscore som kreves for å godta en fugleklassifisering." + } + }, + "custom": { + "label": "Egendefinerte klassifiseringsmodeller", + "description": "Konfigurasjon for egendefinerte klassifiseringsmodeller brukt for objekter eller statusdeteksjon.", + "enabled": { + "label": "Aktiver modell", + "description": "Aktiver eller deaktiver den egendefinerte klassifiseringsmodellen." + }, + "name": { + "label": "Modellnavn", + "description": "Identifikator for den egendefinerte klassifiseringsmodellen som skal brukes." + }, + "threshold": { + "label": "Score-terskel", + "description": "Score-terskel brukt for å endre klassifiseringsstatus." + }, + "save_attempts": { + "label": "Lagre forsøk", + "description": "Hvor mange klassifiseringsforsøk som skal lagres for visning i grensesnittet." + }, + "object_config": { + "objects": { + "label": "Klassifiser objekter", + "description": "Liste over objekttyper det skal kjøres objektklassifisering på." + }, + "classification_type": { + "label": "Klassifiseringstype", + "description": "Klassifiseringstype som brukes: 'sub_label' (legger til underetikett) eller andre støttede typer." 
+ } + }, + "state_config": { + "cameras": { + "label": "Kameraer for klassifisering", + "description": "Beskjæring og innstillinger per kamera for kjøring av statusklassifisering.", + "crop": { + "label": "Beskjæring for klassifisering", + "description": "Beskjæringskoordinater som skal brukes for klassifisering på dette kameraet." + } + }, + "motion": { + "label": "Kjør ved bevegelse", + "description": "Hvis sann, kjør klassifisering når bevegelse detekteres innenfor det spesifiserte området." + }, + "interval": { + "label": "Klassifiseringsintervall", + "description": "Intervall (sekunder) mellom periodiske kjøringer for statusklassifisering." + } + } + } + }, + "semantic_search": { + "label": "Semantisk søk", + "description": "Innstillinger for semantisk søk, som bygger og søker i objekt-vektorrepresentasjoner for å finne lignende elementer.", + "enabled": { + "label": "Aktiver semantisk søk", + "description": "Aktiver eller deaktiver funksjonen for semantisk søk." + }, + "reindex": { + "label": "Reindekser ved oppstart", + "description": "Utløs en fullstendig reindeksering av historiske sporede objekter i databasen for vektorrepresentasjoner." + }, + "model": { + "label": "Modell for semantisk søk eller GenAI-leverandør", + "description": "Modellen for vektorrepresentasjoner som skal brukes for semantisk søk (f.eks. 'jinav1'), eller navnet på en GenAI-leverandør." + }, + "model_size": { + "label": "Modellstørrelse", + "description": "Velg modellstørrelse; 'liten' kjører på CPU og 'stor' krever vanligvis GPU." + }, + "device": { + "label": "Enhet", + "description": "Dette er en overstyring for å målrette en spesifikk enhet. Se https://onnxruntime.ai/docs/execution-providers/ for mer informasjon" + }, + "triggers": { + "label": "Utløsere", + "description": "Handlinger og kriterier for kameraspesifikke utløsere for semantisk søk.", + "friendly_name": { + "label": "Visningsnavn", + "description": "Valgfritt visningsnavn for denne utløseren i grensesnittet." 
+ },
+ "enabled": {
+ "label": "Aktiver denne utløseren",
+ "description": "Aktiver eller deaktiver denne utløseren for semantisk søk."
+ },
+ "type": {
+ "label": "Utløsertype",
+ "description": "Type utløser: 'miniatyrbilde' (match mot bilde) eller 'beskrivelse' (match mot tekst)."
+ },
+ "data": {
+ "label": "Utløserinnhold",
+ "description": "Tekstfrase eller miniatyrbilde-ID som skal matches mot sporede objekter."
+ },
+ "threshold": {
+ "label": "Utløser-terskel",
+ "description": "Minimum likhetsscore (0-1) som kreves for å aktivere denne utløseren."
+ },
+ "actions": {
+ "label": "Utløserhandlinger",
+ "description": "Liste over handlinger som skal utføres når utløseren matches (varsling, underetikett, egenskap)."
+ }
+ },
+ "face_recognition": {
+ "label": "Ansiktsgjenkjenning",
+ "description": "Innstillinger for ansiktsdeteksjon og gjenkjenning for alle kameraer; kan overstyres per kamera.",
+ "enabled": {
+ "label": "Aktiver ansiktsgjenkjenning",
+ "description": "Aktiver eller deaktiver ansiktsgjenkjenning for alle kameraer; kan overstyres per kamera."
+ },
+ "model_size": {
+ "label": "Modellstørrelse",
+ "description": "Modellstørrelse for vektorrepresentasjoner for ansikter (liten/stor); 'stor' kan kreve GPU."
+ },
+ "unknown_score": {
+ "label": "Terskel for ukjent person",
+ "description": "Avstandsterskel der et ansikt anses som en potensiell match (høyere = strengere)."
+ },
+ "detection_threshold": {
+ "label": "Deteksjonsterskel",
+ "description": "Minimum konfidens som kreves for at en ansiktsdeteksjon skal anses som gyldig."
+ },
+ "recognition_threshold": {
+ "label": "Gjenkjenningsterskel",
+ "description": "Terskel for avstand mellom vektorrepresentasjoner for ansikt for å anse to ansikter som like."
+ },
+ "min_area": {
+ "label": "Minimum ansiktsareal",
+ "description": "Minimum areal (piksler) for en ansiktsboks før gjenkjenning forsøkes." 
+ },
+ "min_faces": {
+ "label": "Minimum antall ansikter",
+ "description": "Minimum antall gjenkjenninger som kreves før en underetikett tas i bruk."
+ },
+ "save_attempts": {
+ "label": "Lagre forsøk",
+ "description": "Antall gjenkjenningsforsøk som skal lagres for visning i grensesnittet."
+ },
+ "blur_confidence_filter": {
+ "label": "Filter for uskarphet",
+ "description": "Juster konfidensscore basert på uskarphet i bildet for å redusere falske positive."
+ },
+ "device": {
+ "label": "Enhet",
+ "description": "Dette er en overstyring for å målrette en spesifikk enhet. Se https://onnxruntime.ai/docs/execution-providers/ for mer informasjon"
+ }
+ },
+ "lpr": {
+ "label": "Gjenkjenning av kjennemerker",
+ "description": "Innstillinger for gjenkjenning av kjennemerker, inkludert deteksjonsterskler og kjente kjennemerker.",
+ "enabled": {
+ "label": "Aktiver skiltgjenkjenning",
+ "description": "Aktiver eller deaktiver gjenkjenning av kjennemerker for alle kameraer; kan overstyres per kamera."
+ },
+ "model_size": {
+ "label": "Modellstørrelse",
+ "description": "Modellstørrelse brukt for tekstdeteksjon og gjenkjenning. De fleste brukere bør bruke 'liten'."
+ },
+ "detection_threshold": {
+ "label": "Deteksjonsterskel",
+ "description": "Terskel for deteksjonskonfidens for å starte OCR på et antatt kjennemerke."
+ },
+ "min_area": {
+ "label": "Minimum areal for kjennemerke",
+ "description": "Minimum areal (piksler) for et kjennemerke før gjenkjenning forsøkes."
+ },
+ "recognition_threshold": {
+ "label": "Gjenkjenningsterskel",
+ "description": "Konfidensterskel som kreves for at gjenkjent tekst på kjennemerke skal legges til som underetikett."
+ },
+ "min_plate_length": {
+ "label": "Minimum kjennemerkelengde",
+ "description": "Minimum antall tegn et gjenkjent kjennemerke må inneholde for å anses som gyldig." 
+ },
+ "format": {
+ "label": "Regex for kjennemerkeformat",
+ "description": "Valgfri regex for å validere gjenkjente kjennemerkestrenger mot et forventet format."
+ },
+ "match_distance": {
+ "label": "Match-distanse",
+ "description": "Antall tegnfeil som tillates ved sammenligning av detekterte kjennemerker mot kjente kjennemerker."
+ },
+ "known_plates": {
+ "label": "Kjente kjennemerker",
+ "description": "Liste over kjennemerker eller regexer som skal spores spesielt eller utløse varsel."
+ },
+ "enhancement": {
+ "label": "Forbedringsnivå",
+ "description": "Forbedringsnivå (0-10) som brukes på kjennemerkebeskjæringer før OCR; høyere verdier forbedrer ikke alltid resultatet, nivåer over 5 fungerer ofte kun på nattbilder og bør brukes med forsiktighet."
+ },
+ "debug_save_plates": {
+ "label": "Lagre feilsøkingsbilder",
+ "description": "Lagre beskjærte kjennemerkebilder for feilsøking av LPR-ytelse."
+ },
+ "device": {
+ "label": "Enhet",
+ "description": "Dette er en overstyring for å målrette en spesifikk enhet. Se https://onnxruntime.ai/docs/execution-providers/ for mer informasjon"
+ },
+ "replace_rules": {
+ "label": "Erstatningsregler",
+ "description": "Regex-erstatningsregler brukt for å normalisere detekterte kjennemerkestrenger før matching.",
+ "pattern": {
+ "label": "Regex-mønster"
+ },
+ "replacement": {
+ "label": "Erstatningsstreng"
+ }
+ },
+ "expire_time": {
+ "label": "Utløpstid (sekunder)",
+ "description": "Tid i sekunder før et ukjent kjennemerke foreldes fra sporingen (kun for dedikerte LPR-kameraer)."
+ }
+ },
+ "camera_groups": {
+ "label": "Kameragrupper",
+ "description": "Konfigurasjon for navngitte kameragrupper brukt til å organisere kameraer i grensesnittet.",
+ "cameras": {
+ "label": "Kameraliste",
+ "description": "Liste over kameranavn som er inkludert i denne gruppen."
+ },
+ "icon": {
+ "label": "Gruppeikon",
+ "description": "Ikon som representerer kameragruppen i grensesnittet." 
+ }, + "order": { + "label": "Sorteringsrekkefølge", + "description": "Numerisk rekkefølge for sortering av kameragrupper i grensesnittet; høyere tall vises senere." + } + }, + "profiles": { + "label": "Profiler", + "description": "Navngitte profil-definisjoner. Kameraprofiler må referere til navn definert her.", + "friendly_name": { + "label": "Visningsnavn", + "description": "Visningsnavn for denne profilen i grensesnittet." + } + }, + "active_profile": { + "label": "Aktiv profil", + "description": "Navn på profil som er aktiv nå. Kun for kjøretid, lagres ikke i YAML." + }, + "camera_mqtt": { + "label": "MQTT", + "description": "Innstillinger for bilde-publisering via MQTT.", + "enabled": { + "label": "Send bilde", + "description": "Aktiver publisering av bilde-stillbilder for objekter til MQTT-emner for dette kameraet." + }, + "timestamp": { + "label": "Legg til tidsstempel", + "description": "Legg et tidsstempel over bilder som publiseres til MQTT." + }, + "bounding_box": { + "label": "Legg til markeringsramme", + "description": "Tegn markeringsrammer på bilder som publiseres over MQTT." + }, + "crop": { + "label": "Beskjær bilde", + "description": "Beskjær bilder publisert til MQTT til det detekterte objektets markeringsramme." + }, + "height": { + "label": "Bildehøyde", + "description": "Høyde (piksler) for bilder som publiseres over MQTT." + }, + "required_zones": { + "label": "Påkrevde soner", + "description": "Soner et objekt må tre inn i for at et MQTT-bilde skal publiseres." + }, + "quality": { + "label": "JPEG-kvalitet", + "description": "JPEG-kvalitet for bilder publisert til MQTT (0-100)." + } + }, + "camera_ui": { + "label": "Brukergrensesnitt for kamera", + "description": "Sortering og synlighet for dette kameraet i grensesnittet. Sortering påvirker standard dashbord. 
For mer detaljert kontroll, bruk kameragrupper.", + "order": { + "label": "Rekkefølge i UI", + "description": "Numerisk rekkefølge for sortering av kameraet i grensesnittet; høyere tall vises senere." + }, + "dashboard": { + "label": "Vis i grensesnitt", + "description": "Velg om dette kameraet skal være synlig i Frigate-grensesnittet. Deaktivering krever manuell redigering av konfigurasjonen for å vise kameraet igjen." + } + }, + "onvif": { + "label": "ONVIF", + "description": "ONVIF-tilkobling og innstillinger for PTZ-autosporing for dette kameraet.", + "host": { + "label": "ONVIF-vert", + "description": "Vert (og valgfritt skjema) for ONVIF-tjenesten for dette kameraet." + }, + "port": { + "label": "ONVIF-port", + "description": "Portnummer for ONVIF-tjenesten." + }, + "user": { + "label": "ONVIF-brukernavn", + "description": "Brukernavn for ONVIF-autentisering; enkelte enheter krever admin-bruker for ONVIF." + }, + "password": { + "label": "ONVIF-passord", + "description": "Passord for ONVIF-autentisering." + }, + "tls_insecure": { + "label": "Deaktiver TLS-verifisering", + "description": "Hopp over TLS-verifisering og deaktiver digest-autentisering for ONVIF (usikre; bruk kun i trygge nettverk)." + }, + "profile": { + "label": "ONVIF-profil", + "description": "Spesifikk ONVIF-medieprofil for PTZ-kontroll. Hvis ikke satt, velges den første profilen med gyldig PTZ-konfigurasjon automatisk." + }, + "autotracking": { + "label": "Autosporing", + "description": "Spor bevegelige objekter automatisk og hold dem sentrert ved bruk av PTZ-bevegelser.", + "enabled": { + "label": "Aktiver autosporing", + "description": "Aktiver eller deaktiver automatisk PTZ-sporing av detekterte objekter." + }, + "calibrate_on_startup": { + "label": "Kalibrer ved start", + "description": "Mål PTZ-motorhastigheter ved oppstart for å forbedre sporingsnøyaktighet. Frigate vil oppdatere konfigurasjonen etter kalibrering." 
+ }, + "zooming": { + "label": "Zoom-modus", + "description": "Kontroller zoom-oppførsel: deaktivert, absolutt (mest kompatibel) eller relativ." + }, + "zoom_factor": { + "label": "Zoom-faktor", + "description": "Kontroller zoom-nivå på sporede objekter. Lavere verdier gir mer oversikt; høyere verdier zoomer tettere inn. Verdier mellom 0.1 og 0.75." + }, + "track": { + "label": "Sporede objekter", + "description": "Liste over objekttyper som skal utløse autosporing." + }, + "required_zones": { + "label": "Påkrevde soner", + "description": "Objekter må tre inn i en av disse sonene før autosporing starter." + }, + "return_preset": { + "label": "Forhåndsinnstilling for retur", + "description": "Navn på ONVIF-forhåndsinnstilling kameraet skal returnere til når sporingen avsluttes." + }, + "timeout": { + "label": "Tidsavbrudd for retur", + "description": "Antall sekunder å vente etter mistet sporing før kameraet returnerer til forhåndsinnstilt posisjon." + }, + "movement_weights": { + "label": "Bevegelsesvekting", + "description": "Kalibreringsverdier som genereres automatisk. Ikke endre manuelt." + }, + "enabled_in_config": { + "label": "Opprinnelig autosporingsstatus", + "description": "Internt felt for å spore om autosporing var aktivert i konfigurasjonen." + } + }, + "ignore_time_mismatch": { + "label": "Ignorer tidsavvik", + "description": "Ignorer forskjeller i tidssynkronisering mellom kamera og server ved ONVIF-kommunikasjon." 
+ } + } +} diff --git a/web/public/locales/nb-NO/config/groups.json b/web/public/locales/nb-NO/config/groups.json index 0967ef424..254a34394 100644 --- a/web/public/locales/nb-NO/config/groups.json +++ b/web/public/locales/nb-NO/config/groups.json @@ -1 +1,73 @@ -{} +{ + "audio": { + "global": { + "detection": "Global deteksjon", + "sensitivity": "Global følsomhet" + }, + "cameras": { + "detection": "Deteksjon", + "sensitivity": "Følsomhet" + } + }, + "timestamp_style": { + "global": { + "appearance": "Globalt utseende" + }, + "cameras": { + "appearance": "Utseende" + } + }, + "motion": { + "global": { + "sensitivity": "Global følsomhet", + "algorithm": "Global algoritme" + }, + "cameras": { + "sensitivity": "Følsomhet", + "algorithm": "Algoritme" + } + }, + "snapshots": { + "global": { + "display": "Global visning" + }, + "cameras": { + "display": "Visning" + } + }, + "detect": { + "global": { + "resolution": "Global oppløsning", + "tracking": "Global sporing" + }, + "cameras": { + "resolution": "Oppløsning", + "tracking": "Sporing" + } + }, + "objects": { + "global": { + "tracking": "Global sporing", + "filtering": "Global filtrering" + }, + "cameras": { + "tracking": "Sporing", + "filtering": "Filtrering" + } + }, + "record": { + "global": { + "retention": "Global opptaksbevaring", + "events": "Globale hendelser" + }, + "cameras": { + "retention": "Opptaksbevaring", + "events": "Hendelser" + } + }, + "ffmpeg": { + "cameras": { + "cameraFfmpeg": "Kamera-spesifikke FFmpeg argumenter" + } + } +} diff --git a/web/public/locales/nb-NO/config/validation.json b/web/public/locales/nb-NO/config/validation.json index 0967ef424..e9e34a202 100644 --- a/web/public/locales/nb-NO/config/validation.json +++ b/web/public/locales/nb-NO/config/validation.json @@ -1 +1,32 @@ -{} +{ + "minimum": "Må være minst {{limit}}", + "maximum": "Må være maksimalt {{limit}}", + "exclusiveMinimum": "Må være større enn {{limit}}", + "exclusiveMaximum": "Må være mindre enn {{limit}}", + 
"minItems": "Må ha minst {{limit}} elementer", + "maxItems": "Må ha maksimalt {{limit}} elementer", + "pattern": "Ugyldig format", + "required": "Dette feltet er obligatorisk", + "type": "Ugyldig verditype", + "enum": "Må være en av de tillatte verdiene", + "const": "Verdien samsvarer ikke med forventet konstant", + "uniqueItems": "Alle elementer må være unike", + "format": "Ugyldig format", + "additionalProperties": "Ukjent egenskap er ikke tillatt", + "oneOf": "Må samsvare med nøyaktig ett av de tillatte skjemaene", + "anyOf": "Må samsvare med minst ett av de tillatte skjemaene", + "proxy": { + "header_map": { + "roleHeaderRequired": "Rollehode (header) er påkrevd når rolletilordninger er konfigurert." + } + }, + "ffmpeg": { + "inputs": { + "rolesUnique": "Hver rolle kan bare tildeles én inngangsstrøm.", + "detectRequired": "Minst én inngangsstrøm må være tildelt rollen 'deteksjon'.", + "hwaccelDetectOnly": "Bare inngangsstrømmen med rollen 'deteksjon' kan definere argumenter for maskinvareakselerasjon." 
+ } + }, + "minLength": "Må være minst {{limit}} tegn", + "maxLength": "Må være maks {{limit}} tegn" +} diff --git a/web/public/locales/nb-NO/objects.json b/web/public/locales/nb-NO/objects.json index 5c7c5edd2..eb4b3ee36 100644 --- a/web/public/locales/nb-NO/objects.json +++ b/web/public/locales/nb-NO/objects.json @@ -112,9 +112,14 @@ "fedex": "FedEx", "dhl": "DHL", "an_post": "An Post", - "purolator": "Filter", + "purolator": "Purolator", "postnl": "PostNL", "nzpost": "NZPost", "postnord": "PostNord", - "dpd": "DPD" + "dpd": "DPD", + "kangaroo": "Kenguru", + "skunk": "Skunk", + "school_bus": "Skolebuss", + "royal_mail": "Royal Mail", + "canada_post": "Canada Post" } diff --git a/web/public/locales/nb-NO/views/classificationModel.json b/web/public/locales/nb-NO/views/classificationModel.json index e7ee73f08..2a5770877 100644 --- a/web/public/locales/nb-NO/views/classificationModel.json +++ b/web/public/locales/nb-NO/views/classificationModel.json @@ -12,15 +12,18 @@ }, "toast": { "success": { - "deletedCategory": "Klasse slettet", - "deletedImage": "Bilder slettet", + "deletedCategory_one": "Slettet {{count}} klasse", + "deletedCategory_other": "Slettet {{count}} klasser", + "deletedImage_one": "Slettet {{count}} bilde", + "deletedImage_other": "Slettet {{count}} bilder", "categorizedImage": "Klassifiserte bildet", "trainedModel": "Modellen ble trent.", "trainingModel": "Modelltrening startet.", "deletedModel_one": "{{count}} modell ble slettet", "deletedModel_other": "{{count}} modeller ble slettet", "updatedModel": "Modellkonfigurasjonen ble oppdatert", - "renamedCategory": "Klassen ble omdøpt til {{name}}" + "renamedCategory": "Klassen ble omdøpt til {{name}}", + "reclassifiedImage": "Bildet ble reklassifisert" }, "error": { "deleteImageFailed": "Kunne ikke slette: {{errorMessage}}", @@ -30,7 +33,8 @@ "deleteModelFailed": "Kunne ikke slette modell: {{errorMessage}}", "trainingFailedToStart": "Kunne ikke starte modelltrening: {{errorMessage}}", 
"updateModelFailed": "Kunne ikke oppdatere modell: {{errorMessage}}", - "renameCategoryFailed": "Kunne ikke omdøpe klasse: {{errorMessage}}" + "renameCategoryFailed": "Kunne ikke omdøpe klasse: {{errorMessage}}", + "reclassifyFailed": "Kunne ikke reklassifisere bilde: {{errorMessage}}" } }, "deleteCategory": { @@ -150,8 +154,13 @@ "allImagesRequired_other": "Vennligst klassifiser alle bildene. {{count}} bilder gjenstår.", "modelCreated": "Modellen ble opprettet. Bruk visningen Nylige klassifiseringer for å legge til bilder for manglende tilstander, og tren deretter modellen.", "missingStatesWarning": { - "title": "Manglende tilstandseksempler", - "description": "Det anbefales å velge eksempler for alle tilstander for å oppnå best mulig resultat. Du kan fortsette uten å velge alle tilstander, men modellen vil ikke bli trent før alle tilstander har bilder. Etter at du har gått videre, bruk visningen Nylige klassifiseringer for å klassifisere bilder for de manglende tilstandene, og tren deretter modellen." + "title": "Manglende klasseeksempler", + "description": "Ikke alle klasser har eksempler. Prøv å generere nye eksempler for å finne den manglende klassen, eller fortsett å bruke visningen 'Siste klassifiseringer' for å legge til bilder senere." + }, + "refreshExamples": "Generer nye eksempler", + "refreshConfirm": { + "title": "Generere nye eksempler?", + "description": "Dette vil generere et nytt sett med bilder og tilbakestille alle valg, inkludert tidligere klasser. Du må velge eksempler på nytt for alle klasser." } } }, @@ -181,5 +190,7 @@ "descriptionObject": "Rediger objekttypen og klassifiseringstypen for denne objektklassifiseringsmodellen.", "stateClassesInfo": "Merk: Endring av tilstandsklasser krever at modellen trenes på nytt med de oppdaterte klassene." 
}, - "none": "Ingen" + "none": "Ingen", + "reclassifyImageAs": "Reklassifiser bilde som:", + "reclassifyImage": "Reklassifiser bilde" } diff --git a/web/public/locales/nb-NO/views/configEditor.json b/web/public/locales/nb-NO/views/configEditor.json index c0c9253fa..df0cd00a9 100644 --- a/web/public/locales/nb-NO/views/configEditor.json +++ b/web/public/locales/nb-NO/views/configEditor.json @@ -1,5 +1,5 @@ { - "documentTitle": "Konfigurasjonsredigering - Frigate", + "documentTitle": "Konfigurasjonseditor - Frigate", "toast": { "error": { "savingError": "Feil ved lagring av konfigurasjon" @@ -8,11 +8,11 @@ "copyToClipboard": "Konfigurasjonen ble kopiert til utklippstavlen." } }, - "configEditor": "Konfigurasjonsredigering", + "configEditor": "Konfig-editor", "copyConfig": "Kopier konfigurasjonen", "saveAndRestart": "Lagre og omstart", "saveOnly": "Kun lagre", "confirm": "Avslutt uten å lagre?", - "safeConfigEditor": "Konfigurasjonsredigering (Sikker modus)", + "safeConfigEditor": "Konfig-editor (Sikker modus)", "safeModeDescription": "Frigate er i sikker modus grunnet en feil i validering av konfigurasjonen." } diff --git a/web/public/locales/nb-NO/views/events.json b/web/public/locales/nb-NO/views/events.json index 5e77f38ed..d1c3b02de 100644 --- a/web/public/locales/nb-NO/views/events.json +++ b/web/public/locales/nb-NO/views/events.json @@ -9,7 +9,9 @@ "description": "Inspeksjonselementer kan kun opprettes for et kamera når opptak er aktivert for det kameraet." 
} }, - "timeline": "Tidslinje", + "timeline": { + "label": "Tidslinje" + }, "events": { "label": "Hendelser", "aria": "Velg hendelser", @@ -63,5 +65,28 @@ "normalActivity": "Normal", "needsReview": "Trenger inspeksjon", "securityConcern": "Sikkerhetsrisiko", - "select_all": "Alle" + "select_all": "Alle", + "motionSearch": { + "menuItem": "Bevegelsessøk", + "openMenu": "Kameravalg" + }, + "motionPreviews": { + "menuItem": "Vis forhåndsvisning av bevegelse", + "title": "Bevegelsesvisning: {{camera}}", + "mobileSettingsTitle": "Innstillinger for forhåndsvisning", + "mobileSettingsDesc": "Juster avspillingshastighet og dimming, og velg en dato for å inspisere klipp med kun bevegelse.", + "dim": "Dimming", + "dimAria": "Juster dimmestyrke", + "dimDesc": "Øk dimming for å gjøre bevegelsesområder tydeligere.", + "speed": "Hastighet", + "speedAria": "Velg avspillingshastighet", + "speedDesc": "Velg hvor raskt klippene skal spilles av.", + "back": "Tilbake", + "empty": "Ingen forhåndsvisninger tilgjengelig", + "noPreview": "Forhåndsvisning utilgjengelig", + "seekAria": "Flytt {{camera}}-avspilleren til {{time}}", + "filter": "Filter", + "filterDesc": "Velg områder for å kun vise klipp med bevegelse i disse sonene.", + "filterClear": "Tøm" + } } diff --git a/web/public/locales/nb-NO/views/explore.json b/web/public/locales/nb-NO/views/explore.json index a9fe5230a..6aac95d76 100644 --- a/web/public/locales/nb-NO/views/explore.json +++ b/web/public/locales/nb-NO/views/explore.json @@ -16,8 +16,8 @@ }, "downloadingModels": { "setup": { - "visionModel": "Visjonsmodell", - "visionModelFeatureExtractor": "Funksjonsekstraktor for visjonsmodell", + "visionModel": "Modell for bildegjenkjenning", + "visionModelFeatureExtractor": "Kjennetegnsuttrekker for bildegjenkjenning", "textModel": "Tekstmodell", "textTokenizer": "Tekst-tokeniserer" }, @@ -161,7 +161,8 @@ "attributes": "Klassifiseringsattributter", "title": { "label": "Tittel" - } + }, + "scoreInfo": "Score-informasjon" }, 
"itemMenu": { "viewInHistory": { @@ -212,6 +213,13 @@ "downloadCleanSnapshot": { "label": "Last ned rent stillbilde", "aria": "Last ned stillbilde uten markeringer" + }, + "debugReplay": { + "aria": "Vis dette sporede objektet i reprise for feilsøking", + "label": "Reprise for feilsøking" + }, + "more": { + "aria": "Mer" } }, "searchResult": { @@ -238,6 +246,9 @@ "confirmDelete": { "title": "Bekreft sletting", "desc": "Sletting av dette sporede objektet fjerner stillbildet, alle lagrede vektorrepresentasjoner og tilknyttede oppføringer for sporingsdetaljer. Opptak av dette objektet i Historikk-visningen vil IKKE bli slettet.

Er du sikker på at du vil fortsette?" + }, + "toast": { + "error": "Kunne ikke slette dette sporede objektet: {{errorMessage}}" } }, "noTrackedObjects": "Fant ingen sporede objekter", @@ -257,7 +268,7 @@ "createObjectMask": "Opprett objektmaske", "adjustAnnotationSettings": "Juster annoteringsinnstillinger", "scrollViewTips": "Klikk for å se de viktige øyeblikkene i dette objektets livssyklus.", - "autoTrackingTips": "Posisjonene til avgrensningsboksene vil være unøyaktige for kameraer med automatisk sporing.", + "autoTrackingTips": "Posisjonene til markeringsrammer vil være unøyaktige for kameraer med automatisk sporing.", "count": "{{first}} av {{second}}", "trackedPoint": "Sporet punkt", "lifecycleItemDesc": { @@ -287,9 +298,9 @@ }, "offset": { "label": "Annoteringsforskyvning", - "desc": "Disse dataene kommer fra kameraets deteksjonsstrøm, men legges over bilder fra opptaksstrømmen. Det er lite sannsynlig at de to strømmene er perfekt synkronisert. Som et resultat vil avgrensningsboksen og opptaket ikke stemme perfekt overens. Du kan bruke denne innstillingen til å forskyve annoteringene fremover eller bakover i tid for å tilpasse dem bedre til det innspilte opptaket.", + "desc": "Disse dataene kommer fra kameraets deteksjonsstrøm, men legges over bilder fra opptaksstrømmen. Det er lite sannsynlig at de to strømmene er perfekt synkronisert. Som et resultat vil markeringsrammen og opptaket ikke stemme perfekt overens. Du kan bruke denne innstillingen til å forskyve annoteringene fremover eller bakover i tid for å tilpasse dem bedre til det innspilte opptaket.", "millisecondsToOffset": "Antall millisekunder deteksjonsannoteringene skal forskyves med. Standard: 0", - "tips": "TIPS: Se for deg et hendelsesklipp med en person som går fra venstre mot høyre. Hvis avgrensningsboksen på tidslinjen for hendelsen konsekvent er til venstre for personen, bør verdien reduseres. 
På samme måte, hvis en person går fra venstre mot høyre og avgrensningsboksen konsekvent er foran personen, bør verdien økes.", + "tips": "Senk verdien hvis videoen ligger foran boksene og punktene på stien, og øk den hvis videoen ligger bak. Verdien kan være negativ.", "toast": { "success": "Annoteringsforskyvning for {{camera}} er lagret i konfigurasjonsfilen." } diff --git a/web/public/locales/nb-NO/views/exports.json b/web/public/locales/nb-NO/views/exports.json index 4ced2fcdc..481750f5c 100644 --- a/web/public/locales/nb-NO/views/exports.json +++ b/web/public/locales/nb-NO/views/exports.json @@ -2,7 +2,9 @@ "documentTitle": "Eksport - Frigate", "search": "Søk", "noExports": "Ingen eksporter funnet", - "deleteExport": "Slett eksport", + "deleteExport": { + "label": "Slett eksport" + }, "deleteExport.desc": "Er du sikker på at du vil slette {{exportName}}?", "editExport": { "title": "Gi nytt navn til eksport", @@ -11,13 +13,27 @@ }, "toast": { "error": { - "renameExportFailed": "Kunne ikke gi nytt navn til eksport: {{errorMessage}}" + "renameExportFailed": "Kunne ikke gi nytt navn til eksport: {{errorMessage}}", + "assignCaseFailed": "Kunne ikke oppdatere sakstilknytning: {{errorMessage}}" } }, "tooltip": { "shareExport": "Del eksport", "downloadVideo": "Last ned video", "editName": "Rediger navn", - "deleteExport": "Slett eksport" + "deleteExport": "Slett eksport", + "assignToCase": "Legg til i sak" + }, + "caseDialog": { + "nameLabel": "Saksnavn", + "descriptionLabel": "Beskrivelse", + "newCaseOption": "Opprett en ny sak", + "selectLabel": "Sak", + "description": "Velg en eksisterende sak eller opprett en ny.", + "title": "Legg til sak" + }, + "headings": { + "cases": "Saker", + "uncategorizedExports": "Eksporter uten sak" } } diff --git a/web/public/locales/nb-NO/views/faceLibrary.json b/web/public/locales/nb-NO/views/faceLibrary.json index 89cc60aa1..cf8d81e39 100644 --- a/web/public/locales/nb-NO/views/faceLibrary.json +++ 
b/web/public/locales/nb-NO/views/faceLibrary.json @@ -43,7 +43,8 @@ "updateFaceScoreFailed": "Kunne ikke oppdatere ansiktsscore: {{errorMessage}}", "addFaceLibraryFailed": "Kunne ikke angi ansiktsnavn: {{errorMessage}}", "deleteNameFailed": "Kunne ikke slette navn: {{errorMessage}}", - "renameFaceFailed": "Kunne ikke gi nytt navn til ansikt: {{errorMessage}}" + "renameFaceFailed": "Kunne ikke gi nytt navn til ansikt: {{errorMessage}}", + "reclassifyFailed": "Kunne ikke reklassifisere ansikt: {{errorMessage}}" }, "success": { "deletedFace_one": "Slettet {{count}} ansikt.", @@ -54,7 +55,8 @@ "updatedFaceScore": "Oppdaterte ansiktsscore for {{name}} ({{score}}).", "uploadedImage": "Bildet ble lastet opp.", "addFaceLibrary": "{{name}} ble lagt til i ansiktsbiblioteket!", - "renamedFace": "Nytt navn ble gitt til ansikt {{name}}" + "renamedFace": "Nytt navn ble gitt til ansikt {{name}}", + "reclassifiedFace": "Ansikt ble reklassifisert." } }, "imageEntry": { @@ -98,5 +100,7 @@ "desc_other": "Er du sikker på at du vil slette {{count}} ansikter? Denne handlingen kan ikke angres." 
}, "nofaces": "Ingen ansikter tilgjengelig", - "pixels": "{{area}}piksler" + "pixels": "{{area}}piksler", + "reclassifyFaceAs": "Reklassifiser ansikt som:", + "reclassifyFace": "Reklassifiser ansikt" } diff --git a/web/public/locales/nb-NO/views/live.json b/web/public/locales/nb-NO/views/live.json index d2a87af31..be891769e 100644 --- a/web/public/locales/nb-NO/views/live.json +++ b/web/public/locales/nb-NO/views/live.json @@ -1,5 +1,7 @@ { - "documentTitle": "Direkte - Frigate", + "documentTitle": { + "default": "Direkte - Frigate" + }, "lowBandwidthMode": "Lav båndbreddemodus", "documentTitle.withCamera": "{{camera}} - Direkte - Frigate", "ptz": { @@ -7,7 +9,8 @@ "clickMove": { "label": "Klikk i rammen for å sentrere kameraet", "enable": "Aktiver klikk for å flytte", - "disable": "Deaktiver klikk for å flytte" + "disable": "Deaktiver klikk for å flytte", + "enableWithZoom": "Aktiver \"klikk for å flytte\" / \"dra for å zoome\"" }, "left": { "label": "Flytt PTZ-kameraet til venstre" diff --git a/web/public/locales/nb-NO/views/settings.json b/web/public/locales/nb-NO/views/settings.json index de3094649..3b0f3b4f0 100644 --- a/web/public/locales/nb-NO/views/settings.json +++ b/web/public/locales/nb-NO/views/settings.json @@ -12,7 +12,11 @@ "notifications": "Innstillinger for meldingsvarsler - Frigate", "enrichments": "Innstillinger for utvidelser - Frigate", "cameraManagement": "Administrer kameraer - Frigate", - "cameraReview": "Innstillinger for kamerainspeksjon - Frigate" + "cameraReview": "Innstillinger for kamerainspeksjon - Frigate", + "globalConfig": "Global konfigurasjon - Frigate", + "cameraConfig": "Kamerakonfigurasjon - Frigate", + "profiles": "Profiler - Frigate", + "maintenance": "Vedlikehold - Frigate" }, "menu": { "classification": "Klassifisering", @@ -28,7 +32,66 @@ "triggers": "Utløsere", "cameraManagement": "Administrasjon", "cameraReview": "Inspeksjon", - "roles": "Roller" + "roles": "Roller", + "profiles": "Profiler", + "cameraFaceRecognition": 
"Ansiktsgjenkjenning", + "integrationFaceRecognition": "Ansiktsgjenkjenning", + "systemAuthentication": "Autentisering", + "cameraMotion": "Bevegelsesdeteksjon", + "globalMotion": "Bevegelsesdeteksjon", + "systemUi": "Brukergrensesnitt", + "cameraUi": "Brukergrensesnitt for kamera", + "systemDatabase": "Database", + "systemDetectionModel": "Deteksjonsmodell", + "cameraLivePlayback": "Direkteavspilling", + "globalLivePlayback": "Direkteavspilling", + "cameraFfmpeg": "FFmpeg", + "globalFfmpeg": "FFmpeg", + "systemFfmpeg": "FFmpeg", + "cameraBirdseye": "Fugleperspektiv", + "systemBirdseye": "Fugleperspektiv", + "integrationGenerativeAi": "Generativ AI", + "general": "Generelt", + "globalConfig": "Global konfigurasjon", + "systemGo2rtcStreams": "go2rtc-strømmer", + "uiSettings": "Innstillinger for brukergrensesnitt", + "cameraConfigReview": "Inspeksjon", + "globalReview": "Inspeksjon", + "integrations": "Integrasjoner", + "cameraLpr": "Kjennemerke-gjenkjenning", + "integrationLpr": "Kjennemerke-gjenkjenning", + "systemLogging": "Logging", + "cameraAudioEvents": "Lydhendelser", + "globalAudioEvents": "Lydhendelser", + "cameraAudioTranscription": "Lydtranskripsjon", + "integrationAudioTranscription": "Lydtranskripsjon", + "systemDetectorHardware": "Maskinvare for detektor", + "mediaSync": "Mediesynkronisering", + "cameraNotifications": "Meldingsvarsler", + "systemEnvironmentVariables": "Miljøvariabler", + "cameraMqttConfig": "MQTT", + "systemMqtt": "MQTT", + "cameraMqtt": "MQTT for kamera", + "systemNetworking": "Nettverk", + "cameraDetect": "Objektdeteksjon", + "globalDetect": "Objektdeteksjon", + "cameraObjects": "Objekter", + "globalObjects": "Objekter", + "integrationObjectClassification": "Objektklassifisering", + "cameraOnvif": "ONVIF", + "cameraRecording": "Opptak", + "globalRecording": "Opptak", + "systemProxy": "Proxy", + "regionGrid": "Regionrutenett", + "integrationSemanticSearch": "Semantisk søk", + "cameraSnapshots": "Stillbilder", + "globalSnapshots": 
"Stillbilde", + "cameraTimestampStyle": "Stil for tidsstempel", + "globalTimestampStyle": "Stil for tidsstempel", + "system": "System", + "systemTelemetry": "Telemetri", + "systemTls": "TLS", + "maintenance": "Vedlikehold" }, "dialog": { "unsavedChanges": { @@ -282,6 +345,10 @@ "zone": "sone", "motion_mask": "bevegelsesmaske", "object_mask": "objektmaske" + }, + "revertOverride": { + "title": "Tilbakestill til basiskonfigurasjon", + "desc": "Dette vil fjerne profiloverstyringen for {{type}} {{name}} og tilbakestille til basiskonfigurasjonen." } }, "inertia": { @@ -298,6 +365,17 @@ "error": { "mustBeGreaterOrEqualTo": "Terskelverdi for hastighet må være større enn eller lik 0.1." } + }, + "id": { + "error": { + "alreadyExists": "En maske med denne ID-en eksisterer allerede for dette kameraet.", + "mustNotBeEmpty": "ID kan ikke være tom." + } + }, + "name": { + "error": { + "mustNotBeEmpty": "Navn kan ikke være tomt." + } } }, "zones": { @@ -351,6 +429,10 @@ }, "toast": { "success": "Sone ({{zoneName}}) er lagret." + }, + "enabled": { + "title": "Aktivert", + "description": "Om denne sonen er aktiv og aktivert i konfigurasjonsfilen. Hvis deaktivert, kan den ikke aktiveres via MQTT. Deaktiverte soner ignoreres ved kjøring." } }, "motionMasks": { @@ -379,6 +461,12 @@ "title": "{{polygonName}} er lagret.", "noName": "Bevegelsesmasken er lagret." } + }, + "defaultName": "Bevegelsesmaske {{number}}", + "name": { + "description": "Et valgfritt visningsnavn for denne bevegelsesmasken.", + "title": "Navn", + "placeholder": "Skriv inn et navn..." } }, "objectMasks": { @@ -404,11 +492,26 @@ "title": "{{polygonName}} er lagret.", "noName": "Objektmasken er lagret." } + }, + "name": { + "description": "Et valgfritt visningsnavn for denne objektmasken.", + "title": "Navn", + "placeholder": "Skriv inn et navn..." 
} }, "restart_required": "Omstart påkrevd (masker/soner endret)", "motionMaskLabel": "Bevegelsesmaske {{number}}", - "objectMaskLabel": "Objektmaske {{number}} ({{label}})" + "objectMaskLabel": "Objektmaske {{number}}", + "profileBase": "(basis)", + "profileOverride": "(overstyring)", + "masks": { + "enabled": { + "title": "Aktivert", + "description": "Om denne masken er aktivert i konfigurasjonsfilen. Hvis deaktivert, kan den ikke aktiveres via MQTT. Deaktiverte masker ignoreres ved kjøring." + } + }, + "disabledInConfig": "Elementet er deaktivert i konfigurasjonsfilen", + "addDisabledProfile": "Legg til i basiskonfigurasjonen først, og overstyr deretter i profilen" }, "motionDetectionTuner": { "title": "Finjustering av bevegelsesdeteksjon", @@ -438,10 +541,10 @@ "objectList": "Objektliste", "noObjects": "Ingen objekter", "boundingBoxes": { - "title": "Avgrensningsbokser", - "desc": "Vis omsluttende bokser rundt sporede objekter", + "title": "Markeringsrammer", + "desc": "Vis markeringsrammer rundt sporede objekter", "colors": { - "label": "Farge på omsluttende bokser for objekt", + "label": "Farge på markeringsrammer for objekt", "info": "
  • Ved oppstart vil forskjellige farger bli tildelt hver objekttype
  • En mørkeblå tynn linje indikerer at objektet ikke er detektert på dette tidspunktet
  • En grå tynn linje indikerer at objektet er detektert som stasjonært
  • En tykk linje indikerer at objektet er under autosporing (når aktivert)
  • " } }, @@ -700,14 +803,21 @@ "snapshots": "Stillbilder", "cleanCopySnapshots": "clean_copy-stillbilder" }, - "cleanCopyWarning": "Noen kameraer har stillbilder aktivert, men ren kopi er deaktivert. Du må aktivere clean_copy i stillbilde-konfigurasjonen for å kunne sende bilder fra disse kameraene til Frigate+." + "cleanCopyWarning": "Noen kameraer har stillbilder deaktivert" }, "toast": { "success": "Frigate+ innstillingene er lagret. Start Frigate på nytt for å aktivere endringene.", "error": "Kunne ikke lagre konfigurasjonsendringer: {{errorMessage}}" }, "restart_required": "Omstart påkrevd (Frigate+ modell endret)", - "unsavedChanges": "Ulagrede endringer for Frigate+ innstillinger" + "unsavedChanges": "Ulagrede endringer for Frigate+ innstillinger", + "cardTitles": { + "otherModels": "Andre modeller", + "api": "API", + "currentModel": "Gjeldende modell", + "configuration": "Konfigurasjon" + }, + "description": "Frigate+ er en abonnementstjeneste som gir tilgang til tilleggsfunksjoner og kapasiteter for Frigate-instansen din, inkludert muligheten til å bruke egendefinerte objektdeteksjonsmodeller trent på dine egne data. Du kan administrere innstillingene for Frigate+-modellen din her." }, "enrichments": { "title": "Innstillinger for utvidelser", @@ -1074,7 +1184,7 @@ }, "streamDetails": "Strømdetaljer", "probing": "Test kamera...", - "retry": "Prøv på nytt", + "retry": "Prøv igjen", "testing": { "probingMetadata": "Sjekker metadata for kamera...", "fetchingSnapshot": "Henter stillbilde fra kamera..." @@ -1243,7 +1353,12 @@ "backToSettings": "Tilbake til kamerainnstillinger", "streams": { "title": "Aktiver / Deaktiver kameraer", - "desc": "Midlertidig deaktiver et kamera til Frigate startes på nytt. Deaktivering av et kamera stopper Frigates behandling av dette kameraets strømmer fullstendig. Deteksjon, opptak og feilsøking vil være utilgjengelig.
    Merk: Dette deaktiverer ikke go2rtc-restrømming." + "desc": "Midlertidig deaktiver et kamera til Frigate startes på nytt. Deaktivering av et kamera stopper Frigates behandling av dette kameraets strømmer fullstendig. Deteksjon, opptak og feilsøking vil være utilgjengelig.
    Merk: Dette deaktiverer ikke go2rtc-restrømming.", + "disableDesc": "Aktiver et kamera som for øyeblikket ikke er synlig i grensesnittet og deaktivert i konfigurasjonen. En omstart av Frigate kreves etter aktivering.", + "enableSuccess": "Aktiverte {{cameraName}} i konfigurasjonen. Start Frigate på nytt for å ta i bruk endringene.", + "enableLabel": "Aktiverte kameraer", + "enableDesc": "Deaktiver et aktivert kamera midlertidig frem til Frigate starter på nytt. Deaktivering av et kamera stopper all prosessering av kameraets strømmer. Deteksjon, opptak og feilsøking vil være utilgjengelig.
    Merk: Dette deaktiverer ikke videreformidling (restream) i go2rtc.", + "disableLabel": "Deaktiverte kameraer" }, "cameraConfig": { "add": "Legg til kamera", @@ -1273,7 +1388,27 @@ "toast": { "success": "Kamera {{cameraName}} ble lagret" } - } + }, + "profiles": { + "enabled": "Aktivert", + "inherit": "Arv", + "disabled": "Deaktivert", + "description": "Konfigurer hvilke kameraer som er aktivert eller deaktivert når en profil aktiveres. Kameraer satt til \"Arv\" beholder sin opprinnelige status.", + "title": "Profiloverstyringer for kamera", + "selectLabel": "Velg profil" + }, + "deleteCameraDialog": { + "confirmTitle": "Er du sikker?", + "success": "Kamera {{cameraName}} ble slettet", + "error": "Kunne ikke slette kamera {{cameraName}}", + "title": "Slett kamera", + "deleteExports": "Slett også eksporterte filer for dette kameraet", + "confirmButton": "Slett permanent", + "confirmWarning": "Sletting av {{cameraName}} kan ikke angres.", + "description": "Sletting av et kamera vil fjerne alle opptak, sporede objekter og konfigurasjon for dette kameraet permanent. Eventuelle go2rtc-strømmer tilknyttet kameraet må eventuelt fjernes manuelt.", + "selectPlaceholder": "Velg kamera..." + }, + "deleteCamera": "Slett kamera" }, "cameraReview": { "title": "Innstillinger for kamerainspeksjon", @@ -1311,5 +1446,453 @@ "success": "Konfigurasjonen for inspeksjonsklassifisering er lagret. Start Frigate på nytt for å aktivere endringer." 
} } + }, + "configForm": { + "reviewLabels": { + "summary": "{{count}} etiketter valgt", + "empty": "Ingen etiketter tilgjengelig" + }, + "audioLabels": { + "summary": "{{count}} lydetiketter valgt", + "empty": "Ingen lydetiketter tilgjengelig" + }, + "objectLabels": { + "summary": "{{count}} objekttyper valgt", + "empty": "Ingen objektetiketter tilgjengelig" + }, + "inputRoles": { + "summary": "{{count}} roller valgt", + "options": { + "detect": "Deteksjon", + "audio": "Lyd", + "record": "Opptak" + }, + "empty": "Ingen roller tilgjengelig" + }, + "zoneNames": { + "summary": "{{count}} valgt", + "empty": "Ingen soner tilgjengelig" + }, + "filters": { + "objectFieldLabel": "{{field}} for {{label}}" + }, + "sections": { + "face_recognition": "Ansiktsgjenkjenning", + "auth": "Autentisering", + "motion": "Bevegelse", + "database": "Database", + "detect": "Deteksjon", + "detectors": "Detektorer", + "live": "Direktevisning", + "ffmpeg": "FFmpeg", + "birdseye": "Fugleperspektiv", + "genai": "GenAI", + "go2rtc": "go2rtc", + "review": "Inspeksjon", + "lpr": "Kjennemerke-gjenkjenning", + "audio": "Lyd", + "masksAndZones": "Masker / Soner", + "notifications": "Meldingsvarsler", + "model": "Modell", + "mqtt": "MQTT", + "objects": "Objekter", + "record": "Opptak", + "proxy": "Proxy", + "semantic_search": "Semantisk søk", + "snapshots": "Stillbilder", + "telemetry": "Telemetri", + "timestamp_style": "Tidsstempler", + "tls": "TLS" + }, + "ffmpegArgs": { + "useGlobalSetting": "Arv fra global innstilling", + "inherit": "Arv fra kamerainnstilling", + "preset": "Forhåndsinnstilling", + "presetLabels": { + "preset-http-reolink": "HTTP - Reolink-kameraer", + "preset-http-jpeg-generic": "HTTP JPEG (Generisk)", + "preset-http-mjpeg-generic": "HTTP MJPEG (Generisk)", + "preset-intel-qsv-h264": "Intel QuickSync (H.264)", + "preset-intel-qsv-h265": "Intel QuickSync (H.265)", + "preset-nvidia": "NVIDIA GPU", + "preset-jetson-h264": "NVIDIA Jetson (H.264)", + "preset-jetson-h265": "NVIDIA 
Jetson (H.265)", + "preset-record-jpeg": "Opptak - JPEG-kameraer", + "preset-record-mjpeg": "Opptak - MJPEG-kameraer", + "preset-record-ubiquiti": "Opptak - Ubiquiti-kameraer", + "preset-record-generic-audio-copy": "Opptak (Generisk + kopier lyd)", + "preset-record-generic-audio-aac": "Opptak (Generisk + lyd til AAC)", + "preset-record-generic": "Opptak (Generisk, uten lyd)", + "preset-rpi-64-h264": "Raspberry Pi (H.264)", + "preset-rpi-64-h265": "Raspberry Pi (H.265)", + "preset-rkmpp": "Rockchip RKMPP", + "preset-rtmp-generic": "RTMP (Generisk)", + "preset-rtsp-blue-iris": "RTSP - Blue Iris", + "preset-rtsp-udp": "RTSP - UDP", + "preset-rtsp-restream": "RTSP - Videreformidling fra go2rtc", + "preset-rtsp-restream-low-latency": "RTSP - Videreformidling fra go2rtc (lav forsinkelse)", + "preset-rtsp-generic": "RTSP (Generisk)", + "preset-vaapi": "VAAPI (Intel/AMD GPU)" + }, + "none": "Ingen", + "manual": "Manuelle argumenter", + "manualPlaceholder": "Skriv inn FFmpeg-argumenter", + "selectPreset": "Velg forhåndsinnstilling" + }, + "advancedCount": "Avansert ({{count}})", + "advancedSettingsCount": "Avanserte innstillinger ({{count}})", + "timezone": { + "defaultOption": "Bruk nettleserens tidssone" + }, + "tabs": { + "sharedDefaults": "Delte standardverdier", + "integrations": "Integrasjoner", + "system": "System" + }, + "detectors": { + "keyDuplicate": "Detektornavn eksisterer allerede.", + "keyRequired": "Detektornavn er påkrevd.", + "none": "Ingen detektor-instanser er konfigurert.", + "noSchema": "Ingen detektorskjemaer er tilgjengelige.", + "title": "Innstillinger for detektorer", + "singleType": "Kun én {{type}}-detektor er tillatt.", + "add": "Legg til detektor", + "addCustomKey": "Legg til egendefinert nøkkel" + }, + "global": { + "description": "Disse innstillingene gjelder for alle kameraer med mindre de overstyres i kameraspesifikke innstillinger.", + "title": "Globale innstillinger" + }, + "camera": { + "description": "Disse innstillingene gjelder kun 
for dette kameraet og overstyrer de globale innstillingene.", + "noCameras": "Ingen kameraer tilgjengelig", + "title": "Kamerainnstillinger" + }, + "genaiRoles": { + "options": { + "embeddings": "Vektorrepresentasjoner", + "tools": "Verktøy", + "vision": "Bildegjenkjenning" + } + }, + "additionalProperties": { + "remove": "Fjern", + "keyPlaceholder": "Ny nøkkel", + "keyLabel": "Nøkkel", + "valueLabel": "Verdi" + }, + "roleMap": { + "remove": "Fjern", + "groupsLabel": "Grupper", + "empty": "Ingen rolletilordninger", + "addMapping": "Legg til rolletilordning", + "roleLabel": "Rolle" + }, + "semanticSearchModel": { + "genaiProviders": "GenAI-leverandører", + "builtIn": "Innebygde modeller", + "placeholder": "Velg modell…" + }, + "motion": { + "title": "Innstillinger for bevegelse" + }, + "detect": { + "title": "Innstillinger for deteksjon" + }, + "live": { + "title": "Innstillinger for direktevisning" + }, + "review": { + "title": "Innstillinger for inspeksjon" + }, + "audio": { + "title": "Innstillinger for lyd" + }, + "objects": { + "title": "Innstillinger for objekter" + }, + "record": { + "title": "Innstillinger for opptak" + }, + "snapshots": { + "title": "Innstillinger for stillbilder" + }, + "timestamp_style": { + "title": "Innstillinger for tidsstempling" + }, + "notifications": { + "title": "Innstillinger for varsler" + }, + "restartRequiredFooter": "Konfigurasjon endret - Omstart påkrevd", + "addCustomLabel": "Legg til egendefinert etikett...", + "restartRequiredField": "Omstart påkrevd", + "cameraInputs": { + "itemTitle": "Strøm {{index}}" + }, + "searchPlaceholder": "Søk...", + "showAdvanced": "Vis avanserte innstillinger" + }, + "button": { + "overriddenBaseConfigTooltip": "{{profile}}-profilen overstyrer konfigurasjonsinnstillinger i denne seksjonen", + "overriddenGlobalTooltip": "Dette kameraet overstyrer globale konfigurasjonsinnstillinger i denne seksjonen", + "overriddenBaseConfig": "Overstyrt (Basiskonfigurasjon)", + "overriddenGlobal": "Overstyrt 
(Global)" + }, + "detectionModel": { + "plusActive": { + "title": "Administrasjon av Frigate+-modell", + "description": "Denne instansen kjører en Frigate+-modell. Velg eller endre modell i Frigate+-innstillingene.", + "goToFrigatePlus": "Gå til Frigate+-innstillinger", + "label": "Kilde for gjeldende modell", + "showModelForm": "Konfigurer en modell manuelt" + } + }, + "go2rtcStreams": { + "description": "Administrer go2rtc-strømkonfigurasjoner for videreformidling av kamera-strømmer. Hver strøm har ett navn og én eller flere kilde-URL-er.", + "ffmpeg": { + "hardwareAuto": "Automatisk maskinvareakselerasjon", + "useFfmpegModule": "Bruk kompatibilitetsmodus (ffmpeg)", + "audioExclude": "Ekskluder", + "videoExclude": "Ekskluder", + "hardwareNone": "Ingen maskinvareakselerasjon", + "audioCopy": "Kopier", + "videoCopy": "Kopier", + "audio": "Lyd", + "hardware": "Maskinvareakselerasjon", + "videoH264": "Transkod til H.264", + "videoH265": "Transkod til H.265", + "audioMp3": "Transkod til MP3", + "audioOpus": "Transkod til Opus", + "audioPcm": "Transkod til PCM", + "audioPcma": "Transkod til PCM A-law", + "audioPcmu": "Transkod til PCM μ-law", + "audioAac": "Transkod til AAC", + "video": "Video" + }, + "validation": { + "nameDuplicate": "En strøm med dette navnet eksisterer allerede", + "urlRequired": "Minst én URL er påkrevd", + "nameRequired": "Strømnavn er påkrevd", + "nameInvalid": "Strømnavn kan kun inneholde bokstaver, tall, understrek og bindestrek" + }, + "deleteStreamConfirm": "Er du sikker på at du vil slette strømmen \"{{streamName}}\"? Kameraer som refererer til denne strømmen kan slutte å fungere.", + "streamUrlPlaceholder": "f.eks. rtsp://bruker:passord@192.168.1.100/stream", + "streamNamePlaceholder": "f.eks. ytterdor", + "renameStream": "Gi strøm nytt navn", + "title": "go2rtc-strømmer", + "noStreams": "Ingen go2rtc-strømmer er konfigurert. 
Legg til en strøm for å komme i gang.", + "addStream": "Legg til strøm", + "addUrl": "Legg til URL", + "newStreamName": "Nytt strømnavn", + "addStreamDesc": "Skriv inn et navn for den nye strømmen. Dette navnet vil bli brukt til å referere til strømmen i kamerakonfigurasjonen din.", + "renameStreamDesc": "Skriv inn et nytt navn for denne strømmen. Endring av navn kan ødelegge for kameraer eller andre strømmer som refererer til den ved navn.", + "deleteStream": "Slett strøm", + "streamName": "Strømnavn" + }, + "profiles": { + "active": "Aktiv", + "activeProfile": "Aktiv profil", + "enableSwitch": "Aktiver profiler", + "baseConfig": "Basiskonfigurasjon", + "error": { + "alreadyExists": "En profil med denne ID-en eksisterer allerede", + "mustNotContainPeriod": "Kan ikke inneholde punktum", + "mustBeAtLeastTwoCharacters": "Må være minst 2 tegn" + }, + "nameDuplicate": "En profil med dette navnet eksisterer allerede", + "profileNamePlaceholder": "f.eks. Sikret, Borte, Nattmodus", + "deleteSectionConfirm": "Fjern {{section}}-overstyringene for profil {{profile}} på {{camera}}?", + "removeOverride": "Fjern profiloverstyring", + "deleteSectionSuccess": "Fjernet {{section}}-overstyringer for {{profile}}", + "renameProfile": "Gi profil nytt navn", + "noActiveProfile": "Ingen aktiv profil", + "noOverrides": "Ingen overstyringer", + "noProfiles": "Ingen profiler er definert.", + "profileIdDescription": "Intern identifikator brukt i konfigurasjon og automatiseringer", + "columnCamera": "Kamera", + "nameInvalid": "Kun små bokstaver, tall og understrek er tillatt", + "activateFailed": "Kunne ikke aktivere profil", + "addProfile": "Legg til profil", + "newProfile": "Ny profil", + "activated": "Profil '{{profile}}' aktivert", + "deactivated": "Profil deaktivert", + "createSuccess": "Profilen '{{profile}}' ble opprettet", + "deleteSuccess": "Profilen '{{profile}}' ble slettet", + "renameSuccess": "Profilen har fått nytt navn: '{{profile}}'", + "title": "Profiler", + 
"enabledDescription": "Profiler er aktivert. Opprett en ny profil nedenfor, naviger til en seksjon for kamerakonfigurasjon for å gjøre endringer, og lagre for at endringene skal tre i kraft.", + "disabledDescription": "Profiler lar deg definere navngitte sett med overstyringer for kamerakonfigurasjon (f.eks. sikret, borte, natt) som kan aktiveres ved behov.", + "profileIdLabel": "Profil-ID", + "friendlyNameLabel": "Profilnavn", + "columnOverrides": "Profiloverstyringer", + "deleteProfile": "Slett profil", + "deleteProfileConfirm": "Slett profilen \"{{profile}}\" fra alle kameraer? Dette kan ikke angres.", + "deleteSection": "Slett seksjonsoverstyringer", + "cameraCount_one": "{{count}} kamera", + "cameraCount_other": "{{count}} kameraer" + }, + "configMessages": { + "review": { + "allNonAlertDetections": "All aktivitet som ikke er et varsel, vil bli inkludert som deteksjoner.", + "detectDisabled": "Objektdeteksjon er deaktivert. Inspeksjonselementer krever detekterte objekter for å kategorisere varsler og deteksjoner.", + "recordDisabled": "Opptak er deaktivert, inspeksjonselementer vil ikke bli generert." + }, + "detectors": { + "mixedTypesSuggestion": "Alle detektorer må bruke samme type. Fjern eksisterende detektorer eller velg {{type}}.", + "mixedTypes": "Alle detektorer må bruke samme type. Fjern eksisterende detektorer for å bruke en annen type." + }, + "faceRecognition": { + "globalDisabled": "Ansiktsgjenkjenning er ikke aktivert på globalt nivå. Aktiver det i globale innstillinger for at ansiktsgjenkjenning på kameranivå skal fungere.", + "personNotTracked": "Ansiktsgjenkjenning krever at objektet 'person' spores. Sørg for at 'person' er i listen over objektsporing." + }, + "detect": { + "fpsGreaterThanFive": "Det anbefales ikke å sette FPS for deteksjon høyere enn 5." + }, + "birdseye": { + "objectsModeDetectDisabled": "Fugleperspektiv er satt til 'objekter'-modus, men objektdeteksjon er deaktivert for dette kameraet. 
Kameraet vil ikke vises i Fugleperspektiv." + }, + "audio": { + "noAudioRole": "Ingen strømmer har definert lydrolle. Du må aktivere lydrollen for at lyddeteksjon skal fungere." + }, + "record": { + "noRecordRole": "Ingen strømmer har definert opptaksrolle. Opptak vil ikke fungere." + }, + "audioTranscription": { + "audioDetectionDisabled": "Lyddeteksjon er ikke aktivert for dette kameraet. Lydtranskripsjon krever at lyddeteksjon er aktiv." + }, + "snapshots": { + "detectDisabled": "Objektdeteksjon er deaktivert. Stillbilder genereres fra sporede objekter og vil ikke bli opprettet." + }, + "lpr": { + "globalDisabled": "Identifisering av kjennemerker er ikke aktivert på globalt nivå. Aktiver det i globale innstillinger for at identifisering på kameranivå skal fungere.", + "vehicleNotTracked": "Identifisering av kjennemerker krever at 'bil' eller 'motorsykkel' spores." + } + }, + "maintenance": { + "sync": { + "allMedia": "Alle medier", + "resultsFields": { + "aborted": "Avbrutt. Sletting ville overskredet sikkerhetsterskelen.", + "error": "Feil", + "filesChecked": "Filer kontrollert", + "orphansFound": "Foreldreløse filer funnet", + "orphansDeleted": "Foreldreløse filer slettet", + "totals": "Totalt" + }, + "verbose": "Detaljert (Verbose)", + "exports": "Eksporterte filer", + "alreadyRunning": "En synkroniseringsjobb kjører allerede", + "errorLabel": "Feil", + "dryRunDisabled": "Filer vil bli slettet", + "previews": "Forhåndsvisninger", + "desc": "Frigate vil periodisk rydde opp i mediefiler etter en fast plan i samsvar med din konfigurasjon for bevaring. Det er normalt at noen foreldreløse filer oppstår mens Frigate kjører. 
Bruk denne funksjonen til å fjerne foreldreløse mediefiler fra disk som ikke lenger er referert til i databasen.", + "status": { + "completed": "Fullført", + "queued": "I kø", + "running": "Kjører", + "notRunning": "Kjører ikke", + "failed": "Mislyktes" + }, + "forceDesc": "Gå utenom sikkerhetsterskelen og fullfør synkronisering selv om mer enn 50 % av filene vil bli slettet.", + "dryRunEnabled": "Ingen filer vil bli slettet", + "jobId": "Jobb-ID", + "error": "Kunne ikke starte synkronisering", + "title": "Mediesynkronisering", + "started": "Mediesynkronisering har startet.", + "mediaTypes": "Medietyper", + "event_thumbnails": "Miniatyrbilder av sporede objekter", + "review_thumbnails": "Miniatyrbilder for inspeksjon", + "recordings": "Opptak", + "results": "Resultater", + "verboseDesc": "Skriv en fullstendig liste over foreldreløse filer til disk for inspeksjon.", + "endTime": "Sluttidspunkt", + "event_snapshots": "Stillbilder av sporede objekter", + "start": "Start synkronisering", + "startTime": "Starttidspunkt", + "currentStatus": "Status", + "statusLabel": "Status", + "running": "Synkronisering kjører...", + "inProgress": "Synkronisering pågår. Denne siden er deaktivert.", + "dryRun": "Testkjøring (Dry Run)", + "force": "Tving" + }, + "regionGrid": { + "clearConfirmDesc": "Det anbefales ikke å tømme regionrutenettet med mindre du nylig har endret størrelsen på detektormodellen eller har endret kameraets fysiske posisjon og har problemer med objektsporing. Rutenettet vil automatisk bli bygget opp igjen over tid etter hvert som objekter spores. 
En omstart av Frigate kreves for at endringene skal tre i kraft.", + "clearError": "Kunne ikke tømme regionrutenettet", + "restartRequired": "Omstart kreves for at endringer i regionrutenettet skal tre i kraft", + "title": "Regionrutenett", + "clearSuccess": "Regionrutenettet ble tømt", + "desc": "Regionrutenettet er en optimalisering som lærer hvor objekter av ulike størrelser vanligvis dukker opp i hvert kameras synsfelt. Frigate bruker disse dataene til å dimensjonere deteksjonsregioner effektivt. Rutenettet bygges automatisk over tid basert på data fra sporede objekter.", + "clear": "Tøm regionrutenett", + "clearConfirmTitle": "Tøm regionrutenett" + }, + "title": "Vedlikehold" + }, + "onvif": { + "profileAuto": "Auto", + "profileLoading": "Laster profiler..." + }, + "confirmReset": "Bekreft nullstilling", + "resetToDefaultDescription": "Dette vil nullstille alle innstillinger i denne seksjonen til standardverdiene. Denne handlingen kan ikke angres.", + "resetToGlobalDescription": "Dette vil nullstille innstillingene i denne seksjonen til de globale standardverdiene. Denne handlingen kan ikke angres.", + "unsavedChanges": "Du har ulagrede endringer", + "saveAllPreview": { + "title": "Endringer som skal lagres", + "field": { + "label": "Felt" + }, + "scope": { + "global": "Global", + "camera": "Kamera: {{cameraName}}", + "label": "Omfang" + }, + "empty": "Ingen ventende endringer.", + "value": { + "reset": "Nullstill", + "label": "Ny verdi" + }, + "profile": { + "label": "Profil" + }, + "triggerLabel": "Se over ventende endringer" + }, + "globalConfig": { + "title": "Global konfigurasjon", + "toast": { + "success": "Globale innstillinger ble lagret", + "error": "Kunne ikke lagre globale innstillinger", + "validationError": "Validering feilet" + }, + "description": "Konfigurer globale innstillinger som gjelder for alle kameraer med mindre de overstyres." + }, + "toast": { + "success": "Innstillinger lagret", + "successRestartRequired": "Innstillinger lagret. 
Start Frigate på nytt for å aktivere endringene.", + "applied": "Innstillinger tatt i bruk", + "saveAllFailure": "Kunne ikke lagre alle seksjoner.", + "error": "Kunne ikke lagre innstillinger", + "resetError": "Kunne ikke nullstille innstillinger", + "resetSuccess": "Nullstilt til globale standardverdier", + "validationError": "Validering feilet: {{message}}", + "saveAllPartial_one": "{{successCount}} av {{totalCount}} seksjoner lagret. {{failCount}} feilet.", + "saveAllPartial_other": "{{successCount}} av {{totalCount}} seksjoner lagret. {{failCount}} feilet.", + "saveAllSuccess_one": "{{count}} seksjon ble lagret.", + "saveAllSuccess_other": "{{count}} seksjoner ble lagret." + }, + "cameraConfig": { + "toast": { + "success": "Kamerainnstillinger ble lagret", + "error": "Kunne ikke lagre kamerainnstillinger" + }, + "title": "Kamerakonfigurasjon", + "description": "Konfigurer innstillinger for enkeltkameraer. Innstillingene overstyrer globale standardverdier.", + "resetToGlobal": "Nullstill til globale verdier", + "overriddenBadge": "Overstyrt" + }, + "timestampPosition": { + "br": "Nederst til høyre", + "bl": "Nederst til venstre", + "tr": "Øverst til høyre", + "tl": "Øverst til venstre" } } diff --git a/web/public/locales/nb-NO/views/system.json b/web/public/locales/nb-NO/views/system.json index d04cefd93..374e6457b 100644 --- a/web/public/locales/nb-NO/views/system.json +++ b/web/public/locales/nb-NO/views/system.json @@ -5,7 +5,8 @@ "logs": { "frigate": "Frigate-logger - Frigate", "go2rtc": "Go2RTC-logger - Frigate", - "nginx": "Nginx-logger - Frigate" + "nginx": "Nginx-logger - Frigate", + "websocket": "Meldingslogger - Frigate" }, "general": "Generell statistikk - Frigate", "enrichments": "Statistikk for utvidelser - Frigate" @@ -31,7 +32,34 @@ "download": { "label": "Last ned logger" }, - "tips": "Logger strømmer fra serveren" + "tips": "Logger strømmer fra serveren", + "websocket": { + "label": "Meldinger", + "pause": "Pause", + "resume": "Gjenoppta", + 
"clear": "Tøm", + "filter": { + "all": "Alle emner", + "topics": "Emner", + "events": "Hendelser", + "reviews": "Inspeksjoner", + "classification": "Klassifisering", + "face_recognition": "Ansiktsgjenkjenning", + "lpr": "LPR", + "camera_activity": "Kameraaktivitet", + "system": "System", + "camera": "Kamera", + "all_cameras": "Alle kameraer", + "cameras_count_one": "{{count}} kamera", + "cameras_count_other": "{{count}} kameraer" + }, + "empty": "Ingen meldinger fanget opp ennå", + "count_one": "{{count}} melding", + "count_other": "{{count}} meldinger", + "expanded": { + "payload": "Payload" + } + } }, "general": { "title": "Generelt", @@ -79,7 +107,10 @@ "title": "Til info om Intel GPU-statistikk", "message": "GPU statistikk ikke tilgjengelig", "description": "Dette er en kjent feil i Intels verktøy for rapportering av GPU-statistikk (intel_gpu_top), der verktøyet slutter å fungere og gjentatte ganger viser 0 % GPU-bruk, selv om maskinvareakselerasjon og objektdeteksjon kjører korrekt på (i)GPU-en. Dette er ikke en feil i Frigate. Du kan starte verten på nytt for å løse problemet midlertidig, og for å bekrefte at GPU-en fungerer som den skal. Dette påvirker ikke ytelsen." - } + }, + "gpuTemperature": "GPU-temperatur", + "npuTemperature": "NPU-temperatur", + "gpuCompute": "GPU-prosessering / enkoding" }, "otherProcesses": { "title": "Andre prosesser", @@ -116,7 +147,11 @@ "title": "Lagring", "shm": { "title": "SHM (delt minne) allokering", - "warning": "Den nåværende SHM-størrelsen på {{total}} MB er for liten. Øk den til minst {{min_shm}} MB." + "warning": "Den nåværende SHM-størrelsen på {{total}} MB er for liten. Øk den til minst {{min_shm}} MB.", + "frameLifetime": { + "title": "Bildelevetid", + "description": "Hvert kamera har {{frames}} bildeplasser i delt minne (SHM). Ved høyeste bildefrekvens er hvert bilde tilgjengelig i omtrent {{lifetime}} s før det overskrives." 
+ } } }, "cameras": { @@ -154,7 +189,8 @@ "cameraFfmpeg": "{{camName}} FFmpeg", "overallDetectionsPerSecond": "totale deteksjoner per sekund", "overallSkippedDetectionsPerSecond": "totalt forkastede deteksjoner per sekund", - "overallFramesPerSecond": "totalt bilder per sekund" + "overallFramesPerSecond": "totalt bilder per sekund", + "cameraGpu": "{{camName}} GPU" }, "toast": { "success": { @@ -163,6 +199,17 @@ "error": { "unableToProbeCamera": "Kunne ikke hente informasjon fra kamera: {{errorMessage}}" } + }, + "connectionQuality": { + "title": "Tilkoblingskvalitet", + "excellent": "Utmerket", + "fair": "Grei", + "poor": "Dårlig", + "unusable": "Ubrukelig", + "fps": "BPS", + "expectedFps": "Forventet BPS", + "reconnectsLastHour": "Gjentatte tilkoblinger (siste time)", + "stallsLastHour": "Avbrudd (siste time)" } }, "enrichments": { @@ -203,6 +250,7 @@ "cameraIsOffline": "{{camera}} er frakoblet", "detectIsSlow": "{{detect}} er treg ({{speed}} ms)", "detectIsVerySlow": "{{detect}} er veldig treg ({{speed}} ms)", - "shmTooLow": "/dev/shm-allokeringen ({{total}} MB) bør økes til minst {{min}} MB." 
+ "shmTooLow": "/dev/shm-allokeringen ({{total}} MB) bør økes til minst {{min}} MB.", + "debugReplayActive": "Debug-reprise pågår" } } diff --git a/web/public/locales/nl/common.json b/web/public/locales/nl/common.json index f03c7d3a4..45b7d93d8 100644 --- a/web/public/locales/nl/common.json +++ b/web/public/locales/nl/common.json @@ -253,7 +253,8 @@ "account": "Account", "anonymous": "anoniem" }, - "classification": "Classificatie" + "classification": "Classificatie", + "profiles": "Profielen" }, "toast": { "copyUrlToClipboard": "URL naar klembord gekopieerd.", diff --git a/web/public/locales/nl/config/cameras.json b/web/public/locales/nl/config/cameras.json index 1d55a46f3..96b78e382 100644 --- a/web/public/locales/nl/config/cameras.json +++ b/web/public/locales/nl/config/cameras.json @@ -1,12 +1,12 @@ { - "label": "Camera Config", + "label": "Cameraconfiguratie", "name": { "label": "Camera naam", - "description": "Camera naam is verplicht" + "description": "Camera naam is vereist" }, "friendly_name": { "description": "Camera naam te gebruiken in de Frigate UI", - "label": "Herkenbare naam" + "label": "Eenvoudige naam" }, "enabled": { "label": "Geactiveerd", @@ -126,5 +126,27 @@ } } } + }, + "profiles": { + "label": "Profielen" + }, + "zones": { + "label": "Zones", + "description": "Met zones kun je een specifiek deel van het frame definiëren, zodat je kunt bepalen of een object zich binnen dat specifieke gebied bevindt.", + "friendly_name": { + "label": "Zone naam", + "description": "Een gebruiksvriendelijke naam voor de zone, die wordt weergegeven in de Frigate-gebruikersinterface. Als deze niet is ingesteld, wordt een opgemaakte versie van de zonenaam gebruikt." + }, + "enabled": { + "label": "Ingeschakeld", + "description": "Schakel deze zone in of uit. Uitgeschakelde zones worden tijdens de uitvoering genegeerd." + }, + "filters": { + "label": "Zone filters", + "description": "Filters die op objecten binnen deze zone moeten worden toegepast. 
Worden gebruikt om het aantal valse positieven te verminderen of om te beperken welke objecten als aanwezig in de zone worden beschouwd.", + "min_area": { + "label": "Minimale oppervlakte van het object" + } + } } } diff --git a/web/public/locales/nl/config/global.json b/web/public/locales/nl/config/global.json index a9b4f60e3..adc9aa42d 100644 --- a/web/public/locales/nl/config/global.json +++ b/web/public/locales/nl/config/global.json @@ -105,9 +105,65 @@ } }, "version": { - "description": "Numerieke of string-versie van de actieve configuratie om migraties of formaatwijzigingen te helpen detecteren." + "description": "Numerieke of string-versie van de actieve configuratie om migraties of formaatwijzigingen te helpen detecteren.", + "label": "Huidige configuratie versie" }, "safe_mode": { - "label": "Veilige modus" + "label": "Veilige modus", + "description": "Wanneer ingeschakeld, start Frigate in veilige modus met verminderde functionaliteit voor probleemoplossing." + }, + "environment_vars": { + "label": "Omgevingsvariabelen", + "description": "Sleutel/waarde paren van omgevingsvariabelen voor het Frigate proces in Home Assistant OS. Niet-HAOS gebruikers moeten in plaats hiervan Docker omgevingsvariabelen gebruiken." + }, + "auth": { + "label": "Authenticatie", + "enabled": { + "label": "Authenticatie aanzetten", + "description": "Schakel native authenticatie in voor de Frigate UI." + }, + "reset_admin_password": { + "label": "Reset admin wachtwoord", + "description": "Indien waar, reset het admin gebruiker wachtwoord tijdens opstarten en print het nieuwe wachtwoord in het logboek." + }, + "description": "Authenticatie en sessie-gerelateerde instellingen inclusief cookie en tempo limiet opties.", + "cookie_name": { + "label": "JWT cookie naam", + "description": "Naam van de gebruikte cookie om de JWT token voor native authenticatie op te slaan." 
+ }, + "cookie_secure": { + "label": "Veilige cookie instelling", + "description": "Stel de veilige instelling in op de auth cookie; moet waar zijn indien TLS in gebruik." + }, + "session_length": { + "label": "Sessie duratie", + "description": "Sessie duratie in seconden voor JWT-gebaseerde sessies." + }, + "refresh_time": { + "label": "Sessie ververs scherm", + "description": "Als een sessie binnen dit aantal seconden verloopt, ververs het tot volledige duratie." + }, + "failed_login_rate_limit": { + "label": "Gefaalde log-in pogingen", + "description": "Tempo-limiet regels voor gefaalde inlogpogingen om brute-force aanvallen te beperken." + }, + "trusted_proxies": { + "label": "Vertrouwde proxies" + } + }, + "logger": { + "default": { + "label": "Loggingsniveau", + "description": "Standaard globale logboek detailniveau (debug, info, waarschuwing, fout)." + }, + "label": "Logging", + "logs": { + "label": "Per-proces logboek niveau", + "description": "Per-component logboekniveau afwijkingen om detailniveau te vergroten of verkleinen per specifieke module." + }, + "description": "Beheert het standaard logboek detailniveau en afwijkende instellingen per logboek." 
+ }, + "profiles": { + "label": "Profielen" } } diff --git a/web/public/locales/nl/config/groups.json b/web/public/locales/nl/config/groups.json index dbaa569f6..6ecc7a612 100644 --- a/web/public/locales/nl/config/groups.json +++ b/web/public/locales/nl/config/groups.json @@ -5,7 +5,69 @@ "sensitivity": "Globale sensiviteit" }, "cameras": { - "detection": "Detectie" + "detection": "Detectie", + "sensitivity": "Gevoeligheid" + } + }, + "motion": { + "global": { + "algorithm": "Globaal algoritme", + "sensitivity": "Globale gevoeligheid" + }, + "cameras": { + "sensitivity": "Gevoeligheid", + "algorithm": "Algoritme" + } + }, + "snapshots": { + "cameras": { + "display": "Weergave" + }, + "global": { + "display": "Globale weergave" + } + }, + "detect": { + "cameras": { + "resolution": "Resolutie", + "tracking": "Volgen" + }, + "global": { + "resolution": "Globale resolutie", + "tracking": "Globaal volgen" + } + }, + "objects": { + "cameras": { + "tracking": "Volgen", + "filtering": "Filteren" + }, + "global": { + "tracking": "Globaal volgen", + "filtering": "Globaal filteren" + } + }, + "timestamp_style": { + "global": { + "appearance": "Globaal voorkomen" + }, + "cameras": { + "appearance": "Voorkomen" + } + }, + "record": { + "global": { + "retention": "Globale retentie", + "events": "Globale gebeurtenissen" + }, + "cameras": { + "retention": "Retentie", + "events": "Gebeurtenissen" + } + }, + "ffmpeg": { + "cameras": { + "cameraFfmpeg": "Camera-specifieke FFmpeg argumenten" } } } diff --git a/web/public/locales/nl/config/validation.json b/web/public/locales/nl/config/validation.json index 2083cc43a..6ddb7c764 100644 --- a/web/public/locales/nl/config/validation.json +++ b/web/public/locales/nl/config/validation.json @@ -1,5 +1,32 @@ { "minimum": "Minimale waarde van {{limit}} vereist", "maximum": "Mag niet meer dan {{limit}} bedragen.", - "exclusiveMinimum": "Waarde moet groter zijn dan {{limit}}" + "exclusiveMinimum": "Waarde moet groter zijn dan {{limit}}", + 
"exclusiveMaximum": "Moet minder zijn dan {{limit}}", + "minLength": "Moet minstens {{limit}} karakters zijn", + "maxLength": "Moet maximaal {{limit}} karakters zijn", + "minItems": "Moet minstens {{limit}} items hebben", + "maxItems": "Moet maximaal {{limit}} items hebben", + "pattern": "Ongeldig formaat", + "required": "Dit veld is vereist", + "type": "Ongeldig waarde type", + "enum": "Moet een van de toegestane waarden zijn", + "const": "Waarde komt niet overeen met de verwachte constante", + "uniqueItems": "Alle items moeten uniek zijn", + "format": "Ongeldig formaat", + "additionalProperties": "Onbekend kenmerk is niet toegestaan", + "oneOf": "Moet exact met een van de volgende schema's overeenkomen", + "anyOf": "Moet overeenkomen met minstens een van de toegestane schema's", + "ffmpeg": { + "inputs": { + "rolesUnique": "Elke rol kan slechts tot één input stroom worden toebedeeld.", + "detectRequired": "Minstens één input stroom moet toegewezen zijn aan de 'detectie' rol.", + "hwaccelDetectOnly": "Enkel de input stroom met de detectie rol kan hardwareversnelling argumenten instellen." + } + }, + "proxy": { + "header_map": { + "roleHeaderRequired": "Rol titel is vereist wanneer rol bindingen zijn geconfigureerd." 
+ } + } } diff --git a/web/public/locales/nl/objects.json b/web/public/locales/nl/objects.json index 1fc914a77..c53f10416 100644 --- a/web/public/locales/nl/objects.json +++ b/web/public/locales/nl/objects.json @@ -116,5 +116,9 @@ "amazon": "Amazon", "face": "Gezicht", "an_post": "An Post", - "purolator": "Purolator" + "purolator": "Purolator", + "kangaroo": "Kangoeroe", + "skunk": "Stinkdier", + "school_bus": "Schoolbus", + "royal_mail": "Royal Mail" } diff --git a/web/public/locales/nl/views/classificationModel.json b/web/public/locales/nl/views/classificationModel.json index a94c7956b..40a947afc 100644 --- a/web/public/locales/nl/views/classificationModel.json +++ b/web/public/locales/nl/views/classificationModel.json @@ -12,8 +12,10 @@ }, "toast": { "success": { - "deletedCategory": "Verwijderde klasse", - "deletedImage": "Verwijderde afbeeldingen", + "deletedCategory_one": "Verwijderde klasse", + "deletedCategory_other": "Verwijderde klassen", + "deletedImage_one": "Verwijderde afbeelding", + "deletedImage_other": "Verwijderde afbeeldingen", "categorizedImage": "Succesvol geclassificeerde afbeelding", "trainedModel": "Succesvol getraind model.", "trainingModel": "Modeltraining succesvol gestart.", diff --git a/web/public/locales/nl/views/events.json b/web/public/locales/nl/views/events.json index b4be69aef..ff2d687f9 100644 --- a/web/public/locales/nl/views/events.json +++ b/web/public/locales/nl/views/events.json @@ -9,7 +9,9 @@ "recordings": { "documentTitle": "Opnamen - Frigate" }, - "timeline": "Tijdlijn", + "timeline": { + "label": "Tijdslijn" + }, "empty": { "alert": "Er zijn geen meldingen om te beoordelen", "detection": "Er zijn geen detecties om te beoordelen", diff --git a/web/public/locales/nl/views/exports.json b/web/public/locales/nl/views/exports.json index d14ca9dbc..ffeda4a9a 100644 --- a/web/public/locales/nl/views/exports.json +++ b/web/public/locales/nl/views/exports.json @@ -13,7 +13,9 @@ "desc": "Voer een nieuwe naam in voor deze export." 
}, "noExports": "Geen export gevonden", - "deleteExport": "Verwijder Export", + "deleteExport": { + "label": "Verwijder export" + }, "deleteExport.desc": "Weet je zeker dat je dit wilt wissen: {{exportName}}?", "tooltip": { "shareExport": "Deel export", diff --git a/web/public/locales/nl/views/live.json b/web/public/locales/nl/views/live.json index b6d1618be..a0b6cce79 100644 --- a/web/public/locales/nl/views/live.json +++ b/web/public/locales/nl/views/live.json @@ -97,7 +97,9 @@ }, "notifications": "Meldingen", "audio": "Geluid", - "documentTitle": "Live - Frigate", + "documentTitle": { + "default": "Live - Frigate" + }, "documentTitle.withCamera": "{{camera}} - Live - Frigate", "autotracking": { "enable": "Automatisch volgen inschakelen", diff --git a/web/public/locales/nl/views/settings.json b/web/public/locales/nl/views/settings.json index 348003af7..1425acd22 100644 --- a/web/public/locales/nl/views/settings.json +++ b/web/public/locales/nl/views/settings.json @@ -7,7 +7,7 @@ "classification": "Classificatie-instellingen - Frigate", "masksAndZones": "Masker- en zone-editor - Frigate", "object": "Foutopsporing Frigate", - "general": "Profielinstellingen - Frigate", + "general": "UI Instellingen - Frigate", "frigatePlus": "Frigate+ Instellingen - Frigate", "notifications": "Meldingsinstellingen - Frigate", "enrichments": "Verrijkingsinstellingen - Frigate", @@ -15,7 +15,8 @@ "cameraReview": "Camera Review Instellingen - Frigate", "globalConfig": "Globale configuratie - Frigate", "cameraConfig": "Camera-instellingen - Frigate", - "maintenance": "Onderhoud - Frigate" + "maintenance": "Onderhoud - Frigate", + "profiles": "Profielen - Frigate" }, "menu": { "ui": "Gebruikersinterface", @@ -49,7 +50,45 @@ "globalTimestampStyle": "Tijdstempelstijl", "systemDatabase": "Database", "systemTls": "TLS", - "systemAuthentication": "Authenticatie" + "systemAuthentication": "Authenticatie", + "cameraNotifications": "Notificaties", + "integrationGenerativeAi": "Generatieve AI", 
+ "systemNetworking": "Netwerken", + "profiles": "Profielen", + "uiSettings": "UI instellingen", + "systemProxy": "Proxy", + "systemUi": "UI", + "systemLogging": "Logging", + "integrationFaceRecognition": "Gezichtsherkenning", + "integrationLpr": "Kentekenplaat herkenning", + "integrationObjectClassification": "Object classificatie", + "integrationAudioTranscription": "Audio transcriptie", + "cameraDetect": "Object detectie", + "cameraFfmpeg": "FFmpeg", + "cameraRecording": "Opnemen", + "cameraSnapshots": "Momentopnames", + "cameraMotion": "Bewegingsdetectie", + "cameraObjects": "Objecten", + "cameraAudioEvents": "Audio gebeurtenissen", + "cameraAudioTranscription": "Audio transcriptie", + "integrationSemanticSearch": "Semantisch zoeken", + "systemDetectionModel": "Detectie model", + "systemMqtt": "MQTT", + "systemEnvironmentVariables": "Omgevingsvariabelen", + "systemTelemetry": "Telemetrie", + "systemBirdseye": "Overzicht", + "systemFfmpeg": "FFmpeg", + "systemDetectorHardware": "Detectie hardware", + "cameraFaceRecognition": "Gezichtsherkenning", + "systemGo2rtcStreams": "go2rtc streams", + "cameraConfigReview": "Beoordeling", + "cameraLivePlayback": "Live weergave", + "cameraLpr": "Kentekenplaat herkenning", + "cameraMqttConfig": "MQTT", + "cameraOnvif": "ONVIF", + "cameraUi": "Camera UI", + "cameraTimestampStyle": "Tijdstempel stijl", + "maintenance": "Onderhoud" }, "dialog": { "unsavedChanges": { @@ -696,14 +735,14 @@ }, "snapshotConfig": { "title": "Snapshot-configuratie", - "desc": "Om te verzenden naar Frigate+ moeten zowel snapshots als clean_copy-snapshots ingeschakeld zijn in je configuratie.", + "desc": "Om te verzenden naar Frigate+ moeten snapshots ingeschakeld zijn in je configuratie.", "documentation": "Lees de documentatie", "table": { "camera": "Camera", "snapshots": "Snapshots", "cleanCopySnapshots": "clean_copy Snapshots" }, - "cleanCopyWarning": "Bij sommige camera's zijn snapshots ingeschakeld, maar ontbreekt de 'clean_copy'. 
Om afbeeldingen van deze camera's naar Frigate+ te kunnen verzenden, moet clean_copy zijn ingeschakeld in de snapshotconfiguratie." + "cleanCopyWarning": "Sommige camera's hebben snapshots uitgeschakeld" }, "modelInfo": { "title": "Modelinformatie", @@ -1332,5 +1371,11 @@ "success": "Configuratie voor beoordelingsclassificatie is opgeslagen. Herstart Frigate om de wijzigingen toe te passen." } } + }, + "button": { + "overriddenGlobal": "Overschreven (globaal)", + "overriddenGlobalTooltip": "Deze camera heeft voorrang op de algemene configuratie-instellingen in dit gedeelte", + "overriddenBaseConfig": "Overschreven (basis configuratie)", + "overriddenBaseConfigTooltip": "Het profiel {{profile}} heeft voorrang op de configuratie-instellingen in dit gedeelte" } } diff --git a/web/public/locales/nl/views/system.json b/web/public/locales/nl/views/system.json index fcf821187..d31cd6e8c 100644 --- a/web/public/locales/nl/views/system.json +++ b/web/public/locales/nl/views/system.json @@ -4,7 +4,8 @@ "logs": { "nginx": "Nginx Logboeken - Frigate", "go2rtc": "Go2RTC Logboeken - Frigate", - "frigate": "Frigate Logboek - Frigate" + "frigate": "Frigate Logboek - Frigate", + "websocket": "Berichten Logboeken - Frigate" }, "storage": "Opslag Statistieken - Frigate", "cameras": "Camera Statistieken - Frigate", @@ -33,7 +34,30 @@ "fetchingLogsFailed": "Fout bij ophalen van logs: {{errorMessage}}" } }, - "tips": "Logs worden gestreamd vanaf de server" + "tips": "Logs worden gestreamd vanaf de server", + "websocket": { + "label": "Berichten", + "pause": "Pauze", + "resume": "Hervatten", + "clear": "Leegmaken", + "filter": { + "all": "Alle onderwerpen", + "topics": "Onderwerpen", + "events": "Gebeurtenissen", + "reviews": "Beoordelingen", + "classification": "Classificatie", + "face_recognition": "Gezichtsherkenning", + "lpr": "Kentekenplaatherkenning", + "camera_activity": "Camera activiteit", + "system": "Systeem", + "camera": "Camera", + "all_cameras": "Alle camera's", + 
"cameras_count_one": "{{count}} Camera", + "cameras_count_other": "{{count}} Cameras" + }, + "empty": "Nog geen berichten ontvangen", + "count_one": "{{count}} berichten" + } }, "general": { "detector": { diff --git a/web/public/locales/pl/components/camera.json b/web/public/locales/pl/components/camera.json index f67326172..ada44e296 100644 --- a/web/public/locales/pl/components/camera.json +++ b/web/public/locales/pl/components/camera.json @@ -82,6 +82,7 @@ "mask": "Maski", "regions": "Regiony", "motion": "Ruch", - "boundingBox": "Ramka Ograniczająca" + "boundingBox": "Ramka Ograniczająca", + "paths": "Ścieżki" } } diff --git a/web/public/locales/pl/config/cameras.json b/web/public/locales/pl/config/cameras.json index bdadbc182..9943f1332 100644 --- a/web/public/locales/pl/config/cameras.json +++ b/web/public/locales/pl/config/cameras.json @@ -161,5 +161,65 @@ "description": "Strefy, do których obiekt musi wejść, aby zostać wykryty; pozostaw puste, aby zezwolić na dowolną strefę." } } + }, + "label": "Konfiguracja kamery", + "name": { + "label": "Nazwa kamery", + "description": "Nazwa kamery jest wymagana" + }, + "friendly_name": { + "label": "Przyjazna nazwa", + "description": "Przyjazna nazwa kamery używana w interfejsie Frigate" + }, + "enabled": { + "label": "Włączone", + "description": "Włączone" + }, + "audio": { + "label": "Zdarzenia audio", + "description": "Ustawienia detekcji zdarzeń audio dla tej kamery.", + "enabled": { + "label": "Włącz detekcję audio", + "description": "Włącz lub wyłącz detekcję zdarzeń audio dla tej kamery." + }, + "max_not_heard": { + "label": "Limit czasu zakończenia", + "description": "Czas w sekundach bez wykrycia skonfigurowanego typu audio, po którym zdarzenie audio zostaje zakończone." + }, + "min_volume": { + "label": "Minimalna głośność", + "description": "Minimalny próg głośności RMS wymagany do uruchomienia detekcji audio; niższe wartości zwiększają czułość (np. 200 wysoka, 500 średnia, 1000 niska)." 
+ }, + "listen": { + "label": "Typy nasłuchu", + "description": "Lista typów zdarzeń audio do wykrywania (na przykład: szczekanie, alarm pożarowy, krzyk, mowa, wrzask)." + }, + "filters": { + "label": "Filtry audio", + "description": "Ustawienia filtrów dla poszczególnych typów audio, takie jak progi pewności, używane do redukcji fałszywych alarmów." + }, + "enabled_in_config": { + "label": "Pierwotny stan audio", + "description": "Wskazuje, czy detekcja audio była pierwotnie włączona w statycznym pliku konfiguracyjnym." + }, + "num_threads": { + "label": "Wątki detekcji", + "description": "Liczba wątków używanych do przetwarzania detekcji audio." + } + }, + "audio_transcription": { + "label": "Transkrypcja audio", + "description": "Ustawienia transkrypcji audio na żywo i mowy, używane do zdarzeń i napisów na żywo.", + "enabled": { + "label": "Włącz transkrypcję", + "description": "Włącz lub wyłącz ręcznie wyzwalaną transkrypcję zdarzeń audio." + }, + "enabled_in_config": { + "label": "Pierwotny stan transkrypcji" + }, + "live_enabled": { + "label": "Transkrypcja na żywo", + "description": "Włącz transkrypcję strumieniową audio na żywo w momencie jego odbierania." + } } } diff --git a/web/public/locales/pl/config/global.json b/web/public/locales/pl/config/global.json index 0967ef424..ed12af3c7 100644 --- a/web/public/locales/pl/config/global.json +++ b/web/public/locales/pl/config/global.json @@ -1 +1,43 @@ -{} +{ + "audio": { + "label": "Zdarzenia audio", + "enabled": { + "label": "Włącz detekcję audio" + }, + "max_not_heard": { + "label": "Limit czasu zakończenia", + "description": "Czas w sekundach bez wykrycia skonfigurowanego typu audio, po którym zdarzenie audio zostaje zakończone." + }, + "min_volume": { + "label": "Minimalna głośność", + "description": "Minimalny próg głośności RMS wymagany do uruchomienia detekcji audio; niższe wartości zwiększają czułość (np. 200 wysoka, 500 średnia, 1000 niska)." 
+ }, + "listen": { + "label": "Typy nasłuchu", + "description": "Lista typów zdarzeń audio do wykrywania (na przykład: szczekanie, alarm pożarowy, krzyk, mowa, wrzask)." + }, + "filters": { + "label": "Filtry audio", + "description": "Ustawienia filtrów dla poszczególnych typów audio, takie jak progi pewności, używane do redukcji fałszywych alarmów." + }, + "enabled_in_config": { + "label": "Pierwotny stan audio", + "description": "Wskazuje, czy detekcja audio była pierwotnie włączona w statycznym pliku konfiguracyjnym." + }, + "num_threads": { + "label": "Wątki detekcji", + "description": "Liczba wątków używanych do przetwarzania detekcji audio." + } + }, + "audio_transcription": { + "label": "Transkrypcja audio", + "description": "Ustawienia transkrypcji audio na żywo i mowy, używane do zdarzeń i napisów na żywo.", + "live_enabled": { + "label": "Transkrypcja na żywo", + "description": "Włącz transkrypcję strumieniową audio na żywo w momencie jego odbierania." + } + }, + "version": { + "label": "Aktualna wersja" + } +} diff --git a/web/public/locales/pl/views/classificationModel.json b/web/public/locales/pl/views/classificationModel.json index c68baf133..bb29f4598 100644 --- a/web/public/locales/pl/views/classificationModel.json +++ b/web/public/locales/pl/views/classificationModel.json @@ -17,8 +17,12 @@ }, "toast": { "success": { - "deletedCategory": "Usunięte klasy", - "deletedImage": "Usunięte obrazy", + "deletedCategory_one": "Usunięte klasy", + "deletedCategory_few": "", + "deletedCategory_many": "", + "deletedImage_one": "Usunięte obrazy", + "deletedImage_few": "", + "deletedImage_many": "", "deletedModel_one": "Pomyślenie usunięto {{count}} model", "deletedModel_few": "Pomyślenie usunięto {{count}} modele", "deletedModel_many": "Pomyślenie usunięto {{count}} modeli", diff --git a/web/public/locales/pt-BR/components/dialog.json b/web/public/locales/pt-BR/components/dialog.json index 22f891023..5ce4c631b 100644 --- 
a/web/public/locales/pt-BR/components/dialog.json +++ b/web/public/locales/pt-BR/components/dialog.json @@ -65,6 +65,10 @@ "fromTimeline": { "saveExport": "Salvar Exportação", "previewExport": "Pré-Visualizar Exportação" + }, + "case": { + "label": "Caso", + "placeholder": "Selecione um caso" } }, "streaming": { diff --git a/web/public/locales/pt-BR/config/cameras.json b/web/public/locales/pt-BR/config/cameras.json index cb9f2b5e0..b065dbb25 100644 --- a/web/public/locales/pt-BR/config/cameras.json +++ b/web/public/locales/pt-BR/config/cameras.json @@ -19,11 +19,32 @@ "description": "Habilitar ou desabilitar o evento de detecção de áudio para esta câmera." }, "max_not_heard": { - "label": "Tempo limite final" + "label": "Tempo limite final", + "description": "Quantidade de segundos sem o tipo de áudio configurado antes do término do evento de áudio." }, "min_volume": { - "label": "Volume mínimo" + "label": "Volume mínimo", + "description": "Limiar mínimo de volume RMS necessário para executar a detecção de áudio; valores mais baixos aumentam a sensibilidade (por exemplo, 200 para volume alto, 500 para volume médio, 1000 para volume baixo)." + }, + "listen": { + "label": "Tipos de escuta", + "description": "Lista de tipos de eventos de áudio a serem detectados (por exemplo: latido, alarme de incêndio, grito, fala, berro)." + }, + "filters": { + "label": "Filtros de áudio", + "description": "Configurações de filtro por tipo de áudio, como limites de confiança, usadas para reduzir falsos positivos." + }, + "enabled_in_config": { + "label": "Estado de áudio original", + "description": "Indica se a detecção de áudio foi originalmente ativada no arquivo de configuração estática." + }, + "num_threads": { + "label": "Threads de detecção", + "description": "Número de threads a serem usadas para o processamento de detecção de áudio." 
} }, - "label": "Configuração da Câmera" + "label": "Configuração da Câmera", + "audio_transcription": { + "label": "Transcrição de áudio" + } } diff --git a/web/public/locales/pt-BR/config/global.json b/web/public/locales/pt-BR/config/global.json index 9c9266cb1..a9cbd3f9c 100644 --- a/web/public/locales/pt-BR/config/global.json +++ b/web/public/locales/pt-BR/config/global.json @@ -19,18 +19,61 @@ "description": "Padrão global de verbosidade de registro (debug, info, aviso, erro)." }, "logs": { - "label": "Nível de registro por processo" + "label": "Nível de registro por processo", + "description": "Configurações de nível de registro por componente para aumentar ou diminuir a verbosidade de módulos específicos." } }, "audio": { "max_not_heard": { - "label": "Tempo limite final" + "label": "Tempo limite final", + "description": "Quantidade de segundos sem o tipo de áudio configurado antes do término do evento de áudio." }, "min_volume": { - "label": "Volume mínimo" + "label": "Volume mínimo", + "description": "Limiar mínimo de volume RMS necessário para executar a detecção de áudio; valores mais baixos aumentam a sensibilidade (por exemplo, 200 para volume alto, 500 para volume médio, 1000 para volume baixo)." + }, + "listen": { + "label": "Tipos de escuta", + "description": "Lista de tipos de eventos de áudio a serem detectados (por exemplo: latido, alarme de incêndio, grito, fala, berro)." + }, + "filters": { + "label": "Filtros de áudio", + "description": "Configurações de filtro por tipo de áudio, como limites de confiança, usadas para reduzir falsos positivos." + }, + "enabled_in_config": { + "label": "Estado de áudio original", + "description": "Indica se a detecção de áudio foi originalmente ativada no arquivo de configuração estática." + }, + "num_threads": { + "label": "Threads de detecção", + "description": "Número de threads a serem usadas para o processamento de detecção de áudio." 
} }, "auth": { - "label": "Autenticação" + "label": "Autenticação", + "description": "Configurações de autenticação e relacionadas à sessão, incluindo opções de cookies e limite de taxa.", + "enabled": { + "label": "Habilitar autenticação", + "description": "Ative a autenticação nativa para a interface do usuário do Frigate." + }, + "reset_admin_password": { + "label": "Redefinir senha de administrador", + "description": "Se verdadeiro, redefina a senha do usuário administrador na inicialização e imprima a nova senha nos registros." + }, + "cookie_name": { + "label": "nome do cookie JWT", + "description": "Nome do cookie usado para armazenar o token JWT para autenticação nativa." + }, + "cookie_secure": { + "label": "Sinalizador de cookie seguro", + "description": "Defina o atributo \"secure\" no cookie de autenticação; ele deve ser verdadeiro ao usar TLS." + }, + "session_length": { + "label": "Duração da sessão", + "description": "Duração da sessão em segundos para sessões baseadas em JWT." 
+ } + }, + "audio_transcription": { + "label": "Transcrição de áudio" } } diff --git a/web/public/locales/pt-BR/config/groups.json b/web/public/locales/pt-BR/config/groups.json index fd806e880..a392ecc76 100644 --- a/web/public/locales/pt-BR/config/groups.json +++ b/web/public/locales/pt-BR/config/groups.json @@ -39,6 +39,30 @@ "global": { "resolution": "Resolução Global", "tracking": "Rastreamento Global" + }, + "cameras": { + "resolution": "Resolução", + "tracking": "Monitorando" + } + }, + "objects": { + "global": { + "tracking": "Rastreamento Global", + "filtering": "Filtragem global" + }, + "cameras": { + "tracking": "Monitorando", + "filtering": "Filtragem" + } + }, + "record": { + "global": { + "retention": "Retenção Global", + "events": "Eventos Globais" + }, + "cameras": { + "retention": "Retenção", + "events": "Eventos" } } } diff --git a/web/public/locales/pt-BR/config/validation.json b/web/public/locales/pt-BR/config/validation.json index 324a358cc..3fc808668 100644 --- a/web/public/locales/pt-BR/config/validation.json +++ b/web/public/locales/pt-BR/config/validation.json @@ -12,5 +12,21 @@ "type": "Tipo de valor inválido", "enum": "Deve ser um dos valores permitidos", "const": "Valor não condiz com a constante esperada", - "uniqueItems": "Todos os itens devem ser únicos" + "uniqueItems": "Todos os itens devem ser únicos", + "format": "Formato inválido", + "additionalProperties": "Propriedade desconhecida não é permitida", + "oneOf": "Deve corresponder exatamente a um dos esquemas permitidos", + "anyOf": "Deve corresponder a pelo menos um dos esquemas permitidos", + "proxy": { + "header_map": { + "roleHeaderRequired": "O cabeçalho de função é obrigatório quando os mapeamentos de função são configurados." 
+ } + }, + "ffmpeg": { + "inputs": { + "rolesUnique": "Cada função só pode ser atribuída a um fluxo de entrada.", + "detectRequired": "Pelo menos um fluxo de entrada deve ter a função 'detectar' atribuída.", + "hwaccelDetectOnly": "Somente o fluxo de entrada com a função de detecção pode definir argumentos de aceleração de hardware." + } + } } diff --git a/web/public/locales/pt-BR/views/classificationModel.json b/web/public/locales/pt-BR/views/classificationModel.json index 5defd3fcc..afa3fafbb 100644 --- a/web/public/locales/pt-BR/views/classificationModel.json +++ b/web/public/locales/pt-BR/views/classificationModel.json @@ -12,8 +12,12 @@ }, "toast": { "success": { - "deletedCategory": "Classe Apagada", - "deletedImage": "Imagens Apagadas", + "deletedCategory_one": "Classe Apagada", + "deletedCategory_many": "", + "deletedCategory_other": "", + "deletedImage_one": "Imagens Apagadas", + "deletedImage_many": "", + "deletedImage_other": "", "categorizedImage": "Imagem Classificada com Sucesso", "trainedModel": "Modelo treinado com sucesso.", "trainingModel": "Treinamento do modelo iniciado com sucesso.", @@ -21,7 +25,8 @@ "deletedModel_many": "{{count}} modelos excluídos com sucesso", "deletedModel_other": "{{count}} modelos excluídos com sucesso", "updatedModel": "Configuração do modelo atualizada com sucesso", - "renamedCategory": "Classe renomeada para {{name}} com sucesso" + "renamedCategory": "Classe renomeada para {{name}} com sucesso", + "reclassifiedImage": "Imagem reclassificada com sucesso" }, "error": { "deleteImageFailed": "Falha ao deletar:{{errorMessage}}", diff --git a/web/public/locales/pt-BR/views/events.json b/web/public/locales/pt-BR/views/events.json index 3402c1002..15282d447 100644 --- a/web/public/locales/pt-BR/views/events.json +++ b/web/public/locales/pt-BR/views/events.json @@ -15,7 +15,9 @@ "description": "A revisão de itens só pode ser criada para uma câmera quando a gravação está habilitada." 
} }, - "timeline": "Linha do tempo", + "timeline": { + "label": "Linha do tempo" + }, "timeline.aria": "Selecione a linha do tempo", "events": { "label": "Eventos", diff --git a/web/public/locales/pt-BR/views/explore.json b/web/public/locales/pt-BR/views/explore.json index 93505f0bd..1db62f00d 100644 --- a/web/public/locales/pt-BR/views/explore.json +++ b/web/public/locales/pt-BR/views/explore.json @@ -31,7 +31,7 @@ } }, "details": { - "timestamp": "Carimbo de data e hora", + "timestamp": "Estampa de Tempo", "item": { "title": "Rever Detalhe dos itens", "desc": "Revisar os detalhes do item", diff --git a/web/public/locales/pt-BR/views/exports.json b/web/public/locales/pt-BR/views/exports.json index 29edc2cb5..db100ff0c 100644 --- a/web/public/locales/pt-BR/views/exports.json +++ b/web/public/locales/pt-BR/views/exports.json @@ -2,7 +2,9 @@ "documentTitle": "Exportar - Frigate", "search": "Buscar", "noExports": "Nenhuma exportação encontrada", - "deleteExport": "Deletar Exportação", + "deleteExport": { + "label": "Excluir Exportação" + }, "deleteExport.desc": "Você tem certeza que quer apagar {{exportName}}?", "editExport": { "title": "Renomear Exportação", diff --git a/web/public/locales/pt-BR/views/faceLibrary.json b/web/public/locales/pt-BR/views/faceLibrary.json index dded9cf19..7e8c8f56c 100644 --- a/web/public/locales/pt-BR/views/faceLibrary.json +++ b/web/public/locales/pt-BR/views/faceLibrary.json @@ -6,7 +6,7 @@ "subLabelScore": "Pontuação do Sub-Rótulo", "scoreInfo": "A pontuação do sub-rótulo é a pontuação ponderada de todas as confidências faciais reconhecidas, então a pontuação pode ser diferente da mostrada na foto instantânea.", "faceDesc": "Detalhes do objeto rastreado que gerou este rosto", - "timestamp": "Carimbo de data e hora" + "timestamp": "Estampa de Tempo" }, "selectItem": "Selecione {{item}}", "imageEntry": { @@ -60,7 +60,7 @@ "placeholder": "Informe um nome para esta coleção", "addFace": "Adicione uma nova coleção à Biblioteca Facial 
subindo a sua primeira imagem.", "invalidName": "Nome inválido. Nomes podem conter letras, números, espacos, apóstrofos, sublinhado e hífens.", - "nameCannotContainHash": "Nome não pode conter #." + "nameCannotContainHash": "O nome não pode conter #." }, "documentTitle": "Biblioteca de rostos - Frigate", "uploadFaceImage": { diff --git a/web/public/locales/pt-BR/views/live.json b/web/public/locales/pt-BR/views/live.json index 0c58e2137..c2459b640 100644 --- a/web/public/locales/pt-BR/views/live.json +++ b/web/public/locales/pt-BR/views/live.json @@ -1,5 +1,7 @@ { - "documentTitle": "Ao Vivo - Frigate", + "documentTitle": { + "default": "Ao vivo - Frigate" + }, "documentTitle.withCamera": "{{camera}} - Ao vivo - Frigate", "lowBandwidthMode": "Modo de baixa largura de banda", "twoWayTalk": { diff --git a/web/public/locales/pt-BR/views/settings.json b/web/public/locales/pt-BR/views/settings.json index e4f8c4c1a..799822774 100644 --- a/web/public/locales/pt-BR/views/settings.json +++ b/web/public/locales/pt-BR/views/settings.json @@ -7,14 +7,15 @@ "masksAndZones": "Editor de Máscara e Zona - Frigate", "motionTuner": "Ajuste de Movimento - Frigate", "object": "Debug - Frigate", - "general": "Configurações de perfil - Frigate", + "general": "Configurações da interface - Frigate", "frigatePlus": "Frigate+ Configurações- Frigate", "notifications": "Configurações de notificação - Frigate", "cameraManagement": "Gerenciar Câmeras - Frigate", "cameraReview": "Configurações de Revisão de Câmera - Frigate", "globalConfig": "Configuração Global - Frigate", "cameraConfig": "Configuração da Câmera - Frigate", - "maintenance": "Manutenção - Frigate" + "maintenance": "Manutenção - Frigate", + "profiles": "Perfis - Frigate" }, "menu": { "ui": "UI", @@ -30,7 +31,10 @@ "roles": "Papéis", "cameraManagement": "Gerenciamento", "cameraReview": "Revisar", - "general": "Geral" + "general": "Geral", + "globalConfig": "Configuração global", + "system": "Sistema", + "integrations": "Integrações" 
}, "dialog": { "unsavedChanges": { @@ -913,5 +917,11 @@ "fetchingSnapshot": "Buscando a captura de imagem da câmera..." } } + }, + "button": { + "overriddenGlobal": "Substituir (Global)", + "overriddenGlobalTooltip": "Esta câmera substitui as configurações globais desta seção", + "overriddenBaseConfig": "Substituído (Configuração base)", + "overriddenBaseConfigTooltip": "O perfil {{profile}} substitui as configurações desta seção" } } diff --git a/web/public/locales/pt-BR/views/system.json b/web/public/locales/pt-BR/views/system.json index 7f73b3c46..922629719 100644 --- a/web/public/locales/pt-BR/views/system.json +++ b/web/public/locales/pt-BR/views/system.json @@ -39,7 +39,19 @@ "label": "Mensagens", "pause": "Pausar", "resume": "Resumir", - "clear": "Limpar" + "clear": "Limpar", + "filter": { + "all": "Todos os tópicos", + "topics": "Tópicos", + "events": "Eventos", + "reviews": "Avaliações", + "classification": "Classificação", + "face_recognition": "Reconhecimento facial", + "lpr": "LPR", + "camera_activity": "Atividade da câmera", + "system": "Sistema", + "camera": "Camera" + } } }, "general": { diff --git a/web/public/locales/pt/views/classificationModel.json b/web/public/locales/pt/views/classificationModel.json index 5aba72a91..06403c1b0 100644 --- a/web/public/locales/pt/views/classificationModel.json +++ b/web/public/locales/pt/views/classificationModel.json @@ -22,8 +22,12 @@ }, "toast": { "success": { - "deletedCategory": "Classe excluída", - "deletedImage": "Imagens excluídas", + "deletedCategory_one": "Classe excluída", + "deletedCategory_many": "", + "deletedCategory_other": "", + "deletedImage_one": "Imagens excluídas", + "deletedImage_many": "", + "deletedImage_other": "", "categorizedImage": "Imagem classificada com sucesso", "trainedModel": "Modelo treinado com sucesso.", "trainingModel": "Treinamento do modelo iniciado com sucesso.", diff --git a/web/public/locales/ro/common.json b/web/public/locales/ro/common.json index 65129e6d6..1938c3d11 
100644 --- a/web/public/locales/ro/common.json +++ b/web/public/locales/ro/common.json @@ -187,7 +187,8 @@ "review": "Revizuire", "classification": "Clasificare", "chat": "Chat", - "actions": "Acțiuni" + "actions": "Acțiuni", + "profiles": "Profile" }, "button": { "cameraAudio": "Sunet cameră", @@ -236,7 +237,8 @@ "saveAll": "Salvează toate", "savingAll": "Se salvează toate…", "undoAll": "Anulează toate", - "applying": "Se aplică…" + "applying": "Se aplică…", + "retry": "Reîncearcă" }, "unit": { "speed": { @@ -291,7 +293,8 @@ "error": { "noMessage": "Nu s-au putut salva modificările de configurație", "title": "Salvarea modificărilor de configurație a eșuat: {{errorMessage}}" - } + }, + "success": "Modificările de configurare au fost salvate cu succes." } }, "accessDenied": { @@ -316,5 +319,7 @@ "field": { "optional": "Opțional", "internalID": "ID-ul Intern pe care Frigate îl folosește în configurație și în baza de date" - } + }, + "no_items": "Niciun element", + "validation_errors": "Erori de validare" } diff --git a/web/public/locales/ro/components/camera.json b/web/public/locales/ro/components/camera.json index 093ff4532..35f57ff01 100644 --- a/web/public/locales/ro/components/camera.json +++ b/web/public/locales/ro/components/camera.json @@ -82,6 +82,7 @@ "zones": "Zone", "mask": "Mască", "motion": "Mișcare", - "regions": "Regiuni" + "regions": "Regiuni", + "paths": "Căi" } } diff --git a/web/public/locales/ro/config/cameras.json b/web/public/locales/ro/config/cameras.json index e7fb961d0..01c256adf 100644 --- a/web/public/locales/ro/config/cameras.json +++ b/web/public/locales/ro/config/cameras.json @@ -79,8 +79,8 @@ "label": "Detecție obiecte", "description": "Setări pentru rolul de detecție folosit pentru a rula recunoașterea obiectelor și trackerele.", "enabled": { - "label": "Detecție activată", - "description": "Activează sau dezactivează detecția obiectelor pentru această cameră. 
Detecția trebuie să fie activă pentru ca urmărirea obiectelor să funcționeze." + "label": "Activează detecția de obiecte", + "description": "Activează sau dezactivează detecția obiectelor pentru această cameră." }, "height": { "label": "Înălțime detect", @@ -303,7 +303,7 @@ }, "skip_motion_threshold": { "label": "Ignoră pragul de mișcare", - "description": "Dacă se schimbă mai mult de această fracțiune din imagine într-un singur cadru, detectorul nu va returna nicio casetă de mișcare și se va recalibra imediat. Acest lucru poate economisi CPU și reduce alertele false în timpul fulgerelor, furtunilor etc., dar poate rata evenimente reale, cum ar fi o cameră PTZ care urmărește automat un obiect. Compromisul este între a pierde câțiva megaocteți de înregistrări versus a revizui câteva clipuri scurte. Interval 0.0 - 1.0." + "description": "Dacă este setat la o valoare între 0.0 și 1.0, și mai mult decât această fracție din imagine se modifică într-un singur cadru, detectorul nu va returna casete de mișcare și se va recalibra imediat. Acest lucru poate economisi CPU și reduce rezultatele fals pozitive în timpul fulgerelor, furtunilor etc., dar poate rata evenimente reale, cum ar fi o cameră PTZ care urmărește automat un obiect. Compromisul este între a pierde câțiva megabytes de înregistrări versus a revizui câteva clipuri scurte. Lasă nesetat (None) pentru a dezactiva această funcție." } }, "objects": { @@ -529,7 +529,7 @@ }, "detections": { "label": "Configurație detecții", - "description": "Setări pentru evenimentele de detecție (non-alertă).", + "description": "Setări pentru care obiecte urmărite generează detecții (fără alertă) și cum sunt păstrate detecțiile.", "enabled": { "label": "Activare detecții", "description": "Activează sau dezactivează evenimentele de detecție pentru această cameră." 
@@ -626,9 +626,9 @@ }, "snapshots": { "label": "Snapshot-uri", - "description": "Setări pentru snapshot-urile JPEG salvate ale obiectelor monitorizate de această cameră.", + "description": "Setări pentru snapshot-uri generate prin API ale obiectelor urmărite pentru această cameră.", "enabled": { - "label": "Snapshot-uri activate", + "label": "Activează snapshot-urile", "description": "Activează sau dezactivează salvarea de snapshots pentru această cameră." }, "clean_copy": { @@ -637,15 +637,15 @@ }, "timestamp": { "label": "Overlay timestamp", - "description": "Pune data și ora pe snapshot-urile salvate." + "description": "Suprapune data și ora pe snapshot-urile din API." }, "bounding_box": { "label": "Overlay chenar", - "description": "Desenează chenarele obiectelor pe snapshot-uri." + "description": "Desenează chenarele obiectelor urmărite pe snapshot-urile din API." }, "crop": { "label": "Decupează snapshot-ul", - "description": "Decupează snapshot-ul pe mărimea obiectului detectat." + "description": "Decupează snapshot-urile din API pe chenarul obiectului detectat." }, "required_zones": { "label": "Zone obligatorii", @@ -653,11 +653,11 @@ }, "height": { "label": "Înălțime snapshot", - "description": "Înălțimea la care se redimensionează snapshot-ul; lasă gol pentru dimensiunea originală." + "description": "Înălțimea (în pixeli) la care să se redimensioneze snapshot-urile din API; lasă gol pentru a păstra dimensiunea originală." }, "retain": { "label": "Retenție snapshot-uri", - "description": "Setări pentru păstrarea snapshot-urilor.", + "description": "Setări de reținere pentru snapshot-uri, incluzând zilele implicite și suprascrierile per obiect.", "default": { "label": "Retenție implicită", "description": "Numărul implicit de zile pentru păstrare." @@ -672,8 +672,8 @@ } }, "quality": { - "label": "Calitate JPEG", - "description": "Calitatea encodării JPEG pentru snapshot-uri (0-100)." 
+ "label": "Calitatea snapshot-ului", + "description": "Calitatea encodării pentru snapshot-urile salvate (0-100)." } }, "timestamp_style": { @@ -838,6 +838,10 @@ "ignore_time_mismatch": { "label": "Ignoră decalaj timp", "description": "Ignoră diferențele de sincronizare a timpului între cameră și serverul Frigate pentru comunicarea ONVIF." + }, + "profile": { + "label": "Profil ONVIF", + "description": "Profil media ONVIF specific de utilizat pentru control PTZ, potrivit după token sau nume. Dacă nu este setat, se selectează automat primul profil cu configurație PTZ validă." } }, "type": { @@ -937,5 +941,9 @@ "enabled_in_config": { "label": "Stare inițială cameră", "description": "Păstrează starea originală a camerei." + }, + "profiles": { + "label": "Profiluri", + "description": "Profile de configurare denumite cu suprascrieri parțiale care pot fi activate la rulare." } } diff --git a/web/public/locales/ro/config/global.json b/web/public/locales/ro/config/global.json index 13455527c..d07e3bab4 100644 --- a/web/public/locales/ro/config/global.json +++ b/web/public/locales/ro/config/global.json @@ -111,8 +111,8 @@ "label": "Detecție obiecte", "description": "Setări pentru rolul de detecție folosit pentru a rula recunoașterea obiectelor și trackerele.", "enabled": { - "label": "Detecție activată", - "description": "Activează detecția pentru toate camerele. Trebuie să fie activă pentru ca urmărirea obiectelor să funcționeze." + "label": "Activează detecția de obiecte", + "description": "Activează sau dezactivează detecția de obiecte pentru toate camerele; poate fi suprascrisă pentru fiecare cameră în parte." }, "height": { "label": "Înălțime detect", @@ -293,7 +293,7 @@ "label": "Calitate live", "description": "Calitatea encodării pentru stream-ul jsmpeg (1 maxim, 31 minim)." }, - "description": "Setări folosite de interfața web pentru a controla rezoluția și calitatea stream-ului live." 
+ "description": "Setări pentru a controla rezoluția și calitatea stream live jsmpeg. Acest lucru nu afectează camerele retransmise care folosesc go2rtc pentru vizualizare live." }, "lpr": { "label": "Recunoaștere numere înmatriculare", @@ -413,7 +413,7 @@ "description": "Setări implicite pentru detecția mișcării, aplicate dacă nu sunt suprascrise per cameră.", "skip_motion_threshold": { "label": "Ignoră pragul de mișcare", - "description": "Dacă se schimbă mai mult de această fracțiune din imagine într-un singur cadru, detectorul nu va returna nicio casetă de mișcare și se va recalibra imediat. Acest lucru poate economisi CPU și reduce alertele false în timpul fulgerelor, furtunilor etc., dar poate rata evenimente reale, cum ar fi o cameră PTZ care urmărește automat un obiect. Compromisul este între a pierde câțiva megaocteți de înregistrări versus a revizui câteva clipuri scurte. Interval 0.0 - 1.0." + "description": "Dacă este setat la o valoare între 0.0 și 1.0, și mai mult decât această fracție din imagine se modifică într-un singur cadru, detectorul nu va returna casete de mișcare și se va recalibra imediat. Acest lucru poate economisi CPU și reduce rezultatele fals pozitive în timpul fulgerelor, furtunilor etc., dar poate rata evenimente reale, cum ar fi o cameră PTZ care urmărește automat un obiect. Compromisul este între a pierde câțiva megabytes de înregistrări versus a revizui câteva clipuri scurte. Lasă nesetat (None) pentru a dezactiva această funcție." } }, "objects": { @@ -638,7 +638,7 @@ }, "detections": { "label": "Configurație detecții", - "description": "Setări pentru evenimentele de detecție (non-alertă).", + "description": "Setări pentru care obiecte urmărite generează detecții (fără alertă) și cum sunt păstrate detecțiile.", "enabled": { "label": "Activare detecții", "description": "Activează sau dezactivează evenimentele de detecție." 
@@ -742,8 +742,8 @@ "description": "Declanșează o reindexare completă a obiectelor istorice în baza de date de înglobări." }, "model": { - "label": "Model căutare semantică", - "description": "Modelul de înglobări folosit (ex: 'jinav1')." + "label": "Model de căutare semantică sau nume furnizor GenAI", + "description": "Modelul de înglobări de folosit pentru căutarea semantică (de exemplu 'jinav1'), sau numele unui furnizor GenAI cu rolul de înglobări." }, "model_size": { "label": "Mărime model", @@ -757,7 +757,7 @@ "snapshots": { "label": "Snapshot-uri", "enabled": { - "label": "Snapshot-uri activate", + "label": "Activează snapshot-urile", "description": "Activează sau dezactivează salvarea de snapshot-uri." }, "clean_copy": { @@ -766,15 +766,15 @@ }, "timestamp": { "label": "Overlay timestamp", - "description": "Pune data și ora pe snapshot-urile salvate." + "description": "Suprapune data și ora pe snapshot-urile din API." }, "bounding_box": { "label": "Overlay chenar", - "description": "Desenează chenarele obiectelor pe snapshot-uri." + "description": "Desenează chenarele obiectelor urmărite pe snapshot-urile din API." }, "crop": { "label": "Decupează snapshot-ul", - "description": "Decupează snapshot-ul pe mărimea obiectului detectat." + "description": "Decupează snapshot-urile din API pe chenarul obiectului detectat." }, "required_zones": { "label": "Zone obligatorii", @@ -782,11 +782,11 @@ }, "height": { "label": "Înălțime snapshot", - "description": "Înălțimea la care se redimensionează snapshot-ul; lasă gol pentru dimensiunea originală." + "description": "Înălțimea (în pixeli) la care să se redimensioneze snapshot-urile din API; lasă gol pentru a păstra dimensiunea originală." 
}, "retain": { "label": "Retenție snapshot-uri", - "description": "Setări pentru păstrarea snapshot-urilor.", + "description": "Setări de reținere pentru snapshot-uri, incluzând zilele implicite și suprascrierile per obiect.", "default": { "label": "Retenție implicită", "description": "Numărul implicit de zile pentru păstrare." @@ -801,10 +801,10 @@ } }, "quality": { - "label": "Calitate JPEG", - "description": "Calitatea encodării JPEG pentru snapshot-uri (0-100)." + "label": "Calitatea snapshot-ului", + "description": "Calitatea encodării pentru snapshot-urile salvate (0-100)." }, - "description": "Setări pentru snapshot-urile JPEG ale obiectelor urmărite." + "description": "Setări pentru snapshot-urile obiectelor urmărite, generate prin API, pentru toate camerele; pot fi suprascrise pentru fiecare cameră în parte." }, "timestamp_style": { "label": "Stil timestamp", @@ -988,6 +988,10 @@ "ignore_time_mismatch": { "label": "Ignoră decalaj timp", "description": "Ignoră diferențele de sincronizare a timpului între cameră și serverul Frigate pentru comunicarea ONVIF." + }, + "profile": { + "label": "Profil ONVIF", + "description": "Profil media ONVIF specific de utilizat pentru control PTZ, potrivit după token sau nume. Dacă nu este setat, se selectează automat primul profil cu configurație PTZ validă." } }, "version": { @@ -1202,7 +1206,7 @@ "label": "Hardware detector", "description": "Configurare pentru detectoarele de obiecte (CPU, GPU, backend-uri ONNX) și orice setări de model specifice detectorului.", "type": { - "label": "Tip detector", + "label": "Tip", "description": "Tipul de detector de folosit pentru detecția obiectelor (de exemplu, 'cpu', 'edgetpu', 'openvino')." }, "cpu": { @@ -1987,13 +1991,116 @@ "label": "Linger socket ZMQ (ms)", "description": "Perioada de tip 'linger' a socket-ului în milisecunde." 
} + }, + "axengine": { + "label": "NPU AXEngine", + "description": "Detector NPU AXERA AX650N/AX8850N care rulează fișiere .axmodel compilate prin intermediul runtime-ului AXEngine.", + "type": { + "label": "Tip" + }, + "model": { + "label": "Configurație model specifică detectorului", + "description": "Opțiuni de configurare a modelului specifice detectorului (cale, dimensiune intrare etc.).", + "path": { + "label": "Calea către modelul personalizat de detecție a obiectelor", + "description": "Calea către un fișier de model de detecție personalizat (sau plus:// pentru modelele Frigate+)." + }, + "labelmap_path": { + "label": "Harta etichetelor pentru detectorul de obiecte personalizat", + "description": "Calea către un fișier de hartă a etichetelor care asociază clasele numerice cu etichete de tip text pentru detector." + }, + "width": { + "label": "Lățimea de intrare a modelului de detecție a obiectelor", + "description": "Lățimea tensorului de intrare al modelului în pixeli." + }, + "height": { + "label": "Înălțimea tensorului de intrare al modelului în pixeli", + "description": "Înălțimea tensorului de intrare al modelului în pixeli." + }, + "labelmap": { + "label": "Personalizarea hărții etichetelor", + "description": "Suprascrieri sau reasocieri de intrări pentru a le fuziona în harta de etichete standard." + }, + "attributes_map": { + "label": "Harta etichetelor de obiecte către etichetele atributelor acestora", + "description": "Harta de la etichetele obiectelor la etichetele atributelor utilizate pentru a atașa metadate (de exemplu „car” -> [„license_plate”])." + }, + "input_tensor": { + "label": "Forma tensorului de intrare al modelului", + "description": "Formatul tensorului așteptat de model: „nhwc” sau „nchw”." + }, + "input_pixel_format": { + "label": "Format culoare pixeli pentru intrarea modelului", + "description": "Spațiul de culoare al pixelilor așteptat de model: 'rgb', 'bgr' sau 'yuv'." 
+ }, + "input_dtype": { + "label": "Tip D intrare model", + "description": "Tipul de date al tensorului de intrare al modelului (de exemplu 'float32')." + }, + "model_type": { + "label": "Tip model detecție obiecte", + "description": "Tipul arhitecturii modelului detector (ssd, yolox, yolonas) folosit de unii detectori pentru optimizare." + } + }, + "model_path": { + "label": "Cale model specifică detectorului", + "description": "Calea fișierului către binarul modelului detector, dacă este cerută de detectorul ales." + } + }, + "model": { + "label": "Configurația modelului specifică detectorului", + "description": "Opțiuni de configurare a modelului specifice detectorului (cale, dimensiune intrare etc.).", + "path": { + "label": "Cale model detector de obiecte personalizat", + "description": "Calea către un fișier al modelului personalizat de detecție (sau plus:// pentru modelele Frigate+)." + }, + "labelmap_path": { + "label": "Harta de etichete pentru detectorul personalizat de obiecte", + "description": "Calea către un fișier labelmap care asociază clasele numerice cu etichete text pentru detector." + }, + "width": { + "label": "Lățimea de intrare pentru modelul de detecție a obiectelor", + "description": "Lățimea tensorului de intrare al modelului în pixeli." + }, + "height": { + "label": "Înălțimea de intrare pentru modelul de detecție a obiectelor", + "description": "Înălțimea tensorului de intrare al modelului în pixeli." + }, + "labelmap": { + "label": "Personalizare labelmap", + "description": "Suprascrie sau remapează intrările pentru a fi combinate în labelmap-ul standard." + }, + "attributes_map": { + "label": "Harta etichetelor obiectelor la etichetele atributelor lor", + "description": "Maparea de la etichetele obiectelor la etichetele atributelor folosite pentru a atașa metadate (de exemplu 'car' -> ['license_plate'])." 
+ }, + "input_tensor": { + "label": "Forma tensorului de intrare al modelului", + "description": "Formatul tensorului așteptat de model: 'nhwc' sau 'nchw'." + }, + "input_pixel_format": { + "label": "Formatul de culoare al pixelilor de intrare pentru model", + "description": "Spațiul de culoare al pixelilor așteptat de model: 'rgb', 'bgr' sau 'yuv'." + }, + "input_dtype": { + "label": "Tipul de date (D Type) de intrare pentru model", + "description": "Tipul de date pentru tensorul de intrare al modelului (de exemplu 'float32')." + }, + "model_type": { + "label": "Tipul modelului de detecție a obiectelor", + "description": "Tipul arhitecturii modelului detectorului (ssd, yolox, yolonas) folosit de unii detectori pentru optimizare." + } + }, + "model_path": { + "label": "Calea modelului specifică detectorului", + "description": "Calea fișierului către binarul modelului detectorului, dacă e cerută de detectorul ales." } }, "model": { "label": "Model detecție", "description": "Setări pentru configurarea unui model personalizat de detecție și a formei intrării acestuia.", "path": { - "label": "Cale model personalizat detecție obiecte", + "label": "Cale model detector de obiecte personalizat", "description": "Calea către un fișier de model personalizat (sau plus:// pentru modelele Frigate+)." }, "labelmap_path": { @@ -2034,7 +2141,7 @@ } }, "genai": { - "label": "Configurație GenAI (furnizori numiți)", + "label": "Configurație AI generativ", "description": "Setări pentru furnizorii de AI generativ folosiți pentru descrieri de obiecte și rezumate.", "api_key": { "label": "Cheie API", @@ -2188,5 +2295,17 @@ "label": "Arată în interfață", "description": "Comută dacă această cameră este vizibilă peste tot în interfața Frigate. Dezactivarea acestei opțiuni va necesita editarea manuală a config-ului pentru a vedea din nou camera în interfață." } + }, + "profiles": { + "label": "Profiluri", + "description": "Definiții de profiluri numite cu nume prietenoase. 
Profilurile camerelor trebuie să facă referință la numele definite aici.", + "friendly_name": { + "label": "Nume prietenos", + "description": "Numele afișat pentru acest profil în interfața utilizatorului (UI)." + } + }, + "active_profile": { + "label": "Profil activ", + "description": "Numele profilului activ în prezent. Doar la rulare (runtime), nu este salvat în YAML." + } } diff --git a/web/public/locales/ro/objects.json b/web/public/locales/ro/objects.json index 6c92d8b49..90dfc34cb 100644 --- a/web/public/locales/ro/objects.json +++ b/web/public/locales/ro/objects.json @@ -116,5 +116,10 @@ "an_post": "An Post", "postnl": "PostNL", "nzpost": "NZPost", - "postnord": "PostNord" + "postnord": "PostNord", + "canada_post": "Canada Post", + "royal_mail": "Royal Mail", + "school_bus": "Autobuz Școlar", + "skunk": "Sconcs", + "kangaroo": "Cangur" } diff --git a/web/public/locales/ro/views/classificationModel.json b/web/public/locales/ro/views/classificationModel.json index 1ecc6018e..8a7a077be 100644 --- a/web/public/locales/ro/views/classificationModel.json +++ b/web/public/locales/ro/views/classificationModel.json @@ -12,8 +12,12 @@ }, "toast": { "success": { - "deletedCategory": "Clasă ștearsă", - "deletedImage": "Imagini șterse", + "deletedCategory_one": "Am șters {{count}} clasă", + "deletedCategory_few": "Am șters {{count}} clase", + "deletedCategory_other": "Am șters {{count}} de clase", + "deletedImage_one": "Am șters {{count}} imagine", + "deletedImage_few": "Am șters {{count}} imagini", + "deletedImage_other": "Am șters {{count}} de imagini", "categorizedImage": "Imagine clasificată cu succes", "trainedModel": "Model antrenat cu succes.", "trainingModel": "Antrenamentul modelului a fost pornit cu succes.", @@ -21,7 +25,8 @@ "deletedModel_few": "{{count}} modele șterse cu succes", "deletedModel_other": "{{count}} modele șterse cu succes", "updatedModel": "Configurația modelului a fost actualizată cu succes", - "renamedCategory": "Clasa a fost redenumită cu 
succes în {{name}}" + "renamedCategory": "Clasa a fost redenumită cu succes în {{name}}", + "reclassifiedImage": "Imagine reclasificată cu succes" }, "error": { "deleteImageFailed": "Ștergerea a eșuat: {{errorMessage}}", @@ -31,7 +36,8 @@ "deleteModelFailed": "Ștergerea modelului a eșuat: {{errorMessage}}", "updateModelFailed": "Actualizarea modelului a eșuat: {{errorMessage}}", "renameCategoryFailed": "Redenumirea clasei a eșuat: {{errorMessage}}", - "trainingFailedToStart": "Nu s-a putut porni antrenarea modelului: {{errorMessage}}" + "trainingFailedToStart": "Nu s-a putut porni antrenarea modelului: {{errorMessage}}", + "reclassifyFailed": "Nu am putut reclasifica imaginea: {{errorMessage}}" } }, "deleteCategory": { @@ -156,8 +162,13 @@ "allImagesRequired_other": "Te rog să clasifici toate imaginile. {{count}} de imagini rămase.", "modelCreated": "Modelul a fost creat cu succes. Folosește vizualizarea Clasificări recente pentru a adăuga imagini pentru stările lipsă, apoi antrenează modelul.", "missingStatesWarning": { - "title": "Exemple de stări lipsă", - "description": "Este recomandat să alegi exemple pentru toate stările pentru rezultate optime. Poți continua fără a selecta toate stările, dar modelul nu va fi antrenat până când toate stările nu au imagini. După continuare, folosește vizualizarea Clasificări recente pentru a clasifica imagini pentru stările lipsă, apoi antrenează modelul." + "title": "Exemple de clase lipsă", + "description": "Nu toate clasele au exemple. Încearcă să generezi exemple noi pentru a găsi clasa lipsă, sau continuă și folosește vizualizarea Clasificări recente pentru a adăuga imagini mai târziu." + }, + "refreshExamples": "Generează exemple noi", + "refreshConfirm": { + "title": "Generezi exemple noi?", + "description": "Asta va genera un set nou de imagini și va goli toate selecțiile, inclusiv clasele anterioare. Va trebui să selectezi din nou exemple pentru toate clasele." 
} } }, @@ -189,5 +200,7 @@ "modelNotReady": "Modelul nu este pregătit pentru antrenare", "noChanges": "Nicio modificare a setului de date de la ultima antrenare." }, - "none": "Niciuna" + "none": "Niciuna", + "reclassifyImageAs": "Reclasifică imaginea ca:", + "reclassifyImage": "Reclasifică imaginea" } diff --git a/web/public/locales/ro/views/events.json b/web/public/locales/ro/views/events.json index bcaec672f..455257a92 100644 --- a/web/public/locales/ro/views/events.json +++ b/web/public/locales/ro/views/events.json @@ -14,7 +14,9 @@ "description": "Elementele de revizuire pot fi create doar pentru o cameră atunci când înregistrările sunt activate pentru acea cameră." } }, - "timeline": "Cronologie", + "timeline": { + "label": "Cronologie" + }, "timeline.aria": "Selectează cronologia", "events": { "aria": "Selectează evenimente", diff --git a/web/public/locales/ro/views/explore.json b/web/public/locales/ro/views/explore.json index 371565afe..5d4057b0b 100644 --- a/web/public/locales/ro/views/explore.json +++ b/web/public/locales/ro/views/explore.json @@ -170,7 +170,8 @@ "attributes": "Atribute de clasificare", "title": { "label": "Titlu" - } + }, + "scoreInfo": "Informații scor" }, "exploreMore": "Explorează mai multe obiecte cu {{label}}", "trackedObjectDetails": "Detalii despre obiectul urmărit", @@ -230,12 +231,18 @@ "debugReplay": { "label": "Reluare de depanare", "aria": "Vezi acest obiect urmărit în vizualizarea de reluare de depanare" + }, + "more": { + "aria": "Mai mult" } }, "dialog": { "confirmDelete": { "title": "Confirmă ștergerea", "desc": "Ștergerea acestui obiect urmărit elimină snapshot-ul, orice înglobări salvate și orice intrări asociate detaliilor de urmărire. Materialul video înregistrat al acestui obiect urmărit în vizualizarea Istoric NU va fi șters.

    Ești sigur că vrei să continui?" + }, + "toast": { + "error": "Eroare la ștergerea acestui obiect urmărit: {{errorMessage}}" } }, "noTrackedObjects": "Nu au fost găsite obiecte urmărite", diff --git a/web/public/locales/ro/views/exports.json b/web/public/locales/ro/views/exports.json index 7b93af723..1b1e0b2d8 100644 --- a/web/public/locales/ro/views/exports.json +++ b/web/public/locales/ro/views/exports.json @@ -2,7 +2,9 @@ "search": "Căutare", "documentTitle": "Exporturi - Frigate", "noExports": "Nu s-au găsit exporturi", - "deleteExport": "Șterge exportul", + "deleteExport": { + "label": "Șterge export" + }, "deleteExport.desc": "Sigur vrei să ștergi {{exportName}}?", "editExport": { "title": "Redenumire export", diff --git a/web/public/locales/ro/views/faceLibrary.json b/web/public/locales/ro/views/faceLibrary.json index 570db33fb..15979a6c7 100644 --- a/web/public/locales/ro/views/faceLibrary.json +++ b/web/public/locales/ro/views/faceLibrary.json @@ -76,7 +76,8 @@ "deletedFace_few": "{{count}} fețe au fost șterse cu succes.", "deletedFace_other": "{{count}} de fețe au fost șterse cu succes.", "uploadedImage": "Imagine încărcată cu succes.", - "addFaceLibrary": "{{name}} a fost adăugat(ă) cu succes la biblioteca de fețe!" + "addFaceLibrary": "{{name}} a fost adăugat(ă) cu succes la biblioteca de fețe!", + "reclassifiedFace": "Față reclasificată cu succes." 
}, "error": { "addFaceLibraryFailed": "Setarea numelui feței a eșuat: {{errorMessage}}", @@ -85,7 +86,8 @@ "renameFaceFailed": "Redenumirea feței a eșuat: {{errorMessage}}", "trainFailed": "Antrenarea a eșuat: {{errorMessage}}", "uploadingImageFailed": "Încărcarea imaginii a eșuat: {{errorMessage}}", - "updateFaceScoreFailed": "Nu s-a putut actualiza scorul feței: {{errorMessage}}" + "updateFaceScoreFailed": "Nu s-a putut actualiza scorul feței: {{errorMessage}}", + "reclassifyFailed": "Nu s-a putut reclasifica fața: {{errorMessage}}" } }, "imageEntry": { @@ -100,5 +102,7 @@ "trainFace": "Antrenează fața", "readTheDocs": "Citește documentația", "nofaces": "Nu sunt fețe disponibile", - "pixels": "{{area}}px" + "pixels": "{{area}}px", + "reclassifyFaceAs": "Reclasifică fața ca:", + "reclassifyFace": "Reclasifică fața" } diff --git a/web/public/locales/ro/views/live.json b/web/public/locales/ro/views/live.json index 45f8a68f2..6b8c8c979 100644 --- a/web/public/locales/ro/views/live.json +++ b/web/public/locales/ro/views/live.json @@ -1,5 +1,7 @@ { - "documentTitle": "Frigate - Live", + "documentTitle": { + "default": "Live - Frigate" + }, "documentTitle.withCamera": "{{camera}} - Frigate - Live", "lowBandwidthMode": "Mod Latime de Banda Limitata", "twoWayTalk": { @@ -19,7 +21,8 @@ "clickMove": { "label": "Apasă în cadrul imaginii pentru a centra camera", "enable": "Activează mutarea prin clic", - "disable": "Dezactivează mutarea prin clic" + "disable": "Dezactivează mutarea prin clic", + "enableWithZoom": "Activează clic pentru mutare / trage pentru zoom" }, "left": { "label": "Mișcă camera PTZ spre stânga" diff --git a/web/public/locales/ro/views/settings.json b/web/public/locales/ro/views/settings.json index d51d53873..632f6137e 100644 --- a/web/public/locales/ro/views/settings.json +++ b/web/public/locales/ro/views/settings.json @@ -8,14 +8,15 @@ "notifications": "Setări Notificări - Frigate", "motionTuner": "Reglaj Mișcare - Frigate", "object": "Depanare - 
Frigate", - "general": "Setări Profil - Frigate", + "general": "Setări interfață - Frigate", "frigatePlus": "Setări Frigate+ - Frigate", "enrichments": "Setări Îmbunătățiri - Frigate", "cameraManagement": "Gestionare Camere - Frigate", "cameraReview": "Setări Review Cameră - Frigate", "globalConfig": "Configurație Globală - Frigate", "cameraConfig": "Configurație Cameră - Frigate", - "maintenance": "Mentenanță - Frigate" + "maintenance": "Mentenanță - Frigate", + "profiles": "Profile - Frigate" }, "menu": { "ui": "Interfață (UI)", @@ -87,7 +88,10 @@ "cameraMqtt": "MQTT Cameră", "maintenance": "Mentenanță", "mediaSync": "Sincronizare media", - "regionGrid": "Grilă regiune" + "regionGrid": "Grilă regiune", + "uiSettings": "Setări UI", + "profiles": "Profile", + "systemGo2rtcStreams": "stream-uri go2rtc" }, "dialog": { "unsavedChanges": { @@ -100,7 +104,7 @@ "noCamera": "Nicio cameră" }, "general": { - "title": "Setări Profil", + "title": "Setări UI", "liveDashboard": { "title": "Dashboard Live", "automaticLiveView": { @@ -462,6 +466,10 @@ "zone": "zonă", "motion_mask": "mască mișcare", "object_mask": "mască obiect" + }, + "revertOverride": { + "desc": "Asta va elimina suprascrierea de profil pentru {{type}} {{name}} și va reveni la configurația de bază.", + "title": "Revino la configurația de bază" } }, "distance": { @@ -484,6 +492,17 @@ "error": { "mustBeGreaterOrEqualTo": "Pragul de viteză trebuie să fie mai mare sau egal cu 0.1." } + }, + "id": { + "error": { + "mustNotBeEmpty": "ID-ul nu trebuie să fie gol.", + "alreadyExists": "O mască cu acest ID există deja pentru această cameră." + } + }, + "name": { + "error": { + "mustNotBeEmpty": "Numele nu trebuie să fie gol." + } } }, "disabledInConfig": "Elementul este dezactivat în fișierul de configurare", @@ -492,7 +511,10 @@ "title": "Activată", "description": "Specifică dacă această mască este activată în fișierul de configurare. Dacă este dezactivată, nu poate fi activată prin MQTT. 
Măștile dezactivate sunt ignorate la rulare." } - } + }, + "profileBase": "(bază)", + "profileOverride": "(suprascriere)", + "addDisabledProfile": "Adaugă mai întâi în configurația de bază, apoi suprascrie în profil" }, "debug": { "motion": { @@ -741,8 +763,8 @@ "camera": "Cameră" }, "documentation": "Citește documentația", - "cleanCopyWarning": "Unele camere au snapshot-uri activate, dar au copia curată dezactivată. Trebuie să activezi clean_copy în configurația snapshot-urilor pentru a putea trimite imagini de la aceste camere către Frigate+.", - "desc": "Trimiterea către Frigate+ necesită ca ambele opțiuni, snapshot-uri și clean_copy, să fie activate în configurație." + "cleanCopyWarning": "Unele camere au snapshot-urile dezactivate", + "desc": "Trimiterea către Frigate+ necesită ca snapshot-urile să fie activate în configurația ta." }, "modelInfo": { "title": "Informații Model", @@ -1319,6 +1341,14 @@ "confirmButton": "Șterge definitiv", "success": "Camera {{cameraName}} a fost ștearsă cu succes", "error": "Eroare la ștergerea camerei {{cameraName}}" + }, + "profiles": { + "title": "Suprascrieri profil cameră", + "selectLabel": "Selectează profilul", + "description": "Configurează care camere sunt activate sau dezactivate când un profil este activat. Camerele setate pe \"Moștenire\" își păstrează starea de bază de activare.", + "inherit": "Moștenire", + "enabled": "Activat", + "disabled": "Dezactivat" } }, "cameraReview": { @@ -1373,6 +1403,9 @@ "value": { "label": "Valoare nouă", "reset": "Resetare" + }, + "profile": { + "label": "Profil" } }, "detectionModel": { @@ -1429,7 +1462,9 @@ "review_thumbnails": "Miniaturi Review", "previews": "Previzualizări", "exports": "Exporturi", - "recordings": "Înregistrări" + "recordings": "Înregistrări", + "verbose": "Detaliat", + "verboseDesc": "Scrie pe disc o listă completă a fișierelor orfane pentru verificare." 
}, "regionGrid": { "title": "Grilă regiune", @@ -1449,7 +1484,8 @@ }, "camera": { "title": "Setări Cameră", - "description": "Aceste setări se aplică doar pentru această cameră și suprascriu setările globale." + "description": "Aceste setări se aplică doar pentru această cameră și suprascriu setările globale.", + "noCameras": "Nicio cameră disponibilă" }, "advancedSettingsCount": "Setări Avansate ({{count}})", "advancedCount": "Avansat ({{count}})", @@ -1480,7 +1516,35 @@ "manual": "Argumente manuale", "inherit": "Moștenește de la setările camerei", "selectPreset": "Selectează presetarea", - "manualPlaceholder": "Introdu argumentele FFmpeg" + "manualPlaceholder": "Introdu argumentele FFmpeg", + "none": "Niciunul", + "useGlobalSetting": "Moștenește din setarea globală", + "presetLabels": { + "preset-rpi-64-h264": "Raspberry Pi (H.264)", + "preset-rpi-64-h265": "Raspberry Pi (H.265)", + "preset-vaapi": "VAAPI (GPU Intel/AMD)", + "preset-intel-qsv-h264": "Intel QuickSync (H.264)", + "preset-intel-qsv-h265": "Intel QuickSync (H.265)", + "preset-nvidia": "GPU NVIDIA", + "preset-jetson-h264": "NVIDIA Jetson (H.264)", + "preset-jetson-h265": "NVIDIA Jetson (H.265)", + "preset-rkmpp": "Rockchip RKMPP", + "preset-http-jpeg-generic": "HTTP JPEG (Generic)", + "preset-http-mjpeg-generic": "HTTP MJPEG (Generic)", + "preset-http-reolink": "HTTP - Camere Reolink", + "preset-rtmp-generic": "RTMP (Generic)", + "preset-rtsp-generic": "RTSP (Generic)", + "preset-rtsp-restream": "RTSP - Restream de la go2rtc", + "preset-rtsp-restream-low-latency": "RTSP - Restream de la go2rtc (Latență scăzută)", + "preset-rtsp-udp": "RTSP - UDP", + "preset-rtsp-blue-iris": "RTSP - Blue Iris", + "preset-record-generic": "Înregistrare (Generic, fără audio)", + "preset-record-generic-audio-copy": "Înregistrare (Generic + Copiere audio)", + "preset-record-generic-audio-aac": "Înregistrare (Generic + Audio în AAC)", + "preset-record-mjpeg": "Înregistrare - Camere MJPEG", + "preset-record-jpeg": 
"Înregistrare - Camere JPEG", + "preset-record-ubiquiti": "Înregistrare - Camere Ubiquiti" + } }, "cameraInputs": { "itemTitle": "Stream-ul {{index}}" @@ -1512,7 +1576,8 @@ "genai": "GenAI", "face_recognition": "Recunoaștere Facială", "lpr": "Recunoaștere Numere Înmatriculare", - "birdseye": "Birdseye" + "birdseye": "Birdseye", + "masksAndZones": "Măști / zone" }, "detect": { "title": "Setări Detecție" @@ -1524,7 +1589,8 @@ "keyDuplicate": "Numele detectorului există deja.", "noSchema": "Nu sunt disponibile scheme de detectoare.", "none": "Nicio instanță de detector configurată.", - "add": "Adaugă detector" + "add": "Adaugă detector", + "addCustomKey": "Adaugă cheie personalizată" }, "record": { "title": "Setări Înregistrare" @@ -1577,7 +1643,25 @@ "timestamp_style": { "title": "Setări Timestamp" }, - "searchPlaceholder": "Caută..." + "searchPlaceholder": "Caută...", + "genaiRoles": { + "options": { + "embeddings": "Înglobare", + "vision": "Viziune", + "tools": "Instrumente" + } + }, + "semanticSearchModel": { + "placeholder": "Selectează modelul…", + "builtIn": "Modele integrate", + "genaiProviders": "Furnizori GenAI" + }, + "reviewLabels": { + "summary": "{{count}} etichete selectate", + "empty": "Nicio etichetă disponibilă", + "allNonAlertDetections": "Toată activitatea fără alertă va fi inclusă ca detecții." + }, + "addCustomLabel": "Adaugă etichetă personalizată..." }, "globalConfig": { "title": "Configurare Globală", @@ -1617,5 +1701,143 @@ "unsavedChanges": "Ai modificări nesalvate", "confirmReset": "Confirmă Resetarea", "resetToDefaultDescription": "Această acțiune va reseta toate setările din această secțiune la valorile implicite. Acțiunea este ireversibilă.", - "resetToGlobalDescription": "Această acțiune va reseta setările din această secțiune la valorile globale implicite. Acțiunea este ireversibilă." + "resetToGlobalDescription": "Această acțiune va reseta setările din această secțiune la valorile globale implicite. 
Acțiunea este ireversibilă.", + "button": { + "overriddenGlobal": "Suprascris (global)", + "overriddenGlobalTooltip": "Această cameră suprascrie setările globale de configurare din această secțiune", + "overriddenBaseConfig": "Suprascris (configurația de bază)", + "overriddenBaseConfigTooltip": "Profilul {{profile}} suprascrie setările de configurare din această secțiune" + }, + "profiles": { + "title": "Profile", + "activeProfile": "Profil activ", + "noActiveProfile": "Niciun profil activ", + "active": "Activ", + "activated": "Profilul '{{profile}}' a fost activat", + "activateFailed": "Setarea profilului a eșuat", + "deactivated": "Profil dezactivat", + "noProfiles": "Niciun profil definit.", + "noOverrides": "Fără suprascrieri", + "cameraCount_one": "{{count}} cameră", + "cameraCount_few": "{{count}} camere", + "cameraCount_other": "{{count}} de camere", + "baseConfig": "Configurație de bază", + "addProfile": "Adaugă profil", + "newProfile": "Profil nou", + "profileNamePlaceholder": "de ex., Armat, Plecat, Mod noapte", + "friendlyNameLabel": "Nume profil", + "profileIdLabel": "ID profil", + "profileIdDescription": "Identificator intern folosit în configurație și automatizări", + "nameInvalid": "Sunt permise doar litere mici, numere și underscore-uri", + "nameDuplicate": "Un profil cu acest nume există deja", + "error": { + "mustBeAtLeastTwoCharacters": "Trebuie să aibă cel puțin 2 caractere", + "mustNotContainPeriod": "Nu trebuie să conțină puncte", + "alreadyExists": "Un profil cu acest ID există deja" + }, + "renameProfile": "Redenumește profilul", + "renameSuccess": "Profilul a fost redenumit în '{{profile}}'", + "deleteProfile": "Șterge profilul", + "deleteProfileConfirm": "Ștergi profilul \"{{profile}}\" de pe toate camerele? 
Această acțiune nu poate fi anulată.", + "deleteSuccess": "Profilul '{{profile}}' a fost șters", + "createSuccess": "Profilul '{{profile}}' a fost creat", + "removeOverride": "Elimină suprascrierea profilului", + "deleteSection": "Șterge suprascrierile secțiunii", + "deleteSectionConfirm": "Elimini suprascrierile {{section}} pentru profilul {{profile}} de pe {{camera}}?", + "deleteSectionSuccess": "Au fost eliminate suprascrierile {{section}} pentru {{profile}}", + "enableSwitch": "Activează profilele", + "enabledDescription": "Profilele sunt activate. Creează un profil nou mai jos, navighează la o secțiune de configurare a camerei pentru a face modificările, și salvează pentru ca acestea să aibă efect.", + "disabledDescription": "Profilele îți permit să definești seturi denumite de suprascrieri pentru configurația camerei (de ex., armat, plecat, noapte) care pot fi activate la cerere.", + "columnCamera": "Camera", + "columnOverrides": "Suprascrieri profil" + }, + "go2rtcStreams": { + "title": "Stream-uri go2rtc", + "description": "Gestionează configurațiile de stream-uri go2rtc pentru retransmisia camerelor. Fiecare stream are un nume și unul sau mai multe URL-uri sursă.", + "addStream": "Adaugă stream", + "addStreamDesc": "Introdu un nume pentru noul stream. Acest nume va fi folosit pentru a referenția stream-ul în configurația camerei tale.", + "addUrl": "Adaugă URL", + "streamName": "Nume stream", + "streamNamePlaceholder": "de ex., usa_intrare", + "streamUrlPlaceholder": "de ex., rtsp://user:parola@192.168.1.100/stream", + "deleteStream": "Șterge stream", + "deleteStreamConfirm": "Sigur vrei să ștergi stream-ul \"{{streamName}}\"? Camerele care referențiază acest stream s-ar putea să nu mai funcționeze.", + "noStreams": "Niciun stream go2rtc configurat. 
Adaugă un stream pentru a începe.", + "validation": { + "nameRequired": "Numele stream-ului este obligatoriu", + "nameDuplicate": "Un stream cu acest nume există deja", + "nameInvalid": "Numele stream-ului poate conține doar litere, numere, underscore-uri și cratime", + "urlRequired": "Cel puțin un URL este obligatoriu" + }, + "renameStream": "Redenumește stream-ul", + "renameStreamDesc": "Introdu un nume nou pentru acest stream. Redenumirea unui stream poate strica camerele sau alte stream-uri care îl referențiază după nume.", + "newStreamName": "Nume nou de stream", + "ffmpeg": { + "useFfmpegModule": "Folosește modul de compatibilitate (ffmpeg)", + "video": "Video", + "audio": "Audio", + "hardware": "Accelerare hardware", + "videoCopy": "Copiază", + "videoH264": "Transcodează în H.264", + "videoH265": "Transcodează în H.265", + "videoExclude": "Exclude", + "audioCopy": "Copiază", + "audioAac": "Transcodează în AAC", + "audioOpus": "Transcodează în Opus", + "audioPcmu": "Transcodează în PCM μ-law", + "audioPcma": "Transcodează în PCM A-law", + "audioPcm": "Transcodează în PCM", + "audioMp3": "Transcodează în MP3", + "audioExclude": "Exclude", + "hardwareNone": "Fără accelerare hardware", + "hardwareAuto": "Accelerare hardware automată" + } + }, + "timestampPosition": { + "tl": "Sus stânga", + "tr": "Sus dreapta", + "bl": "Jos stânga", + "br": "Jos dreapta" + }, + "onvif": { + "profileAuto": "Auto", + "profileLoading": "Se încarcă profilurile..." + }, + "configMessages": { + "review": { + "recordDisabled": "Înregistrarea este dezactivată, elementele de revizuire nu vor fi generate.", + "detectDisabled": "Detecția obiectelor este dezactivată. Elementele de revizuire necesită obiecte detectate pentru a categorisi alertele și detecțiile.", + "allNonAlertDetections": "Toată activitatea fără alertă va fi inclusă ca detecții." + }, + "audio": { + "noAudioRole": "Niciun flux nu are rolul audio definit. 
Trebuie să activați rolul audio pentru ca detecția audio să funcționeze." + }, + "audioTranscription": { + "audioDetectionDisabled": "Detecția audio nu este activată pentru această cameră. Transcrierea audio necesită ca detecția audio să fie activă." + }, + "detect": { + "fpsGreaterThanFive": "Setarea cadrelor pe secundă pentru detecție la o valoare mai mare de 5 nu este recomandată." + }, + "faceRecognition": { + "globalDisabled": "Recunoașterea facială nu este activată la nivel global. Activați-o în setările globale pentru ca recunoașterea facială la nivel de cameră să funcționeze.", + "personNotTracked": "Recunoașterea facială necesită urmărirea obiectului „person”. Asigurați-vă că „person” este în lista de urmărire a obiectelor." + }, + "lpr": { + "globalDisabled": "Recunoașterea plăcuțelor de înmatriculare nu este activată la nivel global. Activați-o în setările globale pentru ca recunoașterea la nivel de cameră să funcționeze.", + "vehicleNotTracked": "Recunoașterea plăcuțelor de înmatriculare necesită ca „car” sau „motorcycle” să fie urmărite." + }, + "record": { + "noRecordRole": "Niciun flux nu are rolul de înregistrare definit. Înregistrarea nu va funcționa." + }, + "birdseye": { + "objectsModeDetectDisabled": "Birdseye este setat pe modul 'objects', dar detecția obiectelor este dezactivată pentru această cameră. Camera nu va apărea în Birdseye." + }, + "snapshots": { + "detectDisabled": "Detecția obiectelor este dezactivată. Snapshot-urile sunt generate din obiectele urmărite și nu vor fi create." + }, + "detectors": { + "mixedTypes": "Toți detectorii trebuie să folosească același tip. Șterge detectorii existenți pentru a folosi un alt tip.", + "mixedTypesSuggestion": "Toți detectorii trebuie să folosească același tip. Șterge detectorii existenți sau selectează {{type}}." 
+ } + } } diff --git a/web/public/locales/ro/views/system.json b/web/public/locales/ro/views/system.json index 420884e9b..e829edd60 100644 --- a/web/public/locales/ro/views/system.json +++ b/web/public/locales/ro/views/system.json @@ -50,7 +50,8 @@ "description": "Acesta este un bug cunoscut în instrumentele de raportare GPU Intel (intel_gpu_top), unde acestea se blochează și returnează repetat o utilizare GPU de 0% chiar și atunci când accelerarea hardware și detecția obiectelor rulează corect pe (i)GPU. Aceasta nu este o problemă Frigate. Poți reporni host-ul pentru a remedia temporar problema și a confirma că GPU-ul funcționează corect. Performanța nu este afectată." }, "gpuTemperature": "Temperatură GPU", - "npuTemperature": "Temperatură NPU" + "npuTemperature": "Temperatură NPU", + "gpuCompute": "Calcul / Codare GPU" }, "detector": { "temperature": "Temperatură detector", @@ -151,7 +152,9 @@ "count": "{{count}} mesaje", "expanded": { "payload": "Conținut" - } + }, + "count_one": "{{count}} mesaj", + "count_other": "{{count}} mesaje" } }, "metrics": "Metrici sistem", @@ -214,7 +217,8 @@ "detect": "detectează", "cameraFfmpeg": "FFmpeg {{camName}}", "camera": "cameră", - "ffmpeg": "FFmpeg" + "ffmpeg": "FFmpeg", + "cameraGpu": "{{camName}} GPU" }, "title": "Camere", "overview": "Prezentare generală", diff --git a/web/public/locales/ru/config/cameras.json b/web/public/locales/ru/config/cameras.json index 0967ef424..d50ac0316 100644 --- a/web/public/locales/ru/config/cameras.json +++ b/web/public/locales/ru/config/cameras.json @@ -1 +1,110 @@ -{} +{ + "name": { + "label": "Наименование камеры", + "description": "Наименование камеры это обязательное поле" + }, + "enabled": { + "label": "Включено", + "description": "Включено" + }, + "friendly_name": { + "label": "Отображаемое имя", + "description": "Отображаемое имя уже используется" + }, + "label": "Конфигурация", + "audio": { + "label": "Аудиособытия", + "description": "Настройки обнаружения аудиособытий для этой 
камеры.", + "enabled": { + "label": "Включить обнаружение звука", + "description": "Включить или отключить аудиособытия для этой камеры." + }, + "max_not_heard": { + "label": "Завершение таймаута", + "description": "Количество секунд без указания типа звука до завершения звукового события." + }, + "min_volume": { + "label": "Минимальная громкость", + "description": "Для запуска функции обнаружения звука требуется минимальный пороговый уровень громкости RMS; более низкие значения повышают чувствительность (например, 200 — высокий, 500 — средний, 1000 — низкий)." + }, + "listen": { + "description": "Список типов аудиособытий для обнаружения (например: лай, пожарная тревога, крик, речь, вопль).", + "label": "Типы аудиособытий" + }, + "filters": { + "label": "Аудиофильтры", + "description": "Настройки фильтров для каждого типа аудиофайлов, такие как пороговые значения, используются для уменьшения количества ложных срабатываний." + }, + "enabled_in_config": { + "label": "Исходное состояние звука", + "description": "Указывает, было ли изначально включено обнаружение звука в статическом конфигурационном файле." + }, + "num_threads": { + "label": "Обнаружение потоков", + "description": "Количество потоков, используемых для обработки обнаружения звука." + } + }, + "audio_transcription": { + "label": "Расшифровка аудиозаписи", + "description": "Настройки для транскрипции аудио в реальном времени и речи, используемые для событий и субтитров в реальном времени.", + "enabled": { + "label": "Включить транскрипцию", + "description": "Включить или отключить транскрипцию аудиособытий, запускаемую вручную." + }, + "enabled_in_config": { + "label": "Исходное состояние транскрипции" + }, + "live_enabled": { + "label": "Транскрипция в реальном времени", + "description": "Включить потоковую транскрипцию аудио в режиме реального времени по мере его поступления." 
+ } + }, + "birdseye": { + "description": "Настройки для составного режима просмотра Birdseye, который объединяет видеопоток с нескольких камер в единый макет.", + "label": "Режим Birdseye", + "enabled": { + "label": "Включить Birdseye", + "description": "Включить или отключить функцию Birdseye." + }, + "mode": { + "label": "Режим слежения", + "description": "Режимы добавления камер в Birdseye: «объекты», «движение» или «непрерывный»." + }, + "order": { + "label": "Позиция", + "description": "Числовое значение, управляющее порядком расположения камер в схеме Birdseye." + } + }, + "detect": { + "label": "Обнаружение объектов", + "description": "Настройки роли обнаружения, используемые для запуска обнаружения объектов и инициализации трекеров.", + "enabled": { + "label": "Включить обнаружение объектов", + "description": "Включить или отключить обнаружение объектов для этой камеры." + }, + "height": { + "label": "Высота обнаружения", + "description": "Высота (в пикселях) кадров, используемых для обнаружения потока; оставьте поле пустым, чтобы использовать собственное разрешение потока." + }, + "width": { + "label": "Ширина обнаружения", + "description": "Ширина (в пикселях) кадров, используемых для обнаружения потока; оставьте поле пустым, чтобы использовать собственное разрешение потока." + }, + "fps": { + "label": "Частота кадров обнаружения", + "description": "Желаемое количество кадров в секунду для выполнения обнаружения; более низкие значения снижают нагрузку на ЦП (рекомендуемое значение — 5, более высокое значение — максимум 10 — следует устанавливать только при отслеживании чрезвычайно быстро движущихся объектов)." + }, + "min_initialized": { + "label": "Минимальное количество кадров инициализации", + "description": "Количество последовательных срабатываний обнаружения, необходимых для создания отслеживаемого объекта. Увеличьте это значение, чтобы уменьшить количество ложных инициализаций. Значение по умолчанию — частота кадров, деленная на 2." 
+ }, + "max_disappeared": { + "label": "Максимальное количество исчезнувших кадров", + "description": "Количество кадров без обнаружения до того, как отслеживаемый объект будет считаться исчезнувшим." + }, + "stationary": { + "label": "Конфигурация стационарных объектов", + "description": "Настройки для обнаружения и управления объектами, которые остаются неподвижными в течение определенного периода времени." + } + } +} diff --git a/web/public/locales/ru/config/global.json b/web/public/locales/ru/config/global.json index 0967ef424..5e7de1ab3 100644 --- a/web/public/locales/ru/config/global.json +++ b/web/public/locales/ru/config/global.json @@ -1 +1,87 @@ -{} +{ + "audio": { + "label": "Аудиособытия", + "enabled": { + "label": "Включить обнаружение звука" + }, + "max_not_heard": { + "label": "Завершение таймаута", + "description": "Количество секунд без указания типа звука до завершения звукового события." + }, + "min_volume": { + "label": "Минимальная громкость", + "description": "Для запуска функции обнаружения звука требуется минимальный пороговый уровень громкости RMS; более низкие значения повышают чувствительность (например, 200 — высокий, 500 — средний, 1000 — низкий)." + }, + "listen": { + "description": "Список типов аудиособытий для обнаружения (например: лай, пожарная тревога, крик, речь, вопль).", + "label": "Типы аудиособытий" + }, + "filters": { + "label": "Аудиофильтры", + "description": "Настройки фильтров для каждого типа аудиофайлов, такие как пороговые значения, используются для уменьшения количества ложных срабатываний." + }, + "enabled_in_config": { + "label": "Исходное состояние звука", + "description": "Указывает, было ли изначально включено обнаружение звука в статическом конфигурационном файле." + }, + "num_threads": { + "label": "Обнаружение потоков", + "description": "Количество потоков, используемых для обработки обнаружения звука." 
+ } + }, + "audio_transcription": { + "label": "Расшифровка аудиозаписи", + "description": "Настройки для транскрипции аудио в реальном времени и речи, используемые для событий и субтитров в реальном времени.", + "live_enabled": { + "label": "Транскрипция в реальном времени", + "description": "Включить потоковую транскрипцию аудио в режиме реального времени по мере его поступления." + } + }, + "birdseye": { + "description": "Настройки для составного режима просмотра Birdseye, который объединяет видеопоток с нескольких камер в единый макет.", + "label": "Режим Birdseye", + "enabled": { + "label": "Включить Birdseye", + "description": "Включить или отключить функцию Birdseye." + }, + "mode": { + "label": "Режим слежения", + "description": "Режимы добавления камер в Birdseye: «объекты», «движение» или «непрерывный»." + }, + "order": { + "label": "Позиция", + "description": "Числовое значение, управляющее порядком расположения камер в схеме Birdseye." + } + }, + "detect": { + "label": "Обнаружение объектов", + "description": "Настройки роли обнаружения, используемые для запуска обнаружения объектов и инициализации трекеров.", + "enabled": { + "label": "Включить обнаружение объектов" + }, + "height": { + "label": "Высота обнаружения", + "description": "Высота (в пикселях) кадров, используемых для обнаружения потока; оставьте поле пустым, чтобы использовать собственное разрешение потока." + }, + "width": { + "label": "Ширина обнаружения", + "description": "Ширина (в пикселях) кадров, используемых для обнаружения потока; оставьте поле пустым, чтобы использовать собственное разрешение потока." + }, + "fps": { + "label": "Частота кадров обнаружения", + "description": "Желаемое количество кадров в секунду для выполнения обнаружения; более низкие значения снижают нагрузку на ЦП (рекомендуемое значение — 5, более высокое значение — максимум 10 — следует устанавливать только при отслеживании чрезвычайно быстро движущихся объектов)." 
+ }, + "min_initialized": { + "label": "Минимальное количество кадров инициализации", + "description": "Количество последовательных срабатываний обнаружения, необходимых для создания отслеживаемого объекта. Увеличьте это значение, чтобы уменьшить количество ложных инициализаций. Значение по умолчанию — частота кадров, деленная на 2." + }, + "max_disappeared": { + "label": "Максимальное количество исчезнувших кадров", + "description": "Количество кадров без обнаружения до того, как отслеживаемый объект будет считаться исчезнувшим." + }, + "stationary": { + "label": "Конфигурация стационарных объектов", + "description": "Настройки для обнаружения и управления объектами, которые остаются неподвижными в течение определенного периода времени." + } + } +} diff --git a/web/public/locales/ru/views/classificationModel.json b/web/public/locales/ru/views/classificationModel.json index b5b7e2222..6dbe7a4b1 100644 --- a/web/public/locales/ru/views/classificationModel.json +++ b/web/public/locales/ru/views/classificationModel.json @@ -17,8 +17,12 @@ }, "toast": { "success": { - "deletedCategory": "Класс удалён", - "deletedImage": "Изображения удалены", + "deletedCategory_one": "Класс удалён", + "deletedCategory_few": "", + "deletedCategory_many": "", + "deletedImage_one": "Изображения удалены", + "deletedImage_few": "", + "deletedImage_many": "", "deletedModel_one": "Успешно удалена {{count}} модель", "deletedModel_few": "Успешно удалены {{count}} модели", "deletedModel_many": "Успешно удалены {{count}} моделей", diff --git a/web/public/locales/sk/views/classificationModel.json b/web/public/locales/sk/views/classificationModel.json index 58a802fd2..7b5c0e59c 100644 --- a/web/public/locales/sk/views/classificationModel.json +++ b/web/public/locales/sk/views/classificationModel.json @@ -12,8 +12,12 @@ }, "toast": { "success": { - "deletedCategory": "Vymazaná Trieda", - "deletedImage": "Vymazané Obrázky", + "deletedCategory_one": "Vymazaná Trieda", + "deletedCategory_few": "", + 
"deletedCategory_other": "", + "deletedImage_one": "Vymazané Obrázky", + "deletedImage_few": "", + "deletedImage_other": "", "categorizedImage": "Obrázok bol úspešne klasifikovaný", "trainedModel": "Úspešne vyškolený model.", "trainingModel": "Úspešne spustené trénovanie modelu.", diff --git a/web/public/locales/sl/views/classificationModel.json b/web/public/locales/sl/views/classificationModel.json index 317a5f6da..514c3afe3 100644 --- a/web/public/locales/sl/views/classificationModel.json +++ b/web/public/locales/sl/views/classificationModel.json @@ -18,8 +18,14 @@ }, "toast": { "success": { - "deletedCategory": "Razred izbrisan", - "deletedImage": "Slike izbrisane", + "deletedCategory_one": "Razred izbrisan", + "deletedCategory_two": "", + "deletedCategory_few": "", + "deletedCategory_other": "", + "deletedImage_one": "Slike izbrisane", + "deletedImage_two": "", + "deletedImage_few": "", + "deletedImage_other": "", "trainedModel": "Model uspešno naučen.", "trainingModel": "Učenje modela se je uspešno začelo.", "deletedModel_one": "Model uspešno izbrisan", diff --git a/web/public/locales/sr/views/classificationModel.json b/web/public/locales/sr/views/classificationModel.json index 68abd5cbf..81650c587 100644 --- a/web/public/locales/sr/views/classificationModel.json +++ b/web/public/locales/sr/views/classificationModel.json @@ -23,8 +23,12 @@ }, "toast": { "success": { - "deletedCategory": "Обрисана класа", - "deletedImage": "Обрисане слике", + "deletedCategory_one": "Обрисана класа", + "deletedCategory_few": "", + "deletedCategory_other": "", + "deletedImage_one": "Обрисане слике", + "deletedImage_few": "", + "deletedImage_other": "", "deletedModel_one": "Успешно је обрисан {{count}} модел", "deletedModel_few": "Успешно су обрисана {{count}} модела", "deletedModel_other": "Успешно је обрисано {{count}} модела", diff --git a/web/public/locales/sv/views/classificationModel.json b/web/public/locales/sv/views/classificationModel.json index 5b5c5b77f..f4ac4b2ef 
100644 --- a/web/public/locales/sv/views/classificationModel.json +++ b/web/public/locales/sv/views/classificationModel.json @@ -12,8 +12,10 @@ }, "toast": { "success": { - "deletedCategory": "Borttagen klass", - "deletedImage": "Raderade bilder", + "deletedCategory_one": "Borttagen klass", + "deletedCategory_other": "", + "deletedImage_one": "Raderade bilder", + "deletedImage_other": "", "categorizedImage": "Lyckades klassificera bilden", "trainedModel": "Modellen har tränats.", "trainingModel": "Modellträning har startat.", diff --git a/web/public/locales/tr/config/cameras.json b/web/public/locales/tr/config/cameras.json index 0967ef424..7bc693e87 100644 --- a/web/public/locales/tr/config/cameras.json +++ b/web/public/locales/tr/config/cameras.json @@ -1 +1,5 @@ -{} +{ + "name": { + "label": "Kamera ismi" + } +} diff --git a/web/public/locales/tr/config/global.json b/web/public/locales/tr/config/global.json index 0967ef424..4b4308cb3 100644 --- a/web/public/locales/tr/config/global.json +++ b/web/public/locales/tr/config/global.json @@ -1 +1,8 @@ -{} +{ + "safe_mode": { + "label": "Güvenli mod" + }, + "environment_vars": { + "label": "Ortam değişkenleri" + } +} diff --git a/web/public/locales/tr/config/validation.json b/web/public/locales/tr/config/validation.json index 0967ef424..73b68c515 100644 --- a/web/public/locales/tr/config/validation.json +++ b/web/public/locales/tr/config/validation.json @@ -1 +1,6 @@ -{} +{ + "minimum": "En az {{limit}} olmalı", + "maximum": "En fazla {{limit}} olmalı", + "exclusiveMinimum": "{{limit}}’den büyük olmalı", + "exclusiveMaximum": "{{limit}}’den küçük olmalı" +} diff --git a/web/public/locales/tr/views/classificationModel.json b/web/public/locales/tr/views/classificationModel.json index 2081188aa..3a14e1f91 100644 --- a/web/public/locales/tr/views/classificationModel.json +++ b/web/public/locales/tr/views/classificationModel.json @@ -17,8 +17,10 @@ }, "toast": { "success": { - "deletedCategory": "Silinmiş Sınıf", - 
"deletedImage": "Silinmiş Fotoğraflar", + "deletedCategory_one": "Silinmiş Sınıf", + "deletedCategory_other": "", + "deletedImage_one": "Silinmiş Fotoğraflar", + "deletedImage_other": "", "deletedModel_one": "{{count}} model başarıyla silindi", "deletedModel_other": "{{count}} model başarıyla silindi", "categorizedImage": "Fotoğraf Başarıyla Sınıflandırıldı", diff --git a/web/public/locales/tr/views/faceLibrary.json b/web/public/locales/tr/views/faceLibrary.json index 6df04530b..46663ac48 100644 --- a/web/public/locales/tr/views/faceLibrary.json +++ b/web/public/locales/tr/views/faceLibrary.json @@ -3,7 +3,8 @@ "description": { "placeholder": "Bu koleksiyona bir isim verin", "addFace": "İlk görselinizi yükleyerek Yüz Kütüphanesi’ne yeni bir koleksiyon ekleyin.", - "invalidName": "Geçersiz isim. İsimler; yalnızca harf, rakam, boşluk, kesme işareti (’), alt çizgi(_) ve tire (-) içerebilir." + "invalidName": "Geçersiz isim. İsimler; yalnızca harf, rakam, boşluk, kesme işareti (’), alt çizgi(_) ve tire (-) içerebilir.", + "nameCannotContainHash": "İsim, # içeremez." 
}, "details": { "person": "İnsan", diff --git a/web/public/locales/tr/views/live.json b/web/public/locales/tr/views/live.json index 60a8576ff..1c9af5328 100644 --- a/web/public/locales/tr/views/live.json +++ b/web/public/locales/tr/views/live.json @@ -1,5 +1,7 @@ { - "documentTitle": "Canlı - Frigate", + "documentTitle": { + "default": "Canlı - Frigate" + }, "documentTitle.withCamera": "{{camera}} - Canlı - Frigate", "muteCameras": { "disable": "Tüm Kameraların Sesini Aç", diff --git a/web/public/locales/uk/views/classificationModel.json b/web/public/locales/uk/views/classificationModel.json index a96997bc7..faceecd91 100644 --- a/web/public/locales/uk/views/classificationModel.json +++ b/web/public/locales/uk/views/classificationModel.json @@ -12,8 +12,12 @@ }, "toast": { "success": { - "deletedCategory": "Видалений клас", - "deletedImage": "Видалені зображення", + "deletedCategory_one": "Видалений клас", + "deletedCategory_few": "", + "deletedCategory_many": "", + "deletedImage_one": "Видалені зображення", + "deletedImage_few": "", + "deletedImage_many": "", "categorizedImage": "Зображення успішно класифіковано", "trainedModel": "Успішно навчена модель.", "trainingModel": "Успішно розпочато навчання моделі.", diff --git a/web/public/locales/vi/views/classificationModel.json b/web/public/locales/vi/views/classificationModel.json index f0a012425..666a72fa1 100644 --- a/web/public/locales/vi/views/classificationModel.json +++ b/web/public/locales/vi/views/classificationModel.json @@ -12,8 +12,8 @@ }, "toast": { "success": { - "deletedCategory": "Lớp Đã Bị Xoá", - "deletedImage": "Hình ảnh đã bị xóa", + "deletedCategory_other": "Lớp Đã Bị Xoá", + "deletedImage_other": "Hình ảnh đã bị xóa", "deletedModel_other": "Đã xóa thành công {{count}} mô hình", "categorizedImage": "Phân Loại Hình Ảnh Thành Công", "trainedModel": "Đã huấn luyện mô hình thành công.", diff --git a/web/public/locales/yue-Hant/views/classificationModel.json 
b/web/public/locales/yue-Hant/views/classificationModel.json index c46b060d7..7bfda4a68 100644 --- a/web/public/locales/yue-Hant/views/classificationModel.json +++ b/web/public/locales/yue-Hant/views/classificationModel.json @@ -28,8 +28,8 @@ }, "toast": { "success": { - "deletedCategory": "已刪除類別", - "deletedImage": "已刪除影像", + "deletedCategory_other": "已刪除類別", + "deletedImage_other": "已刪除影像", "deletedModel_other": "已成功刪除 {{count}} 個模型", "categorizedImage": "影像分類成功", "trainedModel": "模型訓練成功。", diff --git a/web/public/locales/zh-CN/common.json b/web/public/locales/zh-CN/common.json index 44d326acf..e9337cfc7 100644 --- a/web/public/locales/zh-CN/common.json +++ b/web/public/locales/zh-CN/common.json @@ -167,7 +167,8 @@ "resetToDefault": "重置为默认", "saveAll": "保存全部", "savingAll": "保存全部中…", - "undoAll": "撤销全部" + "undoAll": "撤销全部", + "retry": "重试" }, "menu": { "system": "系统", @@ -271,7 +272,8 @@ "restart": "重启 Frigate", "classification": "目标分类", "actions": "操作", - "chat": "聊天" + "chat": "聊天", + "profiles": "配置模板" }, "toast": { "copyUrlToClipboard": "已复制链接到剪贴板。", @@ -280,7 +282,8 @@ "error": { "title": "保存配置信息失败: {{errorMessage}}", "noMessage": "保存配置信息失败" - } + }, + "success": "成功保存配置文件。" } }, "role": { @@ -312,5 +315,7 @@ "field": { "optional": "可选", "internalID": "Frigate 在配置与数据库中使用的内部 ID" - } + }, + "no_items": "没有项目", + "validation_errors": "验证错误" } diff --git a/web/public/locales/zh-CN/components/camera.json b/web/public/locales/zh-CN/components/camera.json index e01d5e9aa..9bd70155e 100644 --- a/web/public/locales/zh-CN/components/camera.json +++ b/web/public/locales/zh-CN/components/camera.json @@ -82,6 +82,7 @@ "zones": "区域", "mask": "遮罩", "motion": "画面变动", - "regions": "区域" + "regions": "区域", + "paths": "行动轨迹" } } diff --git a/web/public/locales/zh-CN/config/cameras.json b/web/public/locales/zh-CN/config/cameras.json index 9bb156748..aa627f549 100644 --- a/web/public/locales/zh-CN/config/cameras.json +++ b/web/public/locales/zh-CN/config/cameras.json @@ -41,7 
+41,7 @@ }, "enabled_in_config": { "label": "原始音频状态", - "description": "指示原始静态配置文件中是否启用了音频检测。" + "description": "指示原始静态配置文件中是否开启了音频检测。" } }, "audio_transcription": { @@ -79,8 +79,8 @@ "label": "目标检测", "description": "用于运行目标检测、初始化追踪器的检测模块设置。", "enabled": { - "label": "开启检测", - "description": "开启或关闭该摄像头的目标检测。如需运行目标追踪,必须先开启检测。" + "label": "开启目标检测", + "description": "开启或关闭该摄像头的目标检测。" }, "height": { "label": "检测画面高度", @@ -299,7 +299,7 @@ }, "skip_motion_threshold": { "label": "跳过画面变动阈值", - "description": "如果单帧中图像变化超过此比例,检测器将返回无画面变动框并立即重新校准。这可以节省 CPU 并减少闪电、风暴等情况下的误报,但可能会错过真实事件,如 PTZ 摄像头自动追踪目标。权衡的是丢弃几兆字节的录像与查看几个短片之间的取舍。范围 0.0 到 1.0。" + "description": "如果单帧中画面变化超过此比例,检测器将判定为无画面变动并立即重新校准。这可以节省 CPU 并减少闪电、风暴等情况下的误报,但也可能会错过真正的事件,如 PTZ 摄像头自动追踪目标。你需要权衡取舍:是否牺牲少量录制片段,换取更少无效视频与更低的误检。保持为空即可关闭该功能。" }, "improve_contrast": { "label": "改善对比度", @@ -560,7 +560,7 @@ }, "detections": { "label": "检测配置", - "description": "创建检测事件(非警报)以及保留多长时间的设置。", + "description": "用于设置哪些追踪目标会生成检测记录(非警报类),以及检测记录的保留方式。", "enabled": { "label": "开启检测", "description": "开启或关闭此摄像头的检测事件。" @@ -636,15 +636,15 @@ }, "timestamp": { "label": "时间戳叠加", - "description": "在保存的快照上叠加时间戳。" + "description": "在 API 生成的快照上叠加时间戳。" }, "bounding_box": { "label": "边界框叠加", - "description": "在保存的快照上绘制追踪目标的边界框。" + "description": "在 API 生成的快照上绘制追踪目标的边界框。" }, "crop": { "label": "裁剪快照", - "description": "将保存的快照裁剪到检测到的目标边界框。" + "description": "在 API 生成的快照裁剪到检测到的目标边界框。" }, "required_zones": { "label": "必需区域", @@ -652,11 +652,11 @@ }, "height": { "label": "快照高度", - "description": "将保存的快照调整到的目标高度(像素);留空则保持原始大小。" + "description": "将 API 生成的快照调整到的目标高度(像素);留空则保持原始大小。" }, "retain": { "label": "快照保留", - "description": "保存快照的保留设置,包括默认天数和按目标覆盖。", + "description": "快照的保留设置,包括默认天数和按目标覆盖。", "default": { "label": "默认保留", "description": "保留快照的默认天数。" @@ -671,10 +671,10 @@ } }, "quality": { - "label": "JPEG 质量", - "description": "保存快照的 JPEG 编码质量(0-100)。" + "label": "快照质量", + "description": "保存快照的编码质量(0-100)。" }, - "description": "此摄像头保存的追踪目标 JPEG 快照设置。" + 
"description": "此摄像头的追踪目标 API 快照设置。" }, "timestamp_style": { "label": "时间戳样式", @@ -748,8 +748,8 @@ "label": "车牌识别", "description": "车牌识别设置,包括检测阈值、格式化和已知车牌。", "enabled": { - "label": "开启 LPR", - "description": "在此摄像头上启用或禁用 LPR。" + "label": "开启车牌识别", + "description": "在此摄像头上启用或禁用车牌识别。" }, "min_area": { "label": "最小车牌区域", @@ -834,6 +834,10 @@ "ignore_time_mismatch": { "label": "忽略时间不匹配", "description": "忽略 ONVIF 通信中摄像头和 Frigate 服务器之间的时间同步差异。" + }, + "profile": { + "label": "ONVIF 配置文件", + "description": "用于 PTZ 控制的指定 ONVIF 媒体配置,将通过 Token 或名称匹配。如果未手动指定,将自动选择第一个包含有效 PTZ 配置的媒体配置。" } }, "ui": { @@ -937,5 +941,9 @@ "enabled_in_config": { "label": "原始摄像头状态", "description": "保持摄像头的原始状态跟踪。" + }, + "profiles": { + "label": "配置模板", + "description": "可在运行时切换指定命名的配置模板,支持局部覆盖参数。" } } diff --git a/web/public/locales/zh-CN/config/global.json b/web/public/locales/zh-CN/config/global.json index 3ccb7423b..b14f4acbf 100644 --- a/web/public/locales/zh-CN/config/global.json +++ b/web/public/locales/zh-CN/config/global.json @@ -5,7 +5,7 @@ }, "safe_mode": { "label": "安全模式", - "description": "启用后,Frigate 将以安全模式启动,将会关闭部分功能,以便排查问题。" + "description": "开启后,Frigate 将以安全模式启动,将会关闭部分功能,以便排查问题。" }, "environment_vars": { "label": "环境变量", @@ -52,7 +52,7 @@ }, "enabled_in_config": { "label": "原始音频状态", - "description": "指示原始静态配置文件中是否启用了音频检测。" + "description": "指示原始静态配置文件中是否开启了音频检测。" } }, "auth": { @@ -68,7 +68,7 @@ }, "reset_admin_password": { "label": "重置管理员密码", - "description": "启用后,启动时将重置管理员用户密码,并在日志中打印新密码。" + "description": "开启后,启动时将重置管理员用户密码,并在日志中打印新密码。" }, "cookie_name": { "label": "JWT Cookie 名称", @@ -183,8 +183,8 @@ "label": "目标检测", "description": "用于运行目标检测、初始化追踪器的检测模块设置。", "enabled": { - "label": "开启检测", - "description": "为所有摄像头启用或禁用目标检测;可按摄像头覆盖。必须启用检测才能运行目标追踪。" + "label": "开启目标检测", + "description": "为所有摄像头启用或禁用目标检测,可按摄像头覆盖。" }, "height": { "label": "检测画面高度", @@ -536,7 +536,7 @@ "description": "Frigate Web 端点(端口 8971)的 TLS 设置。", "enabled": { "label": "开启 TLS", - "description": "在配置的 TLS 端口上为 
Frigate 的 Web UI 和 API 启用 TLS。" + "description": "为 Frigate 的 Web 页面和 API 的端口开启 TLS 加密。" } }, "ui": { @@ -567,7 +567,7 @@ "label": "检测器硬件", "description": "目标检测器(CPU、GPU、ONNX 后端)的配置以及任何检测器特定的模型设置。", "type": { - "label": "检测器类型", + "label": "类型", "description": "用于目标检测的检测器类型(例如 'cpu'、'edgetpu'、'openvino')。" }, "cpu": { @@ -1352,6 +1352,61 @@ "label": "ZMQ 套接字逗留时间(毫秒)", "description": "套接字逗留时间(毫秒)。" } + }, + "axengine": { + "label": "爱芯元智 NPU", + "description": "AXERA AX650N/AX8850N NPU 检测器,通过 AXEngine 运行库加载并执行编译后的 .axmodel 模型文件。", + "type": { + "label": "类型" + } + }, + "model": { + "label": "检测器特定的模型配置", + "description": "检测器特定的模型配置选项(路径、输入尺寸等)。", + "path": { + "label": "自定义目标检测模型路径", + "description": "自定义检测模型文件的路径(或使用 plus:// 指定 Frigate+ 模型)。" + }, + "labelmap_path": { + "label": "自定义目标检测器的标签映射(labelmap)", + "description": "检测器标签映射文件(labelmap)路径,用于将数字类别映射为文字标签。" + }, + "width": { + "label": "目标检测模型输入宽度", + "description": "模型输入张量(input tensor)的宽度(以像素为单位)。" + }, + "height": { + "label": "目标检测模型输入高度", + "description": "模型输入张量(input tensor)的高度(以像素为单位)。" + }, + "labelmap": { + "label": "标签映射(labelmap)自定义", + "description": "合并到标准标签映射表中的覆盖 / 重映射规则。" + }, + "attributes_map": { + "label": "目标标签到其属性标签的映射", + "description": "用于绑定元数据的目标标签 → 属性标签映射关系(例如:'car'→ ['license_plate'] 为将车牌属性绑定到车辆上)。" + }, + "input_tensor": { + "label": "模型输入张量形状", + "description": "模型期望的张量格式(Tensor format):'nhwc' 或 'nchw'。" + }, + "input_pixel_format": { + "label": "模型输入像素颜色格式", + "description": "模型期望的像素颜色空间:'rgb'、'bgr' 或 'yuv'。" + }, + "input_dtype": { + "label": "模型输入数据类型", + "description": "模型输入张量的数据类型(例如 'float32')。" + }, + "model_type": { + "label": "目标检测模型类型", + "description": "某些检测器用于优化的检测器模型架构类型(ssd、yolox、yolonas)。" + } + }, + "model_path": { + "label": "检测器专用模型路径", + "description": "所选检测器需要时,需填写其模型文件的路径。" } }, "model": { @@ -1399,7 +1454,7 @@ } }, "genai": { - "label": "生成式 AI 配置(命名提供商)。", + "label": "生成式 AI 配置", "description": "用于生成目标描述和核查摘要的集成生成式 AI 提供商设置。", "api_key": { "label": "API 密钥", 
@@ -1432,7 +1487,7 @@ }, "live": { "label": "实时回放", - "description": "Web UI 用于控制实时监控流分辨率和质量的设置。", + "description": "用于控制 JSMPEG 实时流分辨率与画质的设置。此设置不影响使用 go2rtc 进行实时预览的摄像头。", "streams": { "label": "实时监控流名称", "description": "配置的流名称到用于实时监控播放的 restream/go2rtc 名称的映射。" @@ -1463,7 +1518,7 @@ }, "skip_motion_threshold": { "label": "跳过画面变动阈值", - "description": "如果单帧中图像变化超过此比例,检测器将返回无画面变动框并立即重新校准。这可以节省 CPU 并减少闪电、风暴等情况下的误报,但可能会错过真实事件,如 PTZ 摄像头自动追踪目标。权衡的是丢弃几兆字节的录像与查看几个短片之间的取舍。范围 0.0 到 1.0。" + "description": "如果单帧中画面变化超过此比例,检测器将判定为无画面变动并立即重新校准。这可以节省 CPU 并减少闪电、风暴等情况下的误报,但也可能会错过真正的事件,如 PTZ 摄像头自动追踪目标。你需要权衡取舍:是否牺牲少量录制片段,换取更少无效视频与更低的误检。保持为空即可关闭该功能。" }, "improve_contrast": { "label": "改善对比度", @@ -1724,7 +1779,7 @@ }, "detections": { "label": "检测配置", - "description": "创建检测事件(非警报)以及保留多长时间的设置。", + "description": "用于设置哪些追踪目标会生成检测记录(非警报类),以及检测记录的保留方式。", "enabled": { "label": "开启检测", "description": "为所有摄像头启用或禁用检测事件;可按摄像头覆盖。" @@ -1789,7 +1844,7 @@ }, "snapshots": { "label": "快照", - "description": "所有摄像头的追踪目标 JPEG 快照保存设置;可按摄像头覆盖。", + "description": "所有摄像头的追踪目标 API 快照设置;可摄像头单独配置覆盖全局配置。", "enabled": { "label": "开启快照", "description": "为所有摄像头启用或禁用保存快照;可按摄像头覆盖。" @@ -1800,15 +1855,15 @@ }, "timestamp": { "label": "时间戳叠加", - "description": "在保存的快照上叠加时间戳。" + "description": "在 API 生成的快照上叠加时间戳。" }, "bounding_box": { "label": "边界框叠加", - "description": "在保存的快照上绘制追踪目标的边界框。" + "description": "在 API 生成的快照上绘制追踪目标的边界框。" }, "crop": { "label": "裁剪快照", - "description": "将保存的快照裁剪到检测到的目标边界框。" + "description": "在 API 生成的快照裁剪到检测到的目标边界框。" }, "required_zones": { "label": "必需区域", @@ -1816,11 +1871,11 @@ }, "height": { "label": "快照高度", - "description": "将保存的快照调整到的目标高度(像素);留空则保持原始大小。" + "description": "将 API 生成的快照调整到的目标高度(像素);留空则保持原始大小。" }, "retain": { "label": "快照保留", - "description": "保存快照的保留设置,包括默认天数和按目标覆盖。", + "description": "快照的保留设置,包括默认天数和按目标覆盖。", "default": { "label": "默认保留", "description": "保留快照的默认天数。" @@ -1835,8 +1890,8 @@ } }, "quality": { - "label": "JPEG 质量", - "description": "保存快照的 JPEG 编码质量(0-100)。" + 
"label": "快照质量", + "description": "保存快照的编码质量(0-100)。" } }, "timestamp_style": { @@ -1951,8 +2006,8 @@ "description": "触发将历史追踪目标完全重新索引到嵌入数据库。" }, "model": { - "label": "语义搜索模型", - "description": "用于语义搜索的嵌入模型(例如 'jinav1')。" + "label": "语义搜索模型或生成式 AI 服务名称", + "description": "用于语义搜索的嵌入模型(例如 'jinav1'),或具有嵌入功能(embeddings)的生成式 AI 服务名称。" }, "model_size": { "label": "模型大小", @@ -1995,12 +2050,12 @@ "label": "车牌识别", "description": "车牌识别设置,包括检测阈值、格式化和已知车牌。", "enabled": { - "label": "开启 LPR", + "label": "开启车牌识别", "description": "为所有摄像头启用或禁用车牌识别;可按摄像头覆盖。" }, "model_size": { "label": "模型大小", - "description": "用于文本检测/识别的模型大小。大多数用户应使用 'small'。" + "description": "用于文本检测/识别的模型大小,大多数用户应使用 'small',只有'small'模型支持中文。" }, "detection_threshold": { "label": "检测阈值", @@ -2187,6 +2242,22 @@ "ignore_time_mismatch": { "label": "忽略时间不匹配", "description": "忽略 ONVIF 通信中摄像头和 Frigate 服务器之间的时间同步差异。" + }, + "profile": { + "label": "ONVIF 配置文件", + "description": "用于 PTZ 控制的指定 ONVIF 媒体配置,将通过 Token 或名称匹配。如果未手动指定,将自动选择第一个包含有效 PTZ 配置的媒体配置。" } + }, + "profiles": { + "label": "配置模板", + "description": "带有别名的命名配置模板定义。摄像头配置模板必须引用此处定义的名称。", + "friendly_name": { + "label": "别名", + "description": "在界面中显示的此配置模板名称,可以使用中文。" + } + }, + "active_profile": { + "label": "激活配置模板", + "description": "当前激活的配置模板名称。仅在运行时使用,不会写入 YAML 配置文件中。" } } diff --git a/web/public/locales/zh-CN/objects.json b/web/public/locales/zh-CN/objects.json index 193f87179..f8d07bc23 100644 --- a/web/public/locales/zh-CN/objects.json +++ b/web/public/locales/zh-CN/objects.json @@ -116,5 +116,10 @@ "nzpost": "新西兰邮政", "postnord": "北欧邮政", "gls": "GLS", - "dpd": "DPD" + "dpd": "DPD", + "canada_post": "加拿大邮政", + "royal_mail": "英国皇家邮政", + "school_bus": "校车", + "skunk": "臭鼬", + "kangaroo": "袋鼠" } diff --git a/web/public/locales/zh-CN/views/classificationModel.json b/web/public/locales/zh-CN/views/classificationModel.json index 3e9cf67fe..ea106839b 100644 --- a/web/public/locales/zh-CN/views/classificationModel.json +++ 
b/web/public/locales/zh-CN/views/classificationModel.json @@ -12,14 +12,15 @@ }, "toast": { "success": { - "deletedCategory": "删除类别", - "deletedImage": "删除图片", + "deletedCategory_other": "删除 {{count}} 个类别", + "deletedImage_other": "删除 {{count}} 张图片", "categorizedImage": "成功分类图片", "trainedModel": "训练模型成功。", "trainingModel": "已开始训练模型。", "deletedModel_other": "已删除 {{count}} 个模型", "updatedModel": "已更新模型配置", - "renamedCategory": "成功修改类别名称为 {{name}}" + "renamedCategory": "成功修改类别名称为 {{name}}", + "reclassifiedImage": "成功重新分类图片" }, "error": { "deleteImageFailed": "删除失败:{{errorMessage}}", @@ -29,7 +30,8 @@ "deleteModelFailed": "删除模型失败:{{errorMessage}}", "updateModelFailed": "更新模型失败:{{errorMessage}}", "trainingFailedToStart": "开始训练模型失败:{{errorMessage}}", - "renameCategoryFailed": "修改类别名称失败:{{errorMessage}}" + "renameCategoryFailed": "修改类别名称失败:{{errorMessage}}", + "reclassifyFailed": "重新分类图片失败:{{errorMessage}}" } }, "deleteCategory": { @@ -148,8 +150,13 @@ "allImagesRequired_other": "请对所有图片进行分类。还有 {{count}} 张图片需要分类。", "modelCreated": "模型创建成功。请在“最近分类”页面为缺失的状态添加图片,然后训练模型。", "missingStatesWarning": { - "title": "缺失状态示例", - "description": "建议为所有状态都选择示例图片以获得最佳效果。你也可以跳过当前为分类状态选择图片,但需要所有状态都有对应的图片,模型才能够进行训练。跳过后你可通过“最近分类”页面为缺失的状态分类添加图片,然后再训练模型。" + "title": "缺失分类示例", + "description": "并非所有类别都有示例。可尝试生成新示例以查找缺失的类别,或继续该步骤,之后通过 “最近分类” 页面添加图片。" + }, + "refreshExamples": "生成新示例", + "refreshConfirm": { + "title": "需要生成新示例?", + "description": "此操作将生成一组新的图片,并清除所有选择内容(包括之前的所有类别)。你需要为所有类别重新选择示例。" } } }, @@ -179,5 +186,7 @@ "noChanges": "自上次训练以来,数据集未作任何更改。", "modelNotReady": "模型尚未准备好进行训练" }, - "none": "无标签" + "none": "无标签", + "reclassifyImageAs": "重新分类图片为:", + "reclassifyImage": "重新分类图片" } diff --git a/web/public/locales/zh-CN/views/events.json b/web/public/locales/zh-CN/views/events.json index 3e479aa4a..f02a83907 100644 --- a/web/public/locales/zh-CN/views/events.json +++ b/web/public/locales/zh-CN/views/events.json @@ -12,10 +12,12 @@ "motion": "还没有画面变动类数据", "recordingsDisabled": { "title": 
"必须要开启录制功能", - "description": "必须要摄像头启用录制功能时,才可为其创建回放项目。" + "description": "必须要摄像头开启录制功能时,才可为其创建回放项目。" } }, - "timeline": "时间线", + "timeline": { + "label": "时间线" + }, "timeline.aria": "选择时间线", "events": { "label": "事件", diff --git a/web/public/locales/zh-CN/views/explore.json b/web/public/locales/zh-CN/views/explore.json index 63668057a..db062d455 100644 --- a/web/public/locales/zh-CN/views/explore.json +++ b/web/public/locales/zh-CN/views/explore.json @@ -169,7 +169,8 @@ "attributes": "分类属性", "title": { "label": "标题" - } + }, + "scoreInfo": "分数信息" }, "itemMenu": { "downloadVideo": { @@ -224,12 +225,18 @@ "debugReplay": { "label": "调试回放", "aria": "在调试回放视图中查看此被追踪对象" + }, + "more": { + "aria": "更多" } }, "dialog": { "confirmDelete": { "title": "确认删除", "desc": "删除此追踪目标后,将移除快照、所有已保存的嵌入向量数据以及任何相关的目标追踪详情条目,但在 历史 页面中追踪目标的录制视频片段不会被删除。

    你确定要继续删除该追踪目标吗?" + }, + "toast": { + "error": "删除该追踪目标时出错:{{errorMessage}}" } }, "noTrackedObjects": "未找到追踪目标", diff --git a/web/public/locales/zh-CN/views/exports.json b/web/public/locales/zh-CN/views/exports.json index 864283d3f..b57b1a1c6 100644 --- a/web/public/locales/zh-CN/views/exports.json +++ b/web/public/locales/zh-CN/views/exports.json @@ -2,7 +2,9 @@ "documentTitle": "导出 - Frigate", "search": "搜索", "noExports": "没有找到导出的项目", - "deleteExport": "删除导出的项目", + "deleteExport": { + "label": "删除导出" + }, "deleteExport.desc": "你确定要删除 {{exportName}} 吗?", "editExport": { "title": "重命名导出", diff --git a/web/public/locales/zh-CN/views/faceLibrary.json b/web/public/locales/zh-CN/views/faceLibrary.json index b8e9a9501..d383fb348 100644 --- a/web/public/locales/zh-CN/views/faceLibrary.json +++ b/web/public/locales/zh-CN/views/faceLibrary.json @@ -65,7 +65,8 @@ "deletedName_other": "成功删除 {{count}} 个 人脸特征。", "trainedFace": "人脸特征训练成功。", "updatedFaceScore": "更新 {{name}} 人脸特征评分({{score}})成功。", - "renamedFace": "成功重命名人脸为{{name}}" + "renamedFace": "成功重命名人脸为{{name}}", + "reclassifiedFace": "重新分类人脸成功。" }, "error": { "uploadingImageFailed": "图片上传失败:{{errorMessage}}", @@ -74,7 +75,8 @@ "deleteNameFailed": "数据集删除失败:{{errorMessage}}", "trainFailed": "训练失败:{{errorMessage}}", "updateFaceScoreFailed": "更新人脸评分失败:{{errorMessage}}", - "renameFaceFailed": "重命名人脸失败:{{errorMessage}}" + "renameFaceFailed": "重命名人脸失败:{{errorMessage}}", + "reclassifyFailed": "重新分类人脸失败:{{errorMessage}}" } }, "steps": { @@ -95,5 +97,7 @@ "title": "删除人脸" }, "pixels": "{{area}} 像素", - "nofaces": "没有可用的人脸" + "nofaces": "没有可用的人脸", + "reclassifyFaceAs": "将人脸重新分类为:", + "reclassifyFace": "重新分类人脸" } diff --git a/web/public/locales/zh-CN/views/live.json b/web/public/locales/zh-CN/views/live.json index 0f025b5cc..10b8641d3 100644 --- a/web/public/locales/zh-CN/views/live.json +++ b/web/public/locales/zh-CN/views/live.json @@ -1,5 +1,7 @@ { - "documentTitle": "实时监控 - Frigate", + "documentTitle": { + "default": "实时监控 - 
Frigate" + }, "documentTitle.withCamera": "{{camera}} - 实时监控 - Frigate", "lowBandwidthMode": "低带宽模式", "twoWayTalk": { @@ -14,8 +16,9 @@ "move": { "clickMove": { "label": "点击画面以使摄像头居中", - "enable": "启用点击移动", - "disable": "禁用点击移动" + "enable": "开启点击移动", + "disable": "禁用点击移动", + "enableWithZoom": "开启点击移动 / 拖动缩放功能" }, "left": { "label": "PTZ摄像头向左移动" @@ -62,19 +65,19 @@ "disable": "取消屏蔽所有摄像头" }, "detect": { - "enable": "启用检测", + "enable": "开启检测", "disable": "关闭检测" }, "recording": { - "enable": "启用录制", + "enable": "开启录制", "disable": "关闭录制" }, "snapshots": { - "enable": "启用快照", + "enable": "开启快照", "disable": "关闭快照" }, "audioDetect": { - "enable": "启用音频检测", + "enable": "开启音频检测", "disable": "关闭音频检测" }, "autotracking": { diff --git a/web/public/locales/zh-CN/views/settings.json b/web/public/locales/zh-CN/views/settings.json index a5d78a01d..55190e53b 100644 --- a/web/public/locales/zh-CN/views/settings.json +++ b/web/public/locales/zh-CN/views/settings.json @@ -7,7 +7,7 @@ "masksAndZones": "遮罩和区域编辑器 - Frigate", "motionTuner": "画面变动调整 - Frigate", "object": "调试 - Frigate", - "general": "配置文件设置 - Frigate", + "general": "界面设置 - Frigate", "frigatePlus": "Frigate+ 设置 - Frigate", "notifications": "通知设置 - Frigate", "enrichments": "增强功能设置 - Frigate", @@ -15,7 +15,8 @@ "cameraReview": "摄像头核查设置 - Frigate", "globalConfig": "全局配置 - Frigate", "cameraConfig": "摄像头配置 - Frigate", - "maintenance": "维护 - Frigate" + "maintenance": "维护 - Frigate", + "profiles": "配置模板 - Frigate" }, "menu": { "ui": "界面设置", @@ -87,7 +88,11 @@ "cameraTimestampStyle": "时间戳样式", "cameraMqtt": "摄像头 MQTT", "mediaSync": "媒体同步", - "regionGrid": "区域网格" + "regionGrid": "区域网格", + "uiSettings": "界面设置", + "profiles": "配置模板", + "systemGo2rtcStreams": "go2rtc 视频流", + "maintenance": "维护" }, "dialog": { "unsavedChanges": { @@ -100,7 +105,7 @@ "noCamera": "没有摄像头" }, "general": { - "title": "配置文件设置", + "title": "界面设置", "liveDashboard": { "title": "实时监控面板", "automaticLiveView": { @@ -351,12 +356,26 @@ "zone": "区域", "motion_mask": 
"画面变动遮罩", "object_mask": "目标遮罩" + }, + "revertOverride": { + "title": "恢复为默认配置" } }, "speed": { "error": { "mustBeGreaterOrEqualTo": "速度阈值必须大于或等于0.1。" } + }, + "id": { + "error": { + "mustNotBeEmpty": "ID 不能为空。", + "alreadyExists": "此摄像头已存在使用该 ID 的遮罩。" + } + }, + "name": { + "error": { + "mustNotBeEmpty": "名称不能为空。" + } } }, "zones": { @@ -486,7 +505,10 @@ "title": "开启", "description": "指示该遮罩在配置文件中是否处于激活并启用的状态。若被禁用,则无法通过 MQTT 启用。禁用的遮罩在运行时会被忽略。" } - } + }, + "profileBase": "(基础)", + "profileOverride": "(覆盖)", + "addDisabledProfile": "先添加到基础配置中,然后在配置模板中进行覆盖" }, "motionDetectionTuner": { "title": "画面变动检测调整", @@ -753,9 +775,9 @@ }, "snapshotConfig": { "title": "快照配置", - "desc": "提交到 Frigate+ 需要同时在配置中启用快照和 clean_copy 快照。", + "desc": "提交到 Frigate+ 需要同时在配置中开启快照功能。", "documentation": "阅读文档", - "cleanCopyWarning": "部分摄像头已启用快照但未启用 clean_copy。您需要在快照配置中启用 clean_copy,才能将这些摄像头的图像提交到 Frigate+。", + "cleanCopyWarning": "部分摄像头未开启快照功能", "table": { "camera": "摄像头", "snapshots": "快照", @@ -1374,6 +1396,14 @@ "confirmButton": "永久删除", "success": "摄像头 {{cameraName}} 删除完成", "error": "删除摄像头 {{cameraName}} 失败" + }, + "profiles": { + "title": "配置模板的摄像头覆盖项", + "selectLabel": "选择配置模板", + "description": "配置在启用某个配置模板时,哪些摄像头应被开启或关闭。设置为“继承”的摄像头会沿用它原本的启用/禁用状态。", + "inherit": "继承", + "enabled": "开启", + "disabled": "关闭" } }, "cameraReview": { @@ -1428,6 +1458,9 @@ "value": { "label": "新值", "reset": "重置" + }, + "profile": { + "label": "配置" } }, "detectionModel": { @@ -1465,7 +1498,8 @@ "genai": "生成式 AI", "face_recognition": "人脸识别", "lpr": "车牌识别", - "birdseye": "鸟瞰图" + "birdseye": "鸟瞰图", + "masksAndZones": "遮罩 / 区域" }, "global": { "title": "全局设置", @@ -1473,7 +1507,8 @@ }, "camera": { "title": "摄像头设置", - "description": "这些设置仅适用于此摄像头,并会覆盖全局设置。" + "description": "这些设置仅适用于此摄像头,并会覆盖全局设置。", + "noCameras": "没有可用的摄像头" }, "advancedSettingsCount": "高级设置 ({{count}})", "advancedCount": "高级选项 ({{count}})", @@ -1495,7 +1530,35 @@ "manual": "手动参数", "inherit": "继承摄像头设置", "selectPreset": "选择预设", - "manualPlaceholder": "输入 
FFmpeg 参数" + "manualPlaceholder": "输入 FFmpeg 参数", + "none": "无", + "useGlobalSetting": "继承全局设置", + "presetLabels": { + "preset-rpi-64-h264": "树莓派(H.264)", + "preset-rpi-64-h265": "树莓派(H.265)", + "preset-vaapi": "VAAPI (Intel/AMD GPU)", + "preset-intel-qsv-h264": "Intel QuickSync (H.264)", + "preset-intel-qsv-h265": "Intel QuickSync (H.265)", + "preset-nvidia": "NVIDIA GPU", + "preset-jetson-h264": "NVIDIA Jetson (H.264)", + "preset-jetson-h265": "NVIDIA Jetson (H.265)", + "preset-rkmpp": "瑞芯微 RKMPP", + "preset-http-jpeg-generic": "HTTP JPEG(通用)", + "preset-http-mjpeg-generic": "HTTP MJPEG(通用)", + "preset-http-reolink": "HTTP - Reolink 摄像头", + "preset-rtmp-generic": "RTMP(通用)", + "preset-rtsp-generic": "RTSP(通用)", + "preset-rtsp-restream": "RTSP - 从 go2rtc 转流", + "preset-rtsp-restream-low-latency": "RTSP - 从 go2rtc 转流(低延迟)", + "preset-rtsp-udp": "RTSP - UDP协议", + "preset-rtsp-blue-iris": "RTSP - Blue Iris", + "preset-record-generic": "录制(通用,无音频)", + "preset-record-generic-audio-copy": "录制(通用,不转码音频)", + "preset-record-generic-audio-aac": "录制(通用并将音频转码为 AAC)", + "preset-record-mjpeg": "录制 - MJPEG 流摄像头", + "preset-record-jpeg": "录制 - JPEG 流摄像头", + "preset-record-ubiquiti": "录制 - 优必飞摄像头" + } }, "cameraInputs": { "itemTitle": "视频流 {{index}}" @@ -1512,7 +1575,8 @@ "keyDuplicate": "检测器名称已存在。", "noSchema": "没有可用的检测器架构。", "none": "未配置检测器实例。", - "add": "添加检测器" + "add": "添加检测器", + "addCustomKey": "添加自定义键(Key)" }, "record": { "title": "录制设置" @@ -1574,7 +1638,24 @@ "timestamp_style": { "title": "时间戳设置" }, - "searchPlaceholder": "搜索…" + "searchPlaceholder": "搜索…", + "genaiRoles": { + "options": { + "embeddings": "嵌入(Embedding)", + "vision": "视觉(Vision)", + "tools": "工具(Tools)" + } + }, + "semanticSearchModel": { + "placeholder": "选择模型…", + "builtIn": "内置模型", + "genaiProviders": "生成式 AI 服务" + }, + "reviewLabels": { + "summary": "已选择 {{count}} 个标签", + "empty": "暂无可用标签" + }, + "addCustomLabel": "添加自定义标签…" }, "cameraConfig": { "title": "摄像头配置", @@ -1631,7 +1712,9 @@ 
"review_thumbnails": "核查缩略图", "previews": "预览", "exports": "导出", - "recordings": "录像" + "recordings": "录像", + "verbose": "详细模式", + "verboseDesc": "将所有孤立文件的完整清单写入硬盘以供核查。" }, "regionGrid": { "title": "区域网格", @@ -1668,5 +1751,140 @@ "unsavedChanges": "您有未保存的更改", "confirmReset": "确认重置", "resetToDefaultDescription": "这将把此部分的所有设置重置为默认值。此操作无法撤销。", - "resetToGlobalDescription": "这将把此部分的设置重置为全局默认值。此操作无法撤销。" + "resetToGlobalDescription": "这将把此部分的设置重置为全局默认值。此操作无法撤销。", + "button": { + "overriddenGlobal": "已覆盖全局通用配置", + "overriddenGlobalTooltip": "当前摄像头配置,将优先覆盖全局通用设置", + "overriddenBaseConfigTooltip": "当前 {{profile}} 配置模板会覆盖本节所有设置", + "overriddenBaseConfig": "已覆盖默认配置" + }, + "profiles": { + "title": "配置模板", + "activeProfile": "激活配置模板", + "noActiveProfile": "无激活的配置模板", + "active": "激活", + "activated": "配置模板 {{profile}} 已激活", + "activateFailed": "配置模板设置失败", + "deactivated": "配置模板已停用", + "noProfiles": "未定义任何配置模板。", + "noOverrides": "无覆盖项", + "cameraCount_other": "{{count}} 个摄像头", + "baseConfig": "基础配置", + "addProfile": "添加配置模板", + "newProfile": "新配置模板", + "profileNamePlaceholder": "例如:布防、外出、夜间模式", + "friendlyNameLabel": "配置模板名称", + "profileIdLabel": "配置模板 ID", + "profileIdDescription": "用于配置和自动化的内部标识符", + "nameInvalid": "仅允许使用小写字母、数字和下划线", + "nameDuplicate": "已存在同名配置模板", + "columnCamera": "摄像头", + "columnOverrides": "配置文件覆盖", + "error": { + "mustBeAtLeastTwoCharacters": "至少需要 2 个字符", + "mustNotContainPeriod": "不得包含英文句号(\".\")", + "alreadyExists": "已存在使用此 ID 的配置文件" + }, + "renameProfile": "重命名配置文件", + "renameSuccess": "已将配置文件重命名为 “{{profile}}”", + "deleteProfile": "删除配置文件", + "deleteProfileConfirm": "确定要为所有摄像头删除配置文件“{{profile}}”吗?该步骤无法撤销。", + "deleteSuccess": "配置文件“{{profile}}”已删除", + "createSuccess": "配置文件“{{profile}}”已创建", + "removeOverride": "移除配置文件覆盖", + "deleteSection": "删除节点覆盖", + "deleteSectionConfirm": "是否要移除摄像机 {{camera}} 上针对配置文件 {{profile}} 的 {{section}} 覆盖设置?", + "deleteSectionSuccess": "已移除 {{profile}} 的 {{section}} 覆盖设置", + "enableSwitch": "开启配置文件", + 
"enabledDescription": "配置文件功能已启用。请在下方创建新的配置文件,进入摄像头配置页面进行修改并保存,修改即可生效。", + "disabledDescription": "配置文件功能可以让你创建一组带名称的摄像头自定义参数(比如布防、离家、夜间模式),并随时切换启用。" + }, + "timestampPosition": { + "tl": "左上角", + "tr": "右上角", + "bl": "左下角", + "br": "右下角" + }, + "go2rtcStreams": { + "title": "go2rtc 视频流", + "description": "管理用于摄像头转流的 go2rtc 流配置。每个视频流包含一个名称以及一个或多个源地址 URL。", + "addStream": "添加视频流", + "addStreamDesc": "为新的视频流输入一个名称,该名称将用于在摄像头配置中引用该视频流。", + "addUrl": "添加 URL 地址", + "streamName": "视频流名称", + "streamNamePlaceholder": "例如:front_door,此处只能使用英文", + "streamUrlPlaceholder": "例如:rtsp://user:pass@192.168.1.100/stream", + "deleteStream": "删除视频流", + "deleteStreamConfirm": "确定要删除视频流 “{{streamName}}” 吗?引用该视频流的摄像头可能会停止工作。", + "noStreams": "未配置任何 go2rtc 流。请添加一个视频流以开始使用。", + "validation": { + "nameRequired": "视频流名称为必填", + "nameDuplicate": "已存在同名的视频流", + "nameInvalid": "视频流名称只能使用字母、数字、下划线和连字符", + "urlRequired": "至少需要填写一个 URL 地址" + }, + "renameStream": "重命名视频流", + "renameStreamDesc": "为此视频流输入新名称。重命名视频流可能会导致通过名称引用它的摄像头或其他流无法正常工作。", + "newStreamName": "新视频流名称", + "ffmpeg": { + "useFfmpegModule": "使用兼容模式(ffmpeg)", + "video": "视频", + "audio": "音频", + "hardware": "硬件加速", + "videoCopy": "直接复制", + "videoH264": "转码为 H.264", + "videoH265": "转码为 H.265", + "videoExclude": "排除", + "audioCopy": "直接复制", + "audioAac": "转码为 AAC", + "audioOpus": "转码为 Opus", + "audioPcmu": "转码为 PCM μ-law", + "audioPcma": "转码为 PCM A-law", + "audioPcm": "转码为 PCM", + "audioMp3": "转码为 MP3", + "audioExclude": "排除", + "hardwareNone": "无硬件加速", + "hardwareAuto": "自动选择硬件加速" + } + }, + "onvif": { + "profileAuto": "自动", + "profileLoading": "正在加载配置文件…" + }, + "configMessages": { + "review": { + "recordDisabled": "录制已禁用,不会生成核查记录项。", + "detectDisabled": "目标检测已禁用。核查记录需要依靠检测到的目标来对警报和检测事件进行分类。", + "allNonAlertDetections": "所有非警报类活动都将被记录为检测事件。" + }, + "lpr": { + "vehicleNotTracked": "车牌识别需要先开启对 “汽车” 或 “摩托车” 的目标追踪。", + "globalDisabled": "车牌识别未在全局开启。请在全局设置中开启该功能,才能在摄像头下单独配置车牌识别是否开启。" + }, + "audio": { + "noAudioRole": 
"暂无任何流已开启音频(audio)功能(role)。必须在视频流上启用音频功能,音频检测才能正常工作。" + }, + "audioTranscription": { + "audioDetectionDisabled": "该摄像头未开启音频检测功能。音频转录需要先开启音频检测。" + }, + "detect": { + "fpsGreaterThanFive": "不建议设置检测帧率高于 5。" + }, + "faceRecognition": { + "globalDisabled": "人脸识别未在全局开启。请在全局设置中开启该功能,才能在摄像头下单独配置人脸识别是否开启。", + "personNotTracked": "人脸识别需要检测到 “人”(person) 后才能工作。请确保 “person” 已添加到目标追踪列表中。" + }, + "record": { + "noRecordRole": "暂无任何视频流已配置录制功能,录制功能将无法正常工作。" + }, + "birdseye": { + "objectsModeDetectDisabled": "鸟瞰图已设置为 “目标” 模式,但此摄像头未开启目标检测。该摄像头将不会显示在鸟瞰画面中。" + }, + "snapshots": { + "detectDisabled": "目标检测已禁用。快照是根据追踪到的目标生成的,因此将不会创建快照。" + }, + "detectors": { + "mixedTypes": "所有检测器必须为同一类型。若要更换为其他类型,请先移除现有的检测器。" + } + } } diff --git a/web/public/locales/zh-CN/views/system.json b/web/public/locales/zh-CN/views/system.json index 805560be1..6e406674a 100644 --- a/web/public/locales/zh-CN/views/system.json +++ b/web/public/locales/zh-CN/views/system.json @@ -59,7 +59,9 @@ "count": "{{count}} 条消息", "expanded": { "payload": "Payload" - } + }, + "count_one": "{{count}} 条消息", + "count_other": "{{count}} 条消息" } }, "general": { @@ -110,7 +112,8 @@ "description": "这是 Intel 的 GPU 状态报告工具(intel_gpu_top)的已知问题:该工具会失效并反复返回 GPU 使用率为 0%,即使在硬件加速和目标检测已在 (i)GPU 上正常运行的情况下也是如此,这并不是 Frigate 的 bug。你可以通过重启主机来临时修复该问题,并确认 GPU 正常工作。该问题并不会影响性能。" }, "gpuTemperature": "GPU 温度", - "npuTemperature": "NPU 温度" + "npuTemperature": "NPU 温度", + "gpuCompute": "GPU 计算 / 编码" }, "otherProcesses": { "title": "其他进程", @@ -189,7 +192,8 @@ "cameraDetectionsPerSecond": "{{camName}} 每秒检测数", "cameraSkippedDetectionsPerSecond": "{{camName}} 每秒跳过检测数", "cameraFfmpeg": "{{camName}} FFmpeg", - "cameraFramesPerSecond": "{{camName}} 每秒帧数" + "cameraFramesPerSecond": "{{camName}} 每秒帧数", + "cameraGpu": "{{camName}} GPU" }, "toast": { "success": { diff --git a/web/public/locales/zh-Hant/config/cameras.json b/web/public/locales/zh-Hant/config/cameras.json index 0967ef424..8602044aa 100644 --- a/web/public/locales/zh-Hant/config/cameras.json +++ 
b/web/public/locales/zh-Hant/config/cameras.json @@ -1 +1,35 @@ -{} +{ + "name": { + "description": "必須填寫攝影機名稱", + "label": "攝影機名稱" + }, + "label": "攝影機設定", + "friendly_name": { + "label": "顯示名稱", + "description": "攝影機在 Frigate 介面顯示的名稱" + }, + "enabled": { + "label": "已啟用", + "description": "已啟用" + }, + "audio": { + "label": "音訊事件", + "description": "此攝影機的音訊事件偵測設定。", + "enabled": { + "label": "啟用音訊偵測", + "description": "啟用或停用此攝影機的音訊事件偵測。" + }, + "max_not_heard": { + "label": "結束逾時", + "description": "在未偵測到已設定音訊類型的情況下,經過多少秒後視為音訊事件結束。" + }, + "min_volume": { + "label": "最小音量", + "description": "執行音訊偵測所需的最小 RMS 音量門檻;數值越低,敏感度越高(例如:200 高、500 中、1000 低)。" + }, + "listen": { + "label": "監聽的音訊類型", + "description": "要偵測的音訊事件類型清單(例如:狗吠、火警、尖叫、說話、大叫)。" + } + } +} diff --git a/web/public/locales/zh-Hant/config/global.json b/web/public/locales/zh-Hant/config/global.json index 0967ef424..0f254ab83 100644 --- a/web/public/locales/zh-Hant/config/global.json +++ b/web/public/locales/zh-Hant/config/global.json @@ -1 +1,20 @@ -{} +{ + "audio": { + "label": "音訊事件", + "enabled": { + "label": "啟用音訊偵測" + }, + "max_not_heard": { + "label": "結束逾時", + "description": "在未偵測到已設定音訊類型的情況下,經過多少秒後視為音訊事件結束。" + }, + "min_volume": { + "label": "最小音量", + "description": "執行音訊偵測所需的最小 RMS 音量門檻;數值越低,敏感度越高(例如:200 高、500 中、1000 低)。" + }, + "listen": { + "label": "監聽的音訊類型", + "description": "要偵測的音訊事件類型清單(例如:狗吠、火警、尖叫、說話、大叫)。" + } + } +} diff --git a/web/public/locales/zh-Hant/views/classificationModel.json b/web/public/locales/zh-Hant/views/classificationModel.json index 06aabdf5c..796495f69 100644 --- a/web/public/locales/zh-Hant/views/classificationModel.json +++ b/web/public/locales/zh-Hant/views/classificationModel.json @@ -1,9 +1,9 @@ { "toast": { "success": { - "deletedImage": "已刪除的圖片", + "deletedImage_other": "已刪除的圖片", "deletedModel_other": "已成功刪除 {{count}} 個模型", - "deletedCategory": "已刪除分類", + "deletedCategory_other": "已刪除分類", "categorizedImage": "成功分類圖片", "trainedModel": "訓練模型成功。", "trainingModel": 
"已開始模型訓練。", diff --git a/web/src/components/Statusbar.tsx b/web/src/components/Statusbar.tsx index d1035dd60..18a0d9ee1 100644 --- a/web/src/components/Statusbar.tsx +++ b/web/src/components/Statusbar.tsx @@ -116,8 +116,7 @@ export default function Statusbar() { case "amd-vaapi": gpuTitle = "AMD GPU"; break; - case "intel-vaapi": - case "intel-qsv": + case "intel-gpu": gpuTitle = "Intel GPU"; break; case "rockchip": diff --git a/web/src/components/audio/AudioLevelGraph.tsx b/web/src/components/audio/AudioLevelGraph.tsx index 4f0e75722..74c3ce0e6 100644 --- a/web/src/components/audio/AudioLevelGraph.tsx +++ b/web/src/components/audio/AudioLevelGraph.tsx @@ -8,6 +8,7 @@ import { formatUnixTimestampToDateTime } from "@/utils/dateUtil"; import useSWR from "swr"; import { FrigateConfig } from "@/types/frigateConfig"; import { useTranslation } from "react-i18next"; +import { useTimeFormat } from "@/hooks/use-date-utils"; const GRAPH_COLORS = ["#3b82f6", "#ef4444"]; // RMS, dBFS @@ -72,7 +73,7 @@ export function AudioLevelGraph({ cameraName }: AudioLevelGraphProps) { return [last.rms, last.dBFS]; }, [audioData]); - const timeFormat = config?.ui.time_format === "24hour" ? 
"24hour" : "12hour"; + const timeFormat = useTimeFormat(config); const formatString = useMemo( () => t(`time.formattedTimestampHourMinuteSecond.${timeFormat}`, { diff --git a/web/src/components/button/DownloadVideoButton.tsx b/web/src/components/button/DownloadVideoButton.tsx index 607458af4..93a8e1d8a 100644 --- a/web/src/components/button/DownloadVideoButton.tsx +++ b/web/src/components/button/DownloadVideoButton.tsx @@ -7,6 +7,7 @@ import { useTranslation } from "react-i18next"; import useSWR from "swr"; import { FrigateConfig } from "@/types/frigateConfig"; import { useDateLocale } from "@/hooks/use-date-locale"; +import { useTimeFormat } from "@/hooks/use-date-utils"; import { useMemo } from "react"; type DownloadVideoButtonProps = { @@ -26,7 +27,7 @@ export function DownloadVideoButton({ const { data: config } = useSWR("config"); const locale = useDateLocale(); - const timeFormat = config?.ui.time_format === "24hour" ? "24hour" : "12hour"; + const timeFormat = useTimeFormat(config); const format = useMemo(() => { return t(`time.formattedTimestampFilename.${timeFormat}`, { ns: "common" }); }, [t, timeFormat]); diff --git a/web/src/components/card/ReviewCard.tsx b/web/src/components/card/ReviewCard.tsx index 6b8b6bb52..bf256ab5e 100644 --- a/web/src/components/card/ReviewCard.tsx +++ b/web/src/components/card/ReviewCard.tsx @@ -1,5 +1,5 @@ import { baseUrl } from "@/api/baseUrl"; -import { useFormattedTimestamp } from "@/hooks/use-date-utils"; +import { useFormattedTimestamp, use24HourTime } from "@/hooks/use-date-utils"; import { FrigateConfig } from "@/types/frigateConfig"; import { REVIEW_PADDING, ReviewSegment } from "@/types/review"; import { getIconForLabel } from "@/utils/iconUtil"; @@ -55,9 +55,10 @@ export default function ReviewCard({ const { t } = useTranslation(["components/dialog"]); const { data: config } = useSWR("config"); const [imgRef, imgLoaded, onImgLoad] = useImageLoaded(); + const is24Hour = use24HourTime(config); const formattedDate = 
useFormattedTimestamp( event.start_time, - config?.ui.time_format == "24hour" + is24Hour ? t("time.formattedTimestampHourMinute.24hour", { ns: "common" }) : t("time.formattedTimestampHourMinute.12hour", { ns: "common" }), config?.ui.timezone, diff --git a/web/src/components/card/SearchThumbnailFooter.tsx b/web/src/components/card/SearchThumbnailFooter.tsx index 808ad2831..1087a53fb 100644 --- a/web/src/components/card/SearchThumbnailFooter.tsx +++ b/web/src/components/card/SearchThumbnailFooter.tsx @@ -1,7 +1,7 @@ import TimeAgo from "../dynamic/TimeAgo"; import useSWR from "swr"; import { FrigateConfig } from "@/types/frigateConfig"; -import { useFormattedTimestamp } from "@/hooks/use-date-utils"; +import { useFormattedTimestamp, use24HourTime } from "@/hooks/use-date-utils"; import { SearchResult } from "@/types/search"; import ActivityIndicator from "../indicators/activity-indicator"; import SearchResultActions from "../menu/SearchResultActions"; @@ -29,9 +29,10 @@ export default function SearchThumbnailFooter({ const { data: config } = useSWR("config"); // date + const is24Hour = use24HourTime(config); const formattedDate = useFormattedTimestamp( searchResult.start_time, - config?.ui.time_format == "24hour" + is24Hour ? 
t("time.formattedTimestampMonthDayHourMinute.24hour", { ns: "common" }) : t("time.formattedTimestampMonthDayHourMinute.12hour", { ns: "common" }), config?.ui.timezone, diff --git a/web/src/components/chat/ChatMessage.tsx b/web/src/components/chat/ChatMessage.tsx index a644a9d7d..b21fae435 100644 --- a/web/src/components/chat/ChatMessage.tsx +++ b/web/src/components/chat/ChatMessage.tsx @@ -1,4 +1,4 @@ -import { useState, useEffect, useRef } from "react"; +import { useState, useEffect, useRef, useCallback } from "react"; import ReactMarkdown from "react-markdown"; import remarkGfm from "remark-gfm"; import { useTranslation } from "react-i18next"; @@ -6,6 +6,7 @@ import copy from "copy-to-clipboard"; import { toast } from "sonner"; import { FaCopy, FaPencilAlt } from "react-icons/fa"; import { FaArrowUpLong } from "react-icons/fa6"; +import { LuCheck } from "react-icons/lu"; import { Button } from "@/components/ui/button"; import { Textarea } from "@/components/ui/textarea"; import { @@ -50,13 +51,17 @@ export function MessageBubble({ } }, [isEditing]); - const handleCopy = () => { + const [copied, setCopied] = useState(false); + + const handleCopy = useCallback(() => { const text = content?.trim() || ""; if (!text) return; if (copy(text)) { + setCopied(true); toast.success(t("button.copiedToClipboard", { ns: "common" })); + setTimeout(() => setCopied(false), 2000); } - }; + }, [content, t]); const handleEditClick = () => { setDraftContent(content); @@ -93,7 +98,7 @@ export function MessageBubble({ value={draftContent} onChange={(e) => setDraftContent(e.target.value)} onKeyDown={handleEditKeyDown} - className="min-h-[80px] w-full resize-y rounded-lg bg-primary px-3 py-2 text-primary-foreground placeholder:text-primary-foreground/60" + className="min-h-[80px] w-full resize-y rounded-2xl bg-primary px-4 py-3 text-primary-foreground placeholder:text-primary-foreground/60" placeholder={t("placeholder")} rows={3} /> @@ -124,44 +129,49 @@ export function MessageBubble({ 
return (
    {isUser ? ( content ) : ( - ( - - ), - th: ({ node: _n, ...props }) => ( -
    - ), - td: ({ node: _n, ...props }) => ( - - ), - }} - > - {content} - + <> + ( + + ), + th: ({ node: _n, ...props }) => ( +
    + ), + td: ({ node: _n, ...props }) => ( + + ), + }} + > + {content} + + {!isComplete && ( + + )} + )}
    @@ -194,7 +204,11 @@ export function MessageBubble({ disabled={!content?.trim()} aria-label={t("button.copy", { ns: "common" })} > - + {copied ? ( + + ) : ( + + )} diff --git a/web/src/components/chat/ChatStartingState.tsx b/web/src/components/chat/ChatStartingState.tsx index 2d0adaa2f..e6b611bf9 100644 --- a/web/src/components/chat/ChatStartingState.tsx +++ b/web/src/components/chat/ChatStartingState.tsx @@ -22,6 +22,14 @@ export function ChatStartingState({ onSendMessage }: ChatStartingStateProps) { label: t("starting_requests.show_camera_status"), prompt: t("starting_requests_prompts.show_camera_status"), }, + { + label: t("starting_requests.recap"), + prompt: t("starting_requests_prompts.recap"), + }, + { + label: t("starting_requests.watch_camera"), + prompt: t("starting_requests_prompts.watch_camera"), + }, ]; const handleRequestClick = (prompt: string) => { @@ -67,7 +75,7 @@ export function ChatStartingState({ onSendMessage }: ChatStartingStateProps) {
    -
    +
    {open ? ( - + ) : ( - + )} {isLeft ? t("call") : t("result")} {normalizedName} diff --git a/web/src/components/chat/ToolCallsGroup.tsx b/web/src/components/chat/ToolCallsGroup.tsx new file mode 100644 index 000000000..7cb20a1ad --- /dev/null +++ b/web/src/components/chat/ToolCallsGroup.tsx @@ -0,0 +1,103 @@ +import { useMemo, useState } from "react"; +import { useTranslation } from "react-i18next"; +import { + Collapsible, + CollapsibleContent, + CollapsibleTrigger, +} from "@/components/ui/collapsible"; +import { LuChevronsUpDown } from "react-icons/lu"; +import type { ToolCall } from "@/types/chat"; + +type ToolCallsGroupProps = { + toolCalls: ToolCall[]; +}; + +function normalizeName(name: string): string { + return name + .replace(/_/g, " ") + .split(" ") + .map((word) => word.charAt(0).toUpperCase() + word.slice(1).toLowerCase()) + .join(" "); +} + +export function ToolCallsGroup({ toolCalls }: ToolCallsGroupProps) { + const grouped = useMemo(() => { + const map = new Map(); + for (const tc of toolCalls) { + const existing = map.get(tc.name); + if (existing) { + existing.push(tc); + } else { + map.set(tc.name, [tc]); + } + } + return map; + }, [toolCalls]); + + if (toolCalls.length === 0) return null; + + return ( +
    + {[...grouped.entries()].map(([name, calls]) => ( + + ))} +
    + ); +} + +type ToolCallRowProps = { + name: string; + calls: ToolCall[]; +}; + +function ToolCallRow({ name, calls }: ToolCallRowProps) { + const { t } = useTranslation(["views/chat"]); + const [open, setOpen] = useState(false); + const displayName = normalizeName(name); + const label = + calls.length > 1 ? `${displayName} (\u00d7${calls.length})` : displayName; + + return ( + + + {label} + + + +
    + {calls.map((tc, idx) => ( +
    1 + ? "space-y-1 border-l-2 border-border pl-3" + : "space-y-1" + } + > + {tc.arguments && Object.keys(tc.arguments).length > 0 && ( +
    +
    + {t("arguments")} +
    +
    +                    {JSON.stringify(tc.arguments, null, 2)}
    +                  
    +
    + )} + {tc.response && tc.response !== "" && ( +
    +
    + {t("response")} +
    +
    +                    {tc.response}
    +                  
    +
    + )} +
    + ))} +
    +
    +
    + ); +} diff --git a/web/src/components/config-form/ConfigFieldMessage.tsx b/web/src/components/config-form/ConfigFieldMessage.tsx new file mode 100644 index 000000000..5c0a5c505 --- /dev/null +++ b/web/src/components/config-form/ConfigFieldMessage.tsx @@ -0,0 +1,48 @@ +import { useTranslation } from "react-i18next"; +import { Alert, AlertDescription } from "@/components/ui/alert"; +import { LuInfo, LuTriangleAlert, LuCircleAlert } from "react-icons/lu"; +import type { MessageSeverity } from "./section-configs/types"; + +const severityVariantMap: Record< + MessageSeverity, + "info" | "warning" | "destructive" +> = { + info: "info", + warning: "warning", + error: "destructive", +}; + +function SeverityIcon({ severity }: { severity: string }) { + switch (severity) { + case "info": + return ; + case "warning": + return ; + case "error": + return ; + default: + return ; + } +} + +type ConfigFieldMessageProps = { + messageKey: string; + severity: string; +}; + +export function ConfigFieldMessage({ + messageKey, + severity, +}: ConfigFieldMessageProps) { + const { t } = useTranslation("views/settings"); + + return ( + + + {t(messageKey)} + + ); +} diff --git a/web/src/components/config-form/ConfigMessageBanner.tsx b/web/src/components/config-form/ConfigMessageBanner.tsx new file mode 100644 index 000000000..f5b828000 --- /dev/null +++ b/web/src/components/config-form/ConfigMessageBanner.tsx @@ -0,0 +1,52 @@ +import { useTranslation } from "react-i18next"; +import { Alert, AlertDescription } from "@/components/ui/alert"; +import { LuInfo, LuTriangleAlert, LuCircleAlert } from "react-icons/lu"; +import type { + ConditionalMessage, + MessageSeverity, +} from "./section-configs/types"; + +const severityVariantMap: Record< + MessageSeverity, + "info" | "warning" | "destructive" +> = { + info: "info", + warning: "warning", + error: "destructive", +}; + +function SeverityIcon({ severity }: { severity: MessageSeverity }) { + switch (severity) { + case "info": + return ; + 
case "warning": + return ; + case "error": + return ; + } +} + +type ConfigMessageBannerProps = { + messages: ConditionalMessage[]; +}; + +export function ConfigMessageBanner({ messages }: ConfigMessageBannerProps) { + const { t } = useTranslation("views/settings"); + + if (messages.length === 0) return null; + + return ( +
    + {messages.map((msg) => ( + + + {t(msg.messageKey)} + + ))} +
    + ); +} diff --git a/web/src/components/config-form/section-configs/audio.ts b/web/src/components/config-form/section-configs/audio.ts index 740d76f78..31f19e93d 100644 --- a/web/src/components/config-form/section-configs/audio.ts +++ b/web/src/components/config-form/section-configs/audio.ts @@ -3,6 +3,21 @@ import type { SectionConfigOverrides } from "./types"; const audio: SectionConfigOverrides = { base: { sectionDocs: "/configuration/audio_detectors", + messages: [ + { + key: "no-audio-role", + messageKey: "configMessages.audio.noAudioRole", + severity: "warning", + condition: (ctx) => { + if (ctx.level === "camera" && ctx.fullCameraConfig) { + return !ctx.fullCameraConfig.ffmpeg?.inputs?.some((input) => + input.roles?.includes("audio"), + ); + } + return false; + }, + }, + ], restartRequired: [], fieldOrder: [ "enabled", @@ -19,6 +34,16 @@ const audio: SectionConfigOverrides = { hiddenFields: ["enabled_in_config"], advancedFields: ["min_volume", "max_not_heard", "num_threads"], uiSchema: { + filters: { + "ui:options": { + expandable: false, + }, + }, + "filters.*": { + "ui:options": { + additionalPropertyKeyReadonly: true, + }, + }, listen: { "ui:widget": "audioLabels", }, diff --git a/web/src/components/config-form/section-configs/audio_transcription.ts b/web/src/components/config-form/section-configs/audio_transcription.ts index 169a77954..8e8e70d77 100644 --- a/web/src/components/config-form/section-configs/audio_transcription.ts +++ b/web/src/components/config-form/section-configs/audio_transcription.ts @@ -3,6 +3,19 @@ import type { SectionConfigOverrides } from "./types"; const audioTranscription: SectionConfigOverrides = { base: { sectionDocs: "/configuration/audio_detectors#audio-transcription", + messages: [ + { + key: "audio-detection-disabled", + messageKey: "configMessages.audioTranscription.audioDetectionDisabled", + severity: "warning", + condition: (ctx) => { + if (ctx.level === "camera" && ctx.fullCameraConfig) { + return 
ctx.fullCameraConfig.audio.enabled === false; + } + return false; + }, + }, + ], restartRequired: [], fieldOrder: ["enabled", "language", "device", "model_size"], hiddenFields: ["enabled_in_config", "live_enabled"], diff --git a/web/src/components/config-form/section-configs/birdseye.ts b/web/src/components/config-form/section-configs/birdseye.ts index 63fae75d9..d621c9203 100644 --- a/web/src/components/config-form/section-configs/birdseye.ts +++ b/web/src/components/config-form/section-configs/birdseye.ts @@ -3,6 +3,20 @@ import type { SectionConfigOverrides } from "./types"; const birdseye: SectionConfigOverrides = { base: { sectionDocs: "/configuration/birdseye", + messages: [ + { + key: "objects-mode-detect-disabled", + messageKey: "configMessages.birdseye.objectsModeDetectDisabled", + severity: "info", + condition: (ctx) => { + if (ctx.level !== "camera" || !ctx.fullCameraConfig) return false; + return ( + ctx.formData?.mode === "objects" && + ctx.fullCameraConfig.detect?.enabled === false + ); + }, + }, + ], restartRequired: [], fieldOrder: ["enabled", "mode", "order"], hiddenFields: [], diff --git a/web/src/components/config-form/section-configs/detect.ts b/web/src/components/config-form/section-configs/detect.ts index 778620f1c..ef14d13fd 100644 --- a/web/src/components/config-form/section-configs/detect.ts +++ b/web/src/components/config-form/section-configs/detect.ts @@ -3,6 +3,21 @@ import type { SectionConfigOverrides } from "./types"; const detect: SectionConfigOverrides = { base: { sectionDocs: "/configuration/camera_specific", + fieldMessages: [ + { + key: "fps-greater-than-five", + field: "fps", + messageKey: "configMessages.detect.fpsGreaterThanFive", + severity: "info", + position: "after", + condition: (ctx) => { + if (ctx.level !== "camera" || !ctx.fullCameraConfig) return false; + const detectFps = ctx.formData?.fps as number | undefined; + const streamFps = ctx.fullCameraConfig.detect?.fps; + return detectFps != null && streamFps != null && 
detectFps > 5; + }, + }, + ], fieldOrder: [ "enabled", "width", diff --git a/web/src/components/config-form/section-configs/detectors.ts b/web/src/components/config-form/section-configs/detectors.ts index 3ca2dd81d..5cc5a929f 100644 --- a/web/src/components/config-form/section-configs/detectors.ts +++ b/web/src/components/config-form/section-configs/detectors.ts @@ -16,7 +16,7 @@ const detectors: SectionConfigOverrides = { uiSchema: { "ui:field": "DetectorHardwareField", "ui:options": { - multiInstanceTypes: ["cpu", "onnx", "openvino"], + multiInstanceTypes: ["cpu", "onnx", "openvino", "edgetpu"], typeOrder: ["onnx", "openvino", "edgetpu"], hiddenByType: {}, hiddenFields: detectorHiddenFields, diff --git a/web/src/components/config-form/section-configs/face_recognition.ts b/web/src/components/config-form/section-configs/face_recognition.ts index ef9e43506..822f6ffe0 100644 --- a/web/src/components/config-form/section-configs/face_recognition.ts +++ b/web/src/components/config-form/section-configs/face_recognition.ts @@ -3,6 +3,26 @@ import type { SectionConfigOverrides } from "./types"; const faceRecognition: SectionConfigOverrides = { base: { sectionDocs: "/configuration/face_recognition", + messages: [ + { + key: "global-disabled", + messageKey: "configMessages.faceRecognition.globalDisabled", + severity: "warning", + condition: (ctx) => { + if (ctx.level !== "camera") return false; + return ctx.fullConfig.face_recognition?.enabled === false; + }, + }, + { + key: "person-not-tracked", + messageKey: "configMessages.faceRecognition.personNotTracked", + severity: "info", + condition: (ctx) => { + if (ctx.level !== "camera" || !ctx.fullCameraConfig) return false; + return !ctx.fullCameraConfig.objects?.track?.includes("person"); + }, + }, + ], restartRequired: [], fieldOrder: ["enabled", "min_area"], hiddenFields: [], diff --git a/web/src/components/config-form/section-configs/genai.ts b/web/src/components/config-form/section-configs/genai.ts index 
e37478f11..a5f1cd8a3 100644 --- a/web/src/components/config-form/section-configs/genai.ts +++ b/web/src/components/config-form/section-configs/genai.ts @@ -3,16 +3,9 @@ import type { SectionConfigOverrides } from "./types"; const genai: SectionConfigOverrides = { base: { sectionDocs: "/configuration/genai/config", - restartRequired: [ - "*.provider", - "*.api_key", - "*.base_url", - "*.model", - "*.provider_options", - "*.runtime_options", - ], advancedFields: ["*.base_url", "*.provider_options", "*.runtime_options"], hiddenFields: ["genai.enabled_in_config"], + restartRequired: [], uiSchema: { "ui:options": { disableNestedCard: true }, "*": { @@ -37,20 +30,17 @@ const genai: SectionConfigOverrides = { "ui:options": { size: "lg" }, }, "*.model": { + "ui:widget": "genaiModel", "ui:options": { size: "xs" }, }, "*.provider": { "ui:options": { size: "xs" }, }, "*.provider_options": { - additionalProperties: { - "ui:options": { size: "lg" }, - }, + "ui:field": "DictAsYamlField", }, "*.runtime_options": { - additionalProperties: { - "ui:options": { size: "lg" }, - }, + "ui:field": "DictAsYamlField", }, }, }, diff --git a/web/src/components/config-form/section-configs/lpr.ts b/web/src/components/config-form/section-configs/lpr.ts index 514dba9be..4997d766f 100644 --- a/web/src/components/config-form/section-configs/lpr.ts +++ b/web/src/components/config-form/section-configs/lpr.ts @@ -3,6 +3,28 @@ import type { SectionConfigOverrides } from "./types"; const lpr: SectionConfigOverrides = { base: { sectionDocs: "/configuration/license_plate_recognition", + messages: [ + { + key: "global-disabled", + messageKey: "configMessages.lpr.globalDisabled", + severity: "warning", + condition: (ctx) => { + if (ctx.level !== "camera") return false; + return ctx.fullConfig.lpr?.enabled === false; + }, + }, + { + key: "vehicle-not-tracked", + messageKey: "configMessages.lpr.vehicleNotTracked", + severity: "info", + condition: (ctx) => { + if (ctx.level !== "camera" || 
!ctx.fullCameraConfig) return false; + if (ctx.fullCameraConfig.type === "lpr") return false; + const tracked = ctx.fullCameraConfig.objects?.track ?? []; + return !tracked.some((o) => ["car", "motorcycle"].includes(o)); + }, + }, + ], fieldDocs: { enhancement: "/configuration/license_plate_recognition#enhancement", }, diff --git a/web/src/components/config-form/section-configs/objects.ts b/web/src/components/config-form/section-configs/objects.ts index bf5f6c350..e30ddf9d9 100644 --- a/web/src/components/config-form/section-configs/objects.ts +++ b/web/src/components/config-form/section-configs/objects.ts @@ -29,6 +29,11 @@ const objects: SectionConfigOverrides = { ], advancedFields: ["genai"], uiSchema: { + filters: { + "ui:options": { + expandable: false, + }, + }, "filters.*.min_area": { "ui:options": { suppressMultiSchema: true, diff --git a/web/src/components/config-form/section-configs/onvif.ts b/web/src/components/config-form/section-configs/onvif.ts index b8be693d6..c08cd7a58 100644 --- a/web/src/components/config-form/section-configs/onvif.ts +++ b/web/src/components/config-form/section-configs/onvif.ts @@ -3,20 +3,12 @@ import type { SectionConfigOverrides } from "./types"; const onvif: SectionConfigOverrides = { base: { sectionDocs: "/configuration/cameras#setting-up-camera-ptz-controls", - restartRequired: [ - "host", - "port", - "user", - "password", - "tls_insecure", - "ignore_time_mismatch", - "autotracking.calibrate_on_startup", - ], fieldOrder: [ "host", "port", "user", "password", + "profile", "tls_insecure", "ignore_time_mismatch", "autotracking", @@ -27,10 +19,14 @@ const onvif: SectionConfigOverrides = { ], advancedFields: ["tls_insecure", "ignore_time_mismatch"], overrideFields: [], + restartRequired: ["autotracking.calibrate_on_startup"], uiSchema: { host: { "ui:options": { size: "sm" }, }, + profile: { + "ui:widget": "onvifProfile", + }, autotracking: { required_zones: { "ui:widget": "zoneNames", diff --git 
a/web/src/components/config-form/section-configs/record.ts b/web/src/components/config-form/section-configs/record.ts index 9cfc92127..35f3b1ef7 100644 --- a/web/src/components/config-form/section-configs/record.ts +++ b/web/src/components/config-form/section-configs/record.ts @@ -3,6 +3,19 @@ import type { SectionConfigOverrides } from "./types"; const record: SectionConfigOverrides = { base: { sectionDocs: "/configuration/record", + messages: [ + { + key: "no-record-role", + messageKey: "configMessages.record.noRecordRole", + severity: "warning", + condition: (ctx) => { + if (ctx.level !== "camera" || !ctx.fullCameraConfig) return false; + return !ctx.fullCameraConfig.ffmpeg?.inputs?.some((i) => + i.roles?.includes("record"), + ); + }, + }, + ], restartRequired: [], fieldOrder: [ "enabled", @@ -23,7 +36,7 @@ const record: SectionConfigOverrides = { uiSchema: { export: { hwaccel_args: { - "ui:options": { size: "lg" }, + "ui:options": { suppressMultiSchema: true, size: "lg" }, }, }, }, diff --git a/web/src/components/config-form/section-configs/review.ts b/web/src/components/config-form/section-configs/review.ts index 7d7a84756..ce0d7b911 100644 --- a/web/src/components/config-form/section-configs/review.ts +++ b/web/src/components/config-form/section-configs/review.ts @@ -3,14 +3,55 @@ import type { SectionConfigOverrides } from "./types"; const review: SectionConfigOverrides = { base: { sectionDocs: "/configuration/review", + messages: [ + { + key: "record-disabled", + messageKey: "configMessages.review.recordDisabled", + severity: "warning", + condition: (ctx) => { + if (ctx.level === "camera" && ctx.fullCameraConfig) { + return ctx.fullCameraConfig.record.enabled === false; + } + return ctx.fullConfig.record?.enabled === false; + }, + }, + { + key: "detect-disabled", + messageKey: "configMessages.review.detectDisabled", + severity: "info", + condition: (ctx) => { + if (ctx.level === "camera" && ctx.fullCameraConfig) { + return 
ctx.fullCameraConfig.detect?.enabled === false; + } + return false; + }, + }, + ], + fieldMessages: [ + { + key: "detections-all-non-alert", + field: "detections.labels", + messageKey: "configMessages.review.allNonAlertDetections", + severity: "info", + position: "after", + condition: (ctx) => { + const labels = ( + ctx.formData?.detections as Record | undefined + )?.labels; + return !Array.isArray(labels) || labels.length === 0; + }, + }, + ], + fieldDocs: { + "alerts.labels": "/configuration/review/#alerts-and-detections", + "detections.labels": "/configuration/review/#alerts-and-detections", + }, restartRequired: [], - fieldOrder: ["alerts", "detections", "genai"], + fieldOrder: ["alerts", "detections", "genai", "genai.enabled"], fieldGroups: {}, hiddenFields: [ "enabled_in_config", - "alerts.labels", "alerts.enabled_in_config", - "detections.labels", "detections.enabled_in_config", "genai.enabled_in_config", ], @@ -18,20 +59,33 @@ const review: SectionConfigOverrides = { uiSchema: { alerts: { "ui:before": { render: "CameraReviewStatusToggles" }, + labels: { + "ui:widget": "reviewLabels", + "ui:options": { + suppressMultiSchema: true, + }, + }, required_zones: { "ui:widget": "hidden", }, }, detections: { + labels: { + "ui:widget": "reviewLabels", + "ui:options": { + suppressMultiSchema: true, + }, + }, required_zones: { "ui:widget": "hidden", }, }, genai: { additional_concerns: { - "ui:widget": "textarea", + "ui:widget": "ArrayAsTextWidget", "ui:options": { size: "full", + multiline: true, }, }, activity_context_prompt: { diff --git a/web/src/components/config-form/section-configs/snapshots.ts b/web/src/components/config-form/section-configs/snapshots.ts index 126ecd496..7d08cc728 100644 --- a/web/src/components/config-form/section-configs/snapshots.ts +++ b/web/src/components/config-form/section-configs/snapshots.ts @@ -3,6 +3,17 @@ import type { SectionConfigOverrides } from "./types"; const snapshots: SectionConfigOverrides = { base: { sectionDocs: 
"/configuration/snapshots", + messages: [ + { + key: "detect-disabled", + messageKey: "configMessages.snapshots.detectDisabled", + severity: "info", + condition: (ctx) => { + if (ctx.level !== "camera" || !ctx.fullCameraConfig) return false; + return ctx.fullCameraConfig.detect?.enabled === false; + }, + }, + ], restartRequired: [], fieldOrder: [ "enabled", diff --git a/web/src/components/config-form/section-configs/timestamp_style.ts b/web/src/components/config-form/section-configs/timestamp_style.ts index 2f51b2416..e43373c26 100644 --- a/web/src/components/config-form/section-configs/timestamp_style.ts +++ b/web/src/components/config-form/section-configs/timestamp_style.ts @@ -4,12 +4,13 @@ const timestampStyle: SectionConfigOverrides = { base: { sectionDocs: "/configuration/reference", restartRequired: [], - fieldOrder: ["position", "format", "color", "thickness"], + fieldOrder: ["position", "format", "thickness", "color"], hiddenFields: ["effect", "enabled_in_config"], advancedFields: [], uiSchema: { position: { "ui:size": "xs", + "ui:options": { enumI18nPrefix: "timestampPosition" }, }, format: { "ui:size": "xs", @@ -17,7 +18,7 @@ const timestampStyle: SectionConfigOverrides = { }, }, global: { - restartRequired: ["position", "format", "color", "thickness", "effect"], + restartRequired: [], }, camera: { restartRequired: [], diff --git a/web/src/components/config-form/section-configs/types.ts b/web/src/components/config-form/section-configs/types.ts index e2b308e08..9efeb2b32 100644 --- a/web/src/components/config-form/section-configs/types.ts +++ b/web/src/components/config-form/section-configs/types.ts @@ -1,5 +1,39 @@ +import type { FrigateConfig, CameraConfig } from "@/types/frigateConfig"; +import type { ConfigSectionData } from "@/types/configForm"; import type { SectionConfig } from "../sections/BaseSection"; +/** Context provided to message condition functions */ +export type MessageConditionContext = { + fullConfig: FrigateConfig; + fullCameraConfig?: 
CameraConfig; + level: "global" | "camera"; + cameraName?: string; + formData: ConfigSectionData; +}; + +/** Severity levels for conditional messages */ +export type MessageSeverity = "info" | "warning" | "error"; + +/** A conditional message definition */ +export type ConditionalMessage = { + /** Unique key for React list rendering and deduplication */ + key: string; + /** Translation key resolved via t() in the views/settings namespace */ + messageKey: string; + /** Severity level controlling visual styling */ + severity: MessageSeverity; + /** Function returning true when the message should be shown */ + condition: (ctx: MessageConditionContext) => boolean; +}; + +/** Field-level conditional message, adds field targeting */ +export type FieldConditionalMessage = ConditionalMessage & { + /** Dot-separated field path (e.g., "enabled", "alerts.labels") */ + field: string; + /** Whether to render before or after the field (default: "before") */ + position?: "before" | "after"; +}; + export type SectionConfigOverrides = { base?: SectionConfig; global?: Partial; diff --git a/web/src/components/config-form/sectionExtras/NotificationsSettingsExtras.tsx b/web/src/components/config-form/sectionExtras/NotificationsSettingsExtras.tsx index 0f186e105..b97c90448 100644 --- a/web/src/components/config-form/sectionExtras/NotificationsSettingsExtras.tsx +++ b/web/src/components/config-form/sectionExtras/NotificationsSettingsExtras.tsx @@ -43,6 +43,7 @@ import { SelectItem, } from "@/components/ui/select"; import { formatUnixTimestampToDateTime } from "@/utils/dateUtil"; +import { use24HourTime } from "@/hooks/use-date-utils"; import FilterSwitch from "@/components/filter/FilterSwitch"; import { Alert, AlertDescription, AlertTitle } from "@/components/ui/alert"; import { Trans, useTranslation } from "react-i18next"; @@ -752,6 +753,7 @@ export function CameraNotificationSwitch({ }; const locale = useDateLocale(); + const is24Hour = use24HourTime(config); const formatSuspendedUntil 
= (timestamp: string) => { if (timestamp === "0") return t("time.untilForRestart", { ns: "common" }); @@ -760,14 +762,13 @@ export function CameraNotificationSwitch({ time_style: "medium", date_style: "medium", timezone: config?.ui.timezone, - date_format: - config?.ui.time_format == "24hour" - ? t("time.formattedTimestampMonthDayHourMinute.24hour", { - ns: "common", - }) - : t("time.formattedTimestampMonthDayHourMinute.12hour", { - ns: "common", - }), + date_format: is24Hour + ? t("time.formattedTimestampMonthDayHourMinute.24hour", { + ns: "common", + }) + : t("time.formattedTimestampMonthDayHourMinute.12hour", { + ns: "common", + }), locale: locale, }); return t("time.untilForTime", { ns: "common", time }); diff --git a/web/src/components/config-form/sections/BaseSection.tsx b/web/src/components/config-form/sections/BaseSection.tsx index 8ff5daa4f..cc1448703 100644 --- a/web/src/components/config-form/sections/BaseSection.tsx +++ b/web/src/components/config-form/sections/BaseSection.tsx @@ -71,6 +71,13 @@ import { } from "@/utils/configUtil"; import RestartDialog from "@/components/overlay/dialog/RestartDialog"; import { useRestart } from "@/api/ws"; +import type { + ConditionalMessage, + FieldConditionalMessage, + MessageConditionContext, +} from "../section-configs/types"; +import { useConfigMessages } from "@/hooks/use-config-messages"; +import { ConfigMessageBanner } from "../ConfigMessageBanner"; export interface SectionConfig { /** Field ordering within the section */ @@ -100,6 +107,10 @@ export interface SectionConfig { formData: unknown, errors: FormValidation, ) => FormValidation; + /** Conditional messages displayed as banners above the section form */ + messages?: ConditionalMessage[]; + /** Conditional messages displayed inline with specific fields */ + fieldMessages?: FieldConditionalMessage[]; } export interface BaseSectionProps { @@ -152,6 +163,10 @@ export interface BaseSectionProps { profileBorderColor?: string; /** Callback to delete the current 
profile's overrides for this section */ onDeleteProfileSection?: () => void; + /** Whether a SaveAll operation is in progress (disables individual Save) */ + isSavingAll?: boolean; + /** Callback when this section's saving state changes */ + onSavingChange?: (isSaving: boolean) => void; } export interface CreateSectionOptions { @@ -186,6 +201,8 @@ export function ConfigSection({ profileFriendlyName, profileBorderColor, onDeleteProfileSection, + isSavingAll = false, + onSavingChange, }: ConfigSectionProps) { // For replay level, treat as camera-level config access const effectiveLevel = level === "replay" ? "camera" : level; @@ -246,6 +263,7 @@ export function ConfigSection({ [onPendingDataChange, effectiveSectionPath, cameraName], ); const [isSaving, setIsSaving] = useState(false); + const [isResettingToDefault, setIsResettingToDefault] = useState(false); const [hasValidationErrors, setHasValidationErrors] = useState(false); const [extraHasChanges, setExtraHasChanges] = useState(false); const [formKey, setFormKey] = useState(0); @@ -529,6 +547,65 @@ export function ConfigSection({ const currentFormData = pendingData || formData; const effectiveBaselineFormData = baselineSnapshot; + // Build context for conditional messages + const messageContext = useMemo(() => { + if (!config || !currentFormData) return undefined; + return { + fullConfig: config, + fullCameraConfig: + effectiveLevel === "camera" && cameraName + ? 
config.cameras?.[cameraName] + : undefined, + level: effectiveLevel, + cameraName, + formData: currentFormData as ConfigSectionData, + }; + }, [config, currentFormData, effectiveLevel, cameraName]); + + const { activeMessages, activeFieldMessages } = useConfigMessages( + sectionConfig.messages, + sectionConfig.fieldMessages, + messageContext, + ); + + // Merge field-level conditional messages into uiSchema + const effectiveUiSchema = useMemo(() => { + if (activeFieldMessages.length === 0) return sectionConfig.uiSchema; + const merged = { ...(sectionConfig.uiSchema ?? {}) }; + for (const msg of activeFieldMessages) { + const segments = msg.field.split("."); + // Navigate to the nested uiSchema node, shallow-cloning along the way + let node = merged; + for (let i = 0; i < segments.length - 1; i++) { + const seg = segments[i]; + node[seg] = { ...(node[seg] as Record) }; + node = node[seg] as Record; + } + const leafKey = segments[segments.length - 1]; + const existing = node[leafKey] as Record | undefined; + const existingMessages = ((existing?.["ui:messages"] as unknown[]) ?? + []) as Array<{ + key: string; + messageKey: string; + severity: string; + position?: string; + }>; + node[leafKey] = { + ...existing, + "ui:messages": [ + ...existingMessages, + { + key: msg.key, + messageKey: msg.messageKey, + severity: msg.severity, + position: msg.position ?? 
"before", + }, + ], + }; + } + return merged; + }, [sectionConfig.uiSchema, activeFieldMessages]); + const currentOverrides = useMemo(() => { if (!currentFormData || typeof currentFormData !== "object") { return undefined; @@ -577,6 +654,7 @@ export function ConfigSection({ if (!pendingData) return; setIsSaving(true); + onSavingChange?.(true); try { const basePath = effectiveLevel === "camera" && cameraName @@ -659,8 +737,8 @@ export function ConfigSection({ ); } + await refreshConfig(); setPendingData(null); - refreshConfig(); onSave?.(); } catch (error) { // Parse Pydantic validation errors from API response @@ -699,6 +777,7 @@ export function ConfigSection({ } } finally { setIsSaving(false); + onSavingChange?.(false); } }, [ sectionPath, @@ -718,12 +797,14 @@ export function ConfigSection({ setPendingData, requiresRestartForOverrides, skipSave, + onSavingChange, ]); // Handle reset to global/defaults - removes camera-level override or resets global to defaults const handleResetToGlobal = useCallback(async () => { if (effectiveLevel === "camera" && !cameraName) return; + setIsResettingToDefault(true); try { const basePath = effectiveLevel === "camera" && cameraName @@ -758,6 +839,8 @@ export function ConfigSection({ defaultValue: "Failed to reset settings", }), ); + } finally { + setIsResettingToDefault(false); } }, [ effectiveSectionPath, @@ -861,6 +944,7 @@ export function ConfigSection({ const sectionContent = (
    + setIsResetDialogOpen(true)} variant="outline" - disabled={isSaving || disabled} + disabled={isSaving || isResettingToDefault || disabled} className="flex flex-1 gap-2" > + {isResettingToDefault && ( + + )} {effectiveLevel === "global" ? t("button.resetToDefault", { ns: "common", @@ -980,7 +1067,7 @@ export function ConfigSection({ ); } diff --git a/web/src/components/config-form/theme/fields/DetectorHardwareField.tsx b/web/src/components/config-form/theme/fields/DetectorHardwareField.tsx index 871131111..b9e15ced2 100644 --- a/web/src/components/config-form/theme/fields/DetectorHardwareField.tsx +++ b/web/src/components/config-form/theme/fields/DetectorHardwareField.tsx @@ -374,6 +374,18 @@ export function DetectorHardwareField(props: FieldProps) { [detectors], ); + const getExistingType = useCallback( + (excludeKey?: string): string | undefined => { + for (const [key, value] of Object.entries(detectors)) { + if (excludeKey && key === excludeKey) continue; + const type = getInstanceType(value); + if (type) return type; + } + return undefined; + }, + [detectors], + ); + const handleAdd = useCallback(() => { if (!addType) { setAddError( @@ -400,6 +412,28 @@ export function DetectorHardwareField(props: FieldProps) { return; } + const existingType = getExistingType(); + if (existingType && existingType !== addType) { + const canAddExisting = + multiInstanceSet.has(existingType) || + !resolveDuplicateType(existingType); + setAddError( + canAddExisting + ? t("configMessages.detectors.mixedTypesSuggestion", { + ns: "views/settings", + defaultValue: + "All detectors must use the same type. Remove existing detectors or select {{type}}.", + type: getTypeLabel(existingType), + }) + : t("configMessages.detectors.mixedTypes", { + ns: "views/settings", + defaultValue: + "All detectors must use the same type. 
Remove existing detectors to use a different type.", + }), + ); + return; + } + const baseKey = addType; let nextKey = baseKey; let index = 2; @@ -427,8 +461,10 @@ export function DetectorHardwareField(props: FieldProps) { configNamespace, detectors, getDetectorDefaults, + getExistingType, getTypeLabel, isSingleInstanceType, + multiInstanceSet, resolveDuplicateType, updateDetectors, ]); @@ -523,6 +559,29 @@ export function DetectorHardwareField(props: FieldProps) { return; } + const existingType = getExistingType(key); + if (existingType && existingType !== nextType) { + const canAddExisting = + multiInstanceSet.has(existingType) || + !resolveDuplicateType(existingType, key); + setTypeErrors((prev) => ({ + ...prev, + [key]: canAddExisting + ? t("configMessages.detectors.mixedTypesSuggestion", { + ns: "views/settings", + defaultValue: + "All detectors must use the same type. Remove existing detectors or select {{type}}.", + type: getTypeLabel(existingType), + }) + : t("configMessages.detectors.mixedTypes", { + ns: "views/settings", + defaultValue: + "All detectors must use the same type. 
Remove existing detectors to use a different type.", + }), + })); + return; + } + setTypeErrors((prev) => { const { [key]: _, ...rest } = prev; return rest; @@ -538,8 +597,10 @@ export function DetectorHardwareField(props: FieldProps) { [ detectors, getDetectorDefaults, + getExistingType, getTypeLabel, isSingleInstanceType, + multiInstanceSet, resolveDuplicateType, t, updateDetectors, @@ -556,6 +617,10 @@ export function DetectorHardwareField(props: FieldProps) { const nestedOverrides = { "ui:options": { disableNestedCard: true, + addButtonText: t("configForm.detectors.addCustomKey", { + ns: "views/settings", + defaultValue: "Add custom key", + }), }, } as UiSchema; @@ -567,7 +632,7 @@ export function DetectorHardwareField(props: FieldProps) { ); return mergeUiSchema(withTypeHiddenAndOptions, nestedOverrides); }, - [globalHiddenFields, hiddenByType, uiSchema?.additionalProperties], + [globalHiddenFields, hiddenByType, t, uiSchema?.additionalProperties], ); const renderInstanceForm = useCallback( diff --git a/web/src/components/config-form/theme/fields/DictAsYamlField.tsx b/web/src/components/config-form/theme/fields/DictAsYamlField.tsx new file mode 100644 index 000000000..ff1145cfc --- /dev/null +++ b/web/src/components/config-form/theme/fields/DictAsYamlField.tsx @@ -0,0 +1,122 @@ +import type { FieldPathList, FieldProps } from "@rjsf/utils"; +import yaml from "js-yaml"; +import { Textarea } from "@/components/ui/textarea"; +import { cn } from "@/lib/utils"; +import { useCallback, useEffect, useMemo, useRef, useState } from "react"; + +function formatYaml(value: unknown): string { + if ( + value == null || + (typeof value === "object" && + !Array.isArray(value) && + Object.keys(value as Record).length === 0) + ) { + return ""; + } + try { + return yaml.dump(value, { indent: 2, lineWidth: -1 }).trimEnd(); + } catch { + return ""; + } +} + +function parseYaml(text: string): { + value: Record | undefined; + error: string | undefined; +} { + const trimmed = 
text.trim(); + if (trimmed === "") { + return { value: {}, error: undefined }; + } + try { + const parsed = yaml.load(trimmed); + if ( + typeof parsed !== "object" || + parsed === null || + Array.isArray(parsed) + ) { + return { value: undefined, error: "Must be a YAML mapping" }; + } + return { value: parsed as Record, error: undefined }; + } catch (e) { + const msg = e instanceof yaml.YAMLException ? e.reason : "Invalid YAML"; + return { value: undefined, error: msg }; + } +} + +export function DictAsYamlField(props: FieldProps) { + const { formData, onChange, readonly, disabled, idSchema, schema } = props; + + const emptyPath = useMemo(() => [] as FieldPathList, []); + const fieldPath = + (props as { fieldPathId?: { path?: FieldPathList } }).fieldPathId?.path ?? + emptyPath; + + const [text, setText] = useState(() => formatYaml(formData)); + const [error, setError] = useState(); + const focusedRef = useRef(false); + + useEffect(() => { + // Only sync from external formData changes, not our own onChange + if (!focusedRef.current) { + setText(formatYaml(formData)); + setError(undefined); + } + }, [formData]); + + const handleChange = useCallback( + (e: React.ChangeEvent) => { + const raw = e.target.value; + setText(raw); + const { value, error: parseError } = parseYaml(raw); + setError(parseError); + if (value !== undefined) { + onChange(value, fieldPath); + } + }, + [onChange, fieldPath], + ); + + const handleFocus = useCallback(() => { + focusedRef.current = true; + }, []); + + const handleBlur = useCallback( + (_e: React.FocusEvent) => { + focusedRef.current = false; + // Reformat on blur if valid + const { value } = parseYaml(text); + if (value !== undefined) { + setText(formatYaml(value)); + } + }, + [text], + ); + + const id = idSchema?.$id ?? props.name; + + return ( +
    + {schema.title && ( + + )} +